8 #ifndef Sawyer_PoolAllocator_H
9 #define Sawyer_PoolAllocator_H
11 #include <boost/version.hpp>
12 #include <boost/foreach.hpp>
13 #include <boost/static_assert.hpp>
14 #include <boost/cstdint.hpp>
16 #include <Sawyer/Assert.h>
17 #include <Sawyer/Interval.h>
18 #include <Sawyer/IntervalMap.h>
19 #include <Sawyer/Sawyer.h>
20 #include <Sawyer/Synchronization.h>
// Class template for the pool allocator.  The template parameters select the
// geometry of the allocator's size-class pools:
//   smallestCell - size in bytes served by pool 0 (see poolNumber's formula below)
//   sizeDelta    - size step in bytes between successive pools
//   nPools       - number of distinct cell-size pools
//   chunkSize    - bytes per chunk from which cells are carved
//   Sync         - synchronization policy tag (multi- vs single-threaded)
// NOTE(review): the class-head line itself is missing from this excerpt;
// confirm the class name and inheritance against the full header.
59 template<
size_t smallestCell,
size_t sizeDelta,
size_t nPools,
size_t chunkSize,
typename Sync>
// Compile-time constants re-exposing the template parameters, plus the fixed
// number of free lists each pool maintains (multiple lists reduce mutex
// contention between threads; see the per-list freeListMutexes_ below).
62 enum { SMALLEST_CELL = smallestCell };
63 enum { SIZE_DELTA = sizeDelta };
64 enum { N_POOLS = nPools };
65 enum { CHUNK_SIZE = chunkSize };
66 enum { N_FREE_LISTS = 32 };
// A link in a singly linked list of available cells.  The link is stored
// inside the free cell's own storage, which is why every cell must be at
// least sizeof(FreeCell) bytes (asserted in fill()).
71 struct FreeCell { FreeCell *next; };
// Raw storage for one chunk; cells are carved out of this array by fill().
// A chunk must be large enough to hold at least one free-list link.
// NOTE(review): the enclosing Chunk class head is missing from this excerpt.
80 unsigned char data_[chunkSize];
82 BOOST_STATIC_ASSERT(chunkSize >=
sizeof(FreeCell));
// Fragment of Chunk::fill(cellSize): threads this chunk's storage into a
// singly linked free list of fixed-size cells and (per its use at the call
// site `chunk->fill(cellSize_)`) returns the FreeCell* head of that list.
// The cell size must be able to hold a free-list link and fit in one chunk.
85 ASSERT_require(
cellSize >=
sizeof(FreeCell));
86 ASSERT_require(
cellSize <= chunkSize);
87 FreeCell *retval = NULL;
// Walk cells last-to-first so the resulting list is in ascending address
// order.  `n` (the cell count) is computed on a line missing from this
// excerpt -- presumably chunkSize / cellSize; confirm in the full source.
89 for (
size_t i=n; i>0; --i) {
90 FreeCell *cell =
reinterpret_cast<FreeCell*
>(data_+(i-1)*
cellSize);
// NOTE(review): lines 91-98 of the original are missing here; the cast below
// appears to belong to a *different* member (the chunk's address extent(),
// covering data_ through data_+chunkSize-1) -- confirm against full source.
99 reinterpret_cast<boost::uint64_t
>(data_+chunkSize-1));
// Default: refers to no chunk, with zero cells in use.
110 ChunkInfo(): chunk(NULL), nUsed(0) {}
// Describes `chunk` with `nUsed` of its cells currently allocated.
111 ChunkInfo(
const Chunk *chunk,
size_t nUsed): chunk(chunk), nUsed(nUsed) {}
// Equal iff both refer to the same chunk with the same in-use count.
112 bool operator==(
const ChunkInfo &other)
const {
113 return chunk==other.chunk && nUsed==other.nUsed;
// RAII guard that acquires all N_FREE_LISTS free-list mutexes (in ascending
// index order, tracked by nLocked_) and, per the destructor's unlock below,
// evidently also the chunk-list mutex.  The destructor releases the free-list
// mutexes in reverse order, then the chunk mutex.
// NOTE(review): the nLocked_ member declaration, the chunkMutex_.lock() call,
// and the closing braces fall on lines missing from this excerpt.
122 class LockEverything {
123 SAWYER_THREAD_TRAITS::Mutex *freeListMutexes_, &chunkMutex_;
126 LockEverything(SAWYER_THREAD_TRAITS::Mutex *freeListMutexes, SAWYER_THREAD_TRAITS::Mutex &chunkMutex)
127 : freeListMutexes_(freeListMutexes), chunkMutex_(chunkMutex), nLocked_(0) {
128 while (nLocked_ < N_FREE_LISTS) {
129 freeListMutexes_[nLocked_].lock();
// Destructor fragment: unlock in reverse acquisition order.
136 while (nLocked_ > 0) {
137 freeListMutexes_[nLocked_-1].unlock();
140 chunkMutex_.unlock();
// Pool state.  Each free list has its own mutex so concurrent allocations can
// proceed on different lists; chunkMutex_ protects the chunk list and is
// mutable because const members (e.g. isEmpty) must lock it.
154 SAWYER_THREAD_TRAITS::Mutex freeListMutexes_[N_FREE_LISTS];
155 FreeCell *freeLists_[N_FREE_LISTS];
159 mutable SAWYER_THREAD_TRAITS::Mutex chunkMutex_;
160 std::list<Chunk*> chunks_;
// Constructor: cellSize_ starts at zero (set later, presumably by an init()
// on lines missing from this excerpt); all free lists start empty.
166 Pool(): cellSize_(0) {
167 memset(freeLists_, 0,
sizeof freeLists_);
// NOTE(review): the excerpt jumps here (original lines 168-177 missing); the
// assert below -- checking cellSize_ is not yet set -- and the chunk-list
// iteration (destructor cleanup? loop body missing) belong to other members.
171 assert(cellSize_ == 0);
178 for (
typename std::list<Chunk*>::iterator ci=chunks_.begin(); ci!=chunks_.end(); ++ci)
// True iff this pool owns no chunks.  Thread-safe: locks chunkMutex_.
182 bool isEmpty()
const {
183 SAWYER_THREAD_TRAITS::LockGuard lock(chunkMutex_);
184 return chunks_.empty();
// Fragment of Pool::aquire() [sic]: pops one cell from the free list chosen
// by freeListIdx (its computation -- presumably a per-thread or hashed index
// -- is on a line missing from this excerpt).  If that list is empty, a new
// Chunk is allocated, carved into cells, and registered in chunks_.
190 SAWYER_THREAD_TRAITS::LockGuard lock(freeListMutexes_[freeListIdx]);
191 if (!freeLists_[freeListIdx]) {
192 Chunk *chunk =
new Chunk;
193 freeLists_[freeListIdx] = chunk->fill(cellSize_);
// Inner guard (shadows the outer `lock`) protects the shared chunk list.
194 SAWYER_THREAD_TRAITS::LockGuard lock(chunkMutex_);
195 chunks_.push_back(chunk);
// Unlink and return (on a missing line) the head cell.
197 ASSERT_not_null(freeLists_[freeListIdx]);
198 FreeCell *cell = freeLists_[freeListIdx];
199 freeLists_[freeListIdx] = freeLists_[freeListIdx]->next;
// Returns a cell to this pool by pushing it onto the head of a free list.
// The cell's storage is reused in place as the FreeCell link.
// NOTE(review): the computation of freeListIdx (original line 206) is missing
// from this excerpt.
205 void release(
void *cell) {
207 SAWYER_THREAD_TRAITS::LockGuard lock(freeListMutexes_[freeListIdx]);
208 ASSERT_not_null(cell);
209 FreeCell *freedCell =
reinterpret_cast<FreeCell*
>(cell);
210 freedCell->next = freeLists_[freeListIdx];
211 freeLists_[freeListIdx] = freedCell;
// Fragment of a helper that builds `map`, an interval map keyed by chunk
// address extent: each chunk starts recorded as fully used (capacity =
// chunkSize / cellSize_), then every free-list cell is looked up (the lookup
// producing `found`, and the nUsed decrement, are on missing lines) so that
// nUsed ends up reflecting actual in-use cells.
217 BOOST_FOREACH (
const Chunk* chunk, chunks_)
218 map.
insert(chunk->extent(), ChunkInfo(chunk, chunkSize / cellSize_));
219 for (
size_t freeListIdx = 0; freeListIdx < N_FREE_LISTS; ++freeListIdx) {
220 for (FreeCell *cell=freeLists_[freeListIdx]; cell!=NULL; cell=cell->next) {
// Sanity: every free-list cell must lie inside some known chunk, and a chunk
// can never have more free cells than its capacity.
222 ASSERT_require2(found!=map.
values().end(),
"each freelist item must be some chunk cell");
223 ASSERT_require2(found->nUsed > 0,
"freelist must be consistent with chunk capacities");
// Ensures at least nObjects free cells exist in this pool, allocating whole
// chunks as needed and distributing the new cells round-robin across the
// free lists.  All mutexes are held for the duration.
// NOTE(review): several interior lines (the nFree counter, early return, and
// outer/inner loop heads around the chunk carve-up) are missing from this
// excerpt -- confirm control flow against the full source.
231 void reserve(
size_t nObjects) {
232 LockEverything guard(freeListMutexes_, chunkMutex_);
// First count cells already free (accumulation line missing).
234 for (
size_t freeListIdx = 0; freeListIdx < N_FREE_LISTS; ++freeListIdx) {
235 for (FreeCell *cell = freeLists_[freeListIdx]; cell != NULL; cell = cell->next) {
237 if (nFree >= nObjects)
243 size_t nNeeded = nObjects - nFree;
244 const size_t cellsPerChunk = chunkSize / cellSize_;
// Allocate one chunk at a time and scatter its cells over the free lists.
247 Chunk *chunk =
new Chunk;
248 FreeCell *newCells = chunk->fill(cellSize_);
249 chunks_.push_back(chunk);
253 FreeCell *cell = newCells;
254 newCells = cell->next;
255 cell->next = freeLists_[freeListIdx];
256 freeLists_[freeListIdx] = cell;
257 if (++freeListIdx >= N_FREE_LISTS)
261 if (nNeeded < cellsPerChunk)
// Fragment of Pool::vacuum(): releases chunks that contain no allocated
// cells.  Using the chunk-usage map `map` (built on lines missing from this
// excerpt), the free lists are rebuilt keeping only cells whose chunk is
// still partly used, then fully-free chunks are removed from chunks_ (their
// deletion presumably occurs on missing line 301).
270 LockEverything guard(freeListMutexes_, chunkMutex_);
// Rebuild free lists, scattering retained cells round-robin for balance.
276 FreeCell *newFreeLists[N_FREE_LISTS];
277 memset(newFreeLists, 0,
sizeof newFreeLists);
278 size_t newFreeListIdx = 0;
279 for (
size_t oldFreeListIdx=0; oldFreeListIdx<N_FREE_LISTS; ++oldFreeListIdx) {
280 FreeCell *next = NULL;
281 for (FreeCell *cell = freeLists_[oldFreeListIdx]; cell != NULL; cell = next) {
283 boost::uint64_t cellAddr =
reinterpret_cast<boost::uint64_t
>(cell);
// Keep only cells belonging to chunks that still have allocated cells.
284 if (map[cellAddr].nUsed != 0) {
286 cell->next = newFreeLists[newFreeListIdx];
287 newFreeLists[newFreeListIdx] = cell;
288 if (++newFreeListIdx >= N_FREE_LISTS)
293 memcpy(freeLists_, newFreeLists,
sizeof newFreeLists);
// Erase (and presumably delete) chunks whose usage count dropped to zero.
296 typename std::list<Chunk*>::iterator iter = chunks_.begin();
297 while (iter!=chunks_.end()) {
298 Chunk *chunk = *iter;
299 boost::uint64_t cellAddr = chunk->extent().least();
300 if (map[cellAddr].nUsed == 0) {
302 iter = chunks_.erase(iter);
// Prints one line per chunk ("chunk PTR  used/capacity = pct%") to `out` and
// accumulates (and presumably returns, on a missing line) the total number of
// cells in use.  const_cast is needed because LockEverything takes non-const
// mutexes while this member is const.
// NOTE(review): `cim` (the chunk-info map) and `totalUsed` are initialized on
// lines missing from this excerpt.
309 size_t showInfo(std::ostream &out)
const {
312 LockEverything guard(
const_cast<SAWYER_THREAD_TRAITS::Mutex*
>(freeListMutexes_), chunkMutex_);
316 const size_t nCells = chunkSize / cellSize_;
318 BOOST_FOREACH (
const ChunkInfo &info, cim.
values()) {
319 out <<
" chunk " <<info.chunk <<
"\t" <<info.nUsed <<
"/" <<
nCells <<
"\t= " <<100.0*info.nUsed/
nCells <<
"%\n";
320 totalUsed += info.nUsed;
// Returns a pair of per-pool counts; per the caller below, .second is the
// reserved (total) cell count -- .first is presumably cells in use.  The
// accumulation over the chunk-info map `cim` is on lines missing from this
// excerpt.  const_cast for the same reason as in showInfo().
325 std::pair<size_t, size_t>
nAllocated()
const {
328 LockEverything guard(
const_cast<SAWYER_THREAD_TRAITS::Mutex*
>(freeListMutexes_), chunkMutex_);
332 const size_t nCells = chunkSize / cellSize_;
335 BOOST_FOREACH (
const ChunkInfo &info, cim.
values())
// Fragment of allocator construction: allocate the array of size-class pools
// and initialize each (the per-pool init call body is on a missing line).
349 pools_ =
new Pool[nPools];
350 for (
size_t i=0; i<nPools; ++i)
// Maps an object size (bytes) to a pool index: pool 0 serves sizes up to
// smallestCell; each subsequent pool serves the next sizeDelta bytes
// (rounded up).  Sizes too large for any pool yield an index >= nPools,
// which callers (allocate/deallocate below) treat as "not pooled".
394 return size <= smallestCell ? 0 : (size - smallestCell + sizeDelta - 1) / sizeDelta;
// Fragment of allocate(size): sizes that map to a pool (pn computed on a
// missing line via poolNumber) are served from that pool's free lists;
// oversized requests fall back to the global operator new.
// NOTE(review): "aquire" [sic] -- presumably matches the Pool member's
// spelling declared outside this excerpt; confirm before "fixing".
423 ASSERT_require(size>0);
425 return pn < nPools ? pools_[pn].aquire() : ::operator
new(size);
// Pre-allocates capacity for nObjects objects of objectSize bytes in the
// corresponding pool (pn computed on a missing line; presumably guarded
// against the not-pooled case on missing lines 437-439).
435 void reserve(
size_t objectSize,
size_t nObjects) {
436 ASSERT_always_require(objectSize > 0);
440 pools_[pn].reserve(nObjects);
// Fragment: sums per-pool allocation statistics across all pools.  Only the
// reserved-count (.second) accumulation is visible; the in-use (.first)
// accumulation is presumably on missing line 454.
452 for (
size_t pn=0; pn<nPools; ++pn) {
453 std::pair<size_t, size_t> pp = pools_[pn].nAllocated();
455 nReserved += pp.second;
// Fragment of deallocate(addr, size): pooled sizes return the cell to their
// pool's free list; oversized allocations came from the global operator new
// (see allocate) and are returned to the global operator delete.
471 ASSERT_require(size>0);
474 pools_[pn].release(addr);
476 ::operator
delete(addr);
// Fragment: applies a per-pool operation (loop body on a missing line;
// presumably pools_[pn].vacuum()) to every pool.
489 for (
size_t pn=0; pn<nPools; ++pn)
// Fragment of the allocator-level showInfo: for each non-empty pool, prints
// a header with the pool's cell size, delegates per-chunk detail to
// Pool::showInfo, and reports that pool's in-use total.
499 for (
size_t pn=0; pn<nPools; ++pn) {
500 if (!pools_[pn].isEmpty()) {
501 out <<
" pool #" <<pn <<
"; cellSize = " <<
cellSize(pn) <<
" bytes:\n";
502 size_t nUsed = pools_[pn].showInfo(out);
503 out <<
" total objects in use: " <<nUsed <<
"\n";