diff options
author | Campbell Barton <ideasman42@gmail.com> | 2013-08-25 20:16:38 +0400 |
---|---|---|
committer | Campbell Barton <ideasman42@gmail.com> | 2013-08-25 20:16:38 +0400 |
commit | e2bd3a4644aa0317356e801df1a2fda1d998df43 (patch) | |
tree | 245ea3f28ce772f4821a23b595d61e54a1cd7617 /source/blender/blenlib/intern | |
parent | 28243b2e5fd272d12c4349c7998b39893c20628c (diff) |
clearing the mempool can now keep more than a single element reserved.
Diffstat (limited to 'source/blender/blenlib/intern')
-rw-r--r-- | source/blender/blenlib/intern/BLI_ghash.c | 36 | ||||
-rw-r--r-- | source/blender/blenlib/intern/BLI_mempool.c | 102 | ||||
-rw-r--r-- | source/blender/blenlib/intern/edgehash.c | 14 |
3 files changed, 107 insertions, 45 deletions
diff --git a/source/blender/blenlib/intern/BLI_ghash.c b/source/blender/blenlib/intern/BLI_ghash.c index ba492db4dbf..5f42940ce1a 100644 --- a/source/blender/blenlib/intern/BLI_ghash.c +++ b/source/blender/blenlib/intern/BLI_ghash.c @@ -96,6 +96,16 @@ BLI_INLINE bool ghash_test_expand_buckets(const unsigned int nentries, const uns } /** + * Increase initial bucket size to match a reserved ammount. + */ +BLI_INLINE void ghash_buckets_reserve(GHash *gh, const unsigned int nentries_reserve) +{ + while (ghash_test_expand_buckets(nentries_reserve, gh->nbuckets)) { + gh->nbuckets = hashsizes[++gh->cursize]; + } +} + +/** * Get the hash for a key. */ BLI_INLINE unsigned int ghash_keyhash(GHash *gh, const void *key) @@ -197,9 +207,7 @@ GHash *BLI_ghash_new_ex(GHashHashFP hashfp, GHashCmpFP cmpfp, const char *info, /* if we have reserved the number of elements that this hash will contain */ if (nentries_reserve) { - while (ghash_test_expand_buckets(nentries_reserve, gh->nbuckets)) { - gh->nbuckets = hashsizes[++gh->cursize]; - } + ghash_buckets_reserve(gh, nentries_reserve); } gh->buckets = MEM_callocN(gh->nbuckets * sizeof(*gh->buckets), "buckets"); @@ -375,8 +383,10 @@ bool BLI_ghash_haskey(GHash *gh, const void *key) * * \param keyfreefp Optional callback to free the key. * \param valfreefp Optional callback to free the value. + * \param nentries_reserve Optionally reserve the number of members that the hash will hold. 
*/ -void BLI_ghash_clear(GHash *gh, GHashKeyFreeFP keyfreefp, GHashValFreeFP valfreefp) +void BLI_ghash_clear_ex(GHash *gh, GHashKeyFreeFP keyfreefp, GHashValFreeFP valfreefp, + const unsigned int nentries_reserve) { unsigned int i; @@ -395,14 +405,26 @@ void BLI_ghash_clear(GHash *gh, GHashKeyFreeFP keyfreefp, GHashValFreeFP valfree } } - gh->cursize = 0; + gh->nbuckets = hashsizes[0]; /* gh->cursize */ gh->nentries = 0; - gh->nbuckets = hashsizes[gh->cursize]; + gh->cursize = 0; + + if (nentries_reserve) { + ghash_buckets_reserve(gh, nentries_reserve); + } MEM_freeN(gh->buckets); gh->buckets = MEM_callocN(gh->nbuckets * sizeof(*gh->buckets), "buckets"); - BLI_mempool_clear(gh->entrypool); + BLI_mempool_clear_ex(gh->entrypool, nentries_reserve ? (int)nentries_reserve : -1); +} + +/** + * Wraps #BLI_ghash_clear_ex with zero entries reserved. + */ +void BLI_ghash_clear(GHash *gh, GHashKeyFreeFP keyfreefp, GHashValFreeFP valfreefp) +{ + BLI_ghash_clear_ex(gh, keyfreefp, valfreefp, 0); } /** diff --git a/source/blender/blenlib/intern/BLI_mempool.c b/source/blender/blenlib/intern/BLI_mempool.c index 8a05ade76ef..1c3c1cd8d7a 100644 --- a/source/blender/blenlib/intern/BLI_mempool.c +++ b/source/blender/blenlib/intern/BLI_mempool.c @@ -87,6 +87,7 @@ struct BLI_mempool { /* keeps aligned to 16 bits */ BLI_freenode *free; /* free element list. Interleaved into chunk datas. */ + int maxchunks; /* use to know how many chunks to keep for BLI_mempool_clear */ int totused; /* number of elements currently in use */ #ifdef USE_TOTALLOC int totalloc; /* number of elements allocated in total */ @@ -101,6 +102,14 @@ struct BLI_mempool { # define CHUNK_DATA(chunk) (CHECK_TYPE_INLINE(chunk, BLI_mempool_chunk *), (void *)((chunk) + 1)) #endif +/** + * \return the number of chunks to allocate based on how many elements are needed. 
+ */ +BLI_INLINE int mempool_maxchunks(const int totelem, const int pchunk) +{ + return totelem / pchunk + 1; +} + static BLI_mempool_chunk *mempool_chunk_alloc(BLI_mempool *pool) { BLI_mempool_chunk *mpchunk; @@ -183,35 +192,37 @@ static BLI_freenode *mempool_chunk_add(BLI_mempool *pool, BLI_mempool_chunk *mpc return curnode; } -static void mempool_chunk_free_all(BLI_mempool *pool) +static void mempool_chunk_free(BLI_mempool_chunk *mpchunk, const int flag) { - BLI_mempool_chunk *mpchunk, *mpchunk_next; - - if (pool->flag & BLI_MEMPOOL_SYSMALLOC) { - for (mpchunk = pool->chunks.first; mpchunk; mpchunk = mpchunk_next) { - mpchunk_next = mpchunk->next; + if (flag & BLI_MEMPOOL_SYSMALLOC) { #ifdef USE_DATA_PTR - free(CHUNK_DATA(mpchunk)); + free(CHUNK_DATA(mpchunk)); #endif - free(mpchunk); - } + free(mpchunk); } else { - for (mpchunk = pool->chunks.first; mpchunk; mpchunk = mpchunk_next) { - mpchunk_next = mpchunk->next; #ifdef USE_DATA_PTR - MEM_freeN(CHUNK_DATA(mpchunk)); + MEM_freeN(CHUNK_DATA(mpchunk)); #endif - MEM_freeN(mpchunk); - } + MEM_freeN(mpchunk); } - pool->chunks.first = pool->chunks.last = NULL; +} + +static void mempool_chunk_free_all(ListBase *chunks, const int flag) +{ + BLI_mempool_chunk *mpchunk, *mpchunk_next; + + for (mpchunk = chunks->first; mpchunk; mpchunk = mpchunk_next) { + mpchunk_next = mpchunk->next; + mempool_chunk_free(mpchunk, flag); + } + chunks->first = chunks->last = NULL; } BLI_mempool *BLI_mempool_create(int esize, int totelem, int pchunk, int flag) { BLI_mempool *pool = NULL; - BLI_freenode *lasttail = NULL, *curnode = NULL; + BLI_freenode *lasttail = NULL; int i, maxchunks; /* allocate the pool structure */ @@ -234,29 +245,23 @@ BLI_mempool *BLI_mempool_create(int esize, int totelem, int pchunk, int flag) pool->esize = esize; } + maxchunks = mempool_maxchunks(totelem, pchunk); + pool->flag = flag; pool->pchunk = pchunk; pool->csize = esize * pchunk; pool->chunks.first = pool->chunks.last = NULL; pool->free = NULL; /* 
mempool_chunk_add assigns */ + pool->maxchunks = maxchunks; #ifdef USE_TOTALLOC pool->totalloc = 0; #endif pool->totused = 0; - maxchunks = totelem / pchunk + 1; - if (maxchunks == 0) { - maxchunks = 1; - } - /* allocate the actual chunks */ for (i = 0; i < maxchunks; i++) { BLI_mempool_chunk *mpchunk = mempool_chunk_alloc(pool); - - curnode = mempool_chunk_add(pool, mpchunk, lasttail); - - /* set the end of this chunks memory to the new tail for next iteration */ - lasttail = curnode; + lasttail = mempool_chunk_add(pool, mpchunk, lasttail); } return pool; @@ -338,7 +343,7 @@ void BLI_mempool_free(BLI_mempool *pool, void *addr) BLI_mempool_chunk *first = pool->chunks.first; BLI_remlink(&pool->chunks, first); - mempool_chunk_free_all(pool); + mempool_chunk_free_all(&pool->chunks, pool->flag); BLI_addtail(&pool->chunks, first); #ifdef USE_TOTALLOC pool->totalloc = pool->pchunk; @@ -504,22 +509,49 @@ void *BLI_mempool_iterstep(BLI_mempool_iter *iter) #endif -void BLI_mempool_clear(BLI_mempool *pool) +void BLI_mempool_clear_ex(BLI_mempool *pool, const int totelem_reserve) { - BLI_mempool_chunk *first = pool->chunks.first; + BLI_mempool_chunk *mpchunk; + BLI_mempool_chunk *mpchunk_next; + int maxchunks; + + ListBase chunks_temp; + BLI_freenode *lasttail = NULL; + + if (totelem_reserve == -1) { + maxchunks = pool->maxchunks; + } + else { + maxchunks = mempool_maxchunks(totelem_reserve, pool->pchunk); + } - BLI_remlink(&pool->chunks, first); + /* free all after pool->maxchunks */ - mempool_chunk_free_all(pool); + for (mpchunk = BLI_findlink(&pool->chunks, maxchunks); mpchunk; mpchunk = mpchunk_next) { + mpchunk_next = mpchunk->next; + BLI_remlink(&pool->chunks, mpchunk); + mempool_chunk_free(mpchunk, pool->flag); + } - /* important for re-initializing */ + /* re-initialize */ + pool->free = NULL; pool->totused = 0; #ifdef USE_TOTALLOC pool->totalloc = 0; #endif - pool->free = NULL; - mempool_chunk_add(pool, first, NULL); + chunks_temp = pool->chunks; + pool->chunks.first 
= pool->chunks.last = NULL; + + while ((mpchunk = chunks_temp.first)) { + BLI_remlink(&chunks_temp, mpchunk); + lasttail = mempool_chunk_add(pool, mpchunk, lasttail); + } +} + +void BLI_mempool_clear(BLI_mempool *pool) +{ + return BLI_mempool_clear_ex(pool, -1); } /** @@ -527,7 +559,7 @@ void BLI_mempool_clear(BLI_mempool *pool) */ void BLI_mempool_destroy(BLI_mempool *pool) { - mempool_chunk_free_all(pool); + mempool_chunk_free_all(&pool->chunks, pool->flag); if (pool->flag & BLI_MEMPOOL_SYSMALLOC) { free(pool); diff --git a/source/blender/blenlib/intern/edgehash.c b/source/blender/blenlib/intern/edgehash.c index cae23356782..cf1405e8f01 100644 --- a/source/blender/blenlib/intern/edgehash.c +++ b/source/blender/blenlib/intern/edgehash.c @@ -90,6 +90,16 @@ BLI_INLINE bool edgehash_test_expand_buckets(const unsigned int nentries, const return (nentries > nbuckets * 3); } +/** + * Increase initial bucket size to match a reserved ammount. + */ +BLI_INLINE void edgehash_buckets_reserve(EdgeHash *eh, const unsigned int nentries_reserve) +{ + while (edgehash_test_expand_buckets(nentries_reserve, eh->nbuckets)) { + eh->nbuckets = _ehash_hashsizes[++eh->cursize]; + } +} + BLI_INLINE unsigned int edgehash_keyhash(EdgeHash *eh, unsigned int v0, unsigned int v1) { BLI_assert(v0 < v1); @@ -176,9 +186,7 @@ EdgeHash *BLI_edgehash_new_ex(const char *info, /* if we have reserved the number of elements that this hash will contain */ if (nentries_reserve) { - while (edgehash_test_expand_buckets(nentries_reserve, eh->nbuckets)) { - eh->nbuckets = _ehash_hashsizes[++eh->cursize]; - } + edgehash_buckets_reserve(eh, nentries_reserve); } eh->buckets = MEM_callocN(eh->nbuckets * sizeof(*eh->buckets), "eh buckets 2"); |