git.blender.org/blender.git
Diffstat (limited to 'source/blender/blenlib/intern/BLI_mempool.c')
-rw-r--r--  source/blender/blenlib/intern/BLI_mempool.c  |  805
1 file changed, 399 insertions(+), 406 deletions(-)
diff --git a/source/blender/blenlib/intern/BLI_mempool.c b/source/blender/blenlib/intern/BLI_mempool.c
index 24e3f22172e..0f65f8ecbf0 100644
--- a/source/blender/blenlib/intern/BLI_mempool.c
+++ b/source/blender/blenlib/intern/BLI_mempool.c
@@ -40,7 +40,7 @@
#include "MEM_guardedalloc.h"
-#include "BLI_strict_flags.h" /* keep last */
+#include "BLI_strict_flags.h" /* keep last */
#ifdef WITH_MEM_VALGRIND
# include "valgrind/memcheck.h"
@@ -49,16 +49,16 @@
/* note: copied from BLO_blend_defs.h, don't use here because we're in BLI */
#ifdef __BIG_ENDIAN__
/* Big Endian */
-# define MAKE_ID(a, b, c, d) ( (int)(a) << 24 | (int)(b) << 16 | (c) << 8 | (d) )
+# define MAKE_ID(a, b, c, d) ((int)(a) << 24 | (int)(b) << 16 | (c) << 8 | (d))
# define MAKE_ID_8(a, b, c, d, e, f, g, h) \
- ((int64_t)(a) << 56 | (int64_t)(b) << 48 | (int64_t)(c) << 40 | (int64_t)(d) << 32 | \
- (int64_t)(e) << 24 | (int64_t)(f) << 16 | (int64_t)(g) << 8 | (h) )
+ ((int64_t)(a) << 56 | (int64_t)(b) << 48 | (int64_t)(c) << 40 | (int64_t)(d) << 32 | \
+ (int64_t)(e) << 24 | (int64_t)(f) << 16 | (int64_t)(g) << 8 | (h))
#else
/* Little Endian */
-# define MAKE_ID(a, b, c, d) ( (int)(d) << 24 | (int)(c) << 16 | (b) << 8 | (a) )
+# define MAKE_ID(a, b, c, d) ((int)(d) << 24 | (int)(c) << 16 | (b) << 8 | (a))
# define MAKE_ID_8(a, b, c, d, e, f, g, h) \
- ((int64_t)(h) << 56 | (int64_t)(g) << 48 | (int64_t)(f) << 40 | (int64_t)(e) << 32 | \
- (int64_t)(d) << 24 | (int64_t)(c) << 16 | (int64_t)(b) << 8 | (a) )
+ ((int64_t)(h) << 56 | (int64_t)(g) << 48 | (int64_t)(f) << 40 | (int64_t)(e) << 32 | \
+ (int64_t)(d) << 24 | (int64_t)(c) << 16 | (int64_t)(b) << 8 | (a))
#endif
/**
@@ -66,9 +66,9 @@
* So having a pointer to 2/4/8... aligned memory is enough to ensure the freeword will never be used.
 * To be safe, use a word that's the same in both directions.
*/
-#define FREEWORD ((sizeof(void *) > sizeof(int32_t)) ? \
- MAKE_ID_8('e', 'e', 'r', 'f', 'f', 'r', 'e', 'e') : \
- MAKE_ID('e', 'f', 'f', 'e'))
+#define FREEWORD \
+ ((sizeof(void *) > sizeof(int32_t)) ? MAKE_ID_8('e', 'e', 'r', 'f', 'f', 'r', 'e', 'e') : \
+ MAKE_ID('e', 'f', 'f', 'e'))
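
As a hedged illustration of how this sentinel is consumed (the helper below is hypothetical and not part of BLI_mempool.c): a slot whose first word equals FREEWORD is sitting on the free list, which is exactly the test the iterator skip and the double-free assert rely on.

/* Hypothetical helper, for illustration only: a slot is free when its first
 * word holds the FREEWORD sentinel. */
BLI_INLINE bool mempool_elem_is_free(const BLI_freenode *node)
{
  return node->freeword == FREEWORD;
}
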
/**
* The 'used' word just needs to be set to something besides FREEWORD.
@@ -81,7 +81,6 @@
/* optimize pool size */
#define USE_CHUNK_POW2
-
#ifndef NDEBUG
static bool mempool_debug_memset = false;
#endif
@@ -93,9 +92,9 @@ static bool mempool_debug_memset = false;
* Each element represents a block which BLI_mempool_alloc may return.
*/
typedef struct BLI_freenode {
- struct BLI_freenode *next;
- /** Used to identify this as a freed node. */
- intptr_t freeword;
+ struct BLI_freenode *next;
+ /** Used to identify this as a freed node. */
+ intptr_t freeword;
} BLI_freenode;
/**
@@ -103,37 +102,37 @@ typedef struct BLI_freenode {
* #BLI_mempool.chunks as a double linked list.
*/
typedef struct BLI_mempool_chunk {
- struct BLI_mempool_chunk *next;
+ struct BLI_mempool_chunk *next;
} BLI_mempool_chunk;
/**
* The mempool, stores and tracks memory \a chunks and elements within those chunks \a free.
*/
struct BLI_mempool {
- /** Single linked list of allocated chunks. */
- BLI_mempool_chunk *chunks;
- /** Keep a pointer to the last, so we can append new chunks there
- * this is needed for iteration so we can loop over chunks in the order added. */
- BLI_mempool_chunk *chunk_tail;
-
- /** Element size in bytes. */
- uint esize;
- /** Chunk size in bytes. */
- uint csize;
- /** Number of elements per chunk. */
- uint pchunk;
- uint flag;
- /* keeps aligned to 16 bits */
-
- /** Free element list. Interleaved into chunk datas. */
- BLI_freenode *free;
- /** Use to know how many chunks to keep for #BLI_mempool_clear. */
- uint maxchunks;
- /** Number of elements currently in use. */
- uint totused;
+ /** Single linked list of allocated chunks. */
+ BLI_mempool_chunk *chunks;
+ /** Keep a pointer to the last, so we can append new chunks there
+ * this is needed for iteration so we can loop over chunks in the order added. */
+ BLI_mempool_chunk *chunk_tail;
+
+ /** Element size in bytes. */
+ uint esize;
+ /** Chunk size in bytes. */
+ uint csize;
+ /** Number of elements per chunk. */
+ uint pchunk;
+ uint flag;
+ /* keeps aligned to 16 bits */
+
+ /** Free element list. Interleaved into chunk datas. */
+ BLI_freenode *free;
+ /** Use to know how many chunks to keep for #BLI_mempool_clear. */
+ uint maxchunks;
+ /** Number of elements currently in use. */
+ uint totused;
#ifdef USE_TOTALLOC
- /** Number of elements allocated in total. */
- uint totalloc;
+ /** Number of elements allocated in total. */
+ uint totalloc;
#endif
};
@@ -141,8 +140,8 @@ struct BLI_mempool {
#define CHUNK_DATA(chunk) (CHECK_TYPE_INLINE(chunk, BLI_mempool_chunk *), (void *)((chunk) + 1))
-#define NODE_STEP_NEXT(node) ((void *)((char *)(node) + esize))
-#define NODE_STEP_PREV(node) ((void *)((char *)(node) - esize))
+#define NODE_STEP_NEXT(node) ((void *)((char *)(node) + esize))
+#define NODE_STEP_PREV(node) ((void *)((char *)(node)-esize))
/** Extra bytes implicitly used for every chunk alloc. */
#define CHUNK_OVERHEAD (uint)(MEM_SIZE_OVERHEAD + sizeof(BLI_mempool_chunk))
@@ -150,22 +149,22 @@ struct BLI_mempool {
#ifdef USE_CHUNK_POW2
static uint power_of_2_max_u(uint x)
{
- x -= 1;
- x = x | (x >> 1);
- x = x | (x >> 2);
- x = x | (x >> 4);
- x = x | (x >> 8);
- x = x | (x >> 16);
- return x + 1;
+ x -= 1;
+ x = x | (x >> 1);
+ x = x | (x >> 2);
+ x = x | (x >> 4);
+ x = x | (x >> 8);
+ x = x | (x >> 16);
+ return x + 1;
}
#endif
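
A few worked values for the rounding helper above, as a sketch (the numbers are chosen here for illustration and do not appear in the commit):

/* Illustration only: power_of_2_max_u() rounds up to the next power of two,
 * and exact powers of two pass through unchanged. */
static void power_of_2_max_u_example(void)
{
  BLI_assert(power_of_2_max_u(2500) == 4096);
  BLI_assert(power_of_2_max_u(4096) == 4096);
  BLI_assert(power_of_2_max_u(1) == 1);
}
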
BLI_INLINE BLI_mempool_chunk *mempool_chunk_find(BLI_mempool_chunk *head, uint index)
{
- while (index-- && head) {
- head = head->next;
- }
- return head;
+ while (index-- && head) {
+ head = head->next;
+ }
+ return head;
}
/**
@@ -176,12 +175,12 @@ BLI_INLINE BLI_mempool_chunk *mempool_chunk_find(BLI_mempool_chunk *head, uint i
*/
BLI_INLINE uint mempool_maxchunks(const uint totelem, const uint pchunk)
{
- return (totelem <= pchunk) ? 1 : ((totelem / pchunk) + 1);
+ return (totelem <= pchunk) ? 1 : ((totelem / pchunk) + 1);
}
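
Worked examples for the chunk-count calculation above (element and chunk counts picked purely for illustration):

/* Illustration only: how many chunks are reserved for a given element count. */
static void mempool_maxchunks_example(void)
{
  BLI_assert(mempool_maxchunks(100, 512) == 1);  /* fits in a single chunk */
  BLI_assert(mempool_maxchunks(1000, 512) == 2); /* (1000 / 512) + 1 */
  BLI_assert(mempool_maxchunks(1024, 512) == 3); /* exact multiples still reserve a spare chunk */
}
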
static BLI_mempool_chunk *mempool_chunk_alloc(BLI_mempool *pool)
{
- return MEM_mallocN(sizeof(BLI_mempool_chunk) + (size_t)pool->csize, "BLI_Mempool Chunk");
+ return MEM_mallocN(sizeof(BLI_mempool_chunk) + (size_t)pool->csize, "BLI_Mempool Chunk");
}
/**
@@ -193,179 +192,177 @@ static BLI_mempool_chunk *mempool_chunk_alloc(BLI_mempool *pool)
* (used when building free chunks initially)
* \return The last chunk,
*/
-static BLI_freenode *mempool_chunk_add(
- BLI_mempool *pool, BLI_mempool_chunk *mpchunk,
- BLI_freenode *last_tail)
+static BLI_freenode *mempool_chunk_add(BLI_mempool *pool,
+ BLI_mempool_chunk *mpchunk,
+ BLI_freenode *last_tail)
{
- const uint esize = pool->esize;
- BLI_freenode *curnode = CHUNK_DATA(mpchunk);
- uint j;
-
- /* append */
- if (pool->chunk_tail) {
- pool->chunk_tail->next = mpchunk;
- }
- else {
- BLI_assert(pool->chunks == NULL);
- pool->chunks = mpchunk;
- }
-
- mpchunk->next = NULL;
- pool->chunk_tail = mpchunk;
-
- if (UNLIKELY(pool->free == NULL)) {
- pool->free = curnode;
- }
-
- /* loop through the allocated data, building the pointer structures */
- j = pool->pchunk;
- if (pool->flag & BLI_MEMPOOL_ALLOW_ITER) {
- while (j--) {
- curnode->next = NODE_STEP_NEXT(curnode);
- curnode->freeword = FREEWORD;
- curnode = curnode->next;
- }
- }
- else {
- while (j--) {
- curnode->next = NODE_STEP_NEXT(curnode);
- curnode = curnode->next;
- }
- }
-
- /* terminate the list (rewind one)
- * will be overwritten if 'curnode' gets passed in again as 'last_tail' */
- curnode = NODE_STEP_PREV(curnode);
- curnode->next = NULL;
+ const uint esize = pool->esize;
+ BLI_freenode *curnode = CHUNK_DATA(mpchunk);
+ uint j;
+
+ /* append */
+ if (pool->chunk_tail) {
+ pool->chunk_tail->next = mpchunk;
+ }
+ else {
+ BLI_assert(pool->chunks == NULL);
+ pool->chunks = mpchunk;
+ }
+
+ mpchunk->next = NULL;
+ pool->chunk_tail = mpchunk;
+
+ if (UNLIKELY(pool->free == NULL)) {
+ pool->free = curnode;
+ }
+
+ /* loop through the allocated data, building the pointer structures */
+ j = pool->pchunk;
+ if (pool->flag & BLI_MEMPOOL_ALLOW_ITER) {
+ while (j--) {
+ curnode->next = NODE_STEP_NEXT(curnode);
+ curnode->freeword = FREEWORD;
+ curnode = curnode->next;
+ }
+ }
+ else {
+ while (j--) {
+ curnode->next = NODE_STEP_NEXT(curnode);
+ curnode = curnode->next;
+ }
+ }
+
+ /* terminate the list (rewind one)
+ * will be overwritten if 'curnode' gets passed in again as 'last_tail' */
+ curnode = NODE_STEP_PREV(curnode);
+ curnode->next = NULL;
#ifdef USE_TOTALLOC
- pool->totalloc += pool->pchunk;
+ pool->totalloc += pool->pchunk;
#endif
- /* final pointer in the previously allocated chunk is wrong */
- if (last_tail) {
- last_tail->next = CHUNK_DATA(mpchunk);
- }
+ /* final pointer in the previously allocated chunk is wrong */
+ if (last_tail) {
+ last_tail->next = CHUNK_DATA(mpchunk);
+ }
- return curnode;
+ return curnode;
}
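
A schematic of what mempool_chunk_add leaves behind, read directly from the loop above (layout only, no new API):

/* One chunk after mempool_chunk_add() (schematic):
 *
 *   [BLI_mempool_chunk header][elem 0][elem 1] ... [elem pchunk-1]
 *                                |       ^             |
 *                                +-next--+    ...      next = NULL, or re-linked
 *                                                      to the first element of
 *                                                      the next chunk via 'last_tail'
 *
 * With BLI_MEMPOOL_ALLOW_ITER each element additionally gets freeword = FREEWORD.
 */
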
static void mempool_chunk_free(BLI_mempool_chunk *mpchunk)
{
- MEM_freeN(mpchunk);
+ MEM_freeN(mpchunk);
}
static void mempool_chunk_free_all(BLI_mempool_chunk *mpchunk)
{
- BLI_mempool_chunk *mpchunk_next;
+ BLI_mempool_chunk *mpchunk_next;
- for (; mpchunk; mpchunk = mpchunk_next) {
- mpchunk_next = mpchunk->next;
- mempool_chunk_free(mpchunk);
- }
+ for (; mpchunk; mpchunk = mpchunk_next) {
+ mpchunk_next = mpchunk->next;
+ mempool_chunk_free(mpchunk);
+ }
}
-BLI_mempool *BLI_mempool_create(
- uint esize, uint totelem,
- uint pchunk, uint flag)
+BLI_mempool *BLI_mempool_create(uint esize, uint totelem, uint pchunk, uint flag)
{
- BLI_mempool *pool;
- BLI_freenode *last_tail = NULL;
- uint i, maxchunks;
+ BLI_mempool *pool;
+ BLI_freenode *last_tail = NULL;
+ uint i, maxchunks;
- /* allocate the pool structure */
- pool = MEM_mallocN(sizeof(BLI_mempool), "memory pool");
+ /* allocate the pool structure */
+ pool = MEM_mallocN(sizeof(BLI_mempool), "memory pool");
- /* set the elem size */
- if (esize < (int)MEMPOOL_ELEM_SIZE_MIN) {
- esize = (int)MEMPOOL_ELEM_SIZE_MIN;
- }
+ /* set the elem size */
+ if (esize < (int)MEMPOOL_ELEM_SIZE_MIN) {
+ esize = (int)MEMPOOL_ELEM_SIZE_MIN;
+ }
- if (flag & BLI_MEMPOOL_ALLOW_ITER) {
- esize = MAX2(esize, (uint)sizeof(BLI_freenode));
- }
+ if (flag & BLI_MEMPOOL_ALLOW_ITER) {
+ esize = MAX2(esize, (uint)sizeof(BLI_freenode));
+ }
- maxchunks = mempool_maxchunks(totelem, pchunk);
+ maxchunks = mempool_maxchunks(totelem, pchunk);
- pool->chunks = NULL;
- pool->chunk_tail = NULL;
- pool->esize = esize;
+ pool->chunks = NULL;
+ pool->chunk_tail = NULL;
+ pool->esize = esize;
- /* Optimize chunk size to powers of 2, accounting for slop-space. */
+ /* Optimize chunk size to powers of 2, accounting for slop-space. */
#ifdef USE_CHUNK_POW2
- {
- BLI_assert(power_of_2_max_u(pchunk * esize) > CHUNK_OVERHEAD);
- pchunk = (power_of_2_max_u(pchunk * esize) - CHUNK_OVERHEAD) / esize;
- }
+ {
+ BLI_assert(power_of_2_max_u(pchunk * esize) > CHUNK_OVERHEAD);
+ pchunk = (power_of_2_max_u(pchunk * esize) - CHUNK_OVERHEAD) / esize;
+ }
#endif
- pool->csize = esize * pchunk;
+ pool->csize = esize * pchunk;
- /* Ensure this is a power of 2, minus the rounding by element size. */
+ /* Ensure this is a power of 2, minus the rounding by element size. */
#if defined(USE_CHUNK_POW2) && !defined(NDEBUG)
- {
- uint final_size = (uint)MEM_SIZE_OVERHEAD + (uint)sizeof(BLI_mempool_chunk) + pool->csize;
- BLI_assert(((uint)power_of_2_max_u(final_size) - final_size) < pool->esize);
- }
+ {
+ uint final_size = (uint)MEM_SIZE_OVERHEAD + (uint)sizeof(BLI_mempool_chunk) + pool->csize;
+ BLI_assert(((uint)power_of_2_max_u(final_size) - final_size) < pool->esize);
+ }
#endif
- pool->pchunk = pchunk;
- pool->flag = flag;
- pool->free = NULL; /* mempool_chunk_add assigns */
- pool->maxchunks = maxchunks;
+ pool->pchunk = pchunk;
+ pool->flag = flag;
+ pool->free = NULL; /* mempool_chunk_add assigns */
+ pool->maxchunks = maxchunks;
#ifdef USE_TOTALLOC
- pool->totalloc = 0;
+ pool->totalloc = 0;
#endif
- pool->totused = 0;
+ pool->totused = 0;
- if (totelem) {
- /* Allocate the actual chunks. */
- for (i = 0; i < maxchunks; i++) {
- BLI_mempool_chunk *mpchunk = mempool_chunk_alloc(pool);
- last_tail = mempool_chunk_add(pool, mpchunk, last_tail);
- }
- }
+ if (totelem) {
+ /* Allocate the actual chunks. */
+ for (i = 0; i < maxchunks; i++) {
+ BLI_mempool_chunk *mpchunk = mempool_chunk_alloc(pool);
+ last_tail = mempool_chunk_add(pool, mpchunk, last_tail);
+ }
+ }
#ifdef WITH_MEM_VALGRIND
- VALGRIND_CREATE_MEMPOOL(pool, 0, false);
+ VALGRIND_CREATE_MEMPOOL(pool, 0, false);
#endif
- return pool;
+ return pool;
}
void *BLI_mempool_alloc(BLI_mempool *pool)
{
- BLI_freenode *free_pop;
+ BLI_freenode *free_pop;
- if (UNLIKELY(pool->free == NULL)) {
- /* Need to allocate a new chunk. */
- BLI_mempool_chunk *mpchunk = mempool_chunk_alloc(pool);
- mempool_chunk_add(pool, mpchunk, NULL);
- }
+ if (UNLIKELY(pool->free == NULL)) {
+ /* Need to allocate a new chunk. */
+ BLI_mempool_chunk *mpchunk = mempool_chunk_alloc(pool);
+ mempool_chunk_add(pool, mpchunk, NULL);
+ }
- free_pop = pool->free;
+ free_pop = pool->free;
- BLI_assert(pool->chunk_tail->next == NULL);
+ BLI_assert(pool->chunk_tail->next == NULL);
- if (pool->flag & BLI_MEMPOOL_ALLOW_ITER) {
- free_pop->freeword = USEDWORD;
- }
+ if (pool->flag & BLI_MEMPOOL_ALLOW_ITER) {
+ free_pop->freeword = USEDWORD;
+ }
- pool->free = free_pop->next;
- pool->totused++;
+ pool->free = free_pop->next;
+ pool->totused++;
#ifdef WITH_MEM_VALGRIND
- VALGRIND_MEMPOOL_ALLOC(pool, free_pop, pool->esize);
+ VALGRIND_MEMPOOL_ALLOC(pool, free_pop, pool->esize);
#endif
- return (void *)free_pop;
+ return (void *)free_pop;
}
void *BLI_mempool_calloc(BLI_mempool *pool)
{
- void *retval = BLI_mempool_alloc(pool);
- memset(retval, 0, (size_t)pool->esize);
- return retval;
+ void *retval = BLI_mempool_alloc(pool);
+ memset(retval, 0, (size_t)pool->esize);
+ return retval;
}
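
A minimal usage sketch of the allocation API above; the element type MyElem and the pool sizes are hypothetical, not taken from Blender.

/* Hypothetical element type, for illustration only. */
typedef struct MyElem {
  int id;
  float co[3];
} MyElem;

static void mempool_usage_example(void)
{
  /* 512 elements per chunk, nothing pre-allocated, iteration enabled. */
  BLI_mempool *pool = BLI_mempool_create(sizeof(MyElem), 0, 512, BLI_MEMPOOL_ALLOW_ITER);

  MyElem *a = BLI_mempool_alloc(pool);  /* uninitialized element */
  MyElem *b = BLI_mempool_calloc(pool); /* zero-filled element */
  (void)b;

  BLI_mempool_free(pool, a);  /* returns the slot to the pool's free list */
  BLI_mempool_destroy(pool);  /* releases every chunk and the pool itself */
}
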
/**
@@ -375,107 +372,105 @@ void *BLI_mempool_calloc(BLI_mempool *pool)
*/
void BLI_mempool_free(BLI_mempool *pool, void *addr)
{
- BLI_freenode *newhead = addr;
+ BLI_freenode *newhead = addr;
#ifndef NDEBUG
- {
- BLI_mempool_chunk *chunk;
- bool found = false;
- for (chunk = pool->chunks; chunk; chunk = chunk->next) {
- if (ARRAY_HAS_ITEM((char *)addr, (char *)CHUNK_DATA(chunk), pool->csize)) {
- found = true;
- break;
- }
- }
- if (!found) {
- BLI_assert(!"Attempt to free data which is not in pool.\n");
- }
- }
-
- /* Enable for debugging. */
- if (UNLIKELY(mempool_debug_memset)) {
- memset(addr, 255, pool->esize);
- }
+ {
+ BLI_mempool_chunk *chunk;
+ bool found = false;
+ for (chunk = pool->chunks; chunk; chunk = chunk->next) {
+ if (ARRAY_HAS_ITEM((char *)addr, (char *)CHUNK_DATA(chunk), pool->csize)) {
+ found = true;
+ break;
+ }
+ }
+ if (!found) {
+ BLI_assert(!"Attempt to free data which is not in pool.\n");
+ }
+ }
+
+ /* Enable for debugging. */
+ if (UNLIKELY(mempool_debug_memset)) {
+ memset(addr, 255, pool->esize);
+ }
#endif
- if (pool->flag & BLI_MEMPOOL_ALLOW_ITER) {
+ if (pool->flag & BLI_MEMPOOL_ALLOW_ITER) {
#ifndef NDEBUG
- /* This will detect double free's. */
- BLI_assert(newhead->freeword != FREEWORD);
+ /* This will detect double free's. */
+ BLI_assert(newhead->freeword != FREEWORD);
#endif
- newhead->freeword = FREEWORD;
- }
+ newhead->freeword = FREEWORD;
+ }
- newhead->next = pool->free;
- pool->free = newhead;
+ newhead->next = pool->free;
+ pool->free = newhead;
- pool->totused--;
+ pool->totused--;
#ifdef WITH_MEM_VALGRIND
- VALGRIND_MEMPOOL_FREE(pool, addr);
+ VALGRIND_MEMPOOL_FREE(pool, addr);
#endif
- /* Nothing is in use; free all the chunks except the first. */
- if (UNLIKELY(pool->totused == 0) &&
- (pool->chunks->next))
- {
- const uint esize = pool->esize;
- BLI_freenode *curnode;
- uint j;
- BLI_mempool_chunk *first;
+ /* Nothing is in use; free all the chunks except the first. */
+ if (UNLIKELY(pool->totused == 0) && (pool->chunks->next)) {
+ const uint esize = pool->esize;
+ BLI_freenode *curnode;
+ uint j;
+ BLI_mempool_chunk *first;
- first = pool->chunks;
- mempool_chunk_free_all(first->next);
- first->next = NULL;
- pool->chunk_tail = first;
+ first = pool->chunks;
+ mempool_chunk_free_all(first->next);
+ first->next = NULL;
+ pool->chunk_tail = first;
#ifdef USE_TOTALLOC
- pool->totalloc = pool->pchunk;
+ pool->totalloc = pool->pchunk;
#endif
- /* Temp alloc so valgrind doesn't complain when setting free'd blocks 'next'. */
+ /* Temp alloc so valgrind doesn't complain when setting free'd blocks 'next'. */
#ifdef WITH_MEM_VALGRIND
- VALGRIND_MEMPOOL_ALLOC(pool, CHUNK_DATA(first), pool->csize);
+ VALGRIND_MEMPOOL_ALLOC(pool, CHUNK_DATA(first), pool->csize);
#endif
- curnode = CHUNK_DATA(first);
- pool->free = curnode;
+ curnode = CHUNK_DATA(first);
+ pool->free = curnode;
- j = pool->pchunk;
- while (j--) {
- curnode->next = NODE_STEP_NEXT(curnode);
- curnode = curnode->next;
- }
- curnode = NODE_STEP_PREV(curnode);
- curnode->next = NULL; /* terminate the list */
+ j = pool->pchunk;
+ while (j--) {
+ curnode->next = NODE_STEP_NEXT(curnode);
+ curnode = curnode->next;
+ }
+ curnode = NODE_STEP_PREV(curnode);
+ curnode->next = NULL; /* terminate the list */
#ifdef WITH_MEM_VALGRIND
- VALGRIND_MEMPOOL_FREE(pool, CHUNK_DATA(first));
+ VALGRIND_MEMPOOL_FREE(pool, CHUNK_DATA(first));
#endif
- }
+ }
}
int BLI_mempool_len(BLI_mempool *pool)
{
- return (int)pool->totused;
+ return (int)pool->totused;
}
void *BLI_mempool_findelem(BLI_mempool *pool, uint index)
{
- BLI_assert(pool->flag & BLI_MEMPOOL_ALLOW_ITER);
-
- if (index < pool->totused) {
- /* We could have some faster mem chunk stepping code inline. */
- BLI_mempool_iter iter;
- void *elem;
- BLI_mempool_iternew(pool, &iter);
- for (elem = BLI_mempool_iterstep(&iter); index-- != 0; elem = BLI_mempool_iterstep(&iter)) {
- /* pass */
- }
- return elem;
- }
-
- return NULL;
+ BLI_assert(pool->flag & BLI_MEMPOOL_ALLOW_ITER);
+
+ if (index < pool->totused) {
+ /* We could have some faster mem chunk stepping code inline. */
+ BLI_mempool_iter iter;
+ void *elem;
+ BLI_mempool_iternew(pool, &iter);
+ for (elem = BLI_mempool_iterstep(&iter); index-- != 0; elem = BLI_mempool_iterstep(&iter)) {
+ /* pass */
+ }
+ return elem;
+ }
+
+ return NULL;
}
/**
@@ -487,15 +482,15 @@ void *BLI_mempool_findelem(BLI_mempool *pool, uint index)
*/
void BLI_mempool_as_table(BLI_mempool *pool, void **data)
{
- BLI_mempool_iter iter;
- void *elem;
- void **p = data;
- BLI_assert(pool->flag & BLI_MEMPOOL_ALLOW_ITER);
- BLI_mempool_iternew(pool, &iter);
- while ((elem = BLI_mempool_iterstep(&iter))) {
- *p++ = elem;
- }
- BLI_assert((uint)(p - data) == pool->totused);
+ BLI_mempool_iter iter;
+ void *elem;
+ void **p = data;
+ BLI_assert(pool->flag & BLI_MEMPOOL_ALLOW_ITER);
+ BLI_mempool_iternew(pool, &iter);
+ while ((elem = BLI_mempool_iterstep(&iter))) {
+ *p++ = elem;
+ }
+ BLI_assert((uint)(p - data) == pool->totused);
}
/**
@@ -503,9 +498,9 @@ void BLI_mempool_as_table(BLI_mempool *pool, void **data)
*/
void **BLI_mempool_as_tableN(BLI_mempool *pool, const char *allocstr)
{
- void **data = MEM_mallocN((size_t)pool->totused * sizeof(void *), allocstr);
- BLI_mempool_as_table(pool, data);
- return data;
+ void **data = MEM_mallocN((size_t)pool->totused * sizeof(void *), allocstr);
+ BLI_mempool_as_table(pool, data);
+ return data;
}
/**
@@ -513,16 +508,16 @@ void **BLI_mempool_as_tableN(BLI_mempool *pool, const char *allocstr)
*/
void BLI_mempool_as_array(BLI_mempool *pool, void *data)
{
- const uint esize = pool->esize;
- BLI_mempool_iter iter;
- char *elem, *p = data;
- BLI_assert(pool->flag & BLI_MEMPOOL_ALLOW_ITER);
- BLI_mempool_iternew(pool, &iter);
- while ((elem = BLI_mempool_iterstep(&iter))) {
- memcpy(p, elem, (size_t)esize);
- p = NODE_STEP_NEXT(p);
- }
- BLI_assert((uint)(p - (char *)data) == pool->totused * esize);
+ const uint esize = pool->esize;
+ BLI_mempool_iter iter;
+ char *elem, *p = data;
+ BLI_assert(pool->flag & BLI_MEMPOOL_ALLOW_ITER);
+ BLI_mempool_iternew(pool, &iter);
+ while ((elem = BLI_mempool_iterstep(&iter))) {
+ memcpy(p, elem, (size_t)esize);
+ p = NODE_STEP_NEXT(p);
+ }
+ BLI_assert((uint)(p - (char *)data) == pool->totused * esize);
}
/**
@@ -530,9 +525,9 @@ void BLI_mempool_as_array(BLI_mempool *pool, void *data)
*/
void *BLI_mempool_as_arrayN(BLI_mempool *pool, const char *allocstr)
{
- char *data = MEM_mallocN((size_t)(pool->totused * pool->esize), allocstr);
- BLI_mempool_as_array(pool, data);
- return data;
+ char *data = MEM_mallocN((size_t)(pool->totused * pool->esize), allocstr);
+ BLI_mempool_as_array(pool, data);
+ return data;
}
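
A short sketch of the flattening helpers above; it reuses the hypothetical MyElem type from the earlier sketch, and the allocation string is illustrative.

static void mempool_as_array_example(BLI_mempool *pool)
{
  /* Copies every live element into one freshly allocated, tightly packed array. */
  MyElem *arr = BLI_mempool_as_arrayN(pool, "example: mempool as array");
  /* ... use arr[0 .. BLI_mempool_len(pool) - 1] ... */
  MEM_freeN(arr);
}
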
/**
@@ -540,13 +535,13 @@ void *BLI_mempool_as_arrayN(BLI_mempool *pool, const char *allocstr)
*/
void BLI_mempool_iternew(BLI_mempool *pool, BLI_mempool_iter *iter)
{
- BLI_assert(pool->flag & BLI_MEMPOOL_ALLOW_ITER);
+ BLI_assert(pool->flag & BLI_MEMPOOL_ALLOW_ITER);
- iter->pool = pool;
- iter->curchunk = pool->chunks;
- iter->curindex = 0;
+ iter->pool = pool;
+ iter->curchunk = pool->chunks;
+ iter->curindex = 0;
- iter->curchunk_threaded_shared = NULL;
+ iter->curchunk_threaded_shared = NULL;
}
/**
@@ -562,31 +557,32 @@ void BLI_mempool_iternew(BLI_mempool *pool, BLI_mempool_iter *iter)
*/
BLI_mempool_iter *BLI_mempool_iter_threadsafe_create(BLI_mempool *pool, const size_t num_iter)
{
- BLI_assert(pool->flag & BLI_MEMPOOL_ALLOW_ITER);
+ BLI_assert(pool->flag & BLI_MEMPOOL_ALLOW_ITER);
- BLI_mempool_iter *iter_arr = MEM_mallocN(sizeof(*iter_arr) * num_iter, __func__);
- BLI_mempool_chunk **curchunk_threaded_shared = MEM_mallocN(sizeof(void *), __func__);
+ BLI_mempool_iter *iter_arr = MEM_mallocN(sizeof(*iter_arr) * num_iter, __func__);
+ BLI_mempool_chunk **curchunk_threaded_shared = MEM_mallocN(sizeof(void *), __func__);
- BLI_mempool_iternew(pool, iter_arr);
+ BLI_mempool_iternew(pool, iter_arr);
- *curchunk_threaded_shared = iter_arr->curchunk;
- iter_arr->curchunk_threaded_shared = curchunk_threaded_shared;
+ *curchunk_threaded_shared = iter_arr->curchunk;
+ iter_arr->curchunk_threaded_shared = curchunk_threaded_shared;
- for (size_t i = 1; i < num_iter; i++) {
- iter_arr[i] = iter_arr[0];
- *curchunk_threaded_shared = iter_arr[i].curchunk = (
- (*curchunk_threaded_shared) ? (*curchunk_threaded_shared)->next : NULL);
- }
+ for (size_t i = 1; i < num_iter; i++) {
+ iter_arr[i] = iter_arr[0];
+ *curchunk_threaded_shared = iter_arr[i].curchunk = ((*curchunk_threaded_shared) ?
+ (*curchunk_threaded_shared)->next :
+ NULL);
+ }
- return iter_arr;
+ return iter_arr;
}
-void BLI_mempool_iter_threadsafe_free(BLI_mempool_iter *iter_arr)
+void BLI_mempool_iter_threadsafe_free(BLI_mempool_iter *iter_arr)
{
- BLI_assert(iter_arr->curchunk_threaded_shared != NULL);
+ BLI_assert(iter_arr->curchunk_threaded_shared != NULL);
- MEM_freeN(iter_arr->curchunk_threaded_shared);
- MEM_freeN(iter_arr);
+ MEM_freeN(iter_arr->curchunk_threaded_shared);
+ MEM_freeN(iter_arr);
}
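
A hedged sketch of how the thread-safe iterator array is meant to be consumed: each worker owns one entry of the array, and the shared chunk pointer keeps two workers out of the same chunk. The worker function and the thread spawning are illustrative; this file only provides the iterators.

/* Illustrative worker: each thread drains elements through its own iterator. */
static void count_worker(BLI_mempool_iter *iter, size_t *r_count)
{
  for (void *elem = BLI_mempool_iterstep(iter); elem; elem = BLI_mempool_iterstep(iter)) {
    (*r_count)++;
  }
}

/* Caller side (thread creation/joining omitted for brevity):
 *
 *   BLI_mempool_iter *iter_arr = BLI_mempool_iter_threadsafe_create(pool, num_threads);
 *   ... run count_worker(&iter_arr[i], &counts[i]) on num_threads threads ...
 *   BLI_mempool_iter_threadsafe_free(iter_arr);
 */
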
#if 0
@@ -594,48 +590,48 @@ void BLI_mempool_iter_threadsafe_free(BLI_mempool_iter *iter_arr)
static void *bli_mempool_iternext(BLI_mempool_iter *iter)
{
- void *ret = NULL;
-
- if (iter->curchunk == NULL || !iter->pool->totused) {
- return ret;
- }
-
- ret = ((char *)CHUNK_DATA(iter->curchunk)) + (iter->pool->esize * iter->curindex);
-
- iter->curindex++;
-
- if (iter->curindex == iter->pool->pchunk) {
- iter->curindex = 0;
- if (iter->curchunk_threaded_shared) {
- while (1) {
- iter->curchunk = *iter->curchunk_threaded_shared;
- if (iter->curchunk == NULL) {
- return ret;
- }
- if (atomic_cas_ptr(
- (void **)iter->curchunk_threaded_shared,
- iter->curchunk,
- iter->curchunk->next) == iter->curchunk)
- {
- break;
- }
- }
- }
- iter->curchunk = iter->curchunk->next;
- }
-
- return ret;
+ void *ret = NULL;
+
+ if (iter->curchunk == NULL || !iter->pool->totused) {
+ return ret;
+ }
+
+ ret = ((char *)CHUNK_DATA(iter->curchunk)) + (iter->pool->esize * iter->curindex);
+
+ iter->curindex++;
+
+ if (iter->curindex == iter->pool->pchunk) {
+ iter->curindex = 0;
+ if (iter->curchunk_threaded_shared) {
+ while (1) {
+ iter->curchunk = *iter->curchunk_threaded_shared;
+ if (iter->curchunk == NULL) {
+ return ret;
+ }
+ if (atomic_cas_ptr(
+ (void **)iter->curchunk_threaded_shared,
+ iter->curchunk,
+ iter->curchunk->next) == iter->curchunk)
+ {
+ break;
+ }
+ }
+ }
+ iter->curchunk = iter->curchunk->next;
+ }
+
+ return ret;
}
void *BLI_mempool_iterstep(BLI_mempool_iter *iter)
{
- BLI_freenode *ret;
+ BLI_freenode *ret;
- do {
- ret = bli_mempool_iternext(iter);
- } while (ret && ret->freeword == FREEWORD);
+ do {
+ ret = bli_mempool_iternext(iter);
+ } while (ret && ret->freeword == FREEWORD);
- return ret;
+ return ret;
}
#else
@@ -647,46 +643,43 @@ void *BLI_mempool_iterstep(BLI_mempool_iter *iter)
*/
void *BLI_mempool_iterstep(BLI_mempool_iter *iter)
{
- if (UNLIKELY(iter->curchunk == NULL)) {
- return NULL;
- }
-
- const uint esize = iter->pool->esize;
- BLI_freenode *curnode = POINTER_OFFSET(CHUNK_DATA(iter->curchunk), (esize * iter->curindex));
- BLI_freenode *ret;
- do {
- ret = curnode;
-
- if (++iter->curindex != iter->pool->pchunk) {
- curnode = POINTER_OFFSET(curnode, esize);
- }
- else {
- iter->curindex = 0;
- if (iter->curchunk_threaded_shared) {
- for (iter->curchunk = *iter->curchunk_threaded_shared;
- (iter->curchunk != NULL) &&
- (atomic_cas_ptr(
- (void **)iter->curchunk_threaded_shared,
- iter->curchunk,
- iter->curchunk->next) != iter->curchunk);
- iter->curchunk = *iter->curchunk_threaded_shared)
- {
- /* pass. */
- }
-
- if (UNLIKELY(iter->curchunk == NULL)) {
- return (ret->freeword == FREEWORD) ? NULL : ret;
- }
- }
- iter->curchunk = iter->curchunk->next;
- if (UNLIKELY(iter->curchunk == NULL)) {
- return (ret->freeword == FREEWORD) ? NULL : ret;
- }
- curnode = CHUNK_DATA(iter->curchunk);
- }
- } while (ret->freeword == FREEWORD);
-
- return ret;
+ if (UNLIKELY(iter->curchunk == NULL)) {
+ return NULL;
+ }
+
+ const uint esize = iter->pool->esize;
+ BLI_freenode *curnode = POINTER_OFFSET(CHUNK_DATA(iter->curchunk), (esize * iter->curindex));
+ BLI_freenode *ret;
+ do {
+ ret = curnode;
+
+ if (++iter->curindex != iter->pool->pchunk) {
+ curnode = POINTER_OFFSET(curnode, esize);
+ }
+ else {
+ iter->curindex = 0;
+ if (iter->curchunk_threaded_shared) {
+ for (iter->curchunk = *iter->curchunk_threaded_shared;
+ (iter->curchunk != NULL) && (atomic_cas_ptr((void **)iter->curchunk_threaded_shared,
+ iter->curchunk,
+ iter->curchunk->next) != iter->curchunk);
+ iter->curchunk = *iter->curchunk_threaded_shared) {
+ /* pass. */
+ }
+
+ if (UNLIKELY(iter->curchunk == NULL)) {
+ return (ret->freeword == FREEWORD) ? NULL : ret;
+ }
+ }
+ iter->curchunk = iter->curchunk->next;
+ if (UNLIKELY(iter->curchunk == NULL)) {
+ return (ret->freeword == FREEWORD) ? NULL : ret;
+ }
+ curnode = CHUNK_DATA(iter->curchunk);
+ }
+ } while (ret->freeword == FREEWORD);
+
+ return ret;
}
#endif
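
A minimal single-threaded iteration sketch using the functions above; it mirrors the pattern already used by BLI_mempool_as_table and assumes the pool was created with BLI_MEMPOOL_ALLOW_ITER.

static void mempool_iterate_example(BLI_mempool *pool)
{
  BLI_mempool_iter iter;
  void *elem;

  BLI_mempool_iternew(pool, &iter);
  while ((elem = BLI_mempool_iterstep(&iter))) {
    /* 'elem' is a live element; slots carrying FREEWORD are skipped. */
  }
}
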
@@ -699,54 +692,54 @@ void *BLI_mempool_iterstep(BLI_mempool_iter *iter)
*/
void BLI_mempool_clear_ex(BLI_mempool *pool, const int totelem_reserve)
{
- BLI_mempool_chunk *mpchunk;
- BLI_mempool_chunk *mpchunk_next;
- uint maxchunks;
+ BLI_mempool_chunk *mpchunk;
+ BLI_mempool_chunk *mpchunk_next;
+ uint maxchunks;
- BLI_mempool_chunk *chunks_temp;
- BLI_freenode *last_tail = NULL;
+ BLI_mempool_chunk *chunks_temp;
+ BLI_freenode *last_tail = NULL;
#ifdef WITH_MEM_VALGRIND
- VALGRIND_DESTROY_MEMPOOL(pool);
- VALGRIND_CREATE_MEMPOOL(pool, 0, false);
+ VALGRIND_DESTROY_MEMPOOL(pool);
+ VALGRIND_CREATE_MEMPOOL(pool, 0, false);
#endif
- if (totelem_reserve == -1) {
- maxchunks = pool->maxchunks;
- }
- else {
- maxchunks = mempool_maxchunks((uint)totelem_reserve, pool->pchunk);
- }
-
- /* Free all after 'pool->maxchunks'. */
- mpchunk = mempool_chunk_find(pool->chunks, maxchunks - 1);
- if (mpchunk && mpchunk->next) {
- /* terminate */
- mpchunk_next = mpchunk->next;
- mpchunk->next = NULL;
- mpchunk = mpchunk_next;
-
- do {
- mpchunk_next = mpchunk->next;
- mempool_chunk_free(mpchunk);
- } while ((mpchunk = mpchunk_next));
- }
-
- /* re-initialize */
- pool->free = NULL;
- pool->totused = 0;
+ if (totelem_reserve == -1) {
+ maxchunks = pool->maxchunks;
+ }
+ else {
+ maxchunks = mempool_maxchunks((uint)totelem_reserve, pool->pchunk);
+ }
+
+ /* Free all after 'pool->maxchunks'. */
+ mpchunk = mempool_chunk_find(pool->chunks, maxchunks - 1);
+ if (mpchunk && mpchunk->next) {
+ /* terminate */
+ mpchunk_next = mpchunk->next;
+ mpchunk->next = NULL;
+ mpchunk = mpchunk_next;
+
+ do {
+ mpchunk_next = mpchunk->next;
+ mempool_chunk_free(mpchunk);
+ } while ((mpchunk = mpchunk_next));
+ }
+
+ /* re-initialize */
+ pool->free = NULL;
+ pool->totused = 0;
#ifdef USE_TOTALLOC
- pool->totalloc = 0;
+ pool->totalloc = 0;
#endif
- chunks_temp = pool->chunks;
- pool->chunks = NULL;
- pool->chunk_tail = NULL;
+ chunks_temp = pool->chunks;
+ pool->chunks = NULL;
+ pool->chunk_tail = NULL;
- while ((mpchunk = chunks_temp)) {
- chunks_temp = mpchunk->next;
- last_tail = mempool_chunk_add(pool, mpchunk, last_tail);
- }
+ while ((mpchunk = chunks_temp)) {
+ chunks_temp = mpchunk->next;
+ last_tail = mempool_chunk_add(pool, mpchunk, last_tail);
+ }
}
/**
@@ -754,7 +747,7 @@ void BLI_mempool_clear_ex(BLI_mempool *pool, const int totelem_reserve)
*/
void BLI_mempool_clear(BLI_mempool *pool)
{
- BLI_mempool_clear_ex(pool, -1);
+ BLI_mempool_clear_ex(pool, -1);
}
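
A short sketch of the two clearing entry points (the reserve count is an arbitrary example):

static void mempool_clear_example(BLI_mempool *pool)
{
  BLI_mempool_clear(pool);          /* keep up to the pool's original 'maxchunks' chunks */
  BLI_mempool_clear_ex(pool, 1024); /* keep enough chunks for roughly 1024 elements */
}
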
/**
@@ -762,18 +755,18 @@ void BLI_mempool_clear(BLI_mempool *pool)
*/
void BLI_mempool_destroy(BLI_mempool *pool)
{
- mempool_chunk_free_all(pool->chunks);
+ mempool_chunk_free_all(pool->chunks);
#ifdef WITH_MEM_VALGRIND
- VALGRIND_DESTROY_MEMPOOL(pool);
+ VALGRIND_DESTROY_MEMPOOL(pool);
#endif
- MEM_freeN(pool);
+ MEM_freeN(pool);
}
#ifndef NDEBUG
void BLI_mempool_set_memory_debug(void)
{
- mempool_debug_memset = true;
+ mempool_debug_memset = true;
}
#endif