git.blender.org/blender.git
Diffstat (limited to 'intern/guardedalloc/intern')
-rw-r--r--  intern/guardedalloc/intern/mallocn.c               |   7
-rw-r--r--  intern/guardedalloc/intern/mallocn_guarded_impl.c  | 178
-rw-r--r--  intern/guardedalloc/intern/mallocn_intern.h        |  17
-rw-r--r--  intern/guardedalloc/intern/mallocn_lockfree_impl.c | 109
4 files changed, 29 insertions, 282 deletions
diff --git a/intern/guardedalloc/intern/mallocn.c b/intern/guardedalloc/intern/mallocn.c
index d24437c85f2..e85f8eb03ed 100644
--- a/intern/guardedalloc/intern/mallocn.c
+++ b/intern/guardedalloc/intern/mallocn.c
@@ -48,18 +48,14 @@ void *(*MEM_malloc_arrayN)(size_t len, size_t size, const char *str) = MEM_lockf
void *(*MEM_mallocN_aligned)(size_t len,
size_t alignment,
const char *str) = MEM_lockfree_mallocN_aligned;
-void *(*MEM_mapallocN)(size_t len, const char *str) = MEM_lockfree_mapallocN;
void (*MEM_printmemlist_pydict)(void) = MEM_lockfree_printmemlist_pydict;
void (*MEM_printmemlist)(void) = MEM_lockfree_printmemlist;
void (*MEM_callbackmemlist)(void (*func)(void *)) = MEM_lockfree_callbackmemlist;
void (*MEM_printmemlist_stats)(void) = MEM_lockfree_printmemlist_stats;
void (*MEM_set_error_callback)(void (*func)(const char *)) = MEM_lockfree_set_error_callback;
bool (*MEM_consistency_check)(void) = MEM_lockfree_consistency_check;
-void (*MEM_set_lock_callback)(void (*lock)(void),
- void (*unlock)(void)) = MEM_lockfree_set_lock_callback;
void (*MEM_set_memory_debug)(void) = MEM_lockfree_set_memory_debug;
size_t (*MEM_get_memory_in_use)(void) = MEM_lockfree_get_memory_in_use;
-size_t (*MEM_get_mapped_memory_in_use)(void) = MEM_lockfree_get_mapped_memory_in_use;
unsigned int (*MEM_get_memory_blocks_in_use)(void) = MEM_lockfree_get_memory_blocks_in_use;
void (*MEM_reset_peak_memory)(void) = MEM_lockfree_reset_peak_memory;
size_t (*MEM_get_peak_memory)(void) = MEM_lockfree_get_peak_memory;
@@ -111,17 +107,14 @@ void MEM_use_guarded_allocator(void)
MEM_mallocN = MEM_guarded_mallocN;
MEM_malloc_arrayN = MEM_guarded_malloc_arrayN;
MEM_mallocN_aligned = MEM_guarded_mallocN_aligned;
- MEM_mapallocN = MEM_guarded_mapallocN;
MEM_printmemlist_pydict = MEM_guarded_printmemlist_pydict;
MEM_printmemlist = MEM_guarded_printmemlist;
MEM_callbackmemlist = MEM_guarded_callbackmemlist;
MEM_printmemlist_stats = MEM_guarded_printmemlist_stats;
MEM_set_error_callback = MEM_guarded_set_error_callback;
MEM_consistency_check = MEM_guarded_consistency_check;
- MEM_set_lock_callback = MEM_guarded_set_lock_callback;
MEM_set_memory_debug = MEM_guarded_set_memory_debug;
MEM_get_memory_in_use = MEM_guarded_get_memory_in_use;
- MEM_get_mapped_memory_in_use = MEM_guarded_get_mapped_memory_in_use;
MEM_get_memory_blocks_in_use = MEM_guarded_get_memory_blocks_in_use;
MEM_reset_peak_memory = MEM_guarded_reset_peak_memory;
MEM_get_peak_memory = MEM_guarded_get_peak_memory;
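
Note: the mallocn.c hunks above show the dispatch scheme this module relies on: every public MEM_* entry point is a function pointer that defaults to the lock-free implementation and is repointed once by MEM_use_guarded_allocator(). A minimal standalone sketch of that pattern follows; the names fast_alloc, debug_alloc, do_alloc and use_debug_allocator are hypothetical stand-ins, not Blender API.

#include <stdio.h>
#include <stdlib.h>

/* Fast default implementation. */
static void *fast_alloc(size_t len)
{
  return malloc(len);
}

/* Verbose implementation selected only for debugging. */
static void *debug_alloc(size_t len)
{
  void *p = malloc(len);
  fprintf(stderr, "debug_alloc: %zu bytes at %p\n", len, p);
  return p;
}

/* Public entry point: a function pointer that defaults to the fast path. */
void *(*do_alloc)(size_t len) = fast_alloc;

/* Switch every entry point once, early at startup, before any allocation. */
void use_debug_allocator(void)
{
  do_alloc = debug_alloc;
}

int main(void)
{
  use_debug_allocator(); /* comparable in spirit to MEM_use_guarded_allocator() */
  void *p = do_alloc(64);
  free(p);
  return 0;
}
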
diff --git a/intern/guardedalloc/intern/mallocn_guarded_impl.c b/intern/guardedalloc/intern/mallocn_guarded_impl.c
index f601609c6e0..20dcbed7235 100644
--- a/intern/guardedalloc/intern/mallocn_guarded_impl.c
+++ b/intern/guardedalloc/intern/mallocn_guarded_impl.c
@@ -28,6 +28,8 @@
#include <string.h> /* memcpy */
#include <sys/types.h>
+#include <pthread.h>
+
#include "MEM_guardedalloc.h"
/* to ensure strict conversions */
@@ -51,17 +53,6 @@
//#define DEBUG_MEMCOUNTER
/* Only for debugging:
- * defining DEBUG_THREADS will enable check whether memory manager
- * is locked with a mutex when allocation is called from non-main
- * thread.
- *
- * This helps troubleshooting memory issues caused by the fact
- * guarded allocator is not thread-safe, however this check will
- * fail to check allocations from openmp threads.
- */
-//#define DEBUG_THREADS
-
-/* Only for debugging:
* Defining DEBUG_BACKTRACE will store a backtrace from where
* memory block was allocated and print this trace for all
* unfreed blocks.
@@ -104,7 +95,7 @@ typedef struct MemHead {
const char *name;
const char *nextname;
int tag2;
- short mmap; /* if true, memory was mmapped */
+ short pad1;
short alignment; /* if non-zero aligned alloc was used
* and alignment is stored here.
*/
@@ -124,24 +115,6 @@ typedef struct MemHead {
typedef MemHead MemHeadAligned;
-/* for openmp threading asserts, saves time troubleshooting
- * we may need to extend this if blender code starts using MEM_
- * functions inside OpenMP correctly with omp_set_lock() */
-
-#if 0 /* disable for now, only use to debug openmp code which doesn lock threads for malloc */
-# if defined(_OPENMP) && defined(DEBUG)
-# include <assert.h>
-# include <omp.h>
-# define DEBUG_OMP_MALLOC
-# endif
-#endif
-
-#ifdef DEBUG_THREADS
-# include <assert.h>
-# include <pthread.h>
-static pthread_t mainid;
-#endif
-
#ifdef DEBUG_BACKTRACE
# if defined(__linux__) || defined(__APPLE__)
# include <execinfo.h>
@@ -187,13 +160,11 @@ static const char *check_memlist(MemHead *memh);
/* --------------------------------------------------------------------- */
static unsigned int totblock = 0;
-static size_t mem_in_use = 0, mmap_in_use = 0, peak_mem = 0;
+static size_t mem_in_use = 0, peak_mem = 0;
static volatile struct localListBase _membase;
static volatile struct localListBase *membase = &_membase;
static void (*error_callback)(const char *) = NULL;
-static void (*thread_lock_callback)(void) = NULL;
-static void (*thread_unlock_callback)(void) = NULL;
static bool malloc_debug_memset = false;
@@ -233,40 +204,16 @@ print_error(const char *str, ...)
fputs(buf, stderr);
}
+static pthread_mutex_t thread_lock = PTHREAD_MUTEX_INITIALIZER;
+
static void mem_lock_thread(void)
{
-#ifdef DEBUG_THREADS
- static int initialized = 0;
-
- if (initialized == 0) {
- /* assume first allocation happens from main thread */
- mainid = pthread_self();
- initialized = 1;
- }
-
- if (!pthread_equal(pthread_self(), mainid) && thread_lock_callback == NULL) {
- assert(!"Memory function is called from non-main thread without lock");
- }
-#endif
-
-#ifdef DEBUG_OMP_MALLOC
- assert(omp_in_parallel() == 0);
-#endif
-
- if (thread_lock_callback)
- thread_lock_callback();
+ pthread_mutex_lock(&thread_lock);
}
static void mem_unlock_thread(void)
{
-#ifdef DEBUG_THREADS
- if (!pthread_equal(pthread_self(), mainid) && thread_lock_callback == NULL) {
- assert(!"Thread lock was removed while allocation from thread is in progress");
- }
-#endif
-
- if (thread_unlock_callback)
- thread_unlock_callback();
+ pthread_mutex_unlock(&thread_lock);
}
bool MEM_guarded_consistency_check(void)
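
Note: the hunk above drops the externally registered lock/unlock callbacks and guards the allocator with a statically initialized pthread mutex instead. As a rough standalone illustration of that pattern (assuming a POSIX-style pthread implementation is available; this is not the Blender code itself):

#include <pthread.h>
#include <stddef.h>

/* A statically initialized mutex guarding shared state, mirroring the new
 * mem_lock_thread()/mem_unlock_thread() implementation above. */
static pthread_mutex_t counter_lock = PTHREAD_MUTEX_INITIALIZER;
static size_t counter = 0;

static void counter_add(size_t n)
{
  pthread_mutex_lock(&counter_lock);
  counter += n;
  pthread_mutex_unlock(&counter_lock);
}

static size_t counter_get(void)
{
  size_t value;
  pthread_mutex_lock(&counter_lock);
  value = counter;
  pthread_mutex_unlock(&counter_lock);
  return value;
}
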
@@ -287,12 +234,6 @@ void MEM_guarded_set_error_callback(void (*func)(const char *))
error_callback = func;
}
-void MEM_guarded_set_lock_callback(void (*lock)(void), void (*unlock)(void))
-{
- thread_lock_callback = lock;
- thread_unlock_callback = unlock;
-}
-
void MEM_guarded_set_memory_debug(void)
{
malloc_debug_memset = true;
@@ -320,10 +261,8 @@ void *MEM_guarded_dupallocN(const void *vmemh)
memh--;
#ifndef DEBUG_MEMDUPLINAME
- if (UNLIKELY(memh->mmap))
- newp = MEM_guarded_mapallocN(memh->len, "dupli_mapalloc");
- else if (LIKELY(memh->alignment == 0))
- newp = MEM_guarded_mapallocN(memh->len, "dupli_mapalloc");
+ if (LIKELY(memh->alignment == 0))
+ newp = MEM_guarded_mallocN(memh->len, "dupli_alloc");
else
newp = MEM_guarded_mallocN_aligned(memh->len, (size_t)memh->alignment, "dupli_alloc");
@@ -334,11 +273,7 @@ void *MEM_guarded_dupallocN(const void *vmemh)
MemHead *nmemh;
char *name = malloc(strlen(memh->name) + 24);
- if (UNLIKELY(memh->mmap)) {
- sprintf(name, "%s %s", "dupli_mapalloc", memh->name);
- newp = MEM_guarded_mapallocN(memh->len, name);
- }
- else if (LIKELY(memh->alignment == 0)) {
+ if (LIKELY(memh->alignment == 0)) {
sprintf(name, "%s %s", "dupli_alloc", memh->name);
newp = MEM_guarded_mallocN(memh->len, name);
}
@@ -478,7 +413,7 @@ static void make_memhead_header(MemHead *memh, size_t len, const char *str)
memh->name = str;
memh->nextname = NULL;
memh->len = len;
- memh->mmap = 0;
+ memh->pad1 = 0;
memh->alignment = 0;
memh->tag2 = MEMTAG2;
@@ -646,58 +581,6 @@ void *MEM_guarded_calloc_arrayN(size_t len, size_t size, const char *str)
return MEM_guarded_callocN(total_size, str);
}
-/* note; mmap returns zero'd memory */
-void *MEM_guarded_mapallocN(size_t len, const char *str)
-{
- MemHead *memh;
-
- /* on 64 bit, simply use calloc instead, as mmap does not support
- * allocating > 4 GB on Windows. the only reason mapalloc exists
- * is to get around address space limitations in 32 bit OSes. */
- if (sizeof(void *) >= 8)
- return MEM_guarded_callocN(len, str);
-
- len = SIZET_ALIGN_4(len);
-
-#if defined(WIN32)
- /* our windows mmap implementation is not thread safe */
- mem_lock_thread();
-#endif
- memh = mmap(NULL,
- len + sizeof(MemHead) + sizeof(MemTail),
- PROT_READ | PROT_WRITE,
- MAP_SHARED | MAP_ANON,
- -1,
- 0);
-#if defined(WIN32)
- mem_unlock_thread();
-#endif
-
- if (memh != (MemHead *)-1) {
- make_memhead_header(memh, len, str);
- memh->mmap = 1;
- atomic_add_and_fetch_z(&mmap_in_use, len);
- mem_lock_thread();
- peak_mem = mmap_in_use > peak_mem ? mmap_in_use : peak_mem;
- mem_unlock_thread();
-#ifdef DEBUG_MEMCOUNTER
- if (_mallocn_count == DEBUG_MEMCOUNTER_ERROR_VAL)
- memcount_raise(__func__);
- memh->_count = _mallocn_count++;
-#endif
- return (++memh);
- }
- else {
- print_error(
- "Mapalloc returns null, fallback to regular malloc: "
- "len=" SIZET_FORMAT " in %s, total %u\n",
- SIZET_ARG(len),
- str,
- (unsigned int)mmap_in_use);
- return MEM_guarded_callocN(len, str);
- }
-}
-
/* Memory statistics print */
typedef struct MemPrintBlock {
const char *name;
@@ -765,7 +648,7 @@ void MEM_guarded_printmemlist_stats(void)
pb++;
#ifdef USE_MALLOC_USABLE_SIZE
- if (!membl->mmap && membl->alignment == 0) {
+ if (membl->alignment == 0) {
mem_in_use_slop += (sizeof(MemHead) + sizeof(MemTail) + malloc_usable_size((void *)membl)) -
membl->len;
}
@@ -1098,27 +981,13 @@ static void rem_memblock(MemHead *memh)
free((char *)memh->name);
#endif
- if (memh->mmap) {
- atomic_sub_and_fetch_z(&mmap_in_use, memh->len);
-#if defined(WIN32)
- /* our windows mmap implementation is not thread safe */
- mem_lock_thread();
-#endif
- if (munmap(memh, memh->len + sizeof(MemHead) + sizeof(MemTail)))
- printf("Couldn't unmap memory %s\n", memh->name);
-#if defined(WIN32)
- mem_unlock_thread();
-#endif
+ if (UNLIKELY(malloc_debug_memset && memh->len))
+ memset(memh + 1, 255, memh->len);
+ if (LIKELY(memh->alignment == 0)) {
+ free(memh);
}
else {
- if (UNLIKELY(malloc_debug_memset && memh->len))
- memset(memh + 1, 255, memh->len);
- if (LIKELY(memh->alignment == 0)) {
- free(memh);
- }
- else {
- aligned_free(MEMHEAD_REAL_PTR(memh));
- }
+ aligned_free(MEMHEAD_REAL_PTR(memh));
}
}
@@ -1270,17 +1139,6 @@ size_t MEM_guarded_get_memory_in_use(void)
return _mem_in_use;
}
-size_t MEM_guarded_get_mapped_memory_in_use(void)
-{
- size_t _mmap_in_use;
-
- mem_lock_thread();
- _mmap_in_use = mmap_in_use;
- mem_unlock_thread();
-
- return _mmap_in_use;
-}
-
unsigned int MEM_guarded_get_memory_blocks_in_use(void)
{
unsigned int _totblock;
diff --git a/intern/guardedalloc/intern/mallocn_intern.h b/intern/guardedalloc/intern/mallocn_intern.h
index 876607fdb77..ef8845a66b3 100644
--- a/intern/guardedalloc/intern/mallocn_intern.h
+++ b/intern/guardedalloc/intern/mallocn_intern.h
@@ -24,13 +24,6 @@
#ifndef __MALLOCN_INTERN_H__
#define __MALLOCN_INTERN_H__
-/* mmap exception */
-#if defined(WIN32)
-# include "mmap_win.h"
-#else
-# include <sys/mman.h>
-#endif
-
#ifdef __GNUC__
# define UNUSED(x) UNUSED_##x __attribute__((__unused__))
#else
@@ -140,19 +133,14 @@ void *MEM_lockfree_mallocN_aligned(size_t len,
size_t alignment,
const char *UNUSED(str)) ATTR_MALLOC ATTR_WARN_UNUSED_RESULT
ATTR_ALLOC_SIZE(1) ATTR_NONNULL(3);
-void *MEM_lockfree_mapallocN(size_t len,
- const char *UNUSED(str)) ATTR_MALLOC ATTR_WARN_UNUSED_RESULT
- ATTR_ALLOC_SIZE(1) ATTR_NONNULL(2);
void MEM_lockfree_printmemlist_pydict(void);
void MEM_lockfree_printmemlist(void);
void MEM_lockfree_callbackmemlist(void (*func)(void *));
void MEM_lockfree_printmemlist_stats(void);
void MEM_lockfree_set_error_callback(void (*func)(const char *));
bool MEM_lockfree_consistency_check(void);
-void MEM_lockfree_set_lock_callback(void (*lock)(void), void (*unlock)(void));
void MEM_lockfree_set_memory_debug(void);
size_t MEM_lockfree_get_memory_in_use(void);
-size_t MEM_lockfree_get_mapped_memory_in_use(void);
unsigned int MEM_lockfree_get_memory_blocks_in_use(void);
void MEM_lockfree_reset_peak_memory(void);
size_t MEM_lockfree_get_peak_memory(void) ATTR_WARN_UNUSED_RESULT;
@@ -188,19 +176,14 @@ void *MEM_guarded_mallocN_aligned(size_t len,
size_t alignment,
const char *UNUSED(str)) ATTR_MALLOC ATTR_WARN_UNUSED_RESULT
ATTR_ALLOC_SIZE(1) ATTR_NONNULL(3);
-void *MEM_guarded_mapallocN(size_t len,
- const char *UNUSED(str)) ATTR_MALLOC ATTR_WARN_UNUSED_RESULT
- ATTR_ALLOC_SIZE(1) ATTR_NONNULL(2);
void MEM_guarded_printmemlist_pydict(void);
void MEM_guarded_printmemlist(void);
void MEM_guarded_callbackmemlist(void (*func)(void *));
void MEM_guarded_printmemlist_stats(void);
void MEM_guarded_set_error_callback(void (*func)(const char *));
bool MEM_guarded_consistency_check(void);
-void MEM_guarded_set_lock_callback(void (*lock)(void), void (*unlock)(void));
void MEM_guarded_set_memory_debug(void);
size_t MEM_guarded_get_memory_in_use(void);
-size_t MEM_guarded_get_mapped_memory_in_use(void);
unsigned int MEM_guarded_get_memory_blocks_in_use(void);
void MEM_guarded_reset_peak_memory(void);
size_t MEM_guarded_get_peak_memory(void) ATTR_WARN_UNUSED_RESULT;
diff --git a/intern/guardedalloc/intern/mallocn_lockfree_impl.c b/intern/guardedalloc/intern/mallocn_lockfree_impl.c
index ab7d9097669..205cc688d72 100644
--- a/intern/guardedalloc/intern/mallocn_lockfree_impl.c
+++ b/intern/guardedalloc/intern/mallocn_lockfree_impl.c
@@ -44,22 +44,18 @@ typedef struct MemHeadAligned {
} MemHeadAligned;
static unsigned int totblock = 0;
-static size_t mem_in_use = 0, mmap_in_use = 0, peak_mem = 0;
+static size_t mem_in_use = 0, peak_mem = 0;
static bool malloc_debug_memset = false;
static void (*error_callback)(const char *) = NULL;
-static void (*thread_lock_callback)(void) = NULL;
-static void (*thread_unlock_callback)(void) = NULL;
enum {
- MEMHEAD_MMAP_FLAG = 1,
- MEMHEAD_ALIGN_FLAG = 2,
+ MEMHEAD_ALIGN_FLAG = 1,
};
#define MEMHEAD_FROM_PTR(ptr) (((MemHead *)ptr) - 1)
#define PTR_FROM_MEMHEAD(memhead) (memhead + 1)
#define MEMHEAD_ALIGNED_FROM_PTR(ptr) (((MemHeadAligned *)ptr) - 1)
-#define MEMHEAD_IS_MMAP(memhead) ((memhead)->len & (size_t)MEMHEAD_MMAP_FLAG)
#define MEMHEAD_IS_ALIGNED(memhead) ((memhead)->len & (size_t)MEMHEAD_ALIGN_FLAG)
/* Uncomment this to have proper peak counter. */
@@ -93,24 +89,10 @@ print_error(const char *str, ...)
}
}
-#if defined(WIN32)
-static void mem_lock_thread(void)
-{
- if (thread_lock_callback)
- thread_lock_callback();
-}
-
-static void mem_unlock_thread(void)
-{
- if (thread_unlock_callback)
- thread_unlock_callback();
-}
-#endif
-
size_t MEM_lockfree_allocN_len(const void *vmemh)
{
if (vmemh) {
- return MEMHEAD_FROM_PTR(vmemh)->len & ~((size_t)(MEMHEAD_MMAP_FLAG | MEMHEAD_ALIGN_FLAG));
+ return MEMHEAD_FROM_PTR(vmemh)->len & ~((size_t)(MEMHEAD_ALIGN_FLAG));
}
else {
return 0;
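
Note: MEM_lockfree_allocN_len() above works because the lock-free MemHead stores status flags in the low bit of its len field and masks them off when reporting the size. A minimal sketch of that tagging trick, with hypothetical names and assuming the stored length always leaves its lowest bit free:

#include <stdbool.h>
#include <stddef.h>

/* Store a boolean flag in the low bit of a size field, as MEMHEAD_ALIGN_FLAG
 * is stored in MemHead.len above. Assumes the real length never uses the
 * low bit itself. */
enum { ALIGN_FLAG = 1 };

static size_t pack_len(size_t len, bool aligned)
{
  return aligned ? (len | (size_t)ALIGN_FLAG) : len;
}

static size_t unpack_len(size_t stored)
{
  return stored & ~((size_t)ALIGN_FLAG); /* mirrors MEM_lockfree_allocN_len() */
}

static bool is_aligned(size_t stored)
{
  return (stored & (size_t)ALIGN_FLAG) != 0; /* mirrors MEMHEAD_IS_ALIGNED() */
}
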
@@ -133,29 +115,15 @@ void MEM_lockfree_freeN(void *vmemh)
atomic_sub_and_fetch_u(&totblock, 1);
atomic_sub_and_fetch_z(&mem_in_use, len);
- if (MEMHEAD_IS_MMAP(memh)) {
- atomic_sub_and_fetch_z(&mmap_in_use, len);
-#if defined(WIN32)
- /* our windows mmap implementation is not thread safe */
- mem_lock_thread();
-#endif
- if (munmap(memh, len + sizeof(MemHead)))
- printf("Couldn't unmap memory\n");
-#if defined(WIN32)
- mem_unlock_thread();
-#endif
+ if (UNLIKELY(malloc_debug_memset && len)) {
+ memset(memh + 1, 255, len);
+ }
+ if (UNLIKELY(MEMHEAD_IS_ALIGNED(memh))) {
+ MemHeadAligned *memh_aligned = MEMHEAD_ALIGNED_FROM_PTR(vmemh);
+ aligned_free(MEMHEAD_REAL_PTR(memh_aligned));
}
else {
- if (UNLIKELY(malloc_debug_memset && len)) {
- memset(memh + 1, 255, len);
- }
- if (UNLIKELY(MEMHEAD_IS_ALIGNED(memh))) {
- MemHeadAligned *memh_aligned = MEMHEAD_ALIGNED_FROM_PTR(vmemh);
- aligned_free(MEMHEAD_REAL_PTR(memh_aligned));
- }
- else {
- free(memh);
- }
+ free(memh);
}
}
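
Note: the simplified MEM_lockfree_freeN() above keeps the optional debug behavior of overwriting the freed payload with 255 (0xFF) before releasing it, so stale pointers read obviously invalid data. A hedged sketch of that poison-on-free pattern, using a hypothetical helper rather than the Blender function:

#include <stdbool.h>
#include <stdlib.h>
#include <string.h>

/* Corresponds to the flag toggled by MEM_set_memory_debug(). */
static bool poison_freed_memory = false;

static void debug_free(void *payload, size_t payload_len)
{
  if (poison_freed_memory && payload_len) {
    memset(payload, 255, payload_len); /* make use-after-free fail loudly */
  }
  free(payload);
}
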
@@ -165,10 +133,7 @@ void *MEM_lockfree_dupallocN(const void *vmemh)
if (vmemh) {
MemHead *memh = MEMHEAD_FROM_PTR(vmemh);
const size_t prev_size = MEM_lockfree_allocN_len(vmemh);
- if (UNLIKELY(MEMHEAD_IS_MMAP(memh))) {
- newp = MEM_lockfree_mapallocN(prev_size, "dupli_mapalloc");
- }
- else if (UNLIKELY(MEMHEAD_IS_ALIGNED(memh))) {
+ if (UNLIKELY(MEMHEAD_IS_ALIGNED(memh))) {
MemHeadAligned *memh_aligned = MEMHEAD_ALIGNED_FROM_PTR(vmemh);
newp = MEM_lockfree_mallocN_aligned(
prev_size, (size_t)memh_aligned->alignment, "dupli_malloc");
@@ -397,47 +362,6 @@ void *MEM_lockfree_mallocN_aligned(size_t len, size_t alignment, const char *str
return NULL;
}
-void *MEM_lockfree_mapallocN(size_t len, const char *str)
-{
- MemHead *memh;
-
- /* on 64 bit, simply use calloc instead, as mmap does not support
- * allocating > 4 GB on Windows. the only reason mapalloc exists
- * is to get around address space limitations in 32 bit OSes. */
- if (sizeof(void *) >= 8)
- return MEM_lockfree_callocN(len, str);
-
- len = SIZET_ALIGN_4(len);
-
-#if defined(WIN32)
- /* our windows mmap implementation is not thread safe */
- mem_lock_thread();
-#endif
- memh = mmap(NULL, len + sizeof(MemHead), PROT_READ | PROT_WRITE, MAP_SHARED | MAP_ANON, -1, 0);
-#if defined(WIN32)
- mem_unlock_thread();
-#endif
-
- if (memh != (MemHead *)-1) {
- memh->len = len | (size_t)MEMHEAD_MMAP_FLAG;
- atomic_add_and_fetch_u(&totblock, 1);
- atomic_add_and_fetch_z(&mem_in_use, len);
- atomic_add_and_fetch_z(&mmap_in_use, len);
-
- update_maximum(&peak_mem, mem_in_use);
- update_maximum(&peak_mem, mmap_in_use);
-
- return PTR_FROM_MEMHEAD(memh);
- }
- print_error(
- "Mapalloc returns null, fallback to regular malloc: "
- "len=" SIZET_FORMAT " in %s, total %u\n",
- SIZET_ARG(len),
- str,
- (unsigned int)mmap_in_use);
- return MEM_lockfree_callocN(len, str);
-}
-
void MEM_lockfree_printmemlist_pydict(void)
{
}
@@ -476,12 +400,6 @@ bool MEM_lockfree_consistency_check(void)
return true;
}
-void MEM_lockfree_set_lock_callback(void (*lock)(void), void (*unlock)(void))
-{
- thread_lock_callback = lock;
- thread_unlock_callback = unlock;
-}
-
void MEM_lockfree_set_memory_debug(void)
{
malloc_debug_memset = true;
@@ -492,11 +410,6 @@ size_t MEM_lockfree_get_memory_in_use(void)
return mem_in_use;
}
-size_t MEM_lockfree_get_mapped_memory_in_use(void)
-{
- return mmap_in_use;
-}
-
unsigned int MEM_lockfree_get_memory_blocks_in_use(void)
{
return totblock;
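
Note: in the lock-free implementation the totals returned by MEM_lockfree_get_memory_in_use() and MEM_lockfree_get_memory_blocks_in_use() are plain counters updated with Blender's atomic wrappers (atomic_add_and_fetch_z and friends, visible in the hunks above). A rough equivalent using standard C11 atomics instead of those wrappers might look like this:

#include <stdatomic.h>
#include <stddef.h>

/* Lock-free usage counters kept with C11 atomics; Blender itself uses its
 * own atomic_ops wrappers rather than <stdatomic.h>. */
static _Atomic size_t mem_in_use_counter = 0;
static _Atomic unsigned int block_counter = 0;

static void track_alloc(size_t len)
{
  atomic_fetch_add(&block_counter, 1u);
  atomic_fetch_add(&mem_in_use_counter, len);
}

static void track_free(size_t len)
{
  atomic_fetch_sub(&block_counter, 1u);
  atomic_fetch_sub(&mem_in_use_counter, len);
}

static size_t memory_in_use(void)
{
  return atomic_load(&mem_in_use_counter);
}
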