git.blender.org/blender.git

author    Brecht Van Lommel <brecht@blender.org>  2020-05-20 01:59:41 +0300
committer Brecht Van Lommel <brecht@blender.org>  2020-05-20 02:03:05 +0300
commit    183ba284f213903f2208349fe1dd57c07c327ad9 (patch)
tree      28690e868340794bd18b5a40d56ca24d7f304cfd /source/blender/blenlib
parent    120e9924c177c7a8fde06b9c5eca98e4e2a19180 (diff)
Cleanup: make guarded memory allocation always thread safe
Previously this was enabled only while threads were in use, but threads are now essentially always in use, so there is no point. Furthermore, this is only needed for guarded allocation with --debug-memory, which is not performance critical.
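
For context, the mechanism being removed worked roughly as follows. This is a condensed sketch reconstructed from the deleted code in the diff below (not a drop-in implementation): callers bracketed threaded work with a begin/end pair, and only the first nesting level registered spinlock-backed lock callbacks with the guarded allocator.

    /* Reference-counted registration of malloc lock callbacks,
     * condensed from the removed code in threads.c. */
    static SpinLock _malloc_lock;
    static unsigned int thread_levels = 0;

    static void lock_malloc(void)   { BLI_spin_lock(&_malloc_lock); }
    static void unlock_malloc(void) { BLI_spin_unlock(&_malloc_lock); }

    void BLI_threaded_malloc_begin(void)
    {
      /* First caller to enter a threaded section installs the callbacks. */
      if (atomic_fetch_and_add_u(&thread_levels, 1) == 0) {
        MEM_set_lock_callback(lock_malloc, unlock_malloc);
      }
    }

    void BLI_threaded_malloc_end(void)
    {
      /* Last caller to leave removes them again. */
      if (atomic_sub_and_fetch_u(&thread_levels, 1) == 0) {
        MEM_set_lock_callback(NULL, NULL);
      }
    }

With guarded allocation now always thread safe, this bookkeeping, and the window in which allocation was not protected, disappears.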
Diffstat (limited to 'source/blender/blenlib')
-rw-r--r--  source/blender/blenlib/BLI_threads.h         |  3 ---
-rw-r--r--  source/blender/blenlib/intern/task_pool.cc   | 10 ----------
-rw-r--r--  source/blender/blenlib/intern/task_range.cc  |  4 ----
-rw-r--r--  source/blender/blenlib/intern/threads.c      | 39 -----------------------
4 files changed, 0 insertions(+), 56 deletions(-)
diff --git a/source/blender/blenlib/BLI_threads.h b/source/blender/blenlib/BLI_threads.h
index c199417017b..03fe27c10ed 100644
--- a/source/blender/blenlib/BLI_threads.h
+++ b/source/blender/blenlib/BLI_threads.h
@@ -58,9 +58,6 @@ void BLI_threadpool_clear(struct ListBase *threadbase);
void BLI_threadpool_end(struct ListBase *threadbase);
int BLI_thread_is_main(void);
-void BLI_threaded_malloc_begin(void);
-void BLI_threaded_malloc_end(void);
-
/* System Information */
int BLI_system_thread_count(void); /* gets the number of threads the system can make use of */
diff --git a/source/blender/blenlib/intern/task_pool.cc b/source/blender/blenlib/intern/task_pool.cc
index 670787697a3..cf328ec407c 100644
--- a/source/blender/blenlib/intern/task_pool.cc
+++ b/source/blender/blenlib/intern/task_pool.cc
@@ -364,14 +364,6 @@ static void background_task_pool_free(TaskPool *pool)
static TaskPool *task_pool_create_ex(void *userdata, TaskPoolType type, TaskPriority priority)
{
- /* Ensure malloc will go fine from threads,
- *
- * This is needed because we could be in main thread here
- * and malloc could be non-thread safe at this point because
- * no other jobs are running.
- */
- BLI_threaded_malloc_begin();
-
const bool use_threads = BLI_task_scheduler_num_threads() > 1 && type != TASK_POOL_NO_THREADS;
/* Background task pool uses regular TBB scheduling if available. Only when
@@ -475,8 +467,6 @@ void BLI_task_pool_free(TaskPool *pool)
BLI_mutex_end(&pool->user_mutex);
MEM_freeN(pool);
-
- BLI_threaded_malloc_end();
}
void BLI_task_pool_push(TaskPool *pool,
diff --git a/source/blender/blenlib/intern/task_range.cc b/source/blender/blenlib/intern/task_range.cc
index da38c8fd352..67d8960434e 100644
--- a/source/blender/blenlib/intern/task_range.cc
+++ b/source/blender/blenlib/intern/task_range.cc
@@ -115,8 +115,6 @@ void BLI_task_parallel_range(const int start,
#ifdef WITH_TBB
/* Multithreading. */
if (settings->use_threading && BLI_task_scheduler_num_threads() > 1) {
- BLI_threaded_malloc_begin();
-
RangeTask task(func, userdata, settings);
const size_t grainsize = MAX2(settings->min_iter_per_thread, 1);
const tbb::blocked_range<int> range(start, stop, grainsize);
@@ -130,8 +128,6 @@ void BLI_task_parallel_range(const int start,
else {
parallel_for(range, task);
}
-
- BLI_threaded_malloc_end();
return;
}
#endif
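
The two caller-side files above reduce to the same before/after pattern. An illustrative sketch (details elided):

    /* Before: every threaded entry point had to bracket its work. */
    BLI_threaded_malloc_begin();
    parallel_for(range, task);
    BLI_threaded_malloc_end();

    /* After: allocation is always thread safe, so the bracketing
     * (and its atomic level counting) is simply gone. */
    parallel_for(range, task);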
diff --git a/source/blender/blenlib/intern/threads.c b/source/blender/blenlib/intern/threads.c
index f535798f86d..be43c27e945 100644
--- a/source/blender/blenlib/intern/threads.c
+++ b/source/blender/blenlib/intern/threads.c
@@ -104,7 +104,6 @@ static void *thread_tls_data;
* BLI_threadpool_end(&lb);
*
************************************************ */
-static SpinLock _malloc_lock;
static pthread_mutex_t _image_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t _image_draw_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t _viewer_lock = PTHREAD_MUTEX_INITIALIZER;
@@ -132,21 +131,9 @@ typedef struct ThreadSlot {
int avail;
} ThreadSlot;
-static void BLI_lock_malloc_thread(void)
-{
- BLI_spin_lock(&_malloc_lock);
-}
-
-static void BLI_unlock_malloc_thread(void)
-{
- BLI_spin_unlock(&_malloc_lock);
-}
-
void BLI_threadapi_init(void)
{
mainid = pthread_self();
-
- BLI_spin_init(&_malloc_lock);
if (numaAPI_Initialize() == NUMAAPI_SUCCESS) {
is_numa_available = true;
}
@@ -154,7 +141,6 @@ void BLI_threadapi_init(void)
void BLI_threadapi_exit(void)
{
- BLI_spin_end(&_malloc_lock);
}
/* tot = 0 only initializes malloc mutex in a safe way (see sequence.c)
@@ -185,8 +171,6 @@ void BLI_threadpool_init(ListBase *threadbase, void *(*do_thread)(void *), int t
unsigned int level = atomic_fetch_and_add_u(&thread_levels, 1);
if (level == 0) {
- MEM_set_lock_callback(BLI_lock_malloc_thread, BLI_unlock_malloc_thread);
-
#ifdef USE_APPLE_OMP_FIX
/* workaround for Apple gcc 4.2.1 omp vs background thread bug,
* we copy gomp thread local storage pointer to setting it again
@@ -313,11 +297,6 @@ void BLI_threadpool_end(ListBase *threadbase)
}
BLI_freelistN(threadbase);
}
-
- unsigned int level = atomic_sub_and_fetch_u(&thread_levels, 1);
- if (level == 0) {
- MEM_set_lock_callback(NULL, NULL);
- }
}
/* System Information */
@@ -811,24 +790,6 @@ void BLI_thread_queue_wait_finish(ThreadQueue *queue)
pthread_mutex_unlock(&queue->mutex);
}
-/* ************************************************ */
-
-void BLI_threaded_malloc_begin(void)
-{
- unsigned int level = atomic_fetch_and_add_u(&thread_levels, 1);
- if (level == 0) {
- MEM_set_lock_callback(BLI_lock_malloc_thread, BLI_unlock_malloc_thread);
- }
-}
-
-void BLI_threaded_malloc_end(void)
-{
- unsigned int level = atomic_sub_and_fetch_u(&thread_levels, 1);
- if (level == 0) {
- MEM_set_lock_callback(NULL, NULL);
- }
-}
-
/* **** Special functions to help performance on crazy NUMA setups. **** */
#if 0 /* UNUSED */
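
(The diffstat above is limited to blenlib, so the allocator side of the change is not shown here. For the commit title to hold, the guarded allocator itself must now take its own lock unconditionally rather than relying on externally registered callbacks. A minimal sketch of that invariant, assuming a pthread mutex inside the allocator; the real implementation lives in intern/guardedalloc and may differ:)

    /* Hypothetical sketch: the allocator owns its lock, so MEM_mallocN
     * and friends are thread safe without any caller-side setup. */
    static pthread_mutex_t mem_lock = PTHREAD_MUTEX_INITIALIZER;

    static void mem_lock_thread(void)
    {
      pthread_mutex_lock(&mem_lock);
    }

    static void mem_unlock_thread(void)
    {
      pthread_mutex_unlock(&mem_lock);
    }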