Welcome to the mirror list, hosted at ThFree Co, Russian Federation.

git.blender.org/blender.git - Unnamed repository; edit this file 'description' to name the repository.
summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorBastien Montagne <montagne29@wanadoo.fr>2016-12-23 00:06:34 +0300
committerBastien Montagne <montagne29@wanadoo.fr>2017-03-03 19:42:33 +0300
commit1659d42bf704db674adce621bb071d206eefe024 (patch)
tree875896721e092f867667701930d8000a7d67b7b1 /source/blender
parentfbe84185deba4c1fdb8cb1322c7f7a00faca2813 (diff)
Fix use-after-free concurrency issues.
Diffstat (limited to 'source/blender')
-rw-r--r--source/blender/blenlib/intern/task.c17
1 file changed, 12 insertions, 5 deletions
diff --git a/source/blender/blenlib/intern/task.c b/source/blender/blenlib/intern/task.c
index d4a111d2ee0..eebe7f12589 100644
--- a/source/blender/blenlib/intern/task.c
+++ b/source/blender/blenlib/intern/task.c
@@ -240,17 +240,20 @@ static void task_free(TaskPool *pool, Task *task, const int thread_id)
static void task_pool_num_decrease(TaskPool *pool, size_t done)
{
BLI_assert(pool->num >= done);
+ TaskScheduler *scheduler = pool->scheduler;
const size_t num = atomic_sub_and_fetch_z(&pool->num, done);
+ /* WARNING! do not use pool anymore, it might be already freed by concurrent thread! */
+
/* This is needed for several things:
* - Wake up all sleeping threads on exit, before we join them.
* - Wake up 'main' thread itself in case it called BLI_task_pool_work_and_wait() and ended up sleeping there.
* - Wake up 'main' thread itself in case it called BLI_task_pool_cancel() and ended up sleeping there. */
- if (num == 0 && pool->scheduler->num_workers_sleeping != 0) {
- BLI_mutex_lock(&pool->scheduler->workers_mutex);
- BLI_condition_notify_all(&pool->scheduler->workers_condition);
- BLI_mutex_unlock(&pool->scheduler->workers_mutex);
+ if (num == 0 && scheduler->num_workers_sleeping != 0) {
+ BLI_mutex_lock(&scheduler->workers_mutex);
+ BLI_condition_notify_all(&scheduler->workers_condition);
+ BLI_mutex_unlock(&scheduler->workers_mutex);
}
}
@@ -514,6 +517,8 @@ int BLI_task_scheduler_num_threads(TaskScheduler *scheduler)
static void task_scheduler_push(TaskScheduler *scheduler, Task *task, TaskPriority priority)
{
+ TaskPool *pool = task->pool;
+
/* add task to queue */
BLI_spin_lock(&scheduler->queue_spinlock);
@@ -524,7 +529,9 @@ static void task_scheduler_push(TaskScheduler *scheduler, Task *task, TaskPriori
BLI_spin_unlock(&scheduler->queue_spinlock);
- task_pool_num_increase(task->pool);
+ /* WARNING! do not use task anymore, it might be already processed and freed by concurrent thread! */
+
+ task_pool_num_increase(pool);
// atomic_add_and_fetch_z(&scheduler->num_queued, 1);
}