From 08b0b1c439c8b4f502b7ad503c4de50194280299 Mon Sep 17 00:00:00 2001
From: Bastien Montagne
Date: Fri, 23 Dec 2016 12:11:11 +0100
Subject: Revert "Attempt to address nearly-starving cases."

This reverts commit 32959917ee112200125e3e742afb528fc2196072.

Definitely gives worse performance. It looks like any overhead we add to
task management always costs more than the better scheduling it might buy
us...
---
 source/blender/blenlib/BLI_task.h    |  2 +-
 source/blender/blenlib/intern/task.c | 41 ++++++------------------------------
 2 files changed, 8 insertions(+), 35 deletions(-)

(limited to 'source/blender')

diff --git a/source/blender/blenlib/BLI_task.h b/source/blender/blenlib/BLI_task.h
index 0de0dec770d..d27bf4dad20 100644
--- a/source/blender/blenlib/BLI_task.h
+++ b/source/blender/blenlib/BLI_task.h
@@ -97,7 +97,7 @@ void BLI_task_pool_work_and_wait(TaskPool *pool);
 void BLI_task_pool_cancel(TaskPool *pool);
 
 /* set number of threads allowed to be used by this pool */
-void BLI_pool_set_num_threads(TaskPool *pool, size_t num_threads_max);
+void BLI_pool_set_num_threads(TaskPool *pool, int num_threads);
 
 /* for worker threads, test if canceled */
 bool BLI_task_pool_canceled(TaskPool *pool);
diff --git a/source/blender/blenlib/intern/task.c b/source/blender/blenlib/intern/task.c
index 4d8becf8e07..560ad7f88e9 100644
--- a/source/blender/blenlib/intern/task.c
+++ b/source/blender/blenlib/intern/task.c
@@ -110,7 +110,6 @@ struct TaskPool {
 	TaskScheduler *scheduler;
 
 	size_t num;
-	size_t num_threads_max;
 	size_t num_threads;
 	size_t currently_running_tasks;
 
@@ -300,10 +299,8 @@ BLI_INLINE bool task_find(
 				continue;
 			}
 
-			/* Order is important, we do not want to increase currently_running_tasks if we are in main thread
-			 * (run_and_wait)! */
-			if (is_main ||
-			    atomic_add_and_fetch_z(&current_pool->currently_running_tasks, 1) <= current_pool->num_threads)
+			if (atomic_add_and_fetch_z(&current_pool->currently_running_tasks, 1) <= current_pool->num_threads ||
+			    is_main || current_pool->num_threads == 0)
 			{
 				*task = current_task;
 				found_task = true;
@@ -584,9 +581,7 @@ static TaskPool *task_pool_create_ex(TaskScheduler *scheduler, void *userdata, c
 
 	pool->scheduler = scheduler;
 	pool->num = 0;
-	/* Do not use BLI_task_scheduler_num_threads(scheduler) here, we want number of workers, without main thread! */
-	pool->num_threads_max = scheduler->num_threads;
-	pool->num_threads = pool->num_threads_max;
+	pool->num_threads = 0;
 	pool->currently_running_tasks = 0;
 	pool->do_cancel = false;
 	pool->run_in_background = is_background;
@@ -673,14 +668,12 @@ void BLI_task_pool_free(TaskPool *pool)
 	BLI_end_threaded_malloc();
 }
 
-#include "PIL_time_utildefines.h"
+
 static void task_pool_push(
         TaskPool *pool, TaskRunFunction run, void *taskdata,
         bool free_taskdata, TaskFreeFunction freedata, TaskPriority priority,
         int thread_id)
 {
-	static int i = 1;
-
 	Task *task = task_alloc(pool, thread_id);
 
 	task->run = run;
@@ -690,23 +683,6 @@ static void task_pool_push(
 	task->pool = pool;
 
 	task_scheduler_push(pool->scheduler, task, priority);
-
-#if 1
-	if ((i++ % 200) == 0) {
-		const size_t min_threads = 1;  /* At least one worker. */
-//		printf("%s: %lu, %lu -> ", __func__, pool->num, pool->num_threads);
-		if (pool->num < pool->num_threads / 2 && pool->num_threads > min_threads) {
-			if (atomic_sub_and_fetch_z(&pool->num_threads, 1) < min_threads) {
-				pool->num_threads = min_threads;
-			}
-		}
-		else if (pool->num > pool->num_threads && pool->num_threads < pool->num_threads_max) {
-			if (atomic_add_and_fetch_z(&pool->num_threads, 1) > pool->num_threads_max) {
-				pool->num_threads = pool->num_threads_max;
-			}
-		}
-//		printf("%lu\n", pool->num_threads);
-	}
-#endif
 }
 
 void BLI_task_pool_push_ex(
@@ -750,7 +726,7 @@ void BLI_task_pool_work_and_wait(TaskPool *pool)
 			/* delete task */
 			task_free(pool, task, 0);
 
-//			atomic_sub_and_fetch_z(&pool->currently_running_tasks, 1);
+			atomic_sub_and_fetch_z(&pool->currently_running_tasks, 1);
 
 			/* notify pool task was done */
 			task_pool_num_decrease(pool, 1);
@@ -769,11 +745,8 @@ void BLI_task_pool_work_and_wait(TaskPool *pool)
 
-void BLI_pool_set_num_threads(TaskPool *pool, size_t num_threads_max)
+void BLI_pool_set_num_threads(TaskPool *pool, int num_threads)
 {
-	pool->num_threads_max = num_threads_max;
-
-	for (size_t num_threads_old = pool->num_threads;
-	     atomic_cas_z(&pool->num_threads, num_threads_old, min_ii(num_threads_old, num_threads_max)) != num_threads_old;
-	     num_threads_old = pool->num_threads);
+	/* NOTE: Don't try to modify threads while tasks are running! */
+	pool->num_threads = num_threads;
 }
 
 void BLI_task_pool_cancel(TaskPool *pool)
--
cgit v1.2.3
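
For readers unfamiliar with the pattern being reverted: the old
BLI_pool_set_num_threads lowered pool->num_threads with a lock-free
compare-and-swap loop, so the clamp stayed correct even while worker
threads raced on the same field. Below is a minimal standalone sketch of
that idea, re-expressed with C11 atomics; Blender's atomic_cas_z and
min_ii helpers are swapped out here, and the function name is
illustrative, not the actual Blender API.

	#include <stdatomic.h>
	#include <stddef.h>

	/* Lock-free clamp of a shared counter, in the spirit of the CAS
	 * loop removed by this revert. On CAS failure, `old` is refreshed
	 * with the current value and the clamp is retried. */
	static void clamp_num_threads(atomic_size_t *num_threads, size_t num_threads_max)
	{
		size_t old = atomic_load(num_threads);
		while (!atomic_compare_exchange_weak(
		               num_threads, &old,
		               old < num_threads_max ? old : num_threads_max))
		{
			/* `old` now holds the latest value; loop and try again. */
		}
	}

The weak variant may fail spuriously, which is harmless inside a retry
loop; the revert drops this machinery entirely because, per the commit
message, the bookkeeping overhead outweighed any scheduling gain.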