diff options
author | Sergey Sharybin <sergey.vfx@gmail.com> | 2016-05-10 10:55:58 +0300 |
---|---|---|
committer | Sergey Sharybin <sergey.vfx@gmail.com> | 2016-05-10 11:01:24 +0300 |
commit | 7efa34d078336bb366ac5b099e56a2896176f599 (patch) | |
tree | 1b650c155105cd2167d839d07c8048fdbb72060b /source/blender/depsgraph | |
parent | 401e7108075c46808bcc5e008e11767dcd09c04e (diff) |
Task scheduler: Add thread-aware task push routines
This commit implements a new function, BLI_task_pool_push_from_thread(),
whose main goal is to put less parasitic load on the CPU by avoiding
memory allocations as much as possible, making task pushing cheaper.
This function expects thread ID, which must be 0 for the thread from
which pool is created from (and from which wait_work() is called) and
for other threads it must be the ID which was sent to the thread working
function.
This reduces allocations quite a bit in the new dependency graph,
hopefully gaining some visible speedup on a fewzillion core machines
(on my own machine I can only see the benefit in the profiler, which shows a
significant reduction of the time wasted in memory allocation).
Diffstat (limited to 'source/blender/depsgraph')
-rw-r--r-- | source/blender/depsgraph/intern/depsgraph_eval.cc | 26 |
1 files changed, 17 insertions, 9 deletions
diff --git a/source/blender/depsgraph/intern/depsgraph_eval.cc b/source/blender/depsgraph/intern/depsgraph_eval.cc index adacbb6cb1a..e96ee613f37 100644 --- a/source/blender/depsgraph/intern/depsgraph_eval.cc +++ b/source/blender/depsgraph/intern/depsgraph_eval.cc @@ -122,7 +122,8 @@ void DEG_evaluation_context_free(EvaluationContext *eval_ctx) static void schedule_children(TaskPool *pool, Depsgraph *graph, OperationDepsNode *node, - const int layers); + const int layers, + const int thread_id); struct DepsgraphEvalState { EvaluationContext *eval_ctx; @@ -132,7 +133,7 @@ struct DepsgraphEvalState { static void deg_task_run_func(TaskPool *pool, void *taskdata, - int UNUSED(threadid)) + int thread_id) { DepsgraphEvalState *state = (DepsgraphEvalState *)BLI_task_pool_userdata(pool); OperationDepsNode *node = (OperationDepsNode *)taskdata; @@ -161,7 +162,7 @@ static void deg_task_run_func(TaskPool *pool, node, end_time - start_time); - schedule_children(pool, state->graph, node, state->layers); + schedule_children(pool, state->graph, node, state->layers, thread_id); } static void calculate_pending_parents(Depsgraph *graph, int layers) @@ -235,7 +236,8 @@ static void calculate_eval_priority(OperationDepsNode *node) * after a task has been completed. 
*/ static void schedule_node(TaskPool *pool, Depsgraph *graph, int layers, - OperationDepsNode *node, bool dec_parents) + OperationDepsNode *node, bool dec_parents, + const int thread_id) { int id_layers = node->owner->owner->layers; @@ -252,11 +254,16 @@ static void schedule_node(TaskPool *pool, Depsgraph *graph, int layers, if (!is_scheduled) { if (node->is_noop()) { /* skip NOOP node, schedule children right away */ - schedule_children(pool, graph, node, layers); + schedule_children(pool, graph, node, layers, thread_id); } else { /* children are scheduled once this task is completed */ - BLI_task_pool_push(pool, deg_task_run_func, node, false, TASK_PRIORITY_LOW); + BLI_task_pool_push_from_thread(pool, + deg_task_run_func, + node, + false, + TASK_PRIORITY_LOW, + thread_id); } } } @@ -272,14 +279,15 @@ static void schedule_graph(TaskPool *pool, ++it) { OperationDepsNode *node = *it; - schedule_node(pool, graph, layers, node, false); + schedule_node(pool, graph, layers, node, false, 0); } } static void schedule_children(TaskPool *pool, Depsgraph *graph, OperationDepsNode *node, - const int layers) + const int layers, + const int thread_id) { DEPSNODE_RELATIONS_ITER_BEGIN(node->outlinks, rel) { @@ -289,7 +297,7 @@ static void schedule_children(TaskPool *pool, /* Happens when having cyclic dependencies. */ continue; } - schedule_node(pool, graph, layers, child, (rel->flag & DEPSREL_FLAG_CYCLIC) == 0); + schedule_node(pool, graph, layers, child, (rel->flag & DEPSREL_FLAG_CYCLIC) == 0, thread_id); } DEPSNODE_RELATIONS_ITER_END; } |