git.blender.org/blender.git
Diffstat (limited to 'source/blender/depsgraph')
-rw-r--r--  source/blender/depsgraph/intern/eval/deg_eval.cc | 145
1 file changed, 120 insertions(+), 25 deletions(-)
diff --git a/source/blender/depsgraph/intern/eval/deg_eval.cc b/source/blender/depsgraph/intern/eval/deg_eval.cc
index aca20955ba4..ff30bf7571a 100644
--- a/source/blender/depsgraph/intern/eval/deg_eval.cc
+++ b/source/blender/depsgraph/intern/eval/deg_eval.cc
@@ -31,6 +31,7 @@
#include "BLI_utildefines.h"
#include "BLI_task.h"
#include "BLI_ghash.h"
+#include "BLI_gsqueue.h"
#include "BKE_global.h"
@@ -56,7 +57,22 @@ namespace DEG {
namespace {
-void schedule_children(TaskPool *pool, Depsgraph *graph, OperationNode *node, const int thread_id);
+struct DepsgraphEvalState;
+
+void deg_task_run_func(TaskPool *pool, void *taskdata, int thread_id);
+
+template<typename ScheduleFunction, typename... ScheduleFunctionArgs>
+void schedule_children(DepsgraphEvalState *state,
+ OperationNode *node,
+ const int thread_id,
+ ScheduleFunction *schedule_function,
+ ScheduleFunctionArgs... schedule_function_args);
+
+void schedule_node_to_pool(OperationNode *node, const int thread_id, TaskPool *pool)
+{
+ BLI_task_pool_push_from_thread(
+ pool, deg_task_run_func, node, false, TASK_PRIORITY_HIGH, thread_id);
+}
/* Denotes which part of dependency graph is being evaluated. */
enum class EvaluationStage {
@@ -67,33 +83,50 @@ enum class EvaluationStage {
/* Threaded evaluation of all possible operations. */
THREADED_EVALUATION,
+
+ /* Workaround for areas which cannot be evaluated in threads.
+ *
+ * For example, metaballs, which iterate over all bases and request duplilists
+ * to check whether they contain metaballs. */
+ SINGLE_THREADED_WORKAROUND,
};
struct DepsgraphEvalState {
Depsgraph *graph;
bool do_stats;
EvaluationStage stage;
+ bool need_single_thread_pass;
};
-void deg_task_run_func(TaskPool *pool, void *taskdata, int thread_id)
+void evaluate_node(const DepsgraphEvalState *state, OperationNode *operation_node)
{
- void *userdata_v = BLI_task_pool_userdata(pool);
- DepsgraphEvalState *state = (DepsgraphEvalState *)userdata_v;
- OperationNode *node = (OperationNode *)taskdata;
+ ::Depsgraph *depsgraph = reinterpret_cast<::Depsgraph *>(state->graph);
+
/* Sanity checks. */
- BLI_assert(!node->is_noop() && "NOOP nodes should not actually be scheduled");
+ BLI_assert(!operation_node->is_noop() && "NOOP nodes should not actually be scheduled");
/* Perform operation. */
if (state->do_stats) {
const double start_time = PIL_check_seconds_timer();
- node->evaluate((::Depsgraph *)state->graph);
- node->stats.current_time += PIL_check_seconds_timer() - start_time;
+ operation_node->evaluate(depsgraph);
+ operation_node->stats.current_time += PIL_check_seconds_timer() - start_time;
}
else {
- node->evaluate((::Depsgraph *)state->graph);
+ operation_node->evaluate(depsgraph);
}
+}
+
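
Note how evaluation is split out of the task callback above, so that the same evaluate_node() can serve both the threaded pass and the single-threaded pass added later in this patch. Below is a minimal standalone sketch of its conditional-timing pattern, assuming std::chrono in place of Blender's PIL timer (timed_call is an illustrative name, not Blender API):

#include <chrono>

/* Sketch: time a callable only when statistics are requested, mirroring
 * the do_stats branch in evaluate_node(). */
template<typename Fn> double timed_call(const bool do_stats, Fn &&fn)
{
  if (!do_stats) {
    fn();
    return 0.0;
  }
  const auto start = std::chrono::steady_clock::now();
  fn();
  const std::chrono::duration<double> elapsed = std::chrono::steady_clock::now() - start;
  return elapsed.count(); /* Seconds; the caller accumulates per-node stats. */
}
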
+void deg_task_run_func(TaskPool *pool, void *taskdata, int thread_id)
+{
+ void *userdata_v = BLI_task_pool_userdata(pool);
+ DepsgraphEvalState *state = (DepsgraphEvalState *)userdata_v;
+
+ /* Evaluate node. */
+ OperationNode *operation_node = reinterpret_cast<OperationNode *>(taskdata);
+ evaluate_node(state, operation_node);
+
/* Schedule children. */
BLI_task_pool_delayed_push_begin(pool, thread_id);
- schedule_children(pool, state->graph, node, thread_id);
+ schedule_children(state, operation_node, thread_id, schedule_node_to_pool, pool);
BLI_task_pool_delayed_push_end(pool, thread_id);
}
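
The broader refactor in this hunk makes the scheduling helpers generic over a dispatch function: the same traversal can push nodes into a TaskPool (schedule_node_to_pool) or, later in the patch, into a GSQueue. A self-contained sketch of the pattern, with illustrative names (Node, push_to_pool, push_to_queue) that are not Blender API:

#include <cstdio>
#include <queue>
#include <vector>

struct Node {
  int id;
  std::vector<Node *> children;
};

/* Generic traversal: how a node is dispatched is supplied by the caller,
 * mirroring schedule_function / schedule_function_args in the patch. */
template<typename ScheduleFunction, typename... Args>
void schedule_children(Node *node, ScheduleFunction *schedule_function, Args... args)
{
  for (Node *child : node->children) {
    schedule_function(child, args...);
  }
}

/* Backend 1: stands in for BLI_task_pool_push_from_thread(). */
void push_to_pool(Node *node, std::vector<Node *> *pool)
{
  pool->push_back(node);
}

/* Backend 2: stands in for BLI_gsqueue_push(). */
void push_to_queue(Node *node, std::queue<Node *> *queue)
{
  queue->push(node);
}

int main()
{
  Node leaf_a{1, {}}, leaf_b{2, {}};
  Node root{0, {&leaf_a, &leaf_b}};

  std::vector<Node *> pool;
  schedule_children(&root, push_to_pool, &pool);

  std::queue<Node *> queue;
  schedule_children(&root, push_to_queue, &queue);

  std::printf("pooled=%zu queued=%zu\n", pool.size(), queue.size());
  return 0;
}

Passing the backend as a template parameter lets each call site forward its own payload (pool or queue) directly, instead of routing everything through the task pool's void *userdata as the old code did.
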
@@ -159,7 +192,18 @@ void initialize_execution(DepsgraphEvalState *state, Depsgraph *graph)
}
}
-bool need_evaluate_operation_at_stage(const DepsgraphEvalState *state,
+bool is_metaball_object_operation(const OperationNode *operation_node)
+{
+ const ComponentNode *component_node = operation_node->owner;
+ const IDNode *id_node = component_node->owner;
+ if (GS(id_node->id_cow->name) != ID_OB) {
+ return false;
+ }
+ const Object *object = reinterpret_cast<const Object *>(id_node->id_cow);
+ return object->type == OB_MBALL;
+}
+
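
The helper above walks the ownership chain, operation node to component node to ID node, before checking the datablock type. A simplified sketch of that walk, with stand-in types (the real code tests GS(id->name) == ID_OB and Object::type == OB_MBALL):

/* Illustrative types only; not Blender DNA. */
struct IdBlock { bool is_object; bool is_metaball; };
struct Component { const IdBlock *owner; };
struct Operation { const Component *owner; };

bool is_metaball_op(const Operation *op)
{
  /* Operation -> component -> owning ID datablock. */
  const IdBlock *id = op->owner->owner;
  return id->is_object && id->is_metaball;
}
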
+bool need_evaluate_operation_at_stage(DepsgraphEvalState *state,
const OperationNode *operation_node)
{
const ComponentNode *component_node = operation_node->owner;
@@ -172,6 +216,13 @@ bool need_evaluate_operation_at_stage(const DepsgraphEvalState *state,
* scheduled flag (we assume that scheduled operations have actually been handled by the
* previous stage). */
BLI_assert(operation_node->scheduled || component_node->type != NodeType::COPY_ON_WRITE);
+ if (is_metaball_object_operation(operation_node)) {
+ state->need_single_thread_pass = true;
+ return false;
+ }
+ return true;
+
+ case EvaluationStage::SINGLE_THREADED_WORKAROUND:
return true;
}
BLI_assert(!"Unhandled evaluation stage, should never happen.");
@@ -182,8 +233,13 @@ bool need_evaluate_operation_at_stage(const DepsgraphEvalState *state,
* dec_parents: Decrement pending parents count, true when child nodes are
* scheduled after a task has been completed.
*/
-void schedule_node(
- TaskPool *pool, Depsgraph *graph, OperationNode *node, bool dec_parents, const int thread_id)
+template<typename ScheduleFunction, typename... ScheduleFunctionArgs>
+void schedule_node(DepsgraphEvalState *state,
+ OperationNode *node,
+ bool dec_parents,
+ const int thread_id,
+ ScheduleFunction *schedule_function,
+ ScheduleFunctionArgs... schedule_function_args)
{
/* No need to schedule nodes of invisible ID. */
if (!check_operation_node_visible(node)) {
@@ -206,7 +262,6 @@ void schedule_node(
return;
}
/* During the COW stage only schedule COW nodes. */
- const DepsgraphEvalState *state = (DepsgraphEvalState *)BLI_task_pool_userdata(pool);
if (!need_evaluate_operation_at_stage(state, node)) {
return;
}
@@ -215,24 +270,31 @@ void schedule_node(
if (!is_scheduled) {
if (node->is_noop()) {
/* skip NOOP node, schedule children right away */
- schedule_children(pool, graph, node, thread_id);
+ schedule_children(state, node, thread_id, schedule_function, schedule_function_args...);
}
else {
/* children are scheduled once this task is completed */
- BLI_task_pool_push_from_thread(
- pool, deg_task_run_func, node, false, TASK_PRIORITY_HIGH, thread_id);
+ schedule_function(node, thread_id, schedule_function_args...);
}
}
}
-void schedule_graph(TaskPool *pool, Depsgraph *graph)
+template<typename ScheduleFunction, typename... ScheduleFunctionArgs>
+void schedule_graph(DepsgraphEvalState *state,
+ ScheduleFunction *schedule_function,
+ ScheduleFunctionArgs... schedule_function_args)
{
- for (OperationNode *node : graph->operations) {
- schedule_node(pool, graph, node, false, -1);
+ for (OperationNode *node : state->graph->operations) {
+ schedule_node(state, node, false, -1, schedule_function, schedule_function_args...);
}
}
-void schedule_children(TaskPool *pool, Depsgraph *graph, OperationNode *node, const int thread_id)
+template<typename ScheduleFunction, typename... ScheduleFunctionArgs>
+void schedule_children(DepsgraphEvalState *state,
+ OperationNode *node,
+ const int thread_id,
+ ScheduleFunction *schedule_function,
+ ScheduleFunctionArgs... schedule_function_args)
{
for (Relation *rel : node->outlinks) {
OperationNode *child = (OperationNode *)rel->to;
@@ -241,8 +303,36 @@ schedule_children(TaskPool *pool, Depsgraph *graph, OperationNode *node, const int thread_id)
/* Happens when having cyclic dependencies. */
continue;
}
- schedule_node(pool, graph, child, (rel->flag & RELATION_FLAG_CYCLIC) == 0, thread_id);
+ schedule_node(state,
+ child,
+ (rel->flag & RELATION_FLAG_CYCLIC) == 0,
+ thread_id,
+ schedule_function,
+ schedule_function_args...);
+ }
+}
+
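
As the comment before schedule_node() explains, dec_parents decrements a child's pending-parent count when a parent task completes, except across cyclic relations. A sketch of the underlying gate, assuming a hypothetical atomic counter (the real bookkeeping lives in OperationNode):

#include <atomic>

/* Sketch: the last finishing parent is the one that schedules the child. */
struct ChildNode {
  std::atomic<int> num_pending_parents{0};
};

bool parent_finished(ChildNode *child)
{
  /* fetch_sub() returns the previous value; 1 means we were the last
   * unfinished parent, so the child is now ready to run. */
  return child->num_pending_parents.fetch_sub(1) == 1;
}
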
+void schedule_node_to_queue(OperationNode *node,
+ const int /*thread_id*/,
+ GSQueue *evaluation_queue)
+{
+ BLI_gsqueue_push(evaluation_queue, &node);
+}
+
+void evaluate_graph_single_threaded(DepsgraphEvalState *state)
+{
+ GSQueue *evaluation_queue = BLI_gsqueue_new(sizeof(OperationNode *));
+ schedule_graph(state, schedule_node_to_queue, evaluation_queue);
+
+ while (!BLI_gsqueue_is_empty(evaluation_queue)) {
+ OperationNode *operation_node;
+ BLI_gsqueue_pop(evaluation_queue, &operation_node);
+
+ evaluate_node(state, operation_node);
+ schedule_children(state, operation_node, 0, schedule_node_to_queue, evaluation_queue);
}
+
+ BLI_gsqueue_free(evaluation_queue);
}
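
The single-threaded pass drains a FIFO: schedule_graph() seeds the queue, then each popped node is evaluated and its not-yet-scheduled children are appended. A standalone sketch of the same loop, assuming std::queue in place of GSQueue and an illustrative Node type:

#include <cstdio>
#include <queue>
#include <vector>

struct Node {
  int id;
  std::vector<Node *> children;
  bool scheduled = false;
};

void evaluate_serially(Node *root)
{
  std::queue<Node *> queue;
  queue.push(root);
  root->scheduled = true;

  while (!queue.empty()) {
    Node *node = queue.front();
    queue.pop();
    std::printf("evaluate node %d\n", node->id); /* Stands in for evaluate_node(). */

    /* Schedule each child exactly once, as schedule_node() does via its
     * scheduled flag. */
    for (Node *child : node->children) {
      if (!child->scheduled) {
        child->scheduled = true;
        queue.push(child);
      }
    }
  }
}

BLI_gsqueue is Blender's generic C FIFO; std::queue plays the same role in this sketch.
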
void depsgraph_ensure_view_layer(Depsgraph *graph)
@@ -282,6 +372,7 @@ void deg_evaluate_on_refresh(Depsgraph *graph)
DepsgraphEvalState state;
state.graph = graph;
state.do_stats = do_time_debug;
+ state.need_single_thread_pass = false;
/* Set up task scheduler and pool for threaded evaluation. */
TaskScheduler *task_scheduler;
bool need_free_scheduler;
@@ -301,16 +392,20 @@ void deg_evaluate_on_refresh(Depsgraph *graph)
/* First, process all Copy-On-Write nodes. */
state.stage = EvaluationStage::COPY_ON_WRITE;
- schedule_graph(task_pool, graph);
+ schedule_graph(&state, schedule_node_to_pool, task_pool);
BLI_task_pool_work_wait_and_reset(task_pool);
/* After that, process all other nodes. */
state.stage = EvaluationStage::THREADED_EVALUATION;
- schedule_graph(task_pool, graph);
+ schedule_graph(&state, schedule_node_to_pool, task_pool);
BLI_task_pool_work_and_wait(task_pool);
-
BLI_task_pool_free(task_pool);
+ if (state.need_single_thread_pass) {
+ state.stage = EvaluationStage::SINGLE_THREADED_WORKAROUND;
+ evaluate_graph_single_threaded(&state);
+ }
+
/* Finalize statistics gathering. Only single-operation timings are gathered
* during evaluation, without any aggregation, to avoid extra
* synchronization. */
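
Taken together, deg_evaluate_on_refresh() now runs up to three stages, with the serial stage entered only when the threaded stage flagged deferred work. A final sketch of that driver shape, with hypothetical run_threaded_pass/run_serial_pass helpers standing in for the pool and queue passes above:

enum class Stage { COPY_ON_WRITE, THREADED, SERIAL_WORKAROUND };

struct EvalState {
  Stage stage = Stage::COPY_ON_WRITE;
  bool need_serial_pass = false;
};

/* Hypothetical: schedule_graph() into a task pool, then work-and-wait. */
void run_threaded_pass(EvalState * /*state*/) {}
/* Hypothetical: the queue-draining loop sketched earlier. */
void run_serial_pass(EvalState * /*state*/) {}

void evaluate(EvalState *state)
{
  state->stage = Stage::COPY_ON_WRITE; /* First, all copy-on-write nodes. */
  run_threaded_pass(state);

  state->stage = Stage::THREADED; /* Then everything thread-safe. */
  run_threaded_pass(state); /* May raise need_serial_pass for metaballs. */

  if (state->need_serial_pass) {
    state->stage = Stage::SERIAL_WORKAROUND; /* Finally, the leftovers. */
    run_serial_pass(state);
  }
}
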