git.blender.org/blender.git

author     Jacques Lucke <jacques@blender.org>   2021-09-09 13:54:20 +0300
committer  Jacques Lucke <jacques@blender.org>   2021-09-09 13:54:20 +0300
commit     bf47fb40fd6f0ee9386e9936cf213a1049c55b61 (patch)
tree       c8bbe7c00b27ac845e4adbc214b7f29ec670a9f3 /source/blender/functions/intern
parent     0f6be4e1520087bfe6d1dc98b61d65686ae09b3f (diff)
Geometry Nodes: fields and anonymous attributes
This implements the initial core framework for fields and anonymous attributes (also see T91274). The new functionality is hidden behind the "Geometry Nodes Fields" feature flag. When enabled in the user preferences, the following new nodes become available: `Position`, `Index`, `Normal`, `Set Position` and `Attribute Capture`. Socket inspection has not been updated to work with fields yet.

Besides these changes at the user level, this patch contains the groundwork for:
* building and evaluating fields at run-time (`FN_field.hh`) and
* creating and accessing anonymous attributes on geometry (`BKE_anonymous_attribute.h`).

For evaluating fields we use a new so-called multi-function procedure (`FN_multi_function_procedure.hh`). It allows composing multi-functions in arbitrary ways and supports the efficient evaluation that fields require. See `FN_multi_function_procedure.hh` for more details on how this evaluation mechanism can be used.

A new `AttributeIDRef` has been added which allows handling named and anonymous attributes in the same way in many places.

Hans and I worked on this patch together.

Differential Revision: https://developer.blender.org/D12414
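For orientation before the diff: a minimal editor's sketch (not part of the patch) of how such a multi-function procedure can be composed and executed. All builder, executor and parameter calls below appear in the files added by this commit; `CPPType::get<int>()`, `std::array` and the demo values are illustrative assumptions, and includes are omitted.

/* Sketch only: a procedure with one integer input that returns a copy of it. */
MFProcedure procedure;
MFProcedureBuilder builder{procedure};

const MFDataType int_type = MFDataType::ForSingle(CPPType::get<int>());
MFVariable &input_var = builder.add_input_parameter(int_type, "Input");

const CustomMF_GenericCopy copy_fn{"copy", int_type};
MFVariable *output_var = builder.add_call<1>(copy_fn, {&input_var})[0];

builder.add_output_parameter(*output_var);
/* Input-only variables have to be uninitialized again when the procedure returns. */
builder.add_destruct(input_var);
builder.add_return();
BLI_assert(procedure.validate());

/* Execute the procedure on three elements. */
MFProcedureExecutor executor{"Copy", procedure};
MFParamsBuilder params{executor, 3};
MFContextBuilder context;

std::array<int, 3> input_values = {1, 2, 3};
std::array<int, 3> output_values;
const CPPType &cpp_type = CPPType::get<int>();
GVArray_For_GSpan input_varray{GSpan(cpp_type, input_values.data(), 3)};
params.add_readonly_single_input(input_varray);
params.add_uninitialized_single_output(GMutableSpan(cpp_type, output_values.data(), 3));

executor.call(IndexRange(3), params, context);
/* output_values now holds {1, 2, 3}. */

The field evaluation in field.cc below does essentially this, but builds the procedure automatically from a tree of field nodes.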
Diffstat (limited to 'source/blender/functions/intern')
-rw-r--r--  source/blender/functions/intern/cpp_types.cc                         |    9
-rw-r--r--  source/blender/functions/intern/field.cc                             |  569
-rw-r--r--  source/blender/functions/intern/multi_function_builder.cc            |   28
-rw-r--r--  source/blender/functions/intern/multi_function_procedure.cc          |  794
-rw-r--r--  source/blender/functions/intern/multi_function_procedure_builder.cc  |  175
-rw-r--r--  source/blender/functions/intern/multi_function_procedure_executor.cc | 1212
6 files changed, 2787 insertions, 0 deletions
diff --git a/source/blender/functions/intern/cpp_types.cc b/source/blender/functions/intern/cpp_types.cc
index 7be34d2a1bf..058fb76af2b 100644
--- a/source/blender/functions/intern/cpp_types.cc
+++ b/source/blender/functions/intern/cpp_types.cc
@@ -15,6 +15,7 @@
*/
#include "FN_cpp_type_make.hh"
+#include "FN_field_cpp_type.hh"
#include "BLI_color.hh"
#include "BLI_float2.hh"
@@ -39,4 +40,12 @@ MAKE_CPP_TYPE(ColorGeometry4b, blender::ColorGeometry4b, CPPTypeFlags::BasicType
MAKE_CPP_TYPE(string, std::string, CPPTypeFlags::BasicType)
+MAKE_FIELD_CPP_TYPE(FloatField, float);
+MAKE_FIELD_CPP_TYPE(Float2Field, float2);
+MAKE_FIELD_CPP_TYPE(Float3Field, float3);
+MAKE_FIELD_CPP_TYPE(ColorGeometry4fField, blender::ColorGeometry4f);
+MAKE_FIELD_CPP_TYPE(BoolField, bool);
+MAKE_FIELD_CPP_TYPE(Int32Field, int32_t);
+MAKE_FIELD_CPP_TYPE(StringField, std::string);
+
} // namespace blender::fn
diff --git a/source/blender/functions/intern/field.cc b/source/blender/functions/intern/field.cc
new file mode 100644
index 00000000000..fa7dea97b7f
--- /dev/null
+++ b/source/blender/functions/intern/field.cc
@@ -0,0 +1,569 @@
+/*
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include "BLI_map.hh"
+#include "BLI_multi_value_map.hh"
+#include "BLI_set.hh"
+#include "BLI_stack.hh"
+#include "BLI_vector_set.hh"
+
+#include "FN_field.hh"
+
+namespace blender::fn {
+
+/* --------------------------------------------------------------------
+ * Field Evaluation.
+ */
+
+struct FieldTreeInfo {
+ /**
+ * When fields are built, they only have references to the fields that they depend on. This map
+ * allows traversal of fields in the opposite direction. So for every field it stores the other
+ * fields that depend on it directly.
+ */
+ MultiValueMap<GFieldRef, GFieldRef> field_users;
+ /**
+ * The same field input may exist in the field tree as separate nodes due to the way
+ * the tree is constructed. This set contains every different input only once.
+ */
+ VectorSet<std::reference_wrapper<const FieldInput>> deduplicated_field_inputs;
+};
+
+/**
+ * Collects some information from the field tree that is required by later steps.
+ */
+static FieldTreeInfo preprocess_field_tree(Span<GFieldRef> entry_fields)
+{
+ FieldTreeInfo field_tree_info;
+
+ Stack<GFieldRef> fields_to_check;
+ Set<GFieldRef> handled_fields;
+
+ for (GFieldRef field : entry_fields) {
+ if (handled_fields.add(field)) {
+ fields_to_check.push(field);
+ }
+ }
+
+ while (!fields_to_check.is_empty()) {
+ GFieldRef field = fields_to_check.pop();
+ if (field.node().is_input()) {
+ const FieldInput &field_input = static_cast<const FieldInput &>(field.node());
+ field_tree_info.deduplicated_field_inputs.add(field_input);
+ continue;
+ }
+ BLI_assert(field.node().is_operation());
+ const FieldOperation &operation = static_cast<const FieldOperation &>(field.node());
+ for (const GFieldRef operation_input : operation.inputs()) {
+ field_tree_info.field_users.add(operation_input, field);
+ if (handled_fields.add(operation_input)) {
+ fields_to_check.push(operation_input);
+ }
+ }
+ }
+ return field_tree_info;
+}
+
+/**
+ * Retrieves the data from the context that is passed as input into the field.
+ */
+static Vector<const GVArray *> get_field_context_inputs(
+ ResourceScope &scope,
+ const IndexMask mask,
+ const FieldContext &context,
+ const Span<std::reference_wrapper<const FieldInput>> field_inputs)
+{
+ Vector<const GVArray *> field_context_inputs;
+ for (const FieldInput &field_input : field_inputs) {
+ const GVArray *varray = context.get_varray_for_input(field_input, mask, scope);
+ if (varray == nullptr) {
+ const CPPType &type = field_input.cpp_type();
+ varray = &scope.construct<GVArray_For_SingleValueRef>(
+ __func__, type, mask.min_array_size(), type.default_value());
+ }
+ field_context_inputs.append(varray);
+ }
+ return field_context_inputs;
+}
+
+/**
+ * \return A set that contains all fields from the field tree that depend on an input that varies
+ * for different indices.
+ */
+static Set<GFieldRef> find_varying_fields(const FieldTreeInfo &field_tree_info,
+ Span<const GVArray *> field_context_inputs)
+{
+ Set<GFieldRef> found_fields;
+ Stack<GFieldRef> fields_to_check;
+
+ /* The varying fields are the ones that depend on inputs that are not constant. Therefore we
+ * start the tree search at the non-constant input fields and traverse through all fields that
+ * depend on them. */
+ for (const int i : field_context_inputs.index_range()) {
+ const GVArray *varray = field_context_inputs[i];
+ if (varray->is_single()) {
+ continue;
+ }
+ const FieldInput &field_input = field_tree_info.deduplicated_field_inputs[i];
+ const GFieldRef field_input_field{field_input, 0};
+ const Span<GFieldRef> users = field_tree_info.field_users.lookup(field_input_field);
+ for (const GFieldRef &field : users) {
+ if (found_fields.add(field)) {
+ fields_to_check.push(field);
+ }
+ }
+ }
+ while (!fields_to_check.is_empty()) {
+ GFieldRef field = fields_to_check.pop();
+ const Span<GFieldRef> users = field_tree_info.field_users.lookup(field);
+ for (GFieldRef field : users) {
+ if (found_fields.add(field)) {
+ fields_to_check.push(field);
+ }
+ }
+ }
+ return found_fields;
+}
+
+/**
+ * Builds the #procedure so that it computes the output fields.
+ */
+static void build_multi_function_procedure_for_fields(MFProcedure &procedure,
+ ResourceScope &scope,
+ const FieldTreeInfo &field_tree_info,
+ Span<GFieldRef> output_fields)
+{
+ MFProcedureBuilder builder{procedure};
+ /* Every input, intermediate and output field corresponds to a variable in the procedure. */
+ Map<GFieldRef, MFVariable *> variable_by_field;
+
+ /* Start by adding the field inputs as parameters to the procedure. */
+ for (const FieldInput &field_input : field_tree_info.deduplicated_field_inputs) {
+ MFVariable &variable = builder.add_input_parameter(
+ MFDataType::ForSingle(field_input.cpp_type()), field_input.debug_name());
+ variable_by_field.add_new({field_input, 0}, &variable);
+ }
+
+ /* Utility struct that is used to do a proper depth-first traversal of the tree below. */
+ struct FieldWithIndex {
+ GFieldRef field;
+ int current_input_index = 0;
+ };
+
+ for (GFieldRef field : output_fields) {
+ /* We start a new stack for each output field to make sure that a field pushed later to the
+ * stack never depends on a field that was pushed before. */
+ Stack<FieldWithIndex> fields_to_check;
+ fields_to_check.push({field, 0});
+ while (!fields_to_check.is_empty()) {
+ FieldWithIndex &field_with_index = fields_to_check.peek();
+ const GFieldRef &field = field_with_index.field;
+ if (variable_by_field.contains(field)) {
+ /* The field has been handled already. */
+ fields_to_check.pop();
+ continue;
+ }
+ /* Field inputs should already be handled above. */
+ BLI_assert(field.node().is_operation());
+
+ const FieldOperation &operation = static_cast<const FieldOperation &>(field.node());
+ const Span<GField> operation_inputs = operation.inputs();
+
+ if (field_with_index.current_input_index < operation_inputs.size()) {
+ /* Not all inputs are handled yet. Push the next input field to the stack and increment the
+ * input index. */
+ fields_to_check.push({operation_inputs[field_with_index.current_input_index]});
+ field_with_index.current_input_index++;
+ }
+ else {
+ /* All input variables are ready; now add the function call. */
+ Vector<MFVariable *> input_variables;
+ for (const GField &field : operation_inputs) {
+ input_variables.append(variable_by_field.lookup(field));
+ }
+ const MultiFunction &multi_function = operation.multi_function();
+ Vector<MFVariable *> output_variables = builder.add_call(multi_function, input_variables);
+ /* Add newly created variables to the map. */
+ for (const int i : output_variables.index_range()) {
+ variable_by_field.add_new({operation, i}, output_variables[i]);
+ }
+ }
+ }
+ }
+
+ /* Add output parameters to the procedure. */
+ Set<MFVariable *> already_output_variables;
+ for (const GFieldRef &field : output_fields) {
+ MFVariable *variable = variable_by_field.lookup(field);
+ if (!already_output_variables.add(variable)) {
+ /* One variable can be output at most once. To output the same value twice, we have to make
+ * a copy first. */
+ const MultiFunction &copy_fn = scope.construct<CustomMF_GenericCopy>(
+ __func__, "copy", variable->data_type());
+ variable = builder.add_call<1>(copy_fn, {variable})[0];
+ }
+ builder.add_output_parameter(*variable);
+ }
+
+ /* Remove the variables that should not be destructed from the map. */
+ for (const GFieldRef &field : output_fields) {
+ variable_by_field.remove(field);
+ }
+ /* Add destructor calls for the remaining variables. */
+ for (MFVariable *variable : variable_by_field.values()) {
+ builder.add_destruct(*variable);
+ }
+
+ builder.add_return();
+
+ // std::cout << procedure.to_dot() << "\n";
+ BLI_assert(procedure.validate());
+}
+
+/**
+ * Utility class that destructs elements from a partially initialized array.
+ */
+struct PartiallyInitializedArray : NonCopyable, NonMovable {
+ void *buffer;
+ IndexMask mask;
+ const CPPType *type;
+
+ ~PartiallyInitializedArray()
+ {
+ this->type->destruct_indices(this->buffer, this->mask);
+ }
+};
+
+/**
+ * Evaluate fields in the given context. If possible, multiple fields should be evaluated together,
+ * because that can be more efficient when they share common sub-fields.
+ *
+ * \param scope: The resource scope that owns data that makes up the output virtual arrays. Make
+ * sure the scope is not destructed when the output virtual arrays are still used.
+ * \param fields_to_evaluate: The fields that should be evaluated together.
+ * \param mask: Determines which indices are computed. The mask may be referenced by the returned
+ * virtual arrays. So the underlying indices (if applicable) should live longer than #scope.
+ * \param context: The context that the field is evaluated in. Used to retrieve data from each
+ * #FieldInput in the field network.
+ * \param dst_varrays: If provided, the computed data will be written into those virtual arrays
+ * instead of into newly created ones. That allows making the computed data live longer than
+ * #scope and is more efficient when the data will be written into those virtual arrays
+ * later anyway.
+ * \return The computed virtual arrays for each provided field. If #dst_varrays is passed, the
+ * provided virtual arrays are returned.
+ */
+Vector<const GVArray *> evaluate_fields(ResourceScope &scope,
+ Span<GFieldRef> fields_to_evaluate,
+ IndexMask mask,
+ const FieldContext &context,
+ Span<GVMutableArray *> dst_varrays)
+{
+ Vector<const GVArray *> r_varrays(fields_to_evaluate.size(), nullptr);
+ const int array_size = mask.min_array_size();
+
+ /* Destination arrays are optional. Create a small utility method to access them. */
+ auto get_dst_varray_if_available = [&](int index) -> GVMutableArray * {
+ if (dst_varrays.is_empty()) {
+ return nullptr;
+ }
+ BLI_assert(dst_varrays[index] == nullptr || dst_varrays[index]->size() >= array_size);
+ return dst_varrays[index];
+ };
+
+ /* Traverse the field tree and prepare some data that is used in later steps. */
+ FieldTreeInfo field_tree_info = preprocess_field_tree(fields_to_evaluate);
+
+ /* Get inputs that will be passed into the field when evaluated. */
+ Vector<const GVArray *> field_context_inputs = get_field_context_inputs(
+ scope, mask, context, field_tree_info.deduplicated_field_inputs);
+
+ /* Finish fields that output an input varray directly. For those we don't have to do any further
+ * processing. */
+ for (const int out_index : fields_to_evaluate.index_range()) {
+ const GFieldRef &field = fields_to_evaluate[out_index];
+ if (!field.node().is_input()) {
+ continue;
+ }
+ const FieldInput &field_input = static_cast<const FieldInput &>(field.node());
+ const int field_input_index = field_tree_info.deduplicated_field_inputs.index_of(field_input);
+ const GVArray *varray = field_context_inputs[field_input_index];
+ r_varrays[out_index] = varray;
+ }
+
+ Set<GFieldRef> varying_fields = find_varying_fields(field_tree_info, field_context_inputs);
+
+ /* Separate fields into two categories: those that are constant and need to be evaluated only
+ * once, and those that need to be evaluated for every index. */
+ Vector<GFieldRef> varying_fields_to_evaluate;
+ Vector<int> varying_field_indices;
+ Vector<GFieldRef> constant_fields_to_evaluate;
+ Vector<int> constant_field_indices;
+ for (const int i : fields_to_evaluate.index_range()) {
+ if (r_varrays[i] != nullptr) {
+ /* Already done. */
+ continue;
+ }
+ GFieldRef field = fields_to_evaluate[i];
+ if (varying_fields.contains(field)) {
+ varying_fields_to_evaluate.append(field);
+ varying_field_indices.append(i);
+ }
+ else {
+ constant_fields_to_evaluate.append(field);
+ constant_field_indices.append(i);
+ }
+ }
+
+ /* Evaluate varying fields if necessary. */
+ if (!varying_fields_to_evaluate.is_empty()) {
+ /* Build the procedure for those fields. */
+ MFProcedure procedure;
+ build_multi_function_procedure_for_fields(
+ procedure, scope, field_tree_info, varying_fields_to_evaluate);
+ MFProcedureExecutor procedure_executor{"Procedure", procedure};
+ MFParamsBuilder mf_params{procedure_executor, array_size};
+ MFContextBuilder mf_context;
+
+ /* Provide inputs to the procedure executor. */
+ for (const GVArray *varray : field_context_inputs) {
+ mf_params.add_readonly_single_input(*varray);
+ }
+
+ for (const int i : varying_fields_to_evaluate.index_range()) {
+ const GFieldRef &field = varying_fields_to_evaluate[i];
+ const CPPType &type = field.cpp_type();
+ const int out_index = varying_field_indices[i];
+
+ /* Try to get an existing virtual array that the result should be written into. */
+ GVMutableArray *output_varray = get_dst_varray_if_available(out_index);
+ void *buffer;
+ if (output_varray == nullptr || !output_varray->is_span()) {
+ /* Allocate a new buffer for the computed result. */
+ buffer = scope.linear_allocator().allocate(type.size() * array_size, type.alignment());
+
+ /* Make sure that elements in the buffer will be destructed. */
+ PartiallyInitializedArray &destruct_helper = scope.construct<PartiallyInitializedArray>(
+ __func__);
+ destruct_helper.buffer = buffer;
+ destruct_helper.mask = mask;
+ destruct_helper.type = &type;
+
+ r_varrays[out_index] = &scope.construct<GVArray_For_GSpan>(
+ __func__, GSpan{type, buffer, array_size});
+ }
+ else {
+ /* Write the result into the existing span. */
+ buffer = output_varray->get_internal_span().data();
+
+ r_varrays[out_index] = output_varray;
+ }
+
+ /* Pass output buffer to the procedure executor. */
+ const GMutableSpan span{type, buffer, array_size};
+ mf_params.add_uninitialized_single_output(span);
+ }
+
+ procedure_executor.call(mask, mf_params, mf_context);
+ }
+
+ /* Evaluate constant fields if necessary. */
+ if (!constant_fields_to_evaluate.is_empty()) {
+ /* Build the procedure for those fields. */
+ MFProcedure procedure;
+ build_multi_function_procedure_for_fields(
+ procedure, scope, field_tree_info, constant_fields_to_evaluate);
+ MFProcedureExecutor procedure_executor{"Procedure", procedure};
+ MFParamsBuilder mf_params{procedure_executor, 1};
+ MFContextBuilder mf_context;
+
+ /* Provide inputs to the procedure executor. */
+ for (const GVArray *varray : field_context_inputs) {
+ mf_params.add_readonly_single_input(*varray);
+ }
+
+ for (const int i : constant_fields_to_evaluate.index_range()) {
+ const GFieldRef &field = constant_fields_to_evaluate[i];
+ const CPPType &type = field.cpp_type();
+ /* Allocate memory in which the computed value will be stored. */
+ void *buffer = scope.linear_allocator().allocate(type.size(), type.alignment());
+
+ /* Use this to make sure that the value is destructed in the end. */
+ PartiallyInitializedArray &destruct_helper = scope.construct<PartiallyInitializedArray>(
+ __func__);
+ destruct_helper.buffer = buffer;
+ destruct_helper.mask = IndexRange(1);
+ destruct_helper.type = &type;
+
+ /* Pass output buffer to the procedure executor. */
+ mf_params.add_uninitialized_single_output({type, buffer, 1});
+
+ /* Create a virtual array that can be used after the procedure has been executed below. */
+ const int out_index = constant_field_indices[i];
+ r_varrays[out_index] = &scope.construct<GVArray_For_SingleValueRef>(
+ __func__, type, array_size, buffer);
+ }
+
+ procedure_executor.call(IndexRange(1), mf_params, mf_context);
+ }
+
+ /* Copy data to supplied destination arrays if necessary. In some cases the evaluation above has
+ * written the computed data in the right place already. */
+ if (!dst_varrays.is_empty()) {
+ for (const int out_index : fields_to_evaluate.index_range()) {
+ GVMutableArray *output_varray = get_dst_varray_if_available(out_index);
+ if (output_varray == nullptr) {
+ /* Caller did not provide a destination for this output. */
+ continue;
+ }
+ const GVArray *computed_varray = r_varrays[out_index];
+ BLI_assert(computed_varray->type() == output_varray->type());
+ if (output_varray == computed_varray) {
+ /* The result has been written into the destination provided by the caller already. */
+ continue;
+ }
+ /* Still have to copy the data into the destination provided by the caller. */
+ if (output_varray->is_span()) {
+ /* Materialize into a span. */
+ computed_varray->materialize_to_uninitialized(output_varray->get_internal_span().data());
+ }
+ else {
+ /* Slower path: materialize into a different structure. */
+ const CPPType &type = computed_varray->type();
+ BUFFER_FOR_CPP_TYPE_VALUE(type, buffer);
+ for (const int i : mask) {
+ computed_varray->get_to_uninitialized(i, buffer);
+ output_varray->set_by_relocate(i, buffer);
+ }
+ }
+ r_varrays[out_index] = output_varray;
+ }
+ }
+ return r_varrays;
+}
+
+void evaluate_constant_field(const GField &field, void *r_value)
+{
+ ResourceScope scope;
+ FieldContext context;
+ Vector<const GVArray *> varrays = evaluate_fields(scope, {field}, IndexRange(1), context);
+ varrays[0]->get_to_uninitialized(0, r_value);
+}
+
+const GVArray *FieldContext::get_varray_for_input(const FieldInput &field_input,
+ IndexMask mask,
+ ResourceScope &scope) const
+{
+ /* By default, ask the field input to create the varray. A derived field context may override
+ * this behavior to provide its own data. */
+ return field_input.get_varray_for_context(*this, mask, scope);
+}
+
+/* --------------------------------------------------------------------
+ * FieldEvaluator.
+ */
+
+static Vector<int64_t> indices_from_selection(const VArray<bool> &selection)
+{
+ /* If the selection is just a single value, it's best to avoid calling this
+ * function when constructing an IndexMask and use an IndexRange instead. */
+ BLI_assert(!selection.is_single());
+
+ Vector<int64_t> indices;
+ if (selection.is_span()) {
+ Span<bool> span = selection.get_internal_span();
+ for (const int64_t i : span.index_range()) {
+ if (span[i]) {
+ indices.append(i);
+ }
+ }
+ }
+ else {
+ for (const int i : selection.index_range()) {
+ if (selection[i]) {
+ indices.append(i);
+ }
+ }
+ }
+ return indices;
+}
+
+int FieldEvaluator::add_with_destination(GField field, GVMutableArray &dst)
+{
+ const int field_index = fields_to_evaluate_.append_and_get_index(std::move(field));
+ dst_varrays_.append(&dst);
+ output_pointer_infos_.append({});
+ return field_index;
+}
+
+int FieldEvaluator::add_with_destination(GField field, GMutableSpan dst)
+{
+ GVMutableArray &varray = scope_.construct<GVMutableArray_For_GMutableSpan>(__func__, dst);
+ return this->add_with_destination(std::move(field), varray);
+}
+
+int FieldEvaluator::add(GField field, const GVArray **varray_ptr)
+{
+ const int field_index = fields_to_evaluate_.append_and_get_index(std::move(field));
+ dst_varrays_.append(nullptr);
+ output_pointer_infos_.append(OutputPointerInfo{
+ varray_ptr, [](void *dst, const GVArray &varray, ResourceScope &UNUSED(scope)) {
+ *(const GVArray **)dst = &varray;
+ }});
+ return field_index;
+}
+
+int FieldEvaluator::add(GField field)
+{
+ const int field_index = fields_to_evaluate_.append_and_get_index(std::move(field));
+ dst_varrays_.append(nullptr);
+ output_pointer_infos_.append({});
+ return field_index;
+}
+
+void FieldEvaluator::evaluate()
+{
+ BLI_assert_msg(!is_evaluated_, "Cannot evaluate fields twice.");
+ Array<GFieldRef> fields(fields_to_evaluate_.size());
+ for (const int i : fields_to_evaluate_.index_range()) {
+ fields[i] = fields_to_evaluate_[i];
+ }
+ evaluated_varrays_ = evaluate_fields(scope_, fields, mask_, context_, dst_varrays_);
+ BLI_assert(fields_to_evaluate_.size() == evaluated_varrays_.size());
+ for (const int i : fields_to_evaluate_.index_range()) {
+ OutputPointerInfo &info = output_pointer_infos_[i];
+ if (info.dst != nullptr) {
+ info.set(info.dst, *evaluated_varrays_[i], scope_);
+ }
+ }
+ is_evaluated_ = true;
+}
+
+IndexMask FieldEvaluator::get_evaluated_as_mask(const int field_index)
+{
+ const GVArray &varray = this->get_evaluated(field_index);
+ GVArray_Typed<bool> typed_varray{varray};
+
+ if (typed_varray->is_single()) {
+ if (typed_varray->get_internal_single()) {
+ return IndexRange(typed_varray.size());
+ }
+ return IndexRange(0);
+ }
+
+ return scope_.add_value(indices_from_selection(*typed_varray), __func__).as_span();
+}
+
+} // namespace blender::fn
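A note on the FieldContext hook defined at the end of this file: during evaluation a context supplies the data for every FieldInput, and a specialized context can override get_varray_for_input() to provide its own data (this assumes the method is declared virtual in FN_field.hh, which lives outside this directory's diff). A minimal hypothetical sketch that reuses only calls visible above:

/* Hypothetical context that hands every field input a single default value;
 * this mirrors the fallback used by evaluate_fields() when a context returns null. */
class DefaultValueFieldContext : public FieldContext {
 public:
  const GVArray *get_varray_for_input(const FieldInput &field_input,
                                      IndexMask mask,
                                      ResourceScope &scope) const override
  {
    const CPPType &type = field_input.cpp_type();
    return &scope.construct<GVArray_For_SingleValueRef>(
        __func__, type, mask.min_array_size(), type.default_value());
  }
};

Passing such a context into evaluate_fields() then determines what data the field inputs see.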
diff --git a/source/blender/functions/intern/multi_function_builder.cc b/source/blender/functions/intern/multi_function_builder.cc
index c6b3b808130..180d1f17a54 100644
--- a/source/blender/functions/intern/multi_function_builder.cc
+++ b/source/blender/functions/intern/multi_function_builder.cc
@@ -123,4 +123,32 @@ void CustomMF_DefaultOutput::call(IndexMask mask, MFParams params, MFContext UNU
}
}
+CustomMF_GenericCopy::CustomMF_GenericCopy(StringRef name, MFDataType data_type)
+{
+ MFSignatureBuilder signature{name};
+ signature.input("Input", data_type);
+ signature.output("Output", data_type);
+ signature_ = signature.build();
+ this->set_signature(&signature_);
+}
+
+void CustomMF_GenericCopy::call(IndexMask mask, MFParams params, MFContext UNUSED(context)) const
+{
+ const MFDataType data_type = this->param_type(0).data_type();
+ switch (data_type.category()) {
+ case MFDataType::Single: {
+ const GVArray &inputs = params.readonly_single_input(0, "Input");
+ GMutableSpan outputs = params.uninitialized_single_output(1, "Output");
+ inputs.materialize_to_uninitialized(mask, outputs.data());
+ break;
+ }
+ case MFDataType::Vector: {
+ const GVVectorArray &inputs = params.readonly_vector_input(0, "Input");
+ GVectorArray &outputs = params.vector_output(1, "Output");
+ outputs.extend(mask, inputs);
+ break;
+ }
+ }
+}
+
} // namespace blender::fn
diff --git a/source/blender/functions/intern/multi_function_procedure.cc b/source/blender/functions/intern/multi_function_procedure.cc
new file mode 100644
index 00000000000..6eff7bc09f8
--- /dev/null
+++ b/source/blender/functions/intern/multi_function_procedure.cc
@@ -0,0 +1,794 @@
+/*
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include "FN_multi_function_procedure.hh"
+
+#include "BLI_dot_export.hh"
+#include "BLI_stack.hh"
+
+namespace blender::fn {
+
+void MFVariable::set_name(std::string name)
+{
+ name_ = std::move(name);
+}
+
+void MFCallInstruction::set_next(MFInstruction *instruction)
+{
+ if (next_ != nullptr) {
+ next_->prev_.remove_first_occurrence_and_reorder(this);
+ }
+ if (instruction != nullptr) {
+ instruction->prev_.append(this);
+ }
+ next_ = instruction;
+}
+
+void MFCallInstruction::set_param_variable(int param_index, MFVariable *variable)
+{
+ if (params_[param_index] != nullptr) {
+ params_[param_index]->users_.remove_first_occurrence_and_reorder(this);
+ }
+ if (variable != nullptr) {
+ BLI_assert(fn_->param_type(param_index).data_type() == variable->data_type());
+ variable->users_.append(this);
+ }
+ params_[param_index] = variable;
+}
+
+void MFCallInstruction::set_params(Span<MFVariable *> variables)
+{
+ BLI_assert(variables.size() == params_.size());
+ for (const int i : variables.index_range()) {
+ this->set_param_variable(i, variables[i]);
+ }
+}
+
+void MFBranchInstruction::set_condition(MFVariable *variable)
+{
+ if (condition_ != nullptr) {
+ condition_->users_.remove_first_occurrence_and_reorder(this);
+ }
+ if (variable != nullptr) {
+ variable->users_.append(this);
+ }
+ condition_ = variable;
+}
+
+void MFBranchInstruction::set_branch_true(MFInstruction *instruction)
+{
+ if (branch_true_ != nullptr) {
+ branch_true_->prev_.remove_first_occurrence_and_reorder(this);
+ }
+ if (instruction != nullptr) {
+ instruction->prev_.append(this);
+ }
+ branch_true_ = instruction;
+}
+
+void MFBranchInstruction::set_branch_false(MFInstruction *instruction)
+{
+ if (branch_false_ != nullptr) {
+ branch_false_->prev_.remove_first_occurrence_and_reorder(this);
+ }
+ if (instruction != nullptr) {
+ instruction->prev_.append(this);
+ }
+ branch_false_ = instruction;
+}
+
+void MFDestructInstruction::set_variable(MFVariable *variable)
+{
+ if (variable_ != nullptr) {
+ variable_->users_.remove_first_occurrence_and_reorder(this);
+ }
+ if (variable != nullptr) {
+ variable->users_.append(this);
+ }
+ variable_ = variable;
+}
+
+void MFDestructInstruction::set_next(MFInstruction *instruction)
+{
+ if (next_ != nullptr) {
+ next_->prev_.remove_first_occurrence_and_reorder(this);
+ }
+ if (instruction != nullptr) {
+ instruction->prev_.append(this);
+ }
+ next_ = instruction;
+}
+
+void MFDummyInstruction::set_next(MFInstruction *instruction)
+{
+ if (next_ != nullptr) {
+ next_->prev_.remove_first_occurrence_and_reorder(this);
+ }
+ if (instruction != nullptr) {
+ instruction->prev_.append(this);
+ }
+ next_ = instruction;
+}
+
+MFVariable &MFProcedure::new_variable(MFDataType data_type, std::string name)
+{
+ MFVariable &variable = *allocator_.construct<MFVariable>().release();
+ variable.name_ = std::move(name);
+ variable.data_type_ = data_type;
+ variable.id_ = variables_.size();
+ variables_.append(&variable);
+ return variable;
+}
+
+MFCallInstruction &MFProcedure::new_call_instruction(const MultiFunction &fn)
+{
+ MFCallInstruction &instruction = *allocator_.construct<MFCallInstruction>().release();
+ instruction.type_ = MFInstructionType::Call;
+ instruction.fn_ = &fn;
+ instruction.params_ = allocator_.allocate_array<MFVariable *>(fn.param_amount());
+ instruction.params_.fill(nullptr);
+ call_instructions_.append(&instruction);
+ return instruction;
+}
+
+MFBranchInstruction &MFProcedure::new_branch_instruction()
+{
+ MFBranchInstruction &instruction = *allocator_.construct<MFBranchInstruction>().release();
+ instruction.type_ = MFInstructionType::Branch;
+ branch_instructions_.append(&instruction);
+ return instruction;
+}
+
+MFDestructInstruction &MFProcedure::new_destruct_instruction()
+{
+ MFDestructInstruction &instruction = *allocator_.construct<MFDestructInstruction>().release();
+ instruction.type_ = MFInstructionType::Destruct;
+ destruct_instructions_.append(&instruction);
+ return instruction;
+}
+
+MFDummyInstruction &MFProcedure::new_dummy_instruction()
+{
+ MFDummyInstruction &instruction = *allocator_.construct<MFDummyInstruction>().release();
+ instruction.type_ = MFInstructionType::Dummy;
+ dummy_instructions_.append(&instruction);
+ return instruction;
+}
+
+MFReturnInstruction &MFProcedure::new_return_instruction()
+{
+ MFReturnInstruction &instruction = *allocator_.construct<MFReturnInstruction>().release();
+ instruction.type_ = MFInstructionType::Return;
+ return_instructions_.append(&instruction);
+ return instruction;
+}
+
+void MFProcedure::add_parameter(MFParamType::InterfaceType interface_type, MFVariable &variable)
+{
+ params_.append({interface_type, &variable});
+}
+
+void MFProcedure::set_entry(MFInstruction &entry)
+{
+ entry_ = &entry;
+}
+
+MFProcedure::~MFProcedure()
+{
+ for (MFCallInstruction *instruction : call_instructions_) {
+ instruction->~MFCallInstruction();
+ }
+ for (MFBranchInstruction *instruction : branch_instructions_) {
+ instruction->~MFBranchInstruction();
+ }
+ for (MFDestructInstruction *instruction : destruct_instructions_) {
+ instruction->~MFDestructInstruction();
+ }
+ for (MFDummyInstruction *instruction : dummy_instructions_) {
+ instruction->~MFDummyInstruction();
+ }
+ for (MFReturnInstruction *instruction : return_instructions_) {
+ instruction->~MFReturnInstruction();
+ }
+ for (MFVariable *variable : variables_) {
+ variable->~MFVariable();
+ }
+}
+
+bool MFProcedure::validate() const
+{
+ if (entry_ == nullptr) {
+ return false;
+ }
+ if (!this->validate_all_instruction_pointers_set()) {
+ return false;
+ }
+ if (!this->validate_all_params_provided()) {
+ return false;
+ }
+ if (!this->validate_same_variables_in_one_call()) {
+ return false;
+ }
+ if (!this->validate_parameters()) {
+ return false;
+ }
+ if (!this->validate_initialization()) {
+ return false;
+ }
+ return true;
+}
+
+bool MFProcedure::validate_all_instruction_pointers_set() const
+{
+ for (const MFCallInstruction *instruction : call_instructions_) {
+ if (instruction->next_ == nullptr) {
+ return false;
+ }
+ }
+ for (const MFDestructInstruction *instruction : destruct_instructions_) {
+ if (instruction->next_ == nullptr) {
+ return false;
+ }
+ }
+ for (const MFBranchInstruction *instruction : branch_instructions_) {
+ if (instruction->branch_true_ == nullptr) {
+ return false;
+ }
+ if (instruction->branch_false_ == nullptr) {
+ return false;
+ }
+ }
+ for (const MFDummyInstruction *instruction : dummy_instructions_) {
+ if (instruction->next_ == nullptr) {
+ return false;
+ }
+ }
+ return true;
+}
+
+bool MFProcedure::validate_all_params_provided() const
+{
+ for (const MFCallInstruction *instruction : call_instructions_) {
+ for (const MFVariable *variable : instruction->params_) {
+ if (variable == nullptr) {
+ return false;
+ }
+ }
+ }
+ for (const MFBranchInstruction *instruction : branch_instructions_) {
+ if (instruction->condition_ == nullptr) {
+ return false;
+ }
+ }
+ for (const MFDestructInstruction *instruction : destruct_instructions_) {
+ if (instruction->variable_ == nullptr) {
+ return false;
+ }
+ }
+ return true;
+}
+
+bool MFProcedure::validate_same_variables_in_one_call() const
+{
+ for (const MFCallInstruction *instruction : call_instructions_) {
+ const MultiFunction &fn = *instruction->fn_;
+ for (const int param_index : fn.param_indices()) {
+ const MFParamType param_type = fn.param_type(param_index);
+ const MFVariable *variable = instruction->params_[param_index];
+ for (const int other_param_index : fn.param_indices()) {
+ if (other_param_index == param_index) {
+ continue;
+ }
+ const MFVariable *other_variable = instruction->params_[other_param_index];
+ if (other_variable != variable) {
+ continue;
+ }
+ if (ELEM(param_type.interface_type(), MFParamType::Mutable, MFParamType::Output)) {
+ /* When a variable is used as mutable or output parameter, it can only be used once. */
+ return false;
+ }
+ const MFParamType other_param_type = fn.param_type(other_param_index);
+ /* A variable is allowed to be used as input more than once. */
+ if (other_param_type.interface_type() != MFParamType::Input) {
+ return false;
+ }
+ }
+ }
+ }
+ return true;
+}
+
+bool MFProcedure::validate_parameters() const
+{
+ Set<const MFVariable *> variables;
+ for (const MFParameter &param : params_) {
+ /* One variable cannot be used as multiple parameters. */
+ if (!variables.add(param.variable)) {
+ return false;
+ }
+ }
+ return true;
+}
+
+bool MFProcedure::validate_initialization() const
+{
+ /* TODO: Issue a warning when a variable may be wrongly initialized. */
+ for (const MFDestructInstruction *instruction : destruct_instructions_) {
+ const MFVariable &variable = *instruction->variable_;
+ const InitState state = this->find_initialization_state_before_instruction(*instruction,
+ variable);
+ if (!state.can_be_initialized) {
+ return false;
+ }
+ }
+ for (const MFBranchInstruction *instruction : branch_instructions_) {
+ const MFVariable &variable = *instruction->condition_;
+ const InitState state = this->find_initialization_state_before_instruction(*instruction,
+ variable);
+ if (!state.can_be_initialized) {
+ return false;
+ }
+ }
+ for (const MFCallInstruction *instruction : call_instructions_) {
+ const MultiFunction &fn = *instruction->fn_;
+ for (const int param_index : fn.param_indices()) {
+ const MFParamType param_type = fn.param_type(param_index);
+ const MFVariable &variable = *instruction->params_[param_index];
+ const InitState state = this->find_initialization_state_before_instruction(*instruction,
+ variable);
+ switch (param_type.interface_type()) {
+ case MFParamType::Input:
+ case MFParamType::Mutable: {
+ if (!state.can_be_initialized) {
+ return false;
+ }
+ break;
+ }
+ case MFParamType::Output: {
+ if (!state.can_be_uninitialized) {
+ return false;
+ }
+ break;
+ }
+ }
+ }
+ }
+ Set<const MFVariable *> variables_that_should_be_initialized_on_return;
+ for (const MFParameter &param : params_) {
+ if (ELEM(param.type, MFParamType::Mutable, MFParamType::Output)) {
+ variables_that_should_be_initialized_on_return.add_new(param.variable);
+ }
+ }
+ for (const MFReturnInstruction *instruction : return_instructions_) {
+ for (const MFVariable *variable : variables_) {
+ const InitState init_state = this->find_initialization_state_before_instruction(*instruction,
+ *variable);
+ if (variables_that_should_be_initialized_on_return.contains(variable)) {
+ if (!init_state.can_be_initialized) {
+ return false;
+ }
+ }
+ else {
+ if (!init_state.can_be_uninitialized) {
+ return false;
+ }
+ }
+ }
+ }
+ return true;
+}
+
+MFProcedure::InitState MFProcedure::find_initialization_state_before_instruction(
+ const MFInstruction &target_instruction, const MFVariable &target_variable) const
+{
+ InitState state;
+
+ auto check_entry_instruction = [&]() {
+ bool caller_initialized_variable = false;
+ for (const MFParameter &param : params_) {
+ if (param.variable == &target_variable) {
+ if (ELEM(param.type, MFParamType::Input, MFParamType::Mutable)) {
+ caller_initialized_variable = true;
+ break;
+ }
+ }
+ }
+ if (caller_initialized_variable) {
+ state.can_be_initialized = true;
+ }
+ else {
+ state.can_be_uninitialized = true;
+ }
+ };
+
+ if (&target_instruction == entry_) {
+ check_entry_instruction();
+ }
+
+ Set<const MFInstruction *> checked_instructions;
+ Stack<const MFInstruction *> instructions_to_check;
+ instructions_to_check.push_multiple(target_instruction.prev_);
+
+ while (!instructions_to_check.is_empty()) {
+ const MFInstruction &instruction = *instructions_to_check.pop();
+ if (!checked_instructions.add(&instruction)) {
+ /* Skip if the instruction has been checked already. */
+ continue;
+ }
+ bool state_modified = false;
+ switch (instruction.type_) {
+ case MFInstructionType::Call: {
+ const MFCallInstruction &call_instruction = static_cast<const MFCallInstruction &>(
+ instruction);
+ const MultiFunction &fn = *call_instruction.fn_;
+ for (const int param_index : fn.param_indices()) {
+ if (call_instruction.params_[param_index] == &target_variable) {
+ const MFParamType param_type = fn.param_type(param_index);
+ if (param_type.interface_type() == MFParamType::Output) {
+ state.can_be_initialized = true;
+ state_modified = true;
+ break;
+ }
+ }
+ }
+ break;
+ }
+ case MFInstructionType::Destruct: {
+ const MFDestructInstruction &destruct_instruction =
+ static_cast<const MFDestructInstruction &>(instruction);
+ if (destruct_instruction.variable_ == &target_variable) {
+ state.can_be_uninitialized = true;
+ state_modified = true;
+ }
+ break;
+ }
+ case MFInstructionType::Branch:
+ case MFInstructionType::Dummy:
+ case MFInstructionType::Return: {
+ /* These instruction types don't change the initialization state of variables. */
+ break;
+ }
+ }
+
+ if (!state_modified) {
+ if (&instruction == entry_) {
+ check_entry_instruction();
+ }
+ instructions_to_check.push_multiple(instruction.prev_);
+ }
+ }
+
+ return state;
+}
+
+class MFProcedureDotExport {
+ private:
+ const MFProcedure &procedure_;
+ dot::DirectedGraph digraph_;
+ Map<const MFInstruction *, dot::Node *> dot_nodes_by_begin_;
+ Map<const MFInstruction *, dot::Node *> dot_nodes_by_end_;
+
+ public:
+ MFProcedureDotExport(const MFProcedure &procedure) : procedure_(procedure)
+ {
+ }
+
+ std::string generate()
+ {
+ this->create_nodes();
+ this->create_edges();
+ return digraph_.to_dot_string();
+ }
+
+ void create_nodes()
+ {
+ Vector<const MFInstruction *> all_instructions;
+ auto add_instructions = [&](auto instructions) {
+ all_instructions.extend(instructions.begin(), instructions.end());
+ };
+ add_instructions(procedure_.call_instructions_);
+ add_instructions(procedure_.branch_instructions_);
+ add_instructions(procedure_.destruct_instructions_);
+ add_instructions(procedure_.dummy_instructions_);
+ add_instructions(procedure_.return_instructions_);
+
+ Set<const MFInstruction *> handled_instructions;
+
+ for (const MFInstruction *representative : all_instructions) {
+ if (handled_instructions.contains(representative)) {
+ continue;
+ }
+ Vector<const MFInstruction *> block_instructions = this->get_instructions_in_block(
+ *representative);
+ std::stringstream ss;
+ ss << "<";
+
+ for (const MFInstruction *current : block_instructions) {
+ handled_instructions.add_new(current);
+ switch (current->type()) {
+ case MFInstructionType::Call: {
+ this->instruction_to_string(*static_cast<const MFCallInstruction *>(current), ss);
+ break;
+ }
+ case MFInstructionType::Destruct: {
+ this->instruction_to_string(*static_cast<const MFDestructInstruction *>(current), ss);
+ break;
+ }
+ case MFInstructionType::Dummy: {
+ this->instruction_to_string(*static_cast<const MFDummyInstruction *>(current), ss);
+ break;
+ }
+ case MFInstructionType::Return: {
+ this->instruction_to_string(*static_cast<const MFReturnInstruction *>(current), ss);
+ break;
+ }
+ case MFInstructionType::Branch: {
+ this->instruction_to_string(*static_cast<const MFBranchInstruction *>(current), ss);
+ break;
+ }
+ }
+ ss << R"(<br align="left" />)";
+ }
+ ss << ">";
+
+ dot::Node &dot_node = digraph_.new_node(ss.str());
+ dot_node.set_shape(dot::Attr_shape::Rectangle);
+ dot_nodes_by_begin_.add_new(block_instructions.first(), &dot_node);
+ dot_nodes_by_end_.add_new(block_instructions.last(), &dot_node);
+ }
+ }
+
+ void create_edges()
+ {
+ auto create_edge = [&](dot::Node &from_node,
+ const MFInstruction *to_instruction) -> dot::DirectedEdge & {
+ if (to_instruction == nullptr) {
+ dot::Node &to_node = digraph_.new_node("missing");
+ to_node.set_shape(dot::Attr_shape::Diamond);
+ return digraph_.new_edge(from_node, to_node);
+ }
+ dot::Node &to_node = *dot_nodes_by_begin_.lookup(to_instruction);
+ return digraph_.new_edge(from_node, to_node);
+ };
+
+ for (auto item : dot_nodes_by_end_.items()) {
+ const MFInstruction &from_instruction = *item.key;
+ dot::Node &from_node = *item.value;
+ switch (from_instruction.type()) {
+ case MFInstructionType::Call: {
+ const MFInstruction *to_instruction =
+ static_cast<const MFCallInstruction &>(from_instruction).next();
+ create_edge(from_node, to_instruction);
+ break;
+ }
+ case MFInstructionType::Destruct: {
+ const MFInstruction *to_instruction =
+ static_cast<const MFDestructInstruction &>(from_instruction).next();
+ create_edge(from_node, to_instruction);
+ break;
+ }
+ case MFInstructionType::Dummy: {
+ const MFInstruction *to_instruction =
+ static_cast<const MFDummyInstruction &>(from_instruction).next();
+ create_edge(from_node, to_instruction);
+ break;
+ }
+ case MFInstructionType::Return: {
+ break;
+ }
+ case MFInstructionType::Branch: {
+ const MFBranchInstruction &branch_instruction = static_cast<const MFBranchInstruction &>(
+ from_instruction);
+ const MFInstruction *to_true_instruction = branch_instruction.branch_true();
+ const MFInstruction *to_false_instruction = branch_instruction.branch_false();
+ create_edge(from_node, to_true_instruction).attributes.set("color", "#118811");
+ create_edge(from_node, to_false_instruction).attributes.set("color", "#881111");
+ break;
+ }
+ }
+ }
+
+ dot::Node &entry_node = this->create_entry_node();
+ create_edge(entry_node, procedure_.entry());
+ }
+
+ bool has_to_be_block_begin(const MFInstruction &instruction)
+ {
+ if (procedure_.entry() == &instruction) {
+ return true;
+ }
+ if (instruction.prev().size() != 1) {
+ return true;
+ }
+ if (instruction.prev()[0]->type() == MFInstructionType::Branch) {
+ return true;
+ }
+ return false;
+ }
+
+ const MFInstruction &get_first_instruction_in_block(const MFInstruction &representative)
+ {
+ const MFInstruction *current = &representative;
+ while (!this->has_to_be_block_begin(*current)) {
+ current = current->prev()[0];
+ if (current == &representative) {
+ /* There is a loop without entry or exit, just break it up here. */
+ break;
+ }
+ }
+ return *current;
+ }
+
+ const MFInstruction *get_next_instruction_in_block(const MFInstruction &instruction,
+ const MFInstruction &block_begin)
+ {
+ const MFInstruction *next = nullptr;
+ switch (instruction.type()) {
+ case MFInstructionType::Call: {
+ next = static_cast<const MFCallInstruction &>(instruction).next();
+ break;
+ }
+ case MFInstructionType::Destruct: {
+ next = static_cast<const MFDestructInstruction &>(instruction).next();
+ break;
+ }
+ case MFInstructionType::Dummy: {
+ next = static_cast<const MFDummyInstruction &>(instruction).next();
+ break;
+ }
+ case MFInstructionType::Return:
+ case MFInstructionType::Branch: {
+ break;
+ }
+ }
+ if (next == nullptr) {
+ return nullptr;
+ }
+ if (next == &block_begin) {
+ return nullptr;
+ }
+ if (this->has_to_be_block_begin(*next)) {
+ return nullptr;
+ }
+ return next;
+ }
+
+ Vector<const MFInstruction *> get_instructions_in_block(const MFInstruction &representative)
+ {
+ Vector<const MFInstruction *> instructions;
+ const MFInstruction &begin = this->get_first_instruction_in_block(representative);
+ for (const MFInstruction *current = &begin; current != nullptr;
+ current = this->get_next_instruction_in_block(*current, begin)) {
+ instructions.append(current);
+ }
+ return instructions;
+ }
+
+ void variable_to_string(const MFVariable *variable, std::stringstream &ss)
+ {
+ if (variable == nullptr) {
+ ss << "null";
+ }
+ else {
+ ss << "$" << variable->id();
+ if (!variable->name().is_empty()) {
+ ss << "(" << variable->name() << ")";
+ }
+ }
+ }
+
+ void instruction_name_format(StringRef name, std::stringstream &ss)
+ {
+ ss << name;
+ }
+
+ void instruction_to_string(const MFCallInstruction &instruction, std::stringstream &ss)
+ {
+ const MultiFunction &fn = instruction.fn();
+ this->instruction_name_format(fn.name() + ": ", ss);
+ for (const int param_index : fn.param_indices()) {
+ const MFParamType param_type = fn.param_type(param_index);
+ const MFVariable *variable = instruction.params()[param_index];
+ ss << R"(<font color="grey30">)";
+ switch (param_type.interface_type()) {
+ case MFParamType::Input: {
+ ss << "in";
+ break;
+ }
+ case MFParamType::Mutable: {
+ ss << "mut";
+ break;
+ }
+ case MFParamType::Output: {
+ ss << "out";
+ break;
+ }
+ }
+ ss << " </font> ";
+ variable_to_string(variable, ss);
+ if (param_index < fn.param_amount() - 1) {
+ ss << ", ";
+ }
+ }
+ }
+
+ void instruction_to_string(const MFDestructInstruction &instruction, std::stringstream &ss)
+ {
+ instruction_name_format("Destruct ", ss);
+ variable_to_string(instruction.variable(), ss);
+ }
+
+ void instruction_to_string(const MFDummyInstruction &UNUSED(instruction), std::stringstream &ss)
+ {
+ instruction_name_format("Dummy ", ss);
+ }
+
+ void instruction_to_string(const MFReturnInstruction &UNUSED(instruction), std::stringstream &ss)
+ {
+ instruction_name_format("Return ", ss);
+
+ Vector<ConstMFParameter> outgoing_parameters;
+ for (const ConstMFParameter &param : procedure_.params()) {
+ if (ELEM(param.type, MFParamType::Mutable, MFParamType::Output)) {
+ outgoing_parameters.append(param);
+ }
+ }
+ for (const int param_index : outgoing_parameters.index_range()) {
+ const ConstMFParameter &param = outgoing_parameters[param_index];
+ variable_to_string(param.variable, ss);
+ if (param_index < outgoing_parameters.size() - 1) {
+ ss << ", ";
+ }
+ }
+ }
+
+ void instruction_to_string(const MFBranchInstruction &instruction, std::stringstream &ss)
+ {
+ instruction_name_format("Branch ", ss);
+ variable_to_string(instruction.condition(), ss);
+ }
+
+ dot::Node &create_entry_node()
+ {
+ std::stringstream ss;
+ ss << "Entry: ";
+ Vector<ConstMFParameter> incoming_parameters;
+ for (const ConstMFParameter &param : procedure_.params()) {
+ if (ELEM(param.type, MFParamType::Input, MFParamType::Mutable)) {
+ incoming_parameters.append(param);
+ }
+ }
+ for (const int param_index : incoming_parameters.index_range()) {
+ const ConstMFParameter &param = incoming_parameters[param_index];
+ variable_to_string(param.variable, ss);
+ if (param_index < incoming_parameters.size() - 1) {
+ ss << ", ";
+ }
+ }
+
+ dot::Node &node = digraph_.new_node(ss.str());
+ node.set_shape(dot::Attr_shape::Ellipse);
+ return node;
+ }
+};
+
+std::string MFProcedure::to_dot() const
+{
+ MFProcedureDotExport dot_export{*this};
+ return dot_export.generate();
+}
+
+} // namespace blender::fn
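To make the initialization rules that validate() enforces above more concrete, a small editor's sketch (not part of the patch); CPPType::get<int>() and the builder's default entry cursor (defined in the builder header, not part of this directory's diff) are assumptions:

MFProcedure procedure;
MFProcedureBuilder builder{procedure};

/* An input-only parameter: the caller initializes it, and the procedure is
 * expected to destruct it before it returns. */
builder.add_input_parameter(MFDataType::ForSingle(CPPType::get<int>()), "Value");

/* No add_destruct() before the return... */
builder.add_return();

/* ...so validate_initialization(), and therefore validate(), fails. */
BLI_assert(!procedure.validate());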
diff --git a/source/blender/functions/intern/multi_function_procedure_builder.cc b/source/blender/functions/intern/multi_function_procedure_builder.cc
new file mode 100644
index 00000000000..3c088776bea
--- /dev/null
+++ b/source/blender/functions/intern/multi_function_procedure_builder.cc
@@ -0,0 +1,175 @@
+/*
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include "FN_multi_function_procedure_builder.hh"
+
+namespace blender::fn {
+
+void MFInstructionCursor::insert(MFProcedure &procedure, MFInstruction *new_instruction)
+{
+ if (instruction_ == nullptr) {
+ if (is_entry_) {
+ procedure.set_entry(*new_instruction);
+ }
+ else {
+ /* The cursor points at nothing; nothing to do. */
+ }
+ }
+ else {
+ switch (instruction_->type()) {
+ case MFInstructionType::Call: {
+ static_cast<MFCallInstruction *>(instruction_)->set_next(new_instruction);
+ break;
+ }
+ case MFInstructionType::Branch: {
+ MFBranchInstruction &branch_instruction = *static_cast<MFBranchInstruction *>(
+ instruction_);
+ if (branch_output_) {
+ branch_instruction.set_branch_true(new_instruction);
+ }
+ else {
+ branch_instruction.set_branch_false(new_instruction);
+ }
+ break;
+ }
+ case MFInstructionType::Destruct: {
+ static_cast<MFDestructInstruction *>(instruction_)->set_next(new_instruction);
+ break;
+ }
+ case MFInstructionType::Dummy: {
+ static_cast<MFDummyInstruction *>(instruction_)->set_next(new_instruction);
+ break;
+ }
+ case MFInstructionType::Return: {
+ /* It shouldn't be possible to build a cursor that points to a return instruction. */
+ BLI_assert_unreachable();
+ break;
+ }
+ }
+ }
+}
+
+void MFProcedureBuilder::add_destruct(MFVariable &variable)
+{
+ MFDestructInstruction &instruction = procedure_->new_destruct_instruction();
+ instruction.set_variable(&variable);
+ this->link_to_cursors(&instruction);
+ cursors_ = {MFInstructionCursor{instruction}};
+}
+
+void MFProcedureBuilder::add_destruct(Span<MFVariable *> variables)
+{
+ for (MFVariable *variable : variables) {
+ this->add_destruct(*variable);
+ }
+}
+
+MFReturnInstruction &MFProcedureBuilder::add_return()
+{
+ MFReturnInstruction &instruction = procedure_->new_return_instruction();
+ this->link_to_cursors(&instruction);
+ cursors_ = {};
+ return instruction;
+}
+
+MFCallInstruction &MFProcedureBuilder::add_call_with_no_variables(const MultiFunction &fn)
+{
+ MFCallInstruction &instruction = procedure_->new_call_instruction(fn);
+ this->link_to_cursors(&instruction);
+ cursors_ = {MFInstructionCursor{instruction}};
+ return instruction;
+}
+
+MFCallInstruction &MFProcedureBuilder::add_call_with_all_variables(
+ const MultiFunction &fn, Span<MFVariable *> param_variables)
+{
+ MFCallInstruction &instruction = this->add_call_with_no_variables(fn);
+ instruction.set_params(param_variables);
+ return instruction;
+}
+
+Vector<MFVariable *> MFProcedureBuilder::add_call(const MultiFunction &fn,
+ Span<MFVariable *> input_and_mutable_variables)
+{
+ Vector<MFVariable *> output_variables;
+ MFCallInstruction &instruction = this->add_call_with_no_variables(fn);
+ for (const int param_index : fn.param_indices()) {
+ const MFParamType param_type = fn.param_type(param_index);
+ switch (param_type.interface_type()) {
+ case MFParamType::Input:
+ case MFParamType::Mutable: {
+ MFVariable *variable = input_and_mutable_variables.first();
+ instruction.set_param_variable(param_index, variable);
+ input_and_mutable_variables = input_and_mutable_variables.drop_front(1);
+ break;
+ }
+ case MFParamType::Output: {
+ MFVariable &variable = procedure_->new_variable(param_type.data_type(),
+ fn.param_name(param_index));
+ instruction.set_param_variable(param_index, &variable);
+ output_variables.append(&variable);
+ break;
+ }
+ }
+ }
+ /* All passed-in variables should have been dropped in the loop above. */
+ BLI_assert(input_and_mutable_variables.is_empty());
+ return output_variables;
+}
+
+MFProcedureBuilder::Branch MFProcedureBuilder::add_branch(MFVariable &condition)
+{
+ MFBranchInstruction &instruction = procedure_->new_branch_instruction();
+ instruction.set_condition(&condition);
+ this->link_to_cursors(&instruction);
+ /* Clear cursors because this builder ends here. */
+ cursors_.clear();
+
+ Branch branch{*procedure_, *procedure_};
+ branch.branch_true.set_cursor(MFInstructionCursor{instruction, true});
+ branch.branch_false.set_cursor(MFInstructionCursor{instruction, false});
+ return branch;
+}
+
+MFProcedureBuilder::Loop MFProcedureBuilder::add_loop()
+{
+ MFDummyInstruction &loop_begin = procedure_->new_dummy_instruction();
+ MFDummyInstruction &loop_end = procedure_->new_dummy_instruction();
+ this->link_to_cursors(&loop_begin);
+ cursors_ = {MFInstructionCursor{loop_begin}};
+
+ Loop loop;
+ loop.begin = &loop_begin;
+ loop.end = &loop_end;
+
+ return loop;
+}
+
+void MFProcedureBuilder::add_loop_continue(Loop &loop)
+{
+ this->link_to_cursors(loop.begin);
+ /* Clear cursors because this builder ends here. */
+ cursors_.clear();
+}
+
+void MFProcedureBuilder::add_loop_break(Loop &loop)
+{
+ this->link_to_cursors(loop.end);
+ /* Clear cursors because this builder ends here. */
+ cursors_.clear();
+}
+
+} // namespace blender::fn
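A short editor's sketch (not part of the patch) of how the Branch returned by add_branch() above is meant to be used from the caller's side; CPPType::get<bool>() is an assumption, and the procedure intentionally does nothing beyond demonstrating the cursor wiring:

MFProcedure procedure;
MFProcedureBuilder builder{procedure};

MFVariable &condition_var = builder.add_input_parameter(
    MFDataType::ForSingle(CPPType::get<bool>()), "Condition");

/* add_branch() ends this builder; each side of the branch continues with its
 * own MFProcedureBuilder whose cursor starts at the corresponding output of
 * the branch instruction. */
MFProcedureBuilder::Branch branch = builder.add_branch(condition_var);

/* A real procedure would add calls here, e.g. branch.branch_true.add_call(...). */
branch.branch_true.add_destruct(condition_var);
branch.branch_true.add_return();

branch.branch_false.add_destruct(condition_var);
branch.branch_false.add_return();

BLI_assert(procedure.validate());

Each side keeps its own cursor list, so instructions appended through branch.branch_true and branch.branch_false are linked to the true and false outputs of the branch instruction respectively.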
diff --git a/source/blender/functions/intern/multi_function_procedure_executor.cc b/source/blender/functions/intern/multi_function_procedure_executor.cc
new file mode 100644
index 00000000000..38b26415779
--- /dev/null
+++ b/source/blender/functions/intern/multi_function_procedure_executor.cc
@@ -0,0 +1,1212 @@
+/*
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include "FN_multi_function_procedure_executor.hh"
+
+#include "BLI_stack.hh"
+
+namespace blender::fn {
+
+MFProcedureExecutor::MFProcedureExecutor(std::string name, const MFProcedure &procedure)
+ : procedure_(procedure)
+{
+ MFSignatureBuilder signature(std::move(name));
+
+ for (const ConstMFParameter &param : procedure.params()) {
+ signature.add(param.variable->name(), MFParamType(param.type, param.variable->data_type()));
+ }
+
+ signature_ = signature.build();
+ this->set_signature(&signature_);
+}
+
+using IndicesSplitVectors = std::array<Vector<int64_t>, 2>;
+
+namespace {
+enum class ValueType {
+ GVArray = 0,
+ Span = 1,
+ GVVectorArray = 2,
+ GVectorArray = 3,
+ OneSingle = 4,
+ OneVector = 5,
+};
+constexpr int tot_variable_value_types = 6;
+} // namespace
+
+/**
+ * During evaluation, a variable may be stored in various forms, depending on what the
+ * instructions do with it.
+ */
+struct VariableValue {
+ ValueType type;
+
+ VariableValue(ValueType type) : type(type)
+ {
+ }
+};
+
+/* This variable is the unmodified virtual array from the caller. */
+struct VariableValue_GVArray : public VariableValue {
+ static inline constexpr ValueType static_type = ValueType::GVArray;
+ const GVArray &data;
+
+ VariableValue_GVArray(const GVArray &data) : VariableValue(static_type), data(data)
+ {
+ }
+};
+
+/* This variable has a different value for every index. Some values may be uninitialized. The span
+ * may be owned by the caller. */
+struct VariableValue_Span : public VariableValue {
+ static inline constexpr ValueType static_type = ValueType::Span;
+ void *data;
+ bool owned;
+
+ VariableValue_Span(void *data, bool owned) : VariableValue(static_type), data(data), owned(owned)
+ {
+ }
+};
+
+/* This variable is the unmodified virtual vector array from the caller. */
+struct VariableValue_GVVectorArray : public VariableValue {
+ static inline constexpr ValueType static_type = ValueType::GVVectorArray;
+ const GVVectorArray &data;
+
+ VariableValue_GVVectorArray(const GVVectorArray &data) : VariableValue(static_type), data(data)
+ {
+ }
+};
+
+/* This variable has a different vector for every index. */
+struct VariableValue_GVectorArray : public VariableValue {
+ static inline constexpr ValueType static_type = ValueType::GVectorArray;
+ GVectorArray &data;
+ bool owned;
+
+ VariableValue_GVectorArray(GVectorArray &data, bool owned)
+ : VariableValue(static_type), data(data), owned(owned)
+ {
+ }
+};
+
+/* This variable has the same value for every index. */
+struct VariableValue_OneSingle : public VariableValue {
+ static inline constexpr ValueType static_type = ValueType::OneSingle;
+ void *data;
+ bool is_initialized = false;
+
+ VariableValue_OneSingle(void *data) : VariableValue(static_type), data(data)
+ {
+ }
+};
+
+/* This variable has the same vector for every index. */
+struct VariableValue_OneVector : public VariableValue {
+ static inline constexpr ValueType static_type = ValueType::OneVector;
+ GVectorArray &data;
+
+ VariableValue_OneVector(GVectorArray &data) : VariableValue(static_type), data(data)
+ {
+ }
+};
+
+static_assert(std::is_trivially_destructible_v<VariableValue_GVArray>);
+static_assert(std::is_trivially_destructible_v<VariableValue_Span>);
+static_assert(std::is_trivially_destructible_v<VariableValue_GVVectorArray>);
+static_assert(std::is_trivially_destructible_v<VariableValue_GVectorArray>);
+static_assert(std::is_trivially_destructible_v<VariableValue_OneSingle>);
+static_assert(std::is_trivially_destructible_v<VariableValue_OneVector>);
+
+class VariableState;
+
+/**
+ * The #ValueAllocator is responsible for providing memory for variables and their values. It also
+ * manages the reuse of buffers to improve performance.
+ */
+class ValueAllocator : NonCopyable, NonMovable {
+ private:
+ /* Allocate with 64 byte alignment for better reusability of buffers and improved cache
+ * performance. */
+ static constexpr inline int min_alignment = 64;
+
+ /* Use stacks so that the most recently used buffers are reused first. This improves cache
+ * efficiency. */
+ std::array<Stack<VariableValue *>, tot_variable_value_types> values_free_lists_;
+ /* The integer key is the size of one element (e.g. 4 for an integer buffer). All buffers are
+ * aligned to #min_alignment bytes. */
+ Map<int, Stack<void *>> span_buffers_free_list_;
+
+ public:
+ ValueAllocator() = default;
+
+ ~ValueAllocator()
+ {
+ for (Stack<VariableValue *> &stack : values_free_lists_) {
+ while (!stack.is_empty()) {
+ MEM_freeN(stack.pop());
+ }
+ }
+ for (Stack<void *> &stack : span_buffers_free_list_.values()) {
+ while (!stack.is_empty()) {
+ MEM_freeN(stack.pop());
+ }
+ }
+ }
+
+ template<typename... Args> VariableState *obtain_variable_state(Args &&...args);
+
+ void release_variable_state(VariableState *state);
+
+ VariableValue_GVArray *obtain_GVArray(const GVArray &varray)
+ {
+ return this->obtain<VariableValue_GVArray>(varray);
+ }
+
+ VariableValue_GVVectorArray *obtain_GVVectorArray(const GVVectorArray &varray)
+ {
+ return this->obtain<VariableValue_GVVectorArray>(varray);
+ }
+
+ VariableValue_Span *obtain_Span_not_owned(void *buffer)
+ {
+ return this->obtain<VariableValue_Span>(buffer, false);
+ }
+
+ VariableValue_Span *obtain_Span(const CPPType &type, int size)
+ {
+ void *buffer = nullptr;
+
+ const int element_size = type.size();
+ const int alignment = type.alignment();
+
+ if (alignment > min_alignment) {
+      /* In this rare case we fall back to not reusing existing buffers. */
+ buffer = MEM_mallocN_aligned(element_size * size, alignment, __func__);
+ }
+ else {
+ Stack<void *> *stack = span_buffers_free_list_.lookup_ptr(element_size);
+ if (stack == nullptr || stack->is_empty()) {
+ buffer = MEM_mallocN_aligned(element_size * size, min_alignment, __func__);
+ }
+ else {
+ /* Reuse existing buffer. */
+ buffer = stack->pop();
+ }
+ }
+
+ return this->obtain<VariableValue_Span>(buffer, true);
+ }
+
+ VariableValue_GVectorArray *obtain_GVectorArray_not_owned(GVectorArray &data)
+ {
+ return this->obtain<VariableValue_GVectorArray>(data, false);
+ }
+
+ VariableValue_GVectorArray *obtain_GVectorArray(const CPPType &type, int size)
+ {
+ GVectorArray *vector_array = new GVectorArray(type, size);
+ return this->obtain<VariableValue_GVectorArray>(*vector_array, true);
+ }
+
+ VariableValue_OneSingle *obtain_OneSingle(const CPPType &type)
+ {
+ void *buffer = MEM_mallocN_aligned(type.size(), type.alignment(), __func__);
+ return this->obtain<VariableValue_OneSingle>(buffer);
+ }
+
+ VariableValue_OneVector *obtain_OneVector(const CPPType &type)
+ {
+ GVectorArray *vector_array = new GVectorArray(type, 1);
+ return this->obtain<VariableValue_OneVector>(*vector_array);
+ }
+
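+  /* Give a value back to the allocator. Depending on the value type, owned buffers are either
+   * kept for later reuse (span buffers) or freed; the #VariableValue itself is pushed onto its
+   * free list so that it can be reused by #obtain. */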
+ void release_value(VariableValue *value, const MFDataType &data_type)
+ {
+ switch (value->type) {
+ case ValueType::GVArray: {
+ break;
+ }
+ case ValueType::Span: {
+ auto *value_typed = static_cast<VariableValue_Span *>(value);
+ if (value_typed->owned) {
+ const CPPType &type = data_type.single_type();
+ /* Assumes all values in the buffer are uninitialized already. */
+ Stack<void *> &buffers = span_buffers_free_list_.lookup_or_add_default(type.size());
+ buffers.push(value_typed->data);
+ }
+ break;
+ }
+ case ValueType::GVVectorArray: {
+ break;
+ }
+ case ValueType::GVectorArray: {
+ auto *value_typed = static_cast<VariableValue_GVectorArray *>(value);
+ if (value_typed->owned) {
+ delete &value_typed->data;
+ }
+ break;
+ }
+ case ValueType::OneSingle: {
+ auto *value_typed = static_cast<VariableValue_OneSingle *>(value);
+ if (value_typed->is_initialized) {
+ const CPPType &type = data_type.single_type();
+ type.destruct(value_typed->data);
+ }
+ MEM_freeN(value_typed->data);
+ break;
+ }
+ case ValueType::OneVector: {
+ auto *value_typed = static_cast<VariableValue_OneVector *>(value);
+ delete &value_typed->data;
+ break;
+ }
+ }
+
+ Stack<VariableValue *> &stack = values_free_lists_[(int)value->type];
+ stack.push(value);
+ }
+
+ private:
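+  /* Construct a new value of the given type, reusing a previously released #VariableValue from
+   * the corresponding free list when one is available. */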
+ template<typename T, typename... Args> T *obtain(Args &&...args)
+ {
+ static_assert(std::is_base_of_v<VariableValue, T>);
+ Stack<VariableValue *> &stack = values_free_lists_[(int)T::static_type];
+ if (stack.is_empty()) {
+ void *buffer = MEM_mallocN(sizeof(T), __func__);
+ return new (buffer) T(std::forward<Args>(args)...);
+ }
+ return new (stack.pop()) T(std::forward<Args>(args)...);
+ }
+};
+
+/**
+ * This class keeps track of a single variable during evaluation.
+ */
+class VariableState : NonCopyable, NonMovable {
+ private:
+ /** The current value of the variable. The storage format may change over time. */
+ VariableValue *value_;
+ /** Number of indices that are currently initialized in this variable. */
+ int tot_initialized_;
+  /* This is a non-owning pointer to either a span buffer, a #GVectorArray, or null. */
+ void *caller_provided_storage_ = nullptr;
+
+ public:
+ VariableState(VariableValue &value, int tot_initialized, void *caller_provided_storage = nullptr)
+ : value_(&value),
+ tot_initialized_(tot_initialized),
+ caller_provided_storage_(caller_provided_storage)
+ {
+ }
+
+ void destruct_self(ValueAllocator &value_allocator, const MFDataType &data_type)
+ {
+ value_allocator.release_value(value_, data_type);
+ value_allocator.release_variable_state(this);
+ }
+
+  /* True if this variable stores only a single value, i.e. the value is the same for all
+   * indices. */
+ bool is_one() const
+ {
+ switch (value_->type) {
+ case ValueType::GVArray:
+ return this->value_as<VariableValue_GVArray>()->data.is_single();
+ case ValueType::Span:
+ return tot_initialized_ == 0;
+ case ValueType::GVVectorArray:
+ return this->value_as<VariableValue_GVVectorArray>()->data.is_single_vector();
+ case ValueType::GVectorArray:
+ return tot_initialized_ == 0;
+ case ValueType::OneSingle:
+ return true;
+ case ValueType::OneVector:
+ return true;
+ }
+ BLI_assert_unreachable();
+ return false;
+ }
+
+ bool is_fully_initialized(const IndexMask full_mask)
+ {
+ return tot_initialized_ == full_mask.size();
+ }
+
+ bool is_fully_uninitialized(const IndexMask full_mask)
+ {
+ UNUSED_VARS(full_mask);
+ return tot_initialized_ == 0;
+ }
+
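+  /* Add the current value of this variable as a read-only input parameter for the next
+   * multi-function call. */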
+ void add_as_input(MFParamsBuilder &params, IndexMask mask, const MFDataType &data_type) const
+ {
+ /* Sanity check to make sure that enough values are initialized. */
+ BLI_assert(mask.size() <= tot_initialized_);
+
+ switch (value_->type) {
+ case ValueType::GVArray: {
+ params.add_readonly_single_input(this->value_as<VariableValue_GVArray>()->data);
+ break;
+ }
+ case ValueType::Span: {
+ const void *data = this->value_as<VariableValue_Span>()->data;
+ const GSpan span{data_type.single_type(), data, mask.min_array_size()};
+ params.add_readonly_single_input(span);
+ break;
+ }
+ case ValueType::GVVectorArray: {
+ params.add_readonly_vector_input(this->value_as<VariableValue_GVVectorArray>()->data);
+ break;
+ }
+ case ValueType::GVectorArray: {
+ params.add_readonly_vector_input(this->value_as<VariableValue_GVectorArray>()->data);
+ break;
+ }
+ case ValueType::OneSingle: {
+ const auto *value_typed = this->value_as<VariableValue_OneSingle>();
+ BLI_assert(value_typed->is_initialized);
+ const GPointer gpointer{data_type.single_type(), value_typed->data};
+ params.add_readonly_single_input(gpointer);
+ break;
+ }
+ case ValueType::OneVector: {
+ params.add_readonly_vector_input(this->value_as<VariableValue_OneVector>()->data[0]);
+ break;
+ }
+ }
+ }
+
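+  /* Make sure the value is stored in a mutable format (#Span for single values, #GVectorArray
+   * for vectors), copying the data out of the read-only source value if necessary. Storage
+   * provided by the caller is reused when available. */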
+ void ensure_is_mutable(IndexMask full_mask,
+ const MFDataType &data_type,
+ ValueAllocator &value_allocator)
+ {
+ if (ELEM(value_->type, ValueType::Span, ValueType::GVectorArray)) {
+ return;
+ }
+
+ const int array_size = full_mask.min_array_size();
+
+ switch (data_type.category()) {
+ case MFDataType::Single: {
+ const CPPType &type = data_type.single_type();
+ VariableValue_Span *new_value = nullptr;
+ if (caller_provided_storage_ == nullptr) {
+ new_value = value_allocator.obtain_Span(type, array_size);
+ }
+ else {
+          /* Reuse the storage provided by the caller when possible. */
+ new_value = value_allocator.obtain_Span_not_owned(caller_provided_storage_);
+ }
+ if (value_->type == ValueType::GVArray) {
+ /* Fill new buffer with data from virtual array. */
+ this->value_as<VariableValue_GVArray>()->data.materialize_to_uninitialized(
+ full_mask, new_value->data);
+ }
+ else if (value_->type == ValueType::OneSingle) {
+ auto *old_value_typed_ = this->value_as<VariableValue_OneSingle>();
+ if (old_value_typed_->is_initialized) {
+ /* Fill the buffer with a single value. */
+ type.fill_construct_indices(old_value_typed_->data, new_value->data, full_mask);
+ }
+ }
+ else {
+ BLI_assert_unreachable();
+ }
+ value_allocator.release_value(value_, data_type);
+ value_ = new_value;
+ break;
+ }
+ case MFDataType::Vector: {
+ const CPPType &type = data_type.vector_base_type();
+ VariableValue_GVectorArray *new_value = nullptr;
+ if (caller_provided_storage_ == nullptr) {
+ new_value = value_allocator.obtain_GVectorArray(type, array_size);
+ }
+ else {
+ new_value = value_allocator.obtain_GVectorArray_not_owned(
+ *(GVectorArray *)caller_provided_storage_);
+ }
+ if (value_->type == ValueType::GVVectorArray) {
+ /* Fill new vector array with data from virtual vector array. */
+ new_value->data.extend(full_mask, this->value_as<VariableValue_GVVectorArray>()->data);
+ }
+ else if (value_->type == ValueType::OneVector) {
+ /* Fill all indices with the same value. */
+ const GSpan vector = this->value_as<VariableValue_OneVector>()->data[0];
+ new_value->data.extend(full_mask, GVVectorArray_For_SingleGSpan{vector, array_size});
+ }
+ else {
+ BLI_assert_unreachable();
+ }
+ value_allocator.release_value(value_, data_type);
+ value_ = new_value;
+ break;
+ }
+ }
+ }
+
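+  /* Add this variable as a mutable parameter for the next multi-function call, converting it to
+   * a mutable storage format first if necessary. */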
+ void add_as_mutable(MFParamsBuilder &params,
+ IndexMask mask,
+ IndexMask full_mask,
+ const MFDataType &data_type,
+ ValueAllocator &value_allocator)
+ {
+ /* Sanity check to make sure that enough values are initialized. */
+ BLI_assert(mask.size() <= tot_initialized_);
+
+ this->ensure_is_mutable(full_mask, data_type, value_allocator);
+
+ switch (value_->type) {
+ case ValueType::Span: {
+ void *data = this->value_as<VariableValue_Span>()->data;
+ const GMutableSpan span{data_type.single_type(), data, mask.min_array_size()};
+ params.add_single_mutable(span);
+ break;
+ }
+ case ValueType::GVectorArray: {
+ params.add_vector_mutable(this->value_as<VariableValue_GVectorArray>()->data);
+ break;
+ }
+ case ValueType::GVArray:
+ case ValueType::GVVectorArray:
+ case ValueType::OneSingle:
+ case ValueType::OneVector: {
+ BLI_assert_unreachable();
+ break;
+ }
+ }
+ }
+
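+  /* Add this variable as an output parameter for the next multi-function call. The masked
+   * indices count as initialized after the function has been called. */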
+ void add_as_output(MFParamsBuilder &params,
+ IndexMask mask,
+ IndexMask full_mask,
+ const MFDataType &data_type,
+ ValueAllocator &value_allocator)
+ {
+    /* Sanity check to make sure that enough values are still uninitialized. */
+ BLI_assert(mask.size() <= full_mask.size() - tot_initialized_);
+ this->ensure_is_mutable(full_mask, data_type, value_allocator);
+
+ switch (value_->type) {
+ case ValueType::Span: {
+ void *data = this->value_as<VariableValue_Span>()->data;
+ const GMutableSpan span{data_type.single_type(), data, mask.min_array_size()};
+ params.add_uninitialized_single_output(span);
+ break;
+ }
+ case ValueType::GVectorArray: {
+ params.add_vector_output(this->value_as<VariableValue_GVectorArray>()->data);
+ break;
+ }
+ case ValueType::GVArray:
+ case ValueType::GVVectorArray:
+ case ValueType::OneSingle:
+ case ValueType::OneVector: {
+ BLI_assert_unreachable();
+ break;
+ }
+ }
+
+ tot_initialized_ += mask.size();
+ }
+
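+  /* Same as #add_as_input, but may only be used when this variable has the same value for all
+   * indices (see #is_one). */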
+ void add_as_input__one(MFParamsBuilder &params, const MFDataType &data_type) const
+ {
+ BLI_assert(this->is_one());
+
+ switch (value_->type) {
+ case ValueType::GVArray: {
+ params.add_readonly_single_input(this->value_as<VariableValue_GVArray>()->data);
+ break;
+ }
+ case ValueType::GVVectorArray: {
+ params.add_readonly_vector_input(this->value_as<VariableValue_GVVectorArray>()->data);
+ break;
+ }
+ case ValueType::OneSingle: {
+ const auto *value_typed = this->value_as<VariableValue_OneSingle>();
+ BLI_assert(value_typed->is_initialized);
+ GPointer ptr{data_type.single_type(), value_typed->data};
+ params.add_readonly_single_input(ptr);
+ break;
+ }
+ case ValueType::OneVector: {
+ params.add_readonly_vector_input(this->value_as<VariableValue_OneVector>()->data);
+ break;
+ }
+ case ValueType::Span:
+ case ValueType::GVectorArray: {
+ BLI_assert_unreachable();
+ break;
+ }
+ }
+ }
+
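+  /* Counterpart to #ensure_is_mutable for variables that store a single value or vector for all
+   * indices. */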
+ void ensure_is_mutable__one(const MFDataType &data_type, ValueAllocator &value_allocator)
+ {
+ BLI_assert(this->is_one());
+ if (ELEM(value_->type, ValueType::OneSingle, ValueType::OneVector)) {
+ return;
+ }
+
+ switch (data_type.category()) {
+ case MFDataType::Single: {
+ const CPPType &type = data_type.single_type();
+ VariableValue_OneSingle *new_value = value_allocator.obtain_OneSingle(type);
+ if (value_->type == ValueType::GVArray) {
+ this->value_as<VariableValue_GVArray>()->data.get_internal_single_to_uninitialized(
+ new_value->data);
+ new_value->is_initialized = true;
+ }
+ else if (value_->type == ValueType::Span) {
+ BLI_assert(tot_initialized_ == 0);
+ /* Nothing to do, the single value is uninitialized already. */
+ }
+ else {
+ BLI_assert_unreachable();
+ }
+ value_allocator.release_value(value_, data_type);
+ value_ = new_value;
+ break;
+ }
+ case MFDataType::Vector: {
+ const CPPType &type = data_type.vector_base_type();
+ VariableValue_OneVector *new_value = value_allocator.obtain_OneVector(type);
+ if (value_->type == ValueType::GVVectorArray) {
+ const GVVectorArray &old_vector_array =
+ this->value_as<VariableValue_GVVectorArray>()->data;
+ new_value->data.extend(IndexRange(1), old_vector_array);
+ }
+ else if (value_->type == ValueType::GVectorArray) {
+ BLI_assert(tot_initialized_ == 0);
+ /* Nothing to do. */
+ }
+ else {
+ BLI_assert_unreachable();
+ }
+ value_allocator.release_value(value_, data_type);
+ value_ = new_value;
+ break;
+ }
+ }
+ }
+
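+  /* Same as #add_as_mutable, but for variables that have the same value for all indices. */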
+ void add_as_mutable__one(MFParamsBuilder &params,
+ const MFDataType &data_type,
+ ValueAllocator &value_allocator)
+ {
+ BLI_assert(this->is_one());
+ this->ensure_is_mutable__one(data_type, value_allocator);
+
+ switch (value_->type) {
+ case ValueType::OneSingle: {
+ auto *value_typed = this->value_as<VariableValue_OneSingle>();
+ BLI_assert(value_typed->is_initialized);
+ params.add_single_mutable(GMutableSpan{data_type.single_type(), value_typed->data, 1});
+ break;
+ }
+ case ValueType::OneVector: {
+ params.add_vector_mutable(this->value_as<VariableValue_OneVector>()->data);
+ break;
+ }
+ case ValueType::GVArray:
+ case ValueType::Span:
+ case ValueType::GVVectorArray:
+ case ValueType::GVectorArray: {
+ BLI_assert_unreachable();
+ break;
+ }
+ }
+ }
+
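+  /* Same as #add_as_output, but for variables that have the same value for all indices. */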
+ void add_as_output__one(MFParamsBuilder &params,
+ IndexMask mask,
+ const MFDataType &data_type,
+ ValueAllocator &value_allocator)
+ {
+ BLI_assert(this->is_one());
+ this->ensure_is_mutable__one(data_type, value_allocator);
+
+ switch (value_->type) {
+ case ValueType::OneSingle: {
+ auto *value_typed = this->value_as<VariableValue_OneSingle>();
+ BLI_assert(!value_typed->is_initialized);
+ params.add_uninitialized_single_output(
+ GMutableSpan{data_type.single_type(), value_typed->data, 1});
+ /* It becomes initialized when the multi-function is called. */
+ value_typed->is_initialized = true;
+ break;
+ }
+ case ValueType::OneVector: {
+ auto *value_typed = this->value_as<VariableValue_OneVector>();
+ BLI_assert(value_typed->data[0].is_empty());
+ params.add_vector_output(value_typed->data);
+ break;
+ }
+ case ValueType::GVArray:
+ case ValueType::Span:
+ case ValueType::GVVectorArray:
+ case ValueType::GVectorArray: {
+ BLI_assert_unreachable();
+ break;
+ }
+ }
+
+ tot_initialized_ += mask.size();
+ }
+
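+  /* Destruct the values at the masked indices. When no initialized values remain, the storage
+   * may be released back to the allocator or replaced by a cheaper single-value representation. */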
+ void destruct(IndexMask mask,
+ IndexMask full_mask,
+ const MFDataType &data_type,
+ ValueAllocator &value_allocator)
+ {
+ int new_tot_initialized = tot_initialized_ - mask.size();
+
+ /* Sanity check to make sure that enough indices can be destructed. */
+ BLI_assert(new_tot_initialized >= 0);
+
+ switch (value_->type) {
+ case ValueType::GVArray: {
+ if (mask.size() == full_mask.size()) {
+          /* All elements are to be destructed. Since the elements are owned by the caller, we
+           * don't actually destruct them here. */
+ value_allocator.release_value(value_, data_type);
+ value_ = value_allocator.obtain_OneSingle(data_type.single_type());
+ }
+ else {
+ /* Not all elements are destructed. Since we can't work on the original array, we have to
+ * create a copy first. */
+ this->ensure_is_mutable(full_mask, data_type, value_allocator);
+ BLI_assert(value_->type == ValueType::Span);
+ const CPPType &type = data_type.single_type();
+ type.destruct_indices(this->value_as<VariableValue_Span>()->data, mask);
+ }
+ break;
+ }
+ case ValueType::Span: {
+ const CPPType &type = data_type.single_type();
+ type.destruct_indices(this->value_as<VariableValue_Span>()->data, mask);
+ if (new_tot_initialized == 0) {
+          /* Release the span once no values are initialized anymore. */
+ value_allocator.release_value(value_, data_type);
+ value_ = value_allocator.obtain_OneSingle(data_type.single_type());
+ }
+ break;
+ }
+ case ValueType::GVVectorArray: {
+ if (mask.size() == full_mask.size()) {
+          /* All elements are to be cleared. Since the elements are owned by the caller, we don't
+           * actually destruct them here. */
+ value_allocator.release_value(value_, data_type);
+ value_ = value_allocator.obtain_OneVector(data_type.vector_base_type());
+ }
+ else {
+ /* Not all elements are cleared. Since we can't work on the original vector array, we
+ * have to create a copy first. A possible future optimization is to create the partial
+ * copy directly. */
+ this->ensure_is_mutable(full_mask, data_type, value_allocator);
+ BLI_assert(value_->type == ValueType::GVectorArray);
+ this->value_as<VariableValue_GVectorArray>()->data.clear(mask);
+ }
+ break;
+ }
+ case ValueType::GVectorArray: {
+ this->value_as<VariableValue_GVectorArray>()->data.clear(mask);
+ break;
+ }
+ case ValueType::OneSingle: {
+ auto *value_typed = this->value_as<VariableValue_OneSingle>();
+ BLI_assert(value_typed->is_initialized);
+ if (mask.size() == tot_initialized_) {
+ const CPPType &type = data_type.single_type();
+ type.destruct(value_typed->data);
+ value_typed->is_initialized = false;
+ }
+ break;
+ }
+ case ValueType::OneVector: {
+ auto *value_typed = this->value_as<VariableValue_OneVector>();
+ if (mask.size() == tot_initialized_) {
+ value_typed->data.clear({0});
+ }
+ break;
+ }
+ }
+
+ tot_initialized_ = new_tot_initialized;
+ }
+
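+  /* Split the masked indices into two groups based on the boolean value this variable has at
+   * each index. Used to determine which indices take which side of a branch instruction. */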
+ void indices_split(IndexMask mask, IndicesSplitVectors &r_indices)
+ {
+ BLI_assert(mask.size() <= tot_initialized_);
+
+ switch (value_->type) {
+ case ValueType::GVArray: {
+ const GVArray_Typed<bool> varray{this->value_as<VariableValue_GVArray>()->data};
+ for (const int i : mask) {
+ r_indices[varray[i]].append(i);
+ }
+ break;
+ }
+ case ValueType::Span: {
+ const Span<bool> span((bool *)this->value_as<VariableValue_Span>()->data,
+ mask.min_array_size());
+ for (const int i : mask) {
+ r_indices[span[i]].append(i);
+ }
+ break;
+ }
+ case ValueType::OneSingle: {
+ auto *value_typed = this->value_as<VariableValue_OneSingle>();
+ BLI_assert(value_typed->is_initialized);
+ const bool condition = *(bool *)value_typed->data;
+ r_indices[condition].extend(mask);
+ break;
+ }
+ case ValueType::GVVectorArray:
+ case ValueType::GVectorArray:
+ case ValueType::OneVector: {
+ BLI_assert_unreachable();
+ break;
+ }
+ }
+ }
+
+ template<typename T> T *value_as()
+ {
+ BLI_assert(value_->type == T::static_type);
+ return static_cast<T *>(value_);
+ }
+
+ template<typename T> const T *value_as() const
+ {
+ BLI_assert(value_->type == T::static_type);
+ return static_cast<T *>(value_);
+ }
+};
+
+template<typename... Args> VariableState *ValueAllocator::obtain_variable_state(Args &&...args)
+{
+ return new VariableState(std::forward<Args>(args)...);
+}
+
+void ValueAllocator::release_variable_state(VariableState *state)
+{
+ delete state;
+}
+
+/** Keeps track of the states of all variables during evaluation. */
+class VariableStates {
+ private:
+ ValueAllocator value_allocator_;
+ Map<const MFVariable *, VariableState *> variable_states_;
+ IndexMask full_mask_;
+
+ public:
+ VariableStates(IndexMask full_mask) : full_mask_(full_mask)
+ {
+ }
+
+ ~VariableStates()
+ {
+ for (auto &&item : variable_states_.items()) {
+ const MFVariable *variable = item.key;
+ VariableState *state = item.value;
+ state->destruct_self(value_allocator_, variable->data_type());
+ }
+ }
+
+ ValueAllocator &value_allocator()
+ {
+ return value_allocator_;
+ }
+
+ const IndexMask &full_mask() const
+ {
+ return full_mask_;
+ }
+
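+  /* Create the initial variable states for all procedure parameters from the values that the
+   * caller passed in. */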
+ void add_initial_variable_states(const MFProcedureExecutor &fn,
+ const MFProcedure &procedure,
+ MFParams &params)
+ {
+ for (const int param_index : fn.param_indices()) {
+ MFParamType param_type = fn.param_type(param_index);
+ const MFVariable *variable = procedure.params()[param_index].variable;
+
+ auto add_state = [&](VariableValue *value,
+ bool input_is_initialized,
+ void *caller_provided_storage = nullptr) {
+ const int tot_initialized = input_is_initialized ? full_mask_.size() : 0;
+ variable_states_.add_new(variable,
+ value_allocator_.obtain_variable_state(
+ *value, tot_initialized, caller_provided_storage));
+ };
+
+ switch (param_type.category()) {
+ case MFParamType::SingleInput: {
+ const GVArray &data = params.readonly_single_input(param_index);
+ add_state(value_allocator_.obtain_GVArray(data), true);
+ break;
+ }
+ case MFParamType::VectorInput: {
+ const GVVectorArray &data = params.readonly_vector_input(param_index);
+ add_state(value_allocator_.obtain_GVVectorArray(data), true);
+ break;
+ }
+ case MFParamType::SingleOutput: {
+ GMutableSpan data = params.uninitialized_single_output(param_index);
+ add_state(value_allocator_.obtain_Span_not_owned(data.data()), false, data.data());
+ break;
+ }
+ case MFParamType::VectorOutput: {
+ GVectorArray &data = params.vector_output(param_index);
+ add_state(value_allocator_.obtain_GVectorArray_not_owned(data), false, &data);
+ break;
+ }
+ case MFParamType::SingleMutable: {
+ GMutableSpan data = params.single_mutable(param_index);
+ add_state(value_allocator_.obtain_Span_not_owned(data.data()), true, data.data());
+ break;
+ }
+ case MFParamType::VectorMutable: {
+ GVectorArray &data = params.vector_mutable(param_index);
+ add_state(value_allocator_.obtain_GVectorArray_not_owned(data), true, &data);
+ break;
+ }
+ }
+ }
+ }
+
+ void add_as_param(VariableState &variable_state,
+ MFParamsBuilder &params,
+ const MFParamType &param_type,
+ const IndexMask &mask)
+ {
+ const MFDataType data_type = param_type.data_type();
+ switch (param_type.interface_type()) {
+ case MFParamType::Input: {
+ variable_state.add_as_input(params, mask, data_type);
+ break;
+ }
+ case MFParamType::Mutable: {
+ variable_state.add_as_mutable(params, mask, full_mask_, data_type, value_allocator_);
+ break;
+ }
+ case MFParamType::Output: {
+ variable_state.add_as_output(params, mask, full_mask_, data_type, value_allocator_);
+ break;
+ }
+ }
+ }
+
+ void add_as_param__one(VariableState &variable_state,
+ MFParamsBuilder &params,
+ const MFParamType &param_type,
+ const IndexMask &mask)
+ {
+ const MFDataType data_type = param_type.data_type();
+ switch (param_type.interface_type()) {
+ case MFParamType::Input: {
+ variable_state.add_as_input__one(params, data_type);
+ break;
+ }
+ case MFParamType::Mutable: {
+ variable_state.add_as_mutable__one(params, data_type, value_allocator_);
+ break;
+ }
+ case MFParamType::Output: {
+ variable_state.add_as_output__one(params, mask, data_type, value_allocator_);
+ break;
+ }
+ }
+ }
+
+ void destruct(const MFVariable &variable, const IndexMask &mask)
+ {
+ VariableState &variable_state = this->get_variable_state(variable);
+ variable_state.destruct(mask, full_mask_, variable.data_type(), value_allocator_);
+ }
+
+ VariableState &get_variable_state(const MFVariable &variable)
+ {
+ return *variable_states_.lookup_or_add_cb(
+ &variable, [&]() { return this->create_new_state_for_variable(variable); });
+ }
+
+ VariableState *create_new_state_for_variable(const MFVariable &variable)
+ {
+ MFDataType data_type = variable.data_type();
+ switch (data_type.category()) {
+ case MFDataType::Single: {
+ const CPPType &type = data_type.single_type();
+ return value_allocator_.obtain_variable_state(*value_allocator_.obtain_OneSingle(type), 0);
+ }
+ case MFDataType::Vector: {
+ const CPPType &type = data_type.vector_base_type();
+ return value_allocator_.obtain_variable_state(*value_allocator_.obtain_OneVector(type), 0);
+ }
+ }
+ BLI_assert_unreachable();
+ return nullptr;
+ }
+};
+
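+/**
+ * A call instruction can be evaluated a single time instead of once per index when the function
+ * does not depend on the context, the mask covers all indices and every parameter variable
+ * contains the same value for all indices.
+ */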
+static bool evaluate_as_one(const MultiFunction &fn,
+ Span<VariableState *> param_variable_states,
+ const IndexMask &mask,
+ const IndexMask &full_mask)
+{
+ if (fn.depends_on_context()) {
+ return false;
+ }
+ if (mask.size() < full_mask.size()) {
+ return false;
+ }
+ for (VariableState *state : param_variable_states) {
+ if (!state->is_one()) {
+ return false;
+ }
+ }
+ return true;
+}
+
+static void execute_call_instruction(const MFCallInstruction &instruction,
+ IndexMask mask,
+ VariableStates &variable_states,
+ const MFContext &context)
+{
+ const MultiFunction &fn = instruction.fn();
+
+ Vector<VariableState *> param_variable_states;
+ param_variable_states.resize(fn.param_amount());
+
+ for (const int param_index : fn.param_indices()) {
+ const MFVariable *variable = instruction.params()[param_index];
+ VariableState &variable_state = variable_states.get_variable_state(*variable);
+ param_variable_states[param_index] = &variable_state;
+ }
+
+  /* When the function does not depend on the context and all parameters have the same value for
+   * every index, it's enough to call the function only once instead of for every index. */
+ if (evaluate_as_one(fn, param_variable_states, mask, variable_states.full_mask())) {
+ MFParamsBuilder params(fn, 1);
+
+ for (const int param_index : fn.param_indices()) {
+ const MFParamType param_type = fn.param_type(param_index);
+ VariableState &variable_state = *param_variable_states[param_index];
+ variable_states.add_as_param__one(variable_state, params, param_type, mask);
+ }
+
+ fn.call(IndexRange(1), params, context);
+ }
+ else {
+ MFParamsBuilder params(fn, mask.min_array_size());
+
+ for (const int param_index : fn.param_indices()) {
+ const MFParamType param_type = fn.param_type(param_index);
+ VariableState &variable_state = *param_variable_states[param_index];
+ variable_states.add_as_param(variable_state, params, param_type, mask);
+ }
+
+ fn.call(mask, params, context);
+ }
+}
+
+/** An index mask that owns the underlying indices when necessary. */
+struct InstructionIndices {
+ bool is_owned;
+ Vector<int64_t> owned_indices;
+ IndexMask referenced_indices;
+
+ IndexMask mask() const
+ {
+ if (this->is_owned) {
+ return this->owned_indices.as_span();
+ }
+ return this->referenced_indices;
+ }
+};
+
+/** Contains information about the next instruction that should be executed. */
+struct NextInstructionInfo {
+ const MFInstruction *instruction = nullptr;
+ InstructionIndices indices;
+
+ IndexMask mask() const
+ {
+ return this->indices.mask();
+ }
+
+ operator bool() const
+ {
+ return this->instruction != nullptr;
+ }
+};
+
+/**
+ * Keeps track of the next instruction for all indices and decides in which order instructions are
+ * evaluated.
+ */
+class InstructionScheduler {
+ private:
+ Map<const MFInstruction *, Vector<InstructionIndices>> indices_by_instruction_;
+
+ public:
+ InstructionScheduler() = default;
+
+ void add_referenced_indices(const MFInstruction &instruction, IndexMask mask)
+ {
+ if (mask.is_empty()) {
+ return;
+ }
+ InstructionIndices new_indices;
+ new_indices.is_owned = false;
+ new_indices.referenced_indices = mask;
+ indices_by_instruction_.lookup_or_add_default(&instruction).append(std::move(new_indices));
+ }
+
+ void add_owned_indices(const MFInstruction &instruction, Vector<int64_t> indices)
+ {
+ if (indices.is_empty()) {
+ return;
+ }
+ BLI_assert(IndexMask::indices_are_valid_index_mask(indices));
+
+ InstructionIndices new_indices;
+ new_indices.is_owned = true;
+ new_indices.owned_indices = std::move(indices);
+ indices_by_instruction_.lookup_or_add_default(&instruction).append(std::move(new_indices));
+ }
+
+ void add_previous_instruction_indices(const MFInstruction &instruction,
+ NextInstructionInfo &instr_info)
+ {
+ indices_by_instruction_.lookup_or_add_default(&instruction)
+ .append(std::move(instr_info.indices));
+ }
+
+ NextInstructionInfo pop_next()
+ {
+ if (indices_by_instruction_.is_empty()) {
+ return {};
+ }
+    /* TODO: Implement a better mechanism to determine the next instruction. */
+ const MFInstruction *instruction = *indices_by_instruction_.keys().begin();
+
+ NextInstructionInfo next_instruction_info;
+ next_instruction_info.instruction = instruction;
+ next_instruction_info.indices = this->pop_indices_array(instruction);
+ return next_instruction_info;
+ }
+
+ private:
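+  /* Remove and return one batch of indices that has been scheduled for the given instruction. */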
+ InstructionIndices pop_indices_array(const MFInstruction *instruction)
+ {
+ Vector<InstructionIndices> *indices = indices_by_instruction_.lookup_ptr(instruction);
+ if (indices == nullptr) {
+ return {};
+ }
+ InstructionIndices r_indices = (*indices).pop_last();
+ BLI_assert(!r_indices.mask().is_empty());
+ if (indices->is_empty()) {
+ indices_by_instruction_.remove_contained(instruction);
+ }
+ return r_indices;
+ }
+};
+
+void MFProcedureExecutor::call(IndexMask full_mask, MFParams params, MFContext context) const
+{
+ BLI_assert(procedure_.validate());
+
+ LinearAllocator<> allocator;
+
+ VariableStates variable_states{full_mask};
+ variable_states.add_initial_variable_states(*this, procedure_, params);
+
+ InstructionScheduler scheduler;
+ scheduler.add_referenced_indices(*procedure_.entry(), full_mask);
+
+  /* Loop until all indices have reached a return instruction. */
+ while (NextInstructionInfo instr_info = scheduler.pop_next()) {
+ const MFInstruction &instruction = *instr_info.instruction;
+ switch (instruction.type()) {
+ case MFInstructionType::Call: {
+ const MFCallInstruction &call_instruction = static_cast<const MFCallInstruction &>(
+ instruction);
+ execute_call_instruction(call_instruction, instr_info.mask(), variable_states, context);
+ scheduler.add_previous_instruction_indices(*call_instruction.next(), instr_info);
+ break;
+ }
+ case MFInstructionType::Branch: {
+ const MFBranchInstruction &branch_instruction = static_cast<const MFBranchInstruction &>(
+ instruction);
+ const MFVariable *condition_var = branch_instruction.condition();
+ VariableState &variable_state = variable_states.get_variable_state(*condition_var);
+
+ IndicesSplitVectors new_indices;
+ variable_state.indices_split(instr_info.mask(), new_indices);
+ scheduler.add_owned_indices(*branch_instruction.branch_false(), new_indices[false]);
+ scheduler.add_owned_indices(*branch_instruction.branch_true(), new_indices[true]);
+ break;
+ }
+ case MFInstructionType::Destruct: {
+ const MFDestructInstruction &destruct_instruction =
+ static_cast<const MFDestructInstruction &>(instruction);
+ const MFVariable *variable = destruct_instruction.variable();
+ variable_states.destruct(*variable, instr_info.mask());
+ scheduler.add_previous_instruction_indices(*destruct_instruction.next(), instr_info);
+ break;
+ }
+ case MFInstructionType::Dummy: {
+ const MFDummyInstruction &dummy_instruction = static_cast<const MFDummyInstruction &>(
+ instruction);
+ scheduler.add_previous_instruction_indices(*dummy_instruction.next(), instr_info);
+ break;
+ }
+ case MFInstructionType::Return: {
+ /* Don't insert the indices back into the scheduler. */
+ break;
+ }
+ }
+ }
+
+ for (const int param_index : this->param_indices()) {
+ const MFParamType param_type = this->param_type(param_index);
+ const MFVariable *variable = procedure_.params()[param_index].variable;
+ VariableState &variable_state = variable_states.get_variable_state(*variable);
+ switch (param_type.interface_type()) {
+ case MFParamType::Input: {
+        /* Input variables must have been destructed by the end. */
+ BLI_assert(variable_state.is_fully_uninitialized(full_mask));
+ break;
+ }
+ case MFParamType::Mutable:
+ case MFParamType::Output: {
+        /* Mutable and output variables must be fully initialized by the end. */
+ BLI_assert(variable_state.is_fully_initialized(full_mask));
+ /* Make sure that the data is in the memory provided by the caller. */
+ variable_state.ensure_is_mutable(
+ full_mask, param_type.data_type(), variable_states.value_allocator());
+ break;
+ }
+ }
+ }
+}
+
+} // namespace blender::fn