diff options
55 files changed, 6141 insertions, 3554 deletions
diff --git a/source/blender/blenkernel/BKE_compute_contexts.hh b/source/blender/blenkernel/BKE_compute_contexts.hh new file mode 100644 index 00000000000..a8f0022f49b --- /dev/null +++ b/source/blender/blenkernel/BKE_compute_contexts.hh @@ -0,0 +1,46 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ + +#pragma once + +/** + * This file implements some specific compute contexts for concepts in Blender. + */ + +#include "BLI_compute_context.hh" + +namespace blender::bke { + +class ModifierComputeContext : public ComputeContext { + private: + static constexpr const char *s_static_type = "MODIFIER"; + + /** + * Use modifier name instead of something like `session_uuid` for now because: + * - It's more obvious that the name matches between the original and evaluated object. + * - We might want that the context hash is consistent between sessions in the future. + */ + std::string modifier_name_; + + public: + ModifierComputeContext(const ComputeContext *parent, std::string modifier_name); + + private: + void print_current_in_line(std::ostream &stream) const override; +}; + +class NodeGroupComputeContext : public ComputeContext { + private: + static constexpr const char *s_static_type = "NODE_GROUP"; + + std::string node_name_; + + public: + NodeGroupComputeContext(const ComputeContext *parent, std::string node_name); + + StringRefNull node_name() const; + + private: + void print_current_in_line(std::ostream &stream) const override; +}; + +} // namespace blender::bke diff --git a/source/blender/blenkernel/BKE_node_runtime.hh b/source/blender/blenkernel/BKE_node_runtime.hh index f2e551a9f32..194820aa4ba 100644 --- a/source/blender/blenkernel/BKE_node_runtime.hh +++ b/source/blender/blenkernel/BKE_node_runtime.hh @@ -21,6 +21,7 @@ struct bNodeType; namespace blender::nodes { struct FieldInferencingInterface; class NodeDeclaration; +struct GeometryNodesLazyFunctionGraphInfo; } // namespace blender::nodes namespace blender::bke { @@ -49,6 +50,15 @@ class bNodeTreeRuntime : 
NonCopyable, NonMovable { std::unique_ptr<nodes::FieldInferencingInterface> field_inferencing_interface; /** + * For geometry nodes, a lazy function graph with some additional info is cached. This is used to + * evaluate the node group. Caching it here allows us to reuse the preprocessed node tree in case + * its used multiple times. + */ + std::mutex geometry_nodes_lazy_function_graph_info_mutex; + std::unique_ptr<nodes::GeometryNodesLazyFunctionGraphInfo> + geometry_nodes_lazy_function_graph_info; + + /** * Protects access to all topology cache variables below. This is necessary so that the cache can * be updated on a const #bNodeTree. */ @@ -70,6 +80,7 @@ class bNodeTreeRuntime : NonCopyable, NonMovable { MultiValueMap<const bNodeType *, bNode *> nodes_by_type; Vector<bNode *> toposort_left_to_right; Vector<bNode *> toposort_right_to_left; + Vector<bNode *> group_nodes; bool has_link_cycle = false; bool has_undefined_nodes_or_sockets = false; bNode *group_output_node = nullptr; @@ -148,6 +159,12 @@ class bNodeRuntime : NonCopyable, NonMovable { namespace node_tree_runtime { +/** + * Is executed when the depsgraph determines that something in the node group changed that will + * affect the output. 
+ */ +void handle_node_tree_output_changed(bNodeTree &tree_cow); + class AllowUsingOutdatedInfo : NonCopyable, NonMovable { private: const bNodeTree &tree_; @@ -241,6 +258,18 @@ inline blender::Span<bNode *> bNodeTree::all_nodes() return this->runtime->nodes; } +inline blender::Span<const bNode *> bNodeTree::group_nodes() const +{ + BLI_assert(blender::bke::node_tree_runtime::topology_cache_is_available(*this)); + return this->runtime->group_nodes; +} + +inline blender::Span<bNode *> bNodeTree::group_nodes() +{ + BLI_assert(blender::bke::node_tree_runtime::topology_cache_is_available(*this)); + return this->runtime->group_nodes; +} + inline bool bNodeTree::has_link_cycle() const { BLI_assert(blender::bke::node_tree_runtime::topology_cache_is_available(*this)); @@ -413,7 +442,6 @@ inline blender::Span<const bNodeLink *> bNode::internal_links_span() const inline const blender::nodes::NodeDeclaration *bNode::declaration() const { - BLI_assert(this->runtime->declaration != nullptr); return this->runtime->declaration; } diff --git a/source/blender/blenkernel/CMakeLists.txt b/source/blender/blenkernel/CMakeLists.txt index b982c69a378..2f1e1897f8d 100644 --- a/source/blender/blenkernel/CMakeLists.txt +++ b/source/blender/blenkernel/CMakeLists.txt @@ -98,6 +98,7 @@ set(SRC intern/collision.c intern/colorband.c intern/colortools.c + intern/compute_contexts.cc intern/constraint.c intern/context.c intern/crazyspace.cc @@ -352,6 +353,7 @@ set(SRC BKE_collision.h BKE_colorband.h BKE_colortools.h + BKE_compute_contexts.hh BKE_constraint.h BKE_context.h BKE_crazyspace.h diff --git a/source/blender/blenkernel/intern/compute_contexts.cc b/source/blender/blenkernel/intern/compute_contexts.cc new file mode 100644 index 00000000000..026706d363e --- /dev/null +++ b/source/blender/blenkernel/intern/compute_contexts.cc @@ -0,0 +1,38 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ + +#include "BKE_compute_contexts.hh" + +namespace blender::bke { + 
+ModifierComputeContext::ModifierComputeContext(const ComputeContext *parent, + std::string modifier_name) + : ComputeContext(s_static_type, parent), modifier_name_(std::move(modifier_name)) +{ + hash_.mix_in(s_static_type, strlen(s_static_type)); + hash_.mix_in(modifier_name_.data(), modifier_name_.size()); +} + +void ModifierComputeContext::print_current_in_line(std::ostream &stream) const +{ + stream << "Modifier: " << modifier_name_; +} + +NodeGroupComputeContext::NodeGroupComputeContext(const ComputeContext *parent, + std::string node_name) + : ComputeContext(s_static_type, parent), node_name_(std::move(node_name)) +{ + hash_.mix_in(s_static_type, strlen(s_static_type)); + hash_.mix_in(node_name_.data(), node_name_.size()); +} + +StringRefNull NodeGroupComputeContext::node_name() const +{ + return node_name_; +} + +void NodeGroupComputeContext::print_current_in_line(std::ostream &stream) const +{ + stream << "Node: " << node_name_; +} + +} // namespace blender::bke diff --git a/source/blender/blenkernel/intern/node.cc b/source/blender/blenkernel/intern/node.cc index 2ae0b456b0d..b82cf30416a 100644 --- a/source/blender/blenkernel/intern/node.cc +++ b/source/blender/blenkernel/intern/node.cc @@ -71,6 +71,7 @@ #include "NOD_composite.h" #include "NOD_function.h" #include "NOD_geometry.h" +#include "NOD_geometry_nodes_lazy_function.hh" #include "NOD_node_declaration.hh" #include "NOD_shader.h" #include "NOD_socket.h" diff --git a/source/blender/blenkernel/intern/node_runtime.cc b/source/blender/blenkernel/intern/node_runtime.cc index a8281820a0b..00b78284791 100644 --- a/source/blender/blenkernel/intern/node_runtime.cc +++ b/source/blender/blenkernel/intern/node_runtime.cc @@ -10,8 +10,22 @@ #include "BLI_task.hh" #include "BLI_timeit.hh" +#include "NOD_geometry_nodes_lazy_function.hh" + namespace blender::bke::node_tree_runtime { +void handle_node_tree_output_changed(bNodeTree &tree_cow) +{ + if (tree_cow.type == NTREE_GEOMETRY) { + /* Rebuild geometry nodes lazy 
function graph. */ + { + std::lock_guard lock{tree_cow.runtime->geometry_nodes_lazy_function_graph_info_mutex}; + tree_cow.runtime->geometry_nodes_lazy_function_graph_info.reset(); + } + blender::nodes::ensure_geometry_nodes_lazy_function_graph(tree_cow); + } +} + static void double_checked_lock(std::mutex &mutex, bool &data_is_dirty, FunctionRef<void()> fn) { if (!data_is_dirty) { @@ -36,11 +50,15 @@ static void update_node_vector(const bNodeTree &ntree) { bNodeTreeRuntime &tree_runtime = *ntree.runtime; tree_runtime.nodes.clear(); + tree_runtime.group_nodes.clear(); tree_runtime.has_undefined_nodes_or_sockets = false; LISTBASE_FOREACH (bNode *, node, &ntree.nodes) { node->runtime->index_in_tree = tree_runtime.nodes.append_and_get_index(node); node->runtime->owner_tree = const_cast<bNodeTree *>(&ntree); tree_runtime.has_undefined_nodes_or_sockets |= node->typeinfo == &NodeTypeUndefined; + if (node->is_group()) { + tree_runtime.group_nodes.append(node); + } } } diff --git a/source/blender/blenlib/BLI_compute_context.hh b/source/blender/blenlib/BLI_compute_context.hh new file mode 100644 index 00000000000..7422467e400 --- /dev/null +++ b/source/blender/blenlib/BLI_compute_context.hh @@ -0,0 +1,173 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ + +#pragma once + +/** \file + * \ingroup bli + * + * When logging computed values, we generally want to know where the value was computed. For + * example, geometry nodes logs socket values so that they can be displayed in the ui. For that we + * can combine the logged value with a `ComputeContext`, which identifies the place where the value + * was computed. + * + * This is not a trivial problem because e.g. just storing storing a pointer to the socket a value + * belongs to is not enough. That's because the same socket may correspond to many different values + * when the socket is used in a node group that is used multiple times. 
In this case, not only does + * the socket have to be stored but also the entire nested node group path that led to the + * evaluation of the socket. + * + * Storing the entire "context path" for every logged value is not feasible, because that path can + * become quite long. So that would need much more memory, more compute overhead and makes it + * complicated to compare if two contexts are the same. If the identifier for a compute context + * would have a variable size, it would also be much harder to create a map from context to values. + * + * The solution implemented below uses the following key ideas: + * - Every compute context can be hashed to a unique fixed size value (`ComputeContextHash`). While + * technically there could be hash collisions, the hashing algorithm has to be chosen to make + * that practically impossible. This way an entire context path, possibly consisting of many + * nested contexts, is represented by a single value that can be stored easily. + * - A nested compute context is build as singly linked list, where every compute context has a + * pointer to the parent compute context. Note that a link in the other direction is not possible + * because the same parent compute context may be used by many different children which possibly + * run on different threads. + */ + +#include "BLI_array.hh" +#include "BLI_linear_allocator.hh" +#include "BLI_stack.hh" +#include "BLI_string_ref.hh" + +namespace blender { + +/** + * A hash that uniquely identifies a specific (non-fixed-size) compute context. The hash has to + * have enough bits to make collisions practically impossible. 
+ */ +struct ComputeContextHash { + static constexpr int64_t HashSizeInBytes = 16; + uint64_t v1 = 0; + uint64_t v2 = 0; + + uint64_t hash() const + { + return v1; + } + + friend bool operator==(const ComputeContextHash &a, const ComputeContextHash &b) + { + return a.v1 == b.v1 && a.v2 == b.v2; + } + + void mix_in(const void *data, int64_t len); + + friend std::ostream &operator<<(std::ostream &stream, const ComputeContextHash &hash); +}; + +static_assert(sizeof(ComputeContextHash) == ComputeContextHash::HashSizeInBytes); + +/** + * Identifies the context in which a computation happens. This context can be used to identify + * values logged during the computation. For more details, see the comment at the top of the file. + * + * This class should be subclassed to implement specific contexts. + */ +class ComputeContext { + private: + /** + * Only used for debugging currently. + */ + const char *static_type_; + /** + * Pointer to the context that this context is child of. That allows nesting compute contexts. + */ + const ComputeContext *parent_ = nullptr; + + protected: + /** + * The hash that uniquely identifies this context. It's a combined hash of this context as well + * as all the parent contexts. + */ + ComputeContextHash hash_; + + public: + ComputeContext(const char *static_type, const ComputeContext *parent) + : static_type_(static_type), parent_(parent) + { + if (parent != nullptr) { + hash_ = parent_->hash_; + } + } + virtual ~ComputeContext() = default; + + const ComputeContextHash &hash() const + { + return hash_; + } + + const char *static_type() const + { + return static_type_; + } + + const ComputeContext *parent() const + { + return parent_; + } + + /** + * Print the entire nested context stack. + */ + void print_stack(std::ostream &stream, StringRef name) const; + + /** + * Print information about this specific context. This has to be implemented by each subclass. 
+ */ + virtual void print_current_in_line(std::ostream &stream) const = 0; + + friend std::ostream &operator<<(std::ostream &stream, const ComputeContext &compute_context); +}; + +/** + * Utility class to build a context stack in one place. This is typically used to get the hash that + * corresponds to a specific nested compute context, in order to look up corresponding logged + * values. + */ +class ComputeContextBuilder { + private: + LinearAllocator<> allocator_; + Stack<destruct_ptr<ComputeContext>> contexts_; + + public: + bool is_empty() const + { + return contexts_.is_empty(); + } + + const ComputeContext *current() const + { + if (contexts_.is_empty()) { + return nullptr; + } + return contexts_.peek().get(); + } + + const ComputeContextHash hash() const + { + BLI_assert(!contexts_.is_empty()); + return this->current()->hash(); + } + + template<typename T, typename... Args> void push(Args &&...args) + { + const ComputeContext *current = this->current(); + destruct_ptr<T> context = allocator_.construct<T>(current, std::forward<Args>(args)...); + contexts_.push(std::move(context)); + } + + void pop() + { + contexts_.pop(); + } +}; + +} // namespace blender diff --git a/source/blender/blenlib/BLI_multi_value_map.hh b/source/blender/blenlib/BLI_multi_value_map.hh index 1fc5a797574..81b536e7d3c 100644 --- a/source/blender/blenlib/BLI_multi_value_map.hh +++ b/source/blender/blenlib/BLI_multi_value_map.hh @@ -115,6 +115,14 @@ template<typename Key, typename Value> class MultiValueMap { } /** + * Get the number of keys. + */ + int64_t size() const + { + return map_.size(); + } + + /** * NOTE: This signature will change when the implementation changes. 
*/ typename MapType::ItemIterator items() const diff --git a/source/blender/blenlib/CMakeLists.txt b/source/blender/blenlib/CMakeLists.txt index d87c60e6099..4cd222165be 100644 --- a/source/blender/blenlib/CMakeLists.txt +++ b/source/blender/blenlib/CMakeLists.txt @@ -53,6 +53,7 @@ set(SRC intern/bitmap_draw_2d.c intern/boxpack_2d.c intern/buffer.c + intern/compute_context.cc intern/convexhull_2d.c intern/cpp_type.cc intern/delaunay_2d.cc @@ -180,6 +181,7 @@ set(SRC BLI_compiler_attrs.h BLI_compiler_compat.h BLI_compiler_typecheck.h + BLI_compute_context.hh BLI_console.h BLI_convexhull_2d.h BLI_cpp_type.hh diff --git a/source/blender/blenlib/intern/compute_context.cc b/source/blender/blenlib/intern/compute_context.cc new file mode 100644 index 00000000000..50a4a90a4a9 --- /dev/null +++ b/source/blender/blenlib/intern/compute_context.cc @@ -0,0 +1,48 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ + +#include "BLI_compute_context.hh" +#include "BLI_hash_md5.h" + +namespace blender { + +void ComputeContextHash::mix_in(const void *data, int64_t len) +{ + DynamicStackBuffer<> buffer_owner(HashSizeInBytes + len, 8); + char *buffer = static_cast<char *>(buffer_owner.buffer()); + memcpy(buffer, this, HashSizeInBytes); + memcpy(buffer + HashSizeInBytes, data, len); + + BLI_hash_md5_buffer(buffer, HashSizeInBytes + len, this); +} + +std::ostream &operator<<(std::ostream &stream, const ComputeContextHash &hash) +{ + std::stringstream ss; + ss << "0x" << std::hex << hash.v1 << hash.v2; + stream << ss.str(); + return stream; +} + +void ComputeContext::print_stack(std::ostream &stream, StringRef name) const +{ + Stack<const ComputeContext *> stack; + for (const ComputeContext *current = this; current; current = current->parent_) { + stack.push(current); + } + stream << "Context Stack: " << name << "\n"; + while (!stack.is_empty()) { + const ComputeContext *current = stack.pop(); + stream << "-> "; + current->print_current_in_line(stream); + const ComputeContextHash ¤t_hash 
= current->hash_; + stream << " \t(hash: " << current_hash << ")\n"; + } +} + +std::ostream &operator<<(std::ostream &stream, const ComputeContext &compute_context) +{ + compute_context.print_stack(stream, ""); + return stream; +} + +} // namespace blender diff --git a/source/blender/blenlib/intern/cpp_type.cc b/source/blender/blenlib/intern/cpp_type.cc index d6a087cf175..38de32d3ec8 100644 --- a/source/blender/blenlib/intern/cpp_type.cc +++ b/source/blender/blenlib/intern/cpp_type.cc @@ -26,3 +26,4 @@ BLI_CPP_TYPE_MAKE(ColorGeometry4f, blender::ColorGeometry4f, CPPTypeFlags::Basic BLI_CPP_TYPE_MAKE(ColorGeometry4b, blender::ColorGeometry4b, CPPTypeFlags::BasicType) BLI_CPP_TYPE_MAKE(string, std::string, CPPTypeFlags::BasicType) +BLI_CPP_TYPE_MAKE(StringVector, blender::Vector<std::string>, CPPTypeFlags::None) diff --git a/source/blender/depsgraph/intern/builder/deg_builder_nodes.cc b/source/blender/depsgraph/intern/builder/deg_builder_nodes.cc index ca3e4543a23..dcefb5528b2 100644 --- a/source/blender/depsgraph/intern/builder/deg_builder_nodes.cc +++ b/source/blender/depsgraph/intern/builder/deg_builder_nodes.cc @@ -1741,7 +1741,14 @@ void DepsgraphNodeBuilder::build_nodetree(bNodeTree *ntree) /* Animation, */ build_animdata(&ntree->id); /* Output update. */ - add_operation_node(&ntree->id, NodeType::NTREE_OUTPUT, OperationCode::NTREE_OUTPUT); + ID *id_cow = get_cow_id(&ntree->id); + add_operation_node(&ntree->id, + NodeType::NTREE_OUTPUT, + OperationCode::NTREE_OUTPUT, + [id_cow](::Depsgraph * /*depsgraph*/) { + bNodeTree *ntree_cow = reinterpret_cast<bNodeTree *>(id_cow); + bke::node_tree_runtime::handle_node_tree_output_changed(*ntree_cow); + }); /* nodetree's nodes... 
*/ LISTBASE_FOREACH (bNode *, bnode, &ntree->nodes) { build_idproperties(bnode->prop); diff --git a/source/blender/editors/include/UI_interface.hh b/source/blender/editors/include/UI_interface.hh index 82bfdd7e212..6c756984203 100644 --- a/source/blender/editors/include/UI_interface.hh +++ b/source/blender/editors/include/UI_interface.hh @@ -13,7 +13,7 @@ #include "UI_resources.h" -namespace blender::nodes::geometry_nodes_eval_log { +namespace blender::nodes::geo_eval_log { struct GeometryAttributeInfo; } @@ -44,12 +44,11 @@ void context_path_add_generic(Vector<ContextPathItem> &path, void template_breadcrumbs(uiLayout &layout, Span<ContextPathItem> context_path); -void attribute_search_add_items( - StringRefNull str, - bool can_create_attribute, - Span<const nodes::geometry_nodes_eval_log::GeometryAttributeInfo *> infos, - uiSearchItems *items, - bool is_first); +void attribute_search_add_items(StringRefNull str, + bool can_create_attribute, + Span<const nodes::geo_eval_log::GeometryAttributeInfo *> infos, + uiSearchItems *items, + bool is_first); } // namespace blender::ui diff --git a/source/blender/editors/interface/interface_template_attribute_search.cc b/source/blender/editors/interface/interface_template_attribute_search.cc index 0a684903f0f..55ca945671f 100644 --- a/source/blender/editors/interface/interface_template_attribute_search.cc +++ b/source/blender/editors/interface/interface_template_attribute_search.cc @@ -14,13 +14,15 @@ #include "BLT_translation.h" -#include "NOD_geometry_nodes_eval_log.hh" +#include "BKE_attribute.hh" + +#include "NOD_geometry_nodes_log.hh" #include "UI_interface.h" #include "UI_interface.hh" #include "UI_resources.h" -using blender::nodes::geometry_nodes_eval_log::GeometryAttributeInfo; +using blender::nodes::geo_eval_log::GeometryAttributeInfo; namespace blender::ui { diff --git a/source/blender/editors/space_node/node_draw.cc b/source/blender/editors/space_node/node_draw.cc index 3da799d0fd5..3a8e5d0aed6 100644 --- 
a/source/blender/editors/space_node/node_draw.cc +++ b/source/blender/editors/space_node/node_draw.cc @@ -13,6 +13,7 @@ #include "DNA_light_types.h" #include "DNA_linestyle_types.h" #include "DNA_material_types.h" +#include "DNA_modifier_types.h" #include "DNA_node_types.h" #include "DNA_screen_types.h" #include "DNA_space_types.h" @@ -29,11 +30,13 @@ #include "BLT_translation.h" +#include "BKE_compute_contexts.hh" #include "BKE_context.h" #include "BKE_idtype.h" #include "BKE_lib_id.h" #include "BKE_main.h" #include "BKE_node.h" +#include "BKE_node_runtime.hh" #include "BKE_node_tree_update.h" #include "BKE_object.h" @@ -65,7 +68,8 @@ #include "RNA_access.h" #include "RNA_prototypes.h" -#include "NOD_geometry_nodes_eval_log.hh" +#include "NOD_geometry_exec.hh" +#include "NOD_geometry_nodes_log.hh" #include "NOD_node_declaration.hh" #include "NOD_socket_declarations_geometry.hh" @@ -74,10 +78,11 @@ #include "node_intern.hh" /* own include */ +namespace geo_log = blender::nodes::geo_eval_log; + using blender::GPointer; +using blender::Vector; using blender::fn::GField; -namespace geo_log = blender::nodes::geometry_nodes_eval_log; -using geo_log::eNamedAttrUsage; extern "C" { /* XXX interface.h */ @@ -85,6 +90,17 @@ extern void ui_draw_dropshadow( const rctf *rct, float radius, float aspect, float alpha, int select); } +/** + * This is passed to many functions which draw the node editor. + */ +struct TreeDrawContext { + /** + * Geometry nodes logs various data during execution. The logged data that corresponds to the + * currently drawn node tree can be retrieved from the log below. 
+ */ + geo_log::GeoTreeLog *geo_tree_log = nullptr; +}; + float ED_node_grid_size() { return U.widget_unit; @@ -157,6 +173,12 @@ void ED_node_tag_update_id(ID *id) namespace blender::ed::space_node { +static void node_socket_add_tooltip_in_node_editor(TreeDrawContext * /*tree_draw_ctx*/, + const bNodeTree *ntree, + const bNode *node, + const bNodeSocket *sock, + uiLayout *layout); + static bool compare_nodes(const bNode *a, const bNode *b) { /* These tell if either the node or any of the parent nodes is selected. @@ -313,7 +335,11 @@ float2 node_from_view(const bNode &node, const float2 &co) /** * Based on settings and sockets in node, set drawing rect info. */ -static void node_update_basis(const bContext &C, bNodeTree &ntree, bNode &node, uiBlock &block) +static void node_update_basis(const bContext &C, + TreeDrawContext &tree_draw_ctx, + bNodeTree &ntree, + bNode &node, + uiBlock &block) { PointerRNA nodeptr; RNA_pointer_create(&ntree.id, &RNA_Node, &node, &nodeptr); @@ -374,7 +400,7 @@ static void node_update_basis(const bContext &C, bNodeTree &ntree, bNode &node, const char *socket_label = nodeSocketLabel(socket); socket->typeinfo->draw((bContext *)&C, row, &sockptr, &nodeptr, IFACE_(socket_label)); - node_socket_add_tooltip(ntree, node, *socket, *row); + node_socket_add_tooltip_in_node_editor(&tree_draw_ctx, &ntree, &node, socket, row); UI_block_align_end(&block); UI_block_layout_resolve(&block, nullptr, &buty); @@ -506,7 +532,7 @@ static void node_update_basis(const bContext &C, bNodeTree &ntree, bNode &node, const char *socket_label = nodeSocketLabel(socket); socket->typeinfo->draw((bContext *)&C, row, &sockptr, &nodeptr, IFACE_(socket_label)); - node_socket_add_tooltip(ntree, node, *socket, *row); + node_socket_add_tooltip_in_node_editor(&tree_draw_ctx, &ntree, &node, socket, row); UI_block_align_end(&block); UI_block_layout_resolve(&block, nullptr, &buty); @@ -823,25 +849,16 @@ static void create_inspection_string_for_generic_value(const GPointer value, 
std } } -static void create_inspection_string_for_gfield(const geo_log::GFieldValueLog &value_log, - std::stringstream &ss) +static void create_inspection_string_for_field_info(const geo_log::FieldInfoLog &value_log, + std::stringstream &ss) { - const CPPType &type = value_log.type(); - const GField &field = value_log.field(); - const Span<std::string> input_tooltips = value_log.input_tooltips(); + const CPPType &type = value_log.type; + const Span<std::string> input_tooltips = value_log.input_tooltips; if (input_tooltips.is_empty()) { - if (field) { - BUFFER_FOR_CPP_TYPE_VALUE(type, buffer); - blender::fn::evaluate_constant_field(field, buffer); - create_inspection_string_for_generic_value({type, buffer}, ss); - type.destruct(buffer); - } - else { - /* Constant values should always be logged. */ - BLI_assert_unreachable(); - ss << "Value has not been logged"; - } + /* Should have been logged as constant value. */ + BLI_assert_unreachable(); + ss << "Value has not been logged"; } else { if (type.is<int>()) { @@ -874,11 +891,11 @@ static void create_inspection_string_for_gfield(const geo_log::GFieldValueLog &v } } -static void create_inspection_string_for_geometry(const geo_log::GeometryValueLog &value_log, - std::stringstream &ss, - const nodes::decl::Geometry *geometry) +static void create_inspection_string_for_geometry_info(const geo_log::GeometryInfoLog &value_log, + std::stringstream &ss, + const nodes::decl::Geometry *socket_decl) { - Span<GeometryComponentType> component_types = value_log.component_types(); + Span<GeometryComponentType> component_types = value_log.component_types; if (component_types.is_empty()) { ss << TIP_("Empty Geometry"); return; @@ -895,7 +912,7 @@ static void create_inspection_string_for_geometry(const geo_log::GeometryValueLo const char *line_end = (type == component_types.last()) ? 
"" : ".\n"; switch (type) { case GEO_COMPONENT_TYPE_MESH: { - const geo_log::GeometryValueLog::MeshInfo &mesh_info = *value_log.mesh_info; + const geo_log::GeometryInfoLog::MeshInfo &mesh_info = *value_log.mesh_info; char line[256]; BLI_snprintf(line, sizeof(line), @@ -907,7 +924,7 @@ static void create_inspection_string_for_geometry(const geo_log::GeometryValueLo break; } case GEO_COMPONENT_TYPE_POINT_CLOUD: { - const geo_log::GeometryValueLog::PointCloudInfo &pointcloud_info = + const geo_log::GeometryInfoLog::PointCloudInfo &pointcloud_info = *value_log.pointcloud_info; char line[256]; BLI_snprintf(line, @@ -918,7 +935,7 @@ static void create_inspection_string_for_geometry(const geo_log::GeometryValueLo break; } case GEO_COMPONENT_TYPE_CURVE: { - const geo_log::GeometryValueLog::CurveInfo &curve_info = *value_log.curve_info; + const geo_log::GeometryInfoLog::CurveInfo &curve_info = *value_log.curve_info; char line[256]; BLI_snprintf(line, sizeof(line), @@ -928,7 +945,7 @@ static void create_inspection_string_for_geometry(const geo_log::GeometryValueLo break; } case GEO_COMPONENT_TYPE_INSTANCES: { - const geo_log::GeometryValueLog::InstancesInfo &instances_info = *value_log.instances_info; + const geo_log::GeometryInfoLog::InstancesInfo &instances_info = *value_log.instances_info; char line[256]; BLI_snprintf(line, sizeof(line), @@ -943,7 +960,7 @@ static void create_inspection_string_for_geometry(const geo_log::GeometryValueLo } case GEO_COMPONENT_TYPE_EDIT: { if (value_log.edit_data_info.has_value()) { - const geo_log::GeometryValueLog::EditDataInfo &edit_info = *value_log.edit_data_info; + const geo_log::GeometryInfoLog::EditDataInfo &edit_info = *value_log.edit_data_info; char line[256]; BLI_snprintf(line, sizeof(line), @@ -959,11 +976,11 @@ static void create_inspection_string_for_geometry(const geo_log::GeometryValueLo /* If the geometry declaration is null, as is the case for input to group output, * or it is an output socket don't show supported types. 
*/ - if (geometry == nullptr || geometry->in_out() == SOCK_OUT) { + if (socket_decl == nullptr || socket_decl->in_out() == SOCK_OUT) { return; } - Span<GeometryComponentType> supported_types = geometry->supported_types(); + Span<GeometryComponentType> supported_types = socket_decl->supported_types(); if (supported_types.is_empty()) { ss << ".\n\n" << TIP_("Supported: All Types"); return; @@ -1000,43 +1017,37 @@ static void create_inspection_string_for_geometry(const geo_log::GeometryValueLo } } -static std::optional<std::string> create_socket_inspection_string(const bContext &C, - const bNode &node, +static std::optional<std::string> create_socket_inspection_string(TreeDrawContext &tree_draw_ctx, const bNodeSocket &socket) { - const SpaceNode *snode = CTX_wm_space_node(&C); - if (snode == nullptr) { - return {}; - }; - - const geo_log::SocketLog *socket_log = geo_log::ModifierLog::find_socket_by_node_editor_context( - *snode, node, socket); - if (socket_log == nullptr) { - return {}; - } - const geo_log::ValueLog *value_log = socket_log->value(); + using namespace blender::nodes::geo_eval_log; + tree_draw_ctx.geo_tree_log->ensure_socket_values(); + ValueLog *value_log = tree_draw_ctx.geo_tree_log->find_socket_value_log(socket); if (value_log == nullptr) { - return {}; + return std::nullopt; } - std::stringstream ss; if (const geo_log::GenericValueLog *generic_value_log = dynamic_cast<const geo_log::GenericValueLog *>(value_log)) { - create_inspection_string_for_generic_value(generic_value_log->value(), ss); + create_inspection_string_for_generic_value(generic_value_log->value, ss); } - if (const geo_log::GFieldValueLog *gfield_value_log = - dynamic_cast<const geo_log::GFieldValueLog *>(value_log)) { - create_inspection_string_for_gfield(*gfield_value_log, ss); + else if (const geo_log::FieldInfoLog *gfield_value_log = + dynamic_cast<const geo_log::FieldInfoLog *>(value_log)) { + create_inspection_string_for_field_info(*gfield_value_log, ss); } - else if (const 
geo_log::GeometryValueLog *geo_value_log = - dynamic_cast<const geo_log::GeometryValueLog *>(value_log)) { - create_inspection_string_for_geometry( + else if (const geo_log::GeometryInfoLog *geo_value_log = + dynamic_cast<const geo_log::GeometryInfoLog *>(value_log)) { + create_inspection_string_for_geometry_info( *geo_value_log, ss, dynamic_cast<const nodes::decl::Geometry *>(socket.runtime->declaration)); } - return ss.str(); + std::string str = ss.str(); + if (str.empty()) { + return std::nullopt; + } + return str; } static bool node_socket_has_tooltip(const bNodeTree &ntree, const bNodeSocket &socket) @@ -1046,34 +1057,42 @@ static bool node_socket_has_tooltip(const bNodeTree &ntree, const bNodeSocket &s } if (socket.runtime->declaration != nullptr) { - const blender::nodes::SocketDeclaration &socket_decl = *socket.runtime->declaration; + const nodes::SocketDeclaration &socket_decl = *socket.runtime->declaration; return !socket_decl.description().is_empty(); } return false; } -static char *node_socket_get_tooltip(const bContext &C, - const bNodeTree &ntree, - const bNode &node, - const bNodeSocket &socket) +static char *node_socket_get_tooltip(const bContext *C, + const bNodeTree *ntree, + const bNode *UNUSED(node), + const bNodeSocket *socket) { + SpaceNode *snode = CTX_wm_space_node(C); + TreeDrawContext tree_draw_ctx; + if (snode != nullptr) { + if (ntree->type == NTREE_GEOMETRY) { + tree_draw_ctx.geo_tree_log = geo_log::GeoModifierLog::get_tree_log_for_node_editor(*snode); + } + } + std::stringstream output; - if (socket.runtime->declaration != nullptr) { - const blender::nodes::SocketDeclaration &socket_decl = *socket.runtime->declaration; + if (socket->runtime->declaration != nullptr) { + const blender::nodes::SocketDeclaration &socket_decl = *socket->runtime->declaration; blender::StringRef description = socket_decl.description(); if (!description.is_empty()) { output << TIP_(description.data()); } } - if (ntree.type == NTREE_GEOMETRY) { + if 
(ntree->type == NTREE_GEOMETRY && tree_draw_ctx.geo_tree_log != nullptr) { if (!output.str().empty()) { output << ".\n\n"; } std::optional<std::string> socket_inspection_str = create_socket_inspection_string( - C, node, socket); + tree_draw_ctx, *socket); if (socket_inspection_str.has_value()) { output << *socket_inspection_str; } @@ -1083,37 +1102,46 @@ static char *node_socket_get_tooltip(const bContext &C, } if (output.str().empty()) { - output << nodeSocketLabel(&socket); + output << nodeSocketLabel(socket); } return BLI_strdup(output.str().c_str()); } -void node_socket_add_tooltip(const bNodeTree &ntree, - const bNode &node, - const bNodeSocket &sock, - uiLayout &layout) +static void node_socket_add_tooltip_in_node_editor(TreeDrawContext *UNUSED(tree_draw_ctx), + const bNodeTree *ntree, + const bNode *node, + const bNodeSocket *sock, + uiLayout *layout) { - if (!node_socket_has_tooltip(ntree, sock)) { + if (!node_socket_has_tooltip(*ntree, *sock)) { return; } - SocketTooltipData *data = MEM_new<SocketTooltipData>(__func__); - data->ntree = &ntree; - data->node = &node; - data->socket = &sock; + SocketTooltipData *data = MEM_cnew<SocketTooltipData>(__func__); + data->ntree = ntree; + data->node = node; + data->socket = sock; uiLayoutSetTooltipFunc( - &layout, + layout, [](bContext *C, void *argN, const char *UNUSED(tip)) { - const SocketTooltipData *data = static_cast<SocketTooltipData *>(argN); - return node_socket_get_tooltip(*C, *data->ntree, *data->node, *data->socket); + SocketTooltipData *data = static_cast<SocketTooltipData *>(argN); + return node_socket_get_tooltip(C, data->ntree, data->node, data->socket); }, data, MEM_dupallocN, MEM_freeN); } +void node_socket_add_tooltip(const bNodeTree &ntree, + const bNode &node, + const bNodeSocket &sock, + uiLayout &layout) +{ + node_socket_add_tooltip_in_node_editor(nullptr, &ntree, &node, &sock, &layout); +} + static void node_socket_draw_nested(const bContext &C, bNodeTree &ntree, PointerRNA &node_ptr, @@ 
-1178,7 +1206,7 @@ static void node_socket_draw_nested(const bContext &C, but, [](bContext *C, void *argN, const char *UNUSED(tip)) { SocketTooltipData *data = (SocketTooltipData *)argN; - return node_socket_get_tooltip(*C, *data->ntree, *data->node, *data->socket); + return node_socket_get_tooltip(C, data->ntree, data->node, data->socket); }, data, MEM_freeN); @@ -1607,27 +1635,26 @@ static char *node_errors_tooltip_fn(bContext *UNUSED(C), void *argN, const char #define NODE_HEADER_ICON_SIZE (0.8f * U.widget_unit) -static void node_add_error_message_button( - const bContext &C, bNode &node, uiBlock &block, const rctf &rect, float &icon_offset) +static void node_add_error_message_button(TreeDrawContext &tree_draw_ctx, + bNode &node, + uiBlock &block, + const rctf &rect, + float &icon_offset) { - SpaceNode *snode = CTX_wm_space_node(&C); - const geo_log::NodeLog *node_log = geo_log::ModifierLog::find_node_by_node_editor_context(*snode, - node); - if (node_log == nullptr) { - return; + Span<geo_log::NodeWarning> warnings; + if (tree_draw_ctx.geo_tree_log) { + geo_log::GeoNodeLog *node_log = tree_draw_ctx.geo_tree_log->nodes.lookup_ptr(node.name); + if (node_log != nullptr) { + warnings = node_log->warnings; + } } - - Span<geo_log::NodeWarning> warnings = node_log->warnings(); - if (warnings.is_empty()) { return; } - NodeErrorsTooltipData *tooltip_data = (NodeErrorsTooltipData *)MEM_mallocN( - sizeof(NodeErrorsTooltipData), __func__); - tooltip_data->warnings = warnings; - const geo_log::NodeWarningType display_type = node_error_highest_priority(warnings); + NodeErrorsTooltipData *tooltip_data = MEM_new<NodeErrorsTooltipData>(__func__); + tooltip_data->warnings = warnings; icon_offset -= NODE_HEADER_ICON_SIZE; UI_block_emboss_set(&block, UI_EMBOSS_NONE); @@ -1645,90 +1672,70 @@ static void node_add_error_message_button( 0, 0, nullptr); - UI_but_func_tooltip_set(but, node_errors_tooltip_fn, tooltip_data, MEM_freeN); + UI_but_func_tooltip_set(but, 
node_errors_tooltip_fn, tooltip_data, [](void *arg) { + MEM_delete(static_cast<NodeErrorsTooltipData *>(arg)); + }); UI_block_emboss_set(&block, UI_EMBOSS); } -static void get_exec_time_other_nodes(const bNode &node, - const SpaceNode &snode, - std::chrono::microseconds &exec_time, - int &node_count) +static std::optional<std::chrono::nanoseconds> node_get_execution_time( + TreeDrawContext &tree_draw_ctx, const bNodeTree &ntree, const bNode &node) { - if (node.type == NODE_GROUP) { - const geo_log::TreeLog *root_tree_log = geo_log::ModifierLog::find_tree_by_node_editor_context( - snode); - if (root_tree_log == nullptr) { - return; - } - const geo_log::TreeLog *tree_log = root_tree_log->lookup_child_log(node.name); - if (tree_log == nullptr) { - return; - } - tree_log->foreach_node_log([&](const geo_log::NodeLog &node_log) { - exec_time += node_log.execution_time(); - node_count++; - }); + const geo_log::GeoTreeLog *tree_log = tree_draw_ctx.geo_tree_log; + if (tree_log == nullptr) { + return std::nullopt; } - else { - const geo_log::NodeLog *node_log = geo_log::ModifierLog::find_node_by_node_editor_context( - snode, node); - if (node_log) { - exec_time += node_log->execution_time(); - node_count++; - } - } -} - -static std::chrono::microseconds node_get_execution_time(const bNodeTree &ntree, - const bNode &node, - const SpaceNode &snode, - int &node_count) -{ - std::chrono::microseconds exec_time = std::chrono::microseconds::zero(); if (node.type == NODE_GROUP_OUTPUT) { - const geo_log::TreeLog *tree_log = geo_log::ModifierLog::find_tree_by_node_editor_context( - snode); - - if (tree_log == nullptr) { - return exec_time; - } - tree_log->foreach_node_log([&](const geo_log::NodeLog &node_log) { - exec_time += node_log.execution_time(); - node_count++; - }); + return tree_log->run_time_sum; } - else if (node.type == NODE_FRAME) { + if (node.type == NODE_FRAME) { /* Could be cached in the future if this recursive code turns out to be slow. 
*/ + std::chrono::nanoseconds run_time{0}; + bool found_node = false; LISTBASE_FOREACH (bNode *, tnode, &ntree.nodes) { if (tnode->parent != &node) { continue; } if (tnode->type == NODE_FRAME) { - exec_time += node_get_execution_time(ntree, *tnode, snode, node_count); + std::optional<std::chrono::nanoseconds> sub_frame_run_time = node_get_execution_time( + tree_draw_ctx, ntree, *tnode); + if (sub_frame_run_time.has_value()) { + run_time += *sub_frame_run_time; + found_node = true; + } } else { - get_exec_time_other_nodes(*tnode, snode, exec_time, node_count); + if (const geo_log::GeoNodeLog *node_log = tree_log->nodes.lookup_ptr_as(tnode->name)) { + found_node = true; + run_time += node_log->run_time; + } } } + if (found_node) { + return run_time; + } + return std::nullopt; } - else { - get_exec_time_other_nodes(node, snode, exec_time, node_count); + if (const geo_log::GeoNodeLog *node_log = tree_log->nodes.lookup_ptr(node.name)) { + return node_log->run_time; } - return exec_time; + return std::nullopt; } -static std::string node_get_execution_time_label(const SpaceNode &snode, const bNode &node) +static std::string node_get_execution_time_label(TreeDrawContext &tree_draw_ctx, + const SpaceNode &snode, + const bNode &node) { - int node_count = 0; - std::chrono::microseconds exec_time = node_get_execution_time( - *snode.edittree, node, snode, node_count); + const std::optional<std::chrono::nanoseconds> exec_time = node_get_execution_time( + tree_draw_ctx, *snode.edittree, node); - if (node_count == 0) { + if (!exec_time.has_value()) { return std::string(""); } - uint64_t exec_time_us = exec_time.count(); + const uint64_t exec_time_us = + std::chrono::duration_cast<std::chrono::microseconds>(*exec_time).count(); /* Don't show time if execution time is 0 microseconds. 
*/ if (exec_time_us == 0) { @@ -1763,7 +1770,7 @@ struct NodeExtraInfoRow { }; struct NamedAttributeTooltipArg { - Map<std::string, eNamedAttrUsage> usage_by_attribute; + Map<std::string, geo_log::NamedAttributeUsage> usage_by_attribute; }; static char *named_attribute_tooltip(bContext *UNUSED(C), void *argN, const char *UNUSED(tip)) @@ -1775,7 +1782,7 @@ static char *named_attribute_tooltip(bContext *UNUSED(C), void *argN, const char struct NameWithUsage { StringRefNull name; - eNamedAttrUsage usage; + geo_log::NamedAttributeUsage usage; }; Vector<NameWithUsage> sorted_used_attribute; @@ -1790,16 +1797,16 @@ static char *named_attribute_tooltip(bContext *UNUSED(C), void *argN, const char for (const NameWithUsage &attribute : sorted_used_attribute) { const StringRefNull name = attribute.name; - const eNamedAttrUsage usage = attribute.usage; + const geo_log::NamedAttributeUsage usage = attribute.usage; ss << " \u2022 \"" << name << "\": "; Vector<std::string> usages; - if ((usage & eNamedAttrUsage::Read) != eNamedAttrUsage::None) { + if ((usage & geo_log::NamedAttributeUsage::Read) != geo_log::NamedAttributeUsage::None) { usages.append(TIP_("read")); } - if ((usage & eNamedAttrUsage::Write) != eNamedAttrUsage::None) { + if ((usage & geo_log::NamedAttributeUsage::Write) != geo_log::NamedAttributeUsage::None) { usages.append(TIP_("write")); } - if ((usage & eNamedAttrUsage::Remove) != eNamedAttrUsage::None) { + if ((usage & geo_log::NamedAttributeUsage::Remove) != geo_log::NamedAttributeUsage::None) { usages.append(TIP_("remove")); } for (const int i : usages.index_range()) { @@ -1817,7 +1824,7 @@ static char *named_attribute_tooltip(bContext *UNUSED(C), void *argN, const char } static NodeExtraInfoRow row_from_used_named_attribute( - const Map<std::string, eNamedAttrUsage> &usage_by_attribute_name) + const Map<std::string, geo_log::NamedAttributeUsage> &usage_by_attribute_name) { const int attributes_num = usage_by_attribute_name.size(); @@ -1831,32 +1838,11 @@ 
static NodeExtraInfoRow row_from_used_named_attribute( return row; } -static std::optional<NodeExtraInfoRow> node_get_accessed_attributes_row(const SpaceNode &snode, - const bNode &node) +static std::optional<NodeExtraInfoRow> node_get_accessed_attributes_row( + TreeDrawContext &tree_draw_ctx, const bNode &node) { - if (node.type == NODE_GROUP) { - const geo_log::TreeLog *root_tree_log = geo_log::ModifierLog::find_tree_by_node_editor_context( - snode); - if (root_tree_log == nullptr) { - return std::nullopt; - } - const geo_log::TreeLog *tree_log = root_tree_log->lookup_child_log(node.name); - if (tree_log == nullptr) { - return std::nullopt; - } - - Map<std::string, eNamedAttrUsage> usage_by_attribute; - tree_log->foreach_node_log([&](const geo_log::NodeLog &node_log) { - for (const geo_log::UsedNamedAttribute &used_attribute : node_log.used_named_attributes()) { - usage_by_attribute.lookup_or_add_as(used_attribute.name, - used_attribute.usage) |= used_attribute.usage; - } - }); - if (usage_by_attribute.is_empty()) { - return std::nullopt; - } - - return row_from_used_named_attribute(usage_by_attribute); + if (tree_draw_ctx.geo_tree_log == nullptr) { + return std::nullopt; } if (ELEM(node.type, GEO_NODE_STORE_NAMED_ATTRIBUTE, @@ -1865,31 +1851,26 @@ static std::optional<NodeExtraInfoRow> node_get_accessed_attributes_row(const Sp /* Only show the overlay when the name is passed in from somewhere else. 
*/ LISTBASE_FOREACH (bNodeSocket *, socket, &node.inputs) { if (STREQ(socket->name, "Name")) { - if ((socket->flag & SOCK_IN_USE) == 0) { + if (!socket->is_directly_linked()) { return std::nullopt; } } } - const geo_log::NodeLog *node_log = geo_log::ModifierLog::find_node_by_node_editor_context( - snode, node.name); - if (node_log == nullptr) { - return std::nullopt; - } - Map<std::string, eNamedAttrUsage> usage_by_attribute; - for (const geo_log::UsedNamedAttribute &used_attribute : node_log->used_named_attributes()) { - usage_by_attribute.lookup_or_add_as(used_attribute.name, - used_attribute.usage) |= used_attribute.usage; - } - if (usage_by_attribute.is_empty()) { - return std::nullopt; - } - return row_from_used_named_attribute(usage_by_attribute); } - - return std::nullopt; + tree_draw_ctx.geo_tree_log->ensure_used_named_attributes(); + geo_log::GeoNodeLog *node_log = tree_draw_ctx.geo_tree_log->nodes.lookup_ptr(node.name); + if (node_log == nullptr) { + return std::nullopt; + } + if (node_log->used_named_attributes.is_empty()) { + return std::nullopt; + } + return row_from_used_named_attribute(node_log->used_named_attributes); } -static Vector<NodeExtraInfoRow> node_get_extra_info(const SpaceNode &snode, const bNode &node) +static Vector<NodeExtraInfoRow> node_get_extra_info(TreeDrawContext &tree_draw_ctx, + const SpaceNode &snode, + const bNode &node) { Vector<NodeExtraInfoRow> rows; if (!(snode.overlay.flag & SN_OVERLAY_SHOW_OVERLAYS)) { @@ -1898,7 +1879,8 @@ static Vector<NodeExtraInfoRow> node_get_extra_info(const SpaceNode &snode, cons if (snode.overlay.flag & SN_OVERLAY_SHOW_NAMED_ATTRIBUTES && snode.edittree->type == NTREE_GEOMETRY) { - if (std::optional<NodeExtraInfoRow> row = node_get_accessed_attributes_row(snode, node)) { + if (std::optional<NodeExtraInfoRow> row = node_get_accessed_attributes_row(tree_draw_ctx, + node)) { rows.append(std::move(*row)); } } @@ -1907,7 +1889,7 @@ static Vector<NodeExtraInfoRow> node_get_extra_info(const SpaceNode 
&snode, cons (ELEM(node.typeinfo->nclass, NODE_CLASS_GEOMETRY, NODE_CLASS_GROUP, NODE_CLASS_ATTRIBUTE) || ELEM(node.type, NODE_FRAME, NODE_GROUP_OUTPUT))) { NodeExtraInfoRow row; - row.text = node_get_execution_time_label(snode, node); + row.text = node_get_execution_time_label(tree_draw_ctx, snode, node); if (!row.text.empty()) { row.tooltip = TIP_( "The execution time from the node tree's latest evaluation. For frame and group nodes, " @@ -1916,14 +1898,17 @@ static Vector<NodeExtraInfoRow> node_get_extra_info(const SpaceNode &snode, cons rows.append(std::move(row)); } } - const geo_log::NodeLog *node_log = geo_log::ModifierLog::find_node_by_node_editor_context(snode, - node); - if (node_log != nullptr) { - for (const std::string &message : node_log->debug_messages()) { - NodeExtraInfoRow row; - row.text = message; - row.icon = ICON_INFO; - rows.append(std::move(row)); + + if (snode.edittree->type == NTREE_GEOMETRY && tree_draw_ctx.geo_tree_log != nullptr) { + tree_draw_ctx.geo_tree_log->ensure_debug_messages(); + const geo_log::GeoNodeLog *node_log = tree_draw_ctx.geo_tree_log->nodes.lookup_ptr(node.name); + if (node_log != nullptr) { + for (const StringRef message : node_log->debug_messages) { + NodeExtraInfoRow row; + row.text = message; + row.icon = ICON_INFO; + rows.append(std::move(row)); + } } } @@ -1988,9 +1973,12 @@ static void node_draw_extra_info_row(const bNode &node, } } -static void node_draw_extra_info_panel(const SpaceNode &snode, const bNode &node, uiBlock &block) +static void node_draw_extra_info_panel(TreeDrawContext &tree_draw_ctx, + const SpaceNode &snode, + const bNode &node, + uiBlock &block) { - Vector<NodeExtraInfoRow> extra_info_rows = node_get_extra_info(snode, node); + Vector<NodeExtraInfoRow> extra_info_rows = node_get_extra_info(tree_draw_ctx, snode, node); if (extra_info_rows.size() == 0) { return; @@ -2046,6 +2034,7 @@ static void node_draw_extra_info_panel(const SpaceNode &snode, const bNode &node } static void 
node_draw_basis(const bContext &C, + TreeDrawContext &tree_draw_ctx, const View2D &v2d, const SpaceNode &snode, bNodeTree &ntree, @@ -2070,7 +2059,7 @@ static void node_draw_basis(const bContext &C, GPU_line_width(1.0f); - node_draw_extra_info_panel(snode, node, block); + node_draw_extra_info_panel(tree_draw_ctx, snode, node, block); /* Header. */ { @@ -2165,7 +2154,7 @@ static void node_draw_basis(const bContext &C, UI_block_emboss_set(&block, UI_EMBOSS); } - node_add_error_message_button(C, node, block, rct, iconofs); + node_add_error_message_button(tree_draw_ctx, node, block, rct, iconofs); /* Title. */ if (node.flag & SELECT) { @@ -2338,6 +2327,7 @@ static void node_draw_basis(const bContext &C, } static void node_draw_hidden(const bContext &C, + TreeDrawContext &tree_draw_ctx, const View2D &v2d, const SpaceNode &snode, bNodeTree &ntree, @@ -2353,7 +2343,7 @@ static void node_draw_hidden(const bContext &C, const int color_id = node_get_colorid(node); - node_draw_extra_info_panel(snode, node, block); + node_draw_extra_info_panel(tree_draw_ctx, snode, node, block); /* Shadow. 
*/ node_draw_shadow(snode, node, hiddenrad, 1.0f); @@ -2668,6 +2658,7 @@ static void reroute_node_prepare_for_draw(bNode &node) } static void node_update_nodetree(const bContext &C, + TreeDrawContext &tree_draw_ctx, bNodeTree &ntree, Span<bNode *> nodes, Span<uiBlock *> blocks) @@ -2694,7 +2685,7 @@ static void node_update_nodetree(const bContext &C, node_update_hidden(node, block); } else { - node_update_basis(C, ntree, node, block); + node_update_basis(C, tree_draw_ctx, ntree, node, block); } } } @@ -2795,6 +2786,7 @@ static void frame_node_draw_label(const bNodeTree &ntree, } static void frame_node_draw(const bContext &C, + TreeDrawContext &tree_draw_ctx, const ARegion ®ion, const SpaceNode &snode, bNodeTree &ntree, @@ -2841,7 +2833,7 @@ static void frame_node_draw(const bContext &C, /* label and text */ frame_node_draw_label(ntree, node, snode); - node_draw_extra_info_panel(snode, node, block); + node_draw_extra_info_panel(tree_draw_ctx, snode, node, block); UI_block_end(&C, &block); UI_block_draw(&C, &block); @@ -2895,6 +2887,7 @@ static void reroute_node_draw( } static void node_draw(const bContext &C, + TreeDrawContext &tree_draw_ctx, ARegion ®ion, const SpaceNode &snode, bNodeTree &ntree, @@ -2903,7 +2896,7 @@ static void node_draw(const bContext &C, bNodeInstanceKey key) { if (node.type == NODE_FRAME) { - frame_node_draw(C, region, snode, ntree, node, block); + frame_node_draw(C, tree_draw_ctx, region, snode, ntree, node, block); } else if (node.type == NODE_REROUTE) { reroute_node_draw(C, region, ntree, node, block); @@ -2911,10 +2904,10 @@ static void node_draw(const bContext &C, else { const View2D &v2d = region.v2d; if (node.flag & NODE_HIDDEN) { - node_draw_hidden(C, v2d, snode, ntree, node, block); + node_draw_hidden(C, tree_draw_ctx, v2d, snode, ntree, node, block); } else { - node_draw_basis(C, v2d, snode, ntree, node, block, key); + node_draw_basis(C, tree_draw_ctx, v2d, snode, ntree, node, block, key); } } } @@ -2922,6 +2915,7 @@ static void 
node_draw(const bContext &C, #define USE_DRAW_TOT_UPDATE static void node_draw_nodetree(const bContext &C, + TreeDrawContext &tree_draw_ctx, ARegion ®ion, SpaceNode &snode, bNodeTree &ntree, @@ -2946,7 +2940,7 @@ static void node_draw_nodetree(const bContext &C, } bNodeInstanceKey key = BKE_node_instance_key(parent_key, &ntree, nodes[i]); - node_draw(C, region, snode, ntree, *nodes[i], *blocks[i], key); + node_draw(C, tree_draw_ctx, region, snode, ntree, *nodes[i], *blocks[i], key); } /* Node lines. */ @@ -2976,7 +2970,7 @@ static void node_draw_nodetree(const bContext &C, } bNodeInstanceKey key = BKE_node_instance_key(parent_key, &ntree, nodes[i]); - node_draw(C, region, snode, ntree, *nodes[i], *blocks[i], key); + node_draw(C, tree_draw_ctx, region, snode, ntree, *nodes[i], *blocks[i], key); } } @@ -3035,8 +3029,17 @@ static void draw_nodetree(const bContext &C, Array<uiBlock *> blocks = node_uiblocks_init(C, nodes); - node_update_nodetree(C, ntree, nodes, blocks); - node_draw_nodetree(C, region, *snode, ntree, nodes, blocks, parent_key); + TreeDrawContext tree_draw_ctx; + if (ntree.type == NTREE_GEOMETRY) { + tree_draw_ctx.geo_tree_log = geo_log::GeoModifierLog::get_tree_log_for_node_editor(*snode); + if (tree_draw_ctx.geo_tree_log != nullptr) { + tree_draw_ctx.geo_tree_log->ensure_node_warnings(); + tree_draw_ctx.geo_tree_log->ensure_node_run_time(); + } + } + + node_update_nodetree(C, tree_draw_ctx, ntree, nodes, blocks); + node_draw_nodetree(C, tree_draw_ctx, region, *snode, ntree, nodes, blocks, parent_key); } /** diff --git a/source/blender/editors/space_node/node_geometry_attribute_search.cc b/source/blender/editors/space_node/node_geometry_attribute_search.cc index e328a86b0fd..809c4b2fe59 100644 --- a/source/blender/editors/space_node/node_geometry_attribute_search.cc +++ b/source/blender/editors/space_node/node_geometry_attribute_search.cc @@ -14,6 +14,7 @@ #include "DNA_space_types.h" #include "BKE_context.h" +#include "BKE_node_runtime.hh" #include 
"BKE_node_tree_update.h" #include "BKE_object.h" @@ -30,12 +31,11 @@ #include "UI_interface.hh" #include "UI_resources.h" -#include "NOD_geometry_nodes_eval_log.hh" +#include "NOD_geometry_nodes_log.hh" #include "node_intern.hh" -namespace geo_log = blender::nodes::geometry_nodes_eval_log; -using geo_log::GeometryAttributeInfo; +using blender::nodes::geo_eval_log::GeometryAttributeInfo; namespace blender::ed::space_node { @@ -50,6 +50,8 @@ BLI_STATIC_ASSERT(std::is_trivially_destructible_v<AttributeSearchData>, ""); static Vector<const GeometryAttributeInfo *> get_attribute_info_from_context( const bContext &C, AttributeSearchData &data) { + using namespace nodes::geo_eval_log; + SpaceNode *snode = CTX_wm_space_node(&C); if (!snode) { BLI_assert_unreachable(); @@ -65,41 +67,48 @@ static Vector<const GeometryAttributeInfo *> get_attribute_info_from_context( BLI_assert_unreachable(); return {}; } + GeoTreeLog *tree_log = GeoModifierLog::get_tree_log_for_node_editor(*snode); + if (tree_log == nullptr) { + return {}; + } + tree_log->ensure_socket_values(); /* For the attribute input node, collect attribute information from all nodes in the group. 
*/ if (node->type == GEO_NODE_INPUT_NAMED_ATTRIBUTE) { - const geo_log::TreeLog *tree_log = geo_log::ModifierLog::find_tree_by_node_editor_context( - *snode); - if (tree_log == nullptr) { - return {}; - } - + tree_log->ensure_existing_attributes(); Vector<const GeometryAttributeInfo *> attributes; - Set<StringRef> names; - tree_log->foreach_node_log([&](const geo_log::NodeLog &node_log) { - for (const geo_log::SocketLog &socket_log : node_log.input_logs()) { - const geo_log::ValueLog *value_log = socket_log.value(); - if (const geo_log::GeometryValueLog *geo_value_log = - dynamic_cast<const geo_log::GeometryValueLog *>(value_log)) { - for (const GeometryAttributeInfo &attribute : geo_value_log->attributes()) { - if (bke::allow_procedural_attribute_access(attribute.name)) { - if (names.add(attribute.name)) { - attributes.append(&attribute); - } - } - } - } + for (const GeometryAttributeInfo *attribute : tree_log->existing_attributes) { + if (bke::allow_procedural_attribute_access(attribute->name)) { + attributes.append(attribute); } - }); + } return attributes; } - - const geo_log::NodeLog *node_log = geo_log::ModifierLog::find_node_by_node_editor_context( - *snode, data.node_name); + GeoNodeLog *node_log = tree_log->nodes.lookup_ptr(node->name); if (node_log == nullptr) { return {}; } - return node_log->lookup_available_attributes(); + Set<StringRef> names; + Vector<const GeometryAttributeInfo *> attributes; + for (const bNodeSocket *input_socket : node->input_sockets()) { + if (input_socket->type != SOCK_GEOMETRY) { + continue; + } + const ValueLog *value_log = tree_log->find_socket_value_log(*input_socket); + if (value_log == nullptr) { + continue; + } + if (const GeometryInfoLog *geo_log = dynamic_cast<const GeometryInfoLog *>(value_log)) { + for (const GeometryAttributeInfo &attribute : geo_log->attributes) { + if (bke::allow_procedural_attribute_access(attribute.name)) { + if (names.add(attribute.name)) { + attributes.append(&attribute); + } + } + } + } + } + 
return attributes; } static void attribute_search_update_fn( diff --git a/source/blender/editors/space_spreadsheet/spreadsheet_data_source_geometry.cc b/source/blender/editors/space_spreadsheet/spreadsheet_data_source_geometry.cc index 3290c0ddd87..4703eacdcb9 100644 --- a/source/blender/editors/space_spreadsheet/spreadsheet_data_source_geometry.cc +++ b/source/blender/editors/space_spreadsheet/spreadsheet_data_source_geometry.cc @@ -4,6 +4,7 @@ #include "BLI_virtual_array.hh" #include "BKE_attribute.hh" +#include "BKE_compute_contexts.hh" #include "BKE_context.h" #include "BKE_curves.hh" #include "BKE_editmesh.h" @@ -26,7 +27,8 @@ #include "ED_curves_sculpt.h" #include "ED_spreadsheet.h" -#include "NOD_geometry_nodes_eval_log.hh" +#include "NOD_geometry_nodes_lazy_function.hh" +#include "NOD_geometry_nodes_log.hh" #include "BLT_translation.h" @@ -40,8 +42,8 @@ #include "spreadsheet_data_source_geometry.hh" #include "spreadsheet_intern.hh" -namespace geo_log = blender::nodes::geometry_nodes_eval_log; using blender::fn::GField; +using blender::nodes::geo_eval_log::ViewerNodeLog; namespace blender::ed::spreadsheet { @@ -465,19 +467,10 @@ GeometrySet spreadsheet_get_display_geometry_set(const SpaceSpreadsheet *sspread } } else { - const geo_log::NodeLog *node_log = - geo_log::ModifierLog::find_node_by_spreadsheet_editor_context(*sspreadsheet); - if (node_log != nullptr) { - for (const geo_log::SocketLog &input_log : node_log->input_logs()) { - if (const geo_log::GeometryValueLog *geo_value_log = - dynamic_cast<const geo_log::GeometryValueLog *>(input_log.value())) { - const GeometrySet *full_geometry = geo_value_log->full_geometry(); - if (full_geometry != nullptr) { - geometry_set = *full_geometry; - break; - } - } - } + if (const ViewerNodeLog *viewer_log = + nodes::geo_eval_log::GeoModifierLog::find_viewer_node_log_for_spreadsheet( + *sspreadsheet)) { + geometry_set = viewer_log->geometry; } } } @@ -495,27 +488,11 @@ static void find_fields_to_evaluate(const 
SpaceSpreadsheet *sspreadsheet, /* No viewer is currently referenced by the context path. */ return; } - const geo_log::NodeLog *node_log = geo_log::ModifierLog::find_node_by_spreadsheet_editor_context( - *sspreadsheet); - if (node_log == nullptr) { - return; - } - for (const geo_log::SocketLog &socket_log : node_log->input_logs()) { - const geo_log::ValueLog *value_log = socket_log.value(); - if (value_log == nullptr) { - continue; - } - if (const geo_log::GFieldValueLog *field_value_log = - dynamic_cast<const geo_log::GFieldValueLog *>(value_log)) { - const GField &field = field_value_log->field(); - if (field) { - r_fields.add("Viewer", std::move(field)); - } - } - if (const geo_log::GenericValueLog *generic_value_log = - dynamic_cast<const geo_log::GenericValueLog *>(value_log)) { - GPointer value = generic_value_log->value(); - r_fields.add("Viewer", fn::make_constant_field(*value.type(), value.get())); + if (const ViewerNodeLog *viewer_log = + nodes::geo_eval_log::GeoModifierLog::find_viewer_node_log_for_spreadsheet( + *sspreadsheet)) { + if (viewer_log->field) { + r_fields.add("Viewer", viewer_log->field); } } } diff --git a/source/blender/functions/CMakeLists.txt b/source/blender/functions/CMakeLists.txt index f1298a7f5b7..3d153813425 100644 --- a/source/blender/functions/CMakeLists.txt +++ b/source/blender/functions/CMakeLists.txt @@ -13,6 +13,10 @@ set(INC_SYS set(SRC intern/cpp_types.cc intern/field.cc + intern/lazy_function.cc + intern/lazy_function_execute.cc + intern/lazy_function_graph.cc + intern/lazy_function_graph_executor.cc intern/multi_function.cc intern/multi_function_builder.cc intern/multi_function_params.cc @@ -23,6 +27,10 @@ set(SRC FN_field.hh FN_field_cpp_type.hh + FN_lazy_function.hh + FN_lazy_function_execute.hh + FN_lazy_function_graph.hh + FN_lazy_function_graph_executor.hh FN_multi_function.hh FN_multi_function_builder.hh FN_multi_function_context.hh @@ -61,6 +69,7 @@ blender_add_lib(bf_functions "${SRC}" "${INC}" "${INC_SYS}" 
"${LIB}") if(WITH_GTESTS) set(TEST_SRC tests/FN_field_test.cc + tests/FN_lazy_function_test.cc tests/FN_multi_function_procedure_test.cc tests/FN_multi_function_test.cc diff --git a/source/blender/functions/FN_field.hh b/source/blender/functions/FN_field.hh index bc42cab8db5..ca12f407e49 100644 --- a/source/blender/functions/FN_field.hh +++ b/source/blender/functions/FN_field.hh @@ -565,6 +565,17 @@ template<typename T> struct ValueOrField { } return this->value; } + + friend std::ostream &operator<<(std::ostream &stream, const ValueOrField<T> &value_or_field) + { + if (value_or_field.field) { + stream << "ValueOrField<T>"; + } + else { + stream << value_or_field.value; + } + return stream; + } }; /** \} */ diff --git a/source/blender/functions/FN_field_cpp_type.hh b/source/blender/functions/FN_field_cpp_type.hh index 63a648f3202..6900a093dc6 100644 --- a/source/blender/functions/FN_field_cpp_type.hh +++ b/source/blender/functions/FN_field_cpp_type.hh @@ -59,7 +59,7 @@ class ValueOrFieldCPPType : public CPPType { public: template<typename T> ValueOrFieldCPPType(FieldCPPTypeParam<ValueOrField<T>> /* unused */, StringRef debug_name) - : CPPType(CPPTypeParam<ValueOrField<T>, CPPTypeFlags::None>(), debug_name), + : CPPType(CPPTypeParam<ValueOrField<T>, CPPTypeFlags::Printable>(), debug_name), base_type_(CPPType::get<T>()) { construct_from_value_ = [](void *dst, const void *value_or_field) { diff --git a/source/blender/functions/FN_lazy_function.hh b/source/blender/functions/FN_lazy_function.hh new file mode 100644 index 00000000000..8dceb9ed993 --- /dev/null +++ b/source/blender/functions/FN_lazy_function.hh @@ -0,0 +1,384 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ + +#pragma once + +/** \file + * \ingroup fn + * + * A `LazyFunction` encapsulates a computation which has inputs, outputs and potentially side + * effects. Most importantly, a `LazyFunction` supports lazyness in its inputs and outputs: + * - Only outputs that are actually used have to be computed. 
+ * - Inputs can be requested lazily based on which outputs are used or what side effects the + * function has. + * + * A lazy-function that uses lazyness may be executed more than once. The most common example is + * the geometry nodes switch node. Depending on a condition input, it decides which one of the + * other inputs is actually used. From the perspective of the switch node, its execution works as + * follows: + * 1. The switch node is first executed. It sees that the output is used. Now it requests the + * condition input from the caller and exits. + * 2. Once the caller is able to provide the condition input the switch node is executed again. + * This time it retrieves the condition and requests one of the other inputs. Then the node + * exits again, giving back control to the caller. + * 3. When the caller computed the second requested input the switch node executes a last time. + * This time it retrieves the new input and forwards it to the output. + * + * In some sense, a lazy-function can be thought of like a state machine. Every time it is + * executed, it advances its state until all required outputs are ready. + * + * The lazy-function interface is designed to support composition of many such functions into a new + * lazy-functions, all while keeping the lazyness working. For example, in geometry nodes a switch + * node in a node group should still be able to decide whether a node in the parent group will be + * executed or not. This is essential to avoid doing unnecessary work. + * + * The lazy-function system consists of multiple core components: + * - The interface of a lazy-function itself including its calling convention. + * - A graph data structure that allows composing many lazy-functions by connecting their inputs + * and outputs. + * - An executor that allows multi-threaded execution or such a graph. 
+ */ + +#include "BLI_cpp_type.hh" +#include "BLI_generic_pointer.hh" +#include "BLI_linear_allocator.hh" +#include "BLI_vector.hh" + +namespace blender::fn::lazy_function { + +enum class ValueUsage { + /** + * The value is definitely used and therefore has to be computed. + */ + Used, + /** + * It's unknown whether this value will be used or not. Computing it is ok but the result may be + * discarded. + */ + Maybe, + /** + * The value will definitely not be used. It can still be computed but the result will be + * discarded in all cases. + */ + Unused, +}; + +class LazyFunction; + +/** + * This allows passing arbitrary data into a lazy-function during execution. For that, #UserData + * has to be subclassed. This mainly exists because it's more type safe than passing a `void *` + * with no type information attached. + * + * Some lazy-functions may expect to find a certain type of user data when executed. + */ +class UserData { + public: + virtual ~UserData() = default; +}; + +/** + * Passed to the lazy-function when it is executed. + */ +struct Context { + /** + * If the lazy-function has some state (which only makes sense when it is executed more than once + * to finish its job), the state is stored here. This points to memory returned from + * #LazyFunction::init_storage. + */ + void *storage; + /** + * Custom user data that can be used in the function. + */ + UserData *user_data; +}; + +/** + * Defines the calling convention for a lazy-function. During execution, a lazy-function retrieves + * its inputs and sets the outputs through #Params. + */ +class Params { + public: + /** + * The lazy-function this #Params has been prepared for. + */ + const LazyFunction &fn_; + + public: + Params(const LazyFunction &fn); + + /** + * Get a pointer to an input value if the value is available already. Otherwise null is returned. + * + * The #LazyFunction must leave returned object in an initialized state, but can move from it. 
+ */ + void *try_get_input_data_ptr(int index) const; + + /** + * Same as #try_get_input_data_ptr, but if the data is not yet available, request it. This makes + * sure that the data will be available in a future execution of the #LazyFunction. + */ + void *try_get_input_data_ptr_or_request(int index); + + /** + * Get a pointer to where the output value should be stored. + * The value at the pointer is in an uninitialized state at first. + * The #LazyFunction is responsible for initializing the value. + * After the output has been initialized to its final value, #output_set has to be called. + */ + void *get_output_data_ptr(int index); + + /** + * Call this after the output value is initialized. After this is called, the value must not be + * touched anymore. It may be moved or destructed immediatly. + */ + void output_set(int index); + + /** + * Allows the #LazyFunction to check whether an output was computed already without keeping + * track of it itself. + */ + bool output_was_set(int index) const; + + /** + * Can be used to detect which outputs have to be computed. + */ + ValueUsage get_output_usage(int index) const; + + /** + * Tell the caller of the #LazyFunction that a specific input will definitely not be used. + * Only an input that was not #ValueUsage::Used can become unused. + */ + void set_input_unused(int index); + + /** + * Typed utility methods that wrap the methods above. + */ + template<typename T> T extract_input(int index); + template<typename T> const T &get_input(int index); + template<typename T> T *try_get_input_data_ptr_or_request(int index); + template<typename T> void set_output(int index, T &&value); + + /** + * Utility to initialize all outputs that haven't been set yet. + */ + void set_default_remaining_outputs(); + + private: + /** + * Methods that need to be implemented by subclasses. Those are separate from the non-virtual + * methods above to make it easy to insert additional debugging logic on top of the + * implementations. 
+ */ + virtual void *try_get_input_data_ptr_impl(int index) const = 0; + virtual void *try_get_input_data_ptr_or_request_impl(int index) = 0; + virtual void *get_output_data_ptr_impl(int index) = 0; + virtual void output_set_impl(int index) = 0; + virtual bool output_was_set_impl(int index) const = 0; + virtual ValueUsage get_output_usage_impl(int index) const = 0; + virtual void set_input_unused_impl(int index) = 0; +}; + +/** + * Describes an input of a #LazyFunction. + */ +struct Input { + /** + * Name used for debugging purposes. The string has to be static or has to be owned by something + * else. + */ + const char *debug_name; + /** + * Data type of this input. + */ + const CPPType *type; + /** + * Can be used to indicate a caller or this function if this input is used statically before + * executing it the first time. This is technically not needed but can improve efficiency because + * a round-trip through the `execute` method can be avoided. + * + * When this is #ValueUsage::Used, the caller has to ensure that the input is definitely + * available when the #execute method is first called. The #execute method does not have to check + * whether the value is actually available. + */ + ValueUsage usage; + + Input(const char *debug_name, const CPPType &type, const ValueUsage usage = ValueUsage::Used) + : debug_name(debug_name), type(&type), usage(usage) + { + } +}; + +struct Output { + /** + * Name used for debugging purposes. The string has to be static or has to be owned by something + * else. + */ + const char *debug_name; + /** + * Data type of this output. + */ + const CPPType *type = nullptr; + + Output(const char *debug_name, const CPPType &type) : debug_name(debug_name), type(&type) + { + } +}; + +/** + * A function that can compute outputs and request inputs lazily. For more details see the comment + * at the top of the file. 
+ */ +class LazyFunction { + protected: + const char *debug_name_ = "<unknown>"; + Vector<Input> inputs_; + Vector<Output> outputs_; + + public: + virtual ~LazyFunction() = default; + + /** + * Get a name of the function or an input or output. This is mainly used for debugging. + * These are virtual functions because the names are often not used outside of debugging + * workflows. This way the names are only generated when they are actually needed. + */ + virtual std::string name() const; + virtual std::string input_name(int index) const; + virtual std::string output_name(int index) const; + + /** + * Allocates storage for this function. The storage will be passed to every call to #execute. + * If the function does not keep track of any state, this does not have to be implemented. + */ + virtual void *init_storage(LinearAllocator<> &allocator) const; + + /** + * Destruct the storage created in #init_storage. + */ + virtual void destruct_storage(void *storage) const; + + /** + * Inputs of the function. + */ + Span<Input> inputs() const; + /** + * Outputs of the function. + */ + Span<Output> outputs() const; + + /** + * During execution the function retrieves inputs and sets outputs in #params. For some + * functions, this method is called more than once. After execution, the function either has + * computed all required outputs or is waiting for more inputs. + */ + void execute(Params ¶ms, const Context &context) const; + + /** + * Utility to check that the guarantee by #Input::usage is followed. + */ + bool always_used_inputs_available(const Params ¶ms) const; + + private: + /** + * Needs to be implemented by subclasses. This is separate from #execute so that additional + * debugging logic can be implemented in #execute. 
+ */ + virtual void execute_impl(Params ¶ms, const Context &context) const = 0; +}; + +/* -------------------------------------------------------------------- */ +/** \name #LazyFunction Inline Methods + * \{ */ + +inline Span<Input> LazyFunction::inputs() const +{ + return inputs_; +} + +inline Span<Output> LazyFunction::outputs() const +{ + return outputs_; +} + +inline void LazyFunction::execute(Params ¶ms, const Context &context) const +{ + BLI_assert(this->always_used_inputs_available(params)); + this->execute_impl(params, context); +} + +/** \} */ + +/* -------------------------------------------------------------------- */ +/** \name #Params Inline Methods + * \{ */ + +inline Params::Params(const LazyFunction &fn) : fn_(fn) +{ +} + +inline void *Params::try_get_input_data_ptr(const int index) const +{ + return this->try_get_input_data_ptr_impl(index); +} + +inline void *Params::try_get_input_data_ptr_or_request(const int index) +{ + return this->try_get_input_data_ptr_or_request_impl(index); +} + +inline void *Params::get_output_data_ptr(const int index) +{ + return this->get_output_data_ptr_impl(index); +} + +inline void Params::output_set(const int index) +{ + this->output_set_impl(index); +} + +inline bool Params::output_was_set(const int index) const +{ + return this->output_was_set_impl(index); +} + +inline ValueUsage Params::get_output_usage(const int index) const +{ + return this->get_output_usage_impl(index); +} + +inline void Params::set_input_unused(const int index) +{ + this->set_input_unused_impl(index); +} + +template<typename T> inline T Params::extract_input(const int index) +{ + void *data = this->try_get_input_data_ptr(index); + BLI_assert(data != nullptr); + T return_value = std::move(*static_cast<T *>(data)); + return return_value; +} + +template<typename T> inline const T &Params::get_input(const int index) +{ + const void *data = this->try_get_input_data_ptr(index); + BLI_assert(data != nullptr); + return *static_cast<const T *>(data); 
+} + +template<typename T> inline T *Params::try_get_input_data_ptr_or_request(const int index) +{ + return static_cast<T *>(this->try_get_input_data_ptr_or_request(index)); +} + +template<typename T> inline void Params::set_output(const int index, T &&value) +{ + using DecayT = std::decay_t<T>; + void *data = this->get_output_data_ptr(index); + new (data) DecayT(std::forward<T>(value)); + this->output_set(index); +} + +/** \} */ + +} // namespace blender::fn::lazy_function diff --git a/source/blender/functions/FN_lazy_function_execute.hh b/source/blender/functions/FN_lazy_function_execute.hh new file mode 100644 index 00000000000..4213f5ca5f9 --- /dev/null +++ b/source/blender/functions/FN_lazy_function_execute.hh @@ -0,0 +1,122 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ + +#pragma once + +/** \file + * \ingroup fn + * + * This file contains common utilities for actually executing a lazy-function. + */ + +#include "BLI_parameter_pack_utils.hh" + +#include "FN_lazy_function.hh" + +namespace blender::fn::lazy_function { + +/** + * Most basic implementation of #Params. It does not actually implement any logic for how to + * retrieve inputs or set outputs. Instead, code using #BasicParams has to implement that. 
+ */ +class BasicParams : public Params { + private: + const Span<GMutablePointer> inputs_; + const Span<GMutablePointer> outputs_; + MutableSpan<std::optional<ValueUsage>> input_usages_; + Span<ValueUsage> output_usages_; + MutableSpan<bool> set_outputs_; + + public: + BasicParams(const LazyFunction &fn, + const Span<GMutablePointer> inputs, + const Span<GMutablePointer> outputs, + MutableSpan<std::optional<ValueUsage>> input_usages, + Span<ValueUsage> output_usages, + MutableSpan<bool> set_outputs); + + void *try_get_input_data_ptr_impl(const int index) const override; + void *try_get_input_data_ptr_or_request_impl(const int index) override; + void *get_output_data_ptr_impl(const int index) override; + void output_set_impl(const int index) override; + bool output_was_set_impl(const int index) const override; + ValueUsage get_output_usage_impl(const int index) const override; + void set_input_unused_impl(const int index) override; +}; + +namespace detail { + +/** + * Utility to implement #execute_lazy_function_eagerly. + */ +template<typename... Inputs, typename... Outputs, size_t... InIndices, size_t... 
OutIndices>
+inline void execute_lazy_function_eagerly_impl(
+    const LazyFunction &fn,
+    UserData *user_data,
+    std::tuple<Inputs...> &inputs,
+    std::tuple<Outputs *...> &outputs,
+    std::index_sequence<InIndices...> /* in_indices */,
+    std::index_sequence<OutIndices...> /* out_indices */)
+{
+  constexpr size_t InputsNum = sizeof...(Inputs);
+  constexpr size_t OutputsNum = sizeof...(Outputs);
+  std::array<GMutablePointer, InputsNum> input_pointers;
+  std::array<GMutablePointer, OutputsNum> output_pointers;
+  std::array<std::optional<ValueUsage>, InputsNum> input_usages;
+  std::array<ValueUsage, OutputsNum> output_usages;
+  std::array<bool, OutputsNum> set_outputs;
+  (
+      [&]() {
+        constexpr size_t I = InIndices;
+        using T = Inputs;
+        const CPPType &type = CPPType::get<T>();
+        input_pointers[I] = {type, &std::get<I>(inputs)};
+      }(),
+      ...);
+  (
+      [&]() {
+        constexpr size_t I = OutIndices;
+        using T = Outputs;
+        const CPPType &type = CPPType::get<T>();
+        output_pointers[I] = {type, std::get<I>(outputs)};
+      }(),
+      ...);
+  output_usages.fill(ValueUsage::Used);
+  set_outputs.fill(false);
+  LinearAllocator<> allocator;
+  Context context;
+  context.user_data = user_data;
+  context.storage = fn.init_storage(allocator);
+  BasicParams params{
+      fn, input_pointers, output_pointers, input_usages, output_usages, set_outputs};
+  fn.execute(params, context);
+  fn.destruct_storage(context.storage);
+}
+
+}  // namespace detail
+
+/**
+ * In some cases (mainly for tests), the set of inputs and outputs for a lazy-function is known at
+ * compile time and one just wants to compute the outputs based on the inputs, without any
+ * laziness.
+ *
+ * This function does exactly that. It takes all inputs in a tuple and writes the outputs to
+ * pointers provided in a second tuple. Since all inputs have to be provided, the lazy-function
+ * has to compute all outputs.
+ */
+template<typename... Inputs, typename...
Outputs> +inline void execute_lazy_function_eagerly(const LazyFunction &fn, + UserData *user_data, + std::tuple<Inputs...> inputs, + std::tuple<Outputs *...> outputs) +{ + BLI_assert(fn.inputs().size() == sizeof...(Inputs)); + BLI_assert(fn.outputs().size() == sizeof...(Outputs)); + detail::execute_lazy_function_eagerly_impl(fn, + user_data, + inputs, + outputs, + std::make_index_sequence<sizeof...(Inputs)>(), + std::make_index_sequence<sizeof...(Outputs)>()); +} + +} // namespace blender::fn::lazy_function diff --git a/source/blender/functions/FN_lazy_function_graph.hh b/source/blender/functions/FN_lazy_function_graph.hh new file mode 100644 index 00000000000..4ede28c4f26 --- /dev/null +++ b/source/blender/functions/FN_lazy_function_graph.hh @@ -0,0 +1,421 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ + +#pragma once + +/** \file + * \ingroup fn + * + * This file contains a graph data structure that allows composing multiple lazy-functions into a + * combined lazy-function. + * + * There are two types of nodes in the graph: + * - #FunctionNode: Corresponds to a #LazyFunction. The inputs and outputs of the function become + * input and output sockets of the node. + * - #DummyNode: Is used to indicate inputs and outputs of the entire graph. It can have an + * arbitrary number of sockets. + */ + +#include "BLI_linear_allocator.hh" + +#include "FN_lazy_function.hh" + +namespace blender::fn::lazy_function { + +class Socket; +class InputSocket; +class OutputSocket; +class Node; +class Graph; + +/** + * A #Socket is the interface of a #Node. Every #Socket is either an #InputSocket or #OutputSocket. + * Links can be created from output sockets to input sockets. + */ +class Socket : NonCopyable, NonMovable { + protected: + /** + * The node the socket belongs to. + */ + Node *node_; + /** + * Data type of the socket. Only sockets with the same type can be linked. + */ + const CPPType *type_; + /** + * Indicates whether this is an #InputSocket or #OutputSocket. 
+ */ + bool is_input_; + /** + * Index of the socket. E.g. 0 for the first input and the first output socket. + */ + int index_in_node_; + + friend Graph; + + public: + bool is_input() const; + bool is_output() const; + + int index() const; + + InputSocket &as_input(); + OutputSocket &as_output(); + const InputSocket &as_input() const; + const OutputSocket &as_output() const; + + const Node &node() const; + Node &node(); + + const CPPType &type() const; + + std::string name() const; +}; + +class InputSocket : public Socket { + private: + /** + * An input can have at most one link connected to it. The linked socket is the "origin" because + * it's where the data is coming from. The type of the origin must be the same as the type of + * this socket. + */ + OutputSocket *origin_; + /** + * Can be null or a non-owning pointer to a value of the type of the socket. This value will be + * used when the input is used but not linked. + * + * This is technically not needed, because one could just create a separate node that just + * outputs the value, but that would have more overhead. Especially because it's commonly the + * case that most inputs are unlinked. + */ + const void *default_value_ = nullptr; + + friend Graph; + + public: + OutputSocket *origin(); + const OutputSocket *origin() const; + + const void *default_value() const; + void set_default_value(const void *value); +}; + +class OutputSocket : public Socket { + private: + /** + * An output can be linked to an arbitrary number of inputs of the same type. + */ + Vector<InputSocket *> targets_; + + friend Graph; + + public: + Span<InputSocket *> targets(); + Span<const InputSocket *> targets() const; +}; + +/** + * A #Node has input and output sockets. Every node is either a #FunctionNode or a #DummyNode. + */ +class Node : NonCopyable, NonMovable { + protected: + /** + * The function this node corresponds to. If this is null, the node is a #DummyNode. + * The function is not owned by this #Node nor by the #Graph. 
+ */ + const LazyFunction *fn_ = nullptr; + /** + * Input sockets of the node. + */ + Span<InputSocket *> inputs_; + /** + * Output sockets of the node. + */ + Span<OutputSocket *> outputs_; + /** + * An index that is set when calling #Graph::update_node_indices. This can be used to create + * efficient mappings from nodes to other data using just an array instead of a hash map. + * + * This is technically not necessary but has better performance than always using hash maps. + */ + int index_in_graph_ = -1; + + friend Graph; + + public: + bool is_dummy() const; + bool is_function() const; + int index_in_graph() const; + + Span<const InputSocket *> inputs() const; + Span<const OutputSocket *> outputs() const; + Span<InputSocket *> inputs(); + Span<OutputSocket *> outputs(); + + const InputSocket &input(int index) const; + const OutputSocket &output(int index) const; + InputSocket &input(int index); + OutputSocket &output(int index); + + std::string name() const; +}; + +/** + * A #Node that corresponds to a specific #LazyFunction. + */ +class FunctionNode : public Node { + public: + const LazyFunction &function() const; +}; + +/** + * A #Node that does *not* correspond to a #LazyFunction. Instead it can be used to indicate inputs + * and outputs of the entire graph. It can have an arbitrary number of inputs and outputs. + */ +class DummyNode : public Node { + private: + std::string name_; + + friend Node; +}; + +/** + * A container for an arbitrary number of nodes and links between their sockets. + */ +class Graph : NonCopyable, NonMovable { + private: + /** + * Used to allocate nodes and sockets in the graph. + */ + LinearAllocator<> allocator_; + /** + * Contains all nodes in the graph so that it is efficient to iterate over them. + */ + Vector<Node *> nodes_; + + public: + ~Graph(); + + /** + * Get all nodes in the graph. The index in the span corresponds to #Node::index_in_graph. 
+ */ + Span<const Node *> nodes() const; + + /** + * Add a new function node with sockets that match the passed in #LazyFunction. + */ + FunctionNode &add_function(const LazyFunction &fn); + + /** + * Add a new dummy node with the given socket types. + */ + DummyNode &add_dummy(Span<const CPPType *> input_types, Span<const CPPType *> output_types); + + /** + * Add a link between the two given sockets. + * This has undefined behavior when the input is linked to something else already. + */ + void add_link(OutputSocket &from, InputSocket &to); + + /** + * Make sure that #Node::index_in_graph is up to date. + */ + void update_node_indices(); + + /** + * Can be used to assert that #update_node_indices has been called. + */ + bool node_indices_are_valid() const; + + /** + * Utility to generate a dot graph string for the graph. This can be used for debugging. + */ + std::string to_dot() const; +}; + +/* -------------------------------------------------------------------- */ +/** \name #Socket Inline Methods + * \{ */ + +inline bool Socket::is_input() const +{ + return is_input_; +} + +inline bool Socket::is_output() const +{ + return !is_input_; +} + +inline int Socket::index() const +{ + return index_in_node_; +} + +inline InputSocket &Socket::as_input() +{ + BLI_assert(this->is_input()); + return *static_cast<InputSocket *>(this); +} + +inline OutputSocket &Socket::as_output() +{ + BLI_assert(this->is_output()); + return *static_cast<OutputSocket *>(this); +} + +inline const InputSocket &Socket::as_input() const +{ + BLI_assert(this->is_input()); + return *static_cast<const InputSocket *>(this); +} + +inline const OutputSocket &Socket::as_output() const +{ + BLI_assert(this->is_output()); + return *static_cast<const OutputSocket *>(this); +} + +inline const Node &Socket::node() const +{ + return *node_; +} + +inline Node &Socket::node() +{ + return *node_; +} + +inline const CPPType &Socket::type() const +{ + return *type_; +} + +/** \} */ + +/* 
-------------------------------------------------------------------- */ +/** \name #InputSocket Inline Methods + * \{ */ + +inline const OutputSocket *InputSocket::origin() const +{ + return origin_; +} + +inline OutputSocket *InputSocket::origin() +{ + return origin_; +} + +inline const void *InputSocket::default_value() const +{ + return default_value_; +} + +inline void InputSocket::set_default_value(const void *value) +{ + default_value_ = value; +} + +/** \} */ + +/* -------------------------------------------------------------------- */ +/** \name #OutputSocket Inline Methods + * \{ */ + +inline Span<const InputSocket *> OutputSocket::targets() const +{ + return targets_; +} + +inline Span<InputSocket *> OutputSocket::targets() +{ + return targets_; +} + +/** \} */ + +/* -------------------------------------------------------------------- */ +/** \name #Node Inline Methods + * \{ */ + +inline bool Node::is_dummy() const +{ + return fn_ == nullptr; +} + +inline bool Node::is_function() const +{ + return fn_ != nullptr; +} + +inline int Node::index_in_graph() const +{ + return index_in_graph_; +} + +inline Span<const InputSocket *> Node::inputs() const +{ + return inputs_; +} + +inline Span<const OutputSocket *> Node::outputs() const +{ + return outputs_; +} + +inline Span<InputSocket *> Node::inputs() +{ + return inputs_; +} + +inline Span<OutputSocket *> Node::outputs() +{ + return outputs_; +} + +inline const InputSocket &Node::input(const int index) const +{ + return *inputs_[index]; +} + +inline const OutputSocket &Node::output(const int index) const +{ + return *outputs_[index]; +} + +inline InputSocket &Node::input(const int index) +{ + return *inputs_[index]; +} + +inline OutputSocket &Node::output(const int index) +{ + return *outputs_[index]; +} + +/** \} */ + +/* -------------------------------------------------------------------- */ +/** \name #FunctionNode Inline Methods + * \{ */ + +inline const LazyFunction &FunctionNode::function() const +{ + 
BLI_assert(fn_ != nullptr); + return *fn_; +} + +/** \} */ + +/* -------------------------------------------------------------------- */ +/** \name #Graph Inline Methods + * \{ */ + +inline Span<const Node *> Graph::nodes() const +{ + return nodes_; +} + +/** \} */ + +} // namespace blender::fn::lazy_function diff --git a/source/blender/functions/FN_lazy_function_graph_executor.hh b/source/blender/functions/FN_lazy_function_graph_executor.hh new file mode 100644 index 00000000000..a6ae5cac967 --- /dev/null +++ b/source/blender/functions/FN_lazy_function_graph_executor.hh @@ -0,0 +1,98 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ + +#pragma once + +/** \file + * \ingroup fn + * + * This file provides means to create a #LazyFunction from #Graph (which could then e.g. be used in + * another #Graph again). + */ + +#include "BLI_vector.hh" +#include "BLI_vector_set.hh" + +#include "FN_lazy_function_graph.hh" + +namespace blender::fn::lazy_function { + +/** + * Can be implemented to log values produced during graph evaluation. + */ +class GraphExecutorLogger { + public: + virtual ~GraphExecutorLogger() = default; + + virtual void log_socket_value(const Socket &socket, + GPointer value, + const Context &context) const; + + virtual void log_before_node_execute(const FunctionNode &node, + const Params ¶ms, + const Context &context) const; + + virtual void log_after_node_execute(const FunctionNode &node, + const Params ¶ms, + const Context &context) const; + + virtual void dump_when_outputs_are_missing(const FunctionNode &node, + Span<const OutputSocket *> missing_sockets, + const Context &context) const; + virtual void dump_when_input_is_set_twice(const InputSocket &target_socket, + const OutputSocket &from_socket, + const Context &context) const; +}; + +/** + * Has to be implemented when some of the nodes in the graph may have side effects. 
The + * #GraphExecutor has to know about that to make sure that these nodes will be executed even though + * their outputs are not needed. + */ +class GraphExecutorSideEffectProvider { + public: + virtual ~GraphExecutorSideEffectProvider() = default; + virtual Vector<const FunctionNode *> get_nodes_with_side_effects(const Context &context) const; +}; + +class GraphExecutor : public LazyFunction { + public: + using Logger = GraphExecutorLogger; + using SideEffectProvider = GraphExecutorSideEffectProvider; + + private: + /** + * The graph that is evaluated. + */ + const Graph &graph_; + /** + * Input and output sockets of the entire graph. + */ + VectorSet<const OutputSocket *> graph_inputs_; + VectorSet<const InputSocket *> graph_outputs_; + /** + * Optional logger for events that happen during execution. + */ + const Logger *logger_; + /** + * Optional side effect provider. It knows which nodes have side effects based on the context + * during evaluation. + */ + const SideEffectProvider *side_effect_provider_; + + friend class Executor; + + public: + GraphExecutor(const Graph &graph, + Span<const OutputSocket *> graph_inputs, + Span<const InputSocket *> graph_outputs, + const Logger *logger, + const SideEffectProvider *side_effect_provider); + + void *init_storage(LinearAllocator<> &allocator) const override; + void destruct_storage(void *storage) const override; + + private: + void execute_impl(Params ¶ms, const Context &context) const override; +}; + +} // namespace blender::fn::lazy_function diff --git a/source/blender/functions/FN_multi_function.hh b/source/blender/functions/FN_multi_function.hh index 015df179ef0..accbaf899be 100644 --- a/source/blender/functions/FN_multi_function.hh +++ b/source/blender/functions/FN_multi_function.hh @@ -157,6 +157,7 @@ namespace multi_function_types { using fn::MFContext; using fn::MFContextBuilder; using fn::MFDataType; +using fn::MFParamCategory; using fn::MFParams; using fn::MFParamsBuilder; using fn::MFParamType; diff 
--git a/source/blender/functions/intern/cpp_types.cc b/source/blender/functions/intern/cpp_types.cc index 5c43fffdd61..f046da30994 100644 --- a/source/blender/functions/intern/cpp_types.cc +++ b/source/blender/functions/intern/cpp_types.cc @@ -16,3 +16,6 @@ MAKE_FIELD_CPP_TYPE(BoolField, bool); MAKE_FIELD_CPP_TYPE(Int8Field, int8_t); MAKE_FIELD_CPP_TYPE(Int32Field, int32_t); MAKE_FIELD_CPP_TYPE(StringField, std::string); +BLI_CPP_TYPE_MAKE(StringValueOrFieldVector, + blender::Vector<blender::fn::ValueOrField<std::string>>, + CPPTypeFlags::None); diff --git a/source/blender/functions/intern/lazy_function.cc b/source/blender/functions/intern/lazy_function.cc new file mode 100644 index 00000000000..46572283e9b --- /dev/null +++ b/source/blender/functions/intern/lazy_function.cc @@ -0,0 +1,66 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ + +/** \file + * \ingroup fn + */ + +#include "BLI_array.hh" + +#include "FN_lazy_function.hh" + +namespace blender::fn::lazy_function { + +std::string LazyFunction::name() const +{ + return debug_name_; +} + +std::string LazyFunction::input_name(int index) const +{ + return inputs_[index].debug_name; +} + +std::string LazyFunction::output_name(int index) const +{ + return outputs_[index].debug_name; +} + +void *LazyFunction::init_storage(LinearAllocator<> &UNUSED(allocator)) const +{ + return nullptr; +} + +void LazyFunction::destruct_storage(void *storage) const +{ + BLI_assert(storage == nullptr); + UNUSED_VARS_NDEBUG(storage); +} + +bool LazyFunction::always_used_inputs_available(const Params ¶ms) const +{ + for (const int i : inputs_.index_range()) { + const Input &fn_input = inputs_[i]; + if (fn_input.usage == ValueUsage::Used) { + if (params.try_get_input_data_ptr(i) == nullptr) { + return false; + } + } + } + return true; +} + +void Params::set_default_remaining_outputs() +{ + for (const int i : fn_.outputs().index_range()) { + if (this->output_was_set(i)) { + continue; + } + const Output &fn_output = fn_.outputs()[i]; + 
const CPPType &type = *fn_output.type; + void *data_ptr = this->get_output_data_ptr(i); + type.value_initialize(data_ptr); + this->output_set(i); + } +} + +} // namespace blender::fn::lazy_function diff --git a/source/blender/functions/intern/lazy_function_execute.cc b/source/blender/functions/intern/lazy_function_execute.cc new file mode 100644 index 00000000000..279056afa99 --- /dev/null +++ b/source/blender/functions/intern/lazy_function_execute.cc @@ -0,0 +1,65 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ + +/** \file + * \ingroup fn + */ + +#include "FN_lazy_function_execute.hh" + +namespace blender::fn::lazy_function { + +BasicParams::BasicParams(const LazyFunction &fn, + const Span<GMutablePointer> inputs, + const Span<GMutablePointer> outputs, + MutableSpan<std::optional<ValueUsage>> input_usages, + Span<ValueUsage> output_usages, + MutableSpan<bool> set_outputs) + : Params(fn), + inputs_(inputs), + outputs_(outputs), + input_usages_(input_usages), + output_usages_(output_usages), + set_outputs_(set_outputs) +{ +} + +void *BasicParams::try_get_input_data_ptr_impl(const int index) const +{ + return inputs_[index].get(); +} + +void *BasicParams::try_get_input_data_ptr_or_request_impl(const int index) +{ + void *value = inputs_[index].get(); + if (value == nullptr) { + input_usages_[index] = ValueUsage::Used; + } + return value; +} + +void *BasicParams::get_output_data_ptr_impl(const int index) +{ + return outputs_[index].get(); +} + +void BasicParams::output_set_impl(const int index) +{ + set_outputs_[index] = true; +} + +bool BasicParams::output_was_set_impl(const int index) const +{ + return set_outputs_[index]; +} + +ValueUsage BasicParams::get_output_usage_impl(const int index) const +{ + return output_usages_[index]; +} + +void BasicParams::set_input_unused_impl(const int index) +{ + input_usages_[index] = ValueUsage::Unused; +} + +} // namespace blender::fn::lazy_function diff --git a/source/blender/functions/intern/lazy_function_graph.cc 
b/source/blender/functions/intern/lazy_function_graph.cc new file mode 100644 index 00000000000..cc55b70d166 --- /dev/null +++ b/source/blender/functions/intern/lazy_function_graph.cc @@ -0,0 +1,181 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ + +#include "BLI_dot_export.hh" + +#include "FN_lazy_function_graph.hh" + +namespace blender::fn::lazy_function { + +Graph::~Graph() +{ + for (Node *node : nodes_) { + for (InputSocket *socket : node->inputs_) { + std::destroy_at(socket); + } + for (OutputSocket *socket : node->outputs_) { + std::destroy_at(socket); + } + std::destroy_at(node); + } +} + +FunctionNode &Graph::add_function(const LazyFunction &fn) +{ + const Span<Input> inputs = fn.inputs(); + const Span<Output> outputs = fn.outputs(); + + FunctionNode &node = *allocator_.construct<FunctionNode>().release(); + node.fn_ = &fn; + node.inputs_ = allocator_.construct_elements_and_pointer_array<InputSocket>(inputs.size()); + node.outputs_ = allocator_.construct_elements_and_pointer_array<OutputSocket>(outputs.size()); + + for (const int i : inputs.index_range()) { + InputSocket &socket = *node.inputs_[i]; + socket.index_in_node_ = i; + socket.is_input_ = true; + socket.node_ = &node; + socket.type_ = inputs[i].type; + } + for (const int i : outputs.index_range()) { + OutputSocket &socket = *node.outputs_[i]; + socket.index_in_node_ = i; + socket.is_input_ = false; + socket.node_ = &node; + socket.type_ = outputs[i].type; + } + + nodes_.append(&node); + return node; +} + +DummyNode &Graph::add_dummy(Span<const CPPType *> input_types, Span<const CPPType *> output_types) +{ + DummyNode &node = *allocator_.construct<DummyNode>().release(); + node.fn_ = nullptr; + node.inputs_ = allocator_.construct_elements_and_pointer_array<InputSocket>(input_types.size()); + node.outputs_ = allocator_.construct_elements_and_pointer_array<OutputSocket>( + output_types.size()); + + for (const int i : input_types.index_range()) { + InputSocket &socket = *node.inputs_[i]; + 
socket.index_in_node_ = i; + socket.is_input_ = true; + socket.node_ = &node; + socket.type_ = input_types[i]; + } + for (const int i : output_types.index_range()) { + OutputSocket &socket = *node.outputs_[i]; + socket.index_in_node_ = i; + socket.is_input_ = false; + socket.node_ = &node; + socket.type_ = output_types[i]; + } + + nodes_.append(&node); + return node; +} + +void Graph::add_link(OutputSocket &from, InputSocket &to) +{ + BLI_assert(to.origin_ == nullptr); + BLI_assert(from.type_ == to.type_); + to.origin_ = &from; + from.targets_.append(&to); +} + +void Graph::update_node_indices() +{ + for (const int i : nodes_.index_range()) { + nodes_[i]->index_in_graph_ = i; + } +} + +bool Graph::node_indices_are_valid() const +{ + for (const int i : nodes_.index_range()) { + if (nodes_[i]->index_in_graph_ != i) { + return false; + } + } + return true; +} + +std::string Socket::name() const +{ + if (node_->is_function()) { + const FunctionNode &fn_node = static_cast<const FunctionNode &>(*node_); + const LazyFunction &fn = fn_node.function(); + if (is_input_) { + return fn.input_name(index_in_node_); + } + return fn.output_name(index_in_node_); + } + return "Unnamed"; +} + +std::string Node::name() const +{ + if (fn_ == nullptr) { + return static_cast<const DummyNode *>(this)->name_; + } + return fn_->name(); +} + +std::string Graph::to_dot() const +{ + dot::DirectedGraph digraph; + digraph.set_rankdir(dot::Attr_rankdir::LeftToRight); + + Map<const Node *, dot::NodeWithSocketsRef> dot_nodes; + + for (const Node *node : nodes_) { + dot::Node &dot_node = digraph.new_node(""); + if (node->is_dummy()) { + dot_node.set_background_color("lightblue"); + } + else { + dot_node.set_background_color("white"); + } + + Vector<std::string> input_names; + Vector<std::string> output_names; + for (const InputSocket *socket : node->inputs()) { + input_names.append(socket->name()); + } + for (const OutputSocket *socket : node->outputs()) { + output_names.append(socket->name()); + } 
+
+    dot_nodes.add_new(node,
+                      dot::NodeWithSocketsRef(dot_node, node->name(), input_names, output_names));
+  }
+
+  for (const Node *node : nodes_) {
+    for (const InputSocket *socket : node->inputs()) {
+      const dot::NodeWithSocketsRef &to_dot_node = dot_nodes.lookup(&socket->node());
+      const dot::NodePort to_dot_port = to_dot_node.input(socket->index());
+
+      if (const OutputSocket *origin = socket->origin()) {
+        dot::NodeWithSocketsRef &from_dot_node = dot_nodes.lookup(&origin->node());
+        digraph.new_edge(from_dot_node.output(origin->index()), to_dot_port);
+      }
+      else if (const void *default_value = socket->default_value()) {
+        const CPPType &type = socket->type();
+        std::string value_string;
+        if (type.is_printable()) {
+          value_string = type.to_string(default_value);
+        }
+        else {
+          value_string = "<" + type.name() + ">";
+        }
+        dot::Node &default_value_dot_node = digraph.new_node(value_string);
+        default_value_dot_node.set_shape(dot::Attr_shape::Ellipse);
+        digraph.new_edge(default_value_dot_node, to_dot_port);
+      }
+    }
+  }
+
+  return digraph.to_dot_string();
+}
+
+}  // namespace blender::fn::lazy_function
diff --git a/source/blender/functions/intern/lazy_function_graph_executor.cc b/source/blender/functions/intern/lazy_function_graph_executor.cc
new file mode 100644
index 00000000000..eca29121889
--- /dev/null
+++ b/source/blender/functions/intern/lazy_function_graph_executor.cc
@@ -0,0 +1,1163 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+
+/**
+ * This file implements the evaluation of a lazy-function graph. Its main objectives are:
+ * - Only compute values that are actually used.
+ * - Allow spreading the work over an arbitrary number of CPU cores.
+ *
+ * Other (simpler) executors with different main objectives could be implemented in the future. For
+ * some scenarios those could be simpler when many nodes do very little work or most nodes have to
+ * be processed sequentially. 
Those assumptions make the first and second objective less important + * respectively. + * + * The design implemented in this executor requires *no* main thread that coordinates everything. + * Instead, one thread will trigger some initial work and then many threads coordinate themselves + * in a distributed fashion. In an ideal situation, every thread ends up processing a separate part + * of the graph which results in less communication overhead. The way TBB schedules tasks helps + * with that: a thread will next process the task that it added to a task pool just before. + * + * Communication between threads is synchronized by using a mutex in every node. When a thread + * wants to access the state of a node, its mutex has to be locked first (with some documented + * exceptions). The assumption here is that most nodes are only ever touched by a single thread and + * therefore the lock contention is reduced the more nodes there are. + * + * Similar to how a #LazyFunction can be thought of as a state machine (see `FN_lazy_function.hh`), + * each node can also be thought of as a state machine. The state of a node contains the evaluation + * state of its inputs and outputs. Every time a node is executed, it has to advance its state in + * some way (e.g. it requests a new input or computes a new output). + * + * At the core of the executor is a task pool. Every task in that pool represents a node execution. + * When a node is executed it may send notifications to other nodes which may in turn add those + * nodes to the task pool. For example, the current node has computed one of its outputs, then the + * computed value is forwarded to all linked inputs, changing their node states in the process. If + * this input was the last missing required input, the node will be added to the task pool so that + * it is executed next. 
+ * + * When the task pool is empty, the executor gives back control to the caller which may later + * provide new inputs to the graph which in turn adds new nodes to the task pool and the process + * starts again. + */ + +#include <mutex> + +#include "BLI_compute_context.hh" +#include "BLI_enumerable_thread_specific.hh" +#include "BLI_function_ref.hh" +#include "BLI_task.h" +#include "BLI_task.hh" +#include "BLI_timeit.hh" + +#include "FN_lazy_function_graph_executor.hh" + +namespace blender::fn::lazy_function { + +enum class NodeScheduleState { + /** + * Default state of every node. + */ + NotScheduled, + /** + * The node has been added to the task pool or is otherwise scheduled to be executed in the + * future. + */ + Scheduled, + /** + * The node is currently running. + */ + Running, + /** + * The node is running and has been rescheduled while running. In this case the node run again. + * This state exists, because we don't want to add the node to the task pool twice, because then + * the node might run twice at the same time, which is not allowed. Instead, once the node is + * done running, it will reschedule itself. + */ + RunningAndRescheduled, +}; + +struct InputState { + /** + * Value of this input socket. By default, the value is empty. When other nodes are done + * computing their outputs, the computed values will be forwarded to linked input sockets. The + * value will thenlive here until it is found that it is not needed anymore. + * + * If #was_ready_for_execution is true, access does not require holding the node lock. + */ + void *value = nullptr; + /** + * How the node intends to use this input. By default, all inputs may be used. Based on which + * outputs are used, a node can decide that an input will definitely be used or is never used. + * This allows freeing values early and avoids unnecessary computations. + */ + ValueUsage usage = ValueUsage::Maybe; + /** + * Set to true once #value is set and will stay true afterwards. 
Access during execution of a + * node, does not require holding the node lock. + */ + bool was_ready_for_execution = false; +}; + +struct OutputState { + /** + * Keeps track of how the output value is used. If a connected input becomes used, this output + * has to become used as well. The output becomes unused when it is used by no input socket + * anymore and it's not an output of the graph. + */ + ValueUsage usage = ValueUsage::Maybe; + /** + * This is a copy of #usage that is done right before node execution starts. This is done so that + * the node gets a consistent view of what outputs are used, even when this changes while the + * node is running (the node might be reevaluated in that case). Access during execution of a + * node, does not require holding the node lock. + */ + ValueUsage usage_for_execution = ValueUsage::Maybe; + /** + * Number of linked sockets that might still use the value of this output. + */ + int potential_target_sockets = 0; + /** + * Is set to true once the output has been computed and then stays true. Access does not require + * holding the node lock. + */ + bool has_been_computed = false; + /** + * Holds the output value for a short period of time while the node is initializing it and before + * it's forwarded to input sockets. Access does not require holding the node lock. + */ + void *value = nullptr; +}; + +struct NodeState { + /** + * Needs to be locked when any data in this state is accessed that is not explicitly marked as + * not needing the lock. + */ + mutable std::mutex mutex; + /** + * States of the individual input and output sockets. One can index into these arrays without + * locking. However, to access data inside, a lock is needed unless noted otherwise. + */ + MutableSpan<InputState> inputs; + MutableSpan<OutputState> outputs; + /** + * Counts the number of inputs that still have to be provided to this node, until it should run + * again. 
This is used as an optimization so that nodes are not scheduled unnecessarily in many + * cases. + */ + int missing_required_inputs = 0; + /** + * Is set to true once the node is done with its work, i.e. when all outputs that may be used + * have been computed. + */ + bool node_has_finished = false; + /** + * Set to true once the node is done running for the first time. + */ + bool had_initialization = true; + /** + * Nodes with side effects should always be executed when their required inputs have been + * computed. + */ + bool has_side_effects = false; + /** + * A node is always in one specific schedule state. This helps to ensure that the same node does + * not run twice at the same time accidentally. + */ + NodeScheduleState schedule_state = NodeScheduleState::NotScheduled; + /** + * Custom storage of the node. + */ + void *storage = nullptr; +}; + +/** + * Utility class that wraps a node whose state is locked. Having this is a separate class is useful + * because it allows methods to communicate that they expect the node to be locked. + */ +struct LockedNode { + /** + * This is the node that is currently locked. + */ + const Node &node; + NodeState &node_state; + + /** + * Used to delay notifying (and therefore locking) other nodes until the current node is not + * locked anymore. This might not be strictly necessary to avoid deadlocks in the current code, + * but is a good measure to avoid accidentally adding a deadlock later on. By not locking more + * than one node per thread at a time, deadlocks are avoided. + * + * The notifications will be send right after the node is not locked anymore. 
+ */ + Vector<const OutputSocket *> delayed_required_outputs; + Vector<const OutputSocket *> delayed_unused_outputs; + Vector<const FunctionNode *> delayed_scheduled_nodes; + + LockedNode(const Node &node, NodeState &node_state) : node(node), node_state(node_state) + { + } +}; + +struct CurrentTask { + /** + * The node that should be run on the same thread after the current node is done. This avoids + * some overhead by skipping a round trip through the task pool. + */ + std::atomic<const FunctionNode *> next_node = nullptr; + /** + * Indicates that some node has been added to the task pool. + */ + std::atomic<bool> added_node_to_pool = false; +}; + +class GraphExecutorLFParams; + +class Executor { + private: + const GraphExecutor &self_; + /** + * Remembers which inputs have been loaded from the caller already, to avoid loading them twice. + * Atomics are used to make sure that every input is only retrieved once. + */ + Array<std::atomic<uint8_t>> loaded_inputs_; + /** + * State of every node, indexed by #Node::index_in_graph. + */ + Array<NodeState *> node_states_; + /** + * Parameters provided by the caller. This is always non-null, while a node is running. + */ + Params *params_ = nullptr; + const Context *context_ = nullptr; + /** + * Used to distribute work on separate nodes to separate threads. + */ + TaskPool *task_pool_ = nullptr; + /** + * A separate linear allocator for every thread. We could potentially reuse some memory, but that + * doesn't seem worth it yet. + */ + threading::EnumerableThreadSpecific<LinearAllocator<>> local_allocators_; + /** + * Set to false when the first execution ends. + */ + bool is_first_execution_ = true; + + friend GraphExecutorLFParams; + + public: + Executor(const GraphExecutor &self) : self_(self), loaded_inputs_(self.graph_inputs_.size()) + { + /* The indices are necessary, because they are used as keys in #node_states_. 
*/ + BLI_assert(self_.graph_.node_indices_are_valid()); + } + + ~Executor() + { + BLI_task_pool_free(task_pool_); + threading::parallel_for(node_states_.index_range(), 1024, [&](const IndexRange range) { + for (const int node_index : range) { + const Node &node = *self_.graph_.nodes()[node_index]; + NodeState &node_state = *node_states_[node_index]; + this->destruct_node_state(node, node_state); + } + }); + } + + /** + * Main entry point to the execution of this graph. + */ + void execute(Params ¶ms, const Context &context) + { + params_ = ¶ms; + context_ = &context; + BLI_SCOPED_DEFER([&]() { + /* Make sure the #params_ pointer is not dangling, even when it shouldn't be accessed by + * anyone. */ + params_ = nullptr; + context_ = nullptr; + is_first_execution_ = false; + }); + + CurrentTask current_task; + if (is_first_execution_) { + this->initialize_node_states(); + task_pool_ = BLI_task_pool_create(this, TASK_PRIORITY_HIGH); + + /* Initialize atomics to zero. */ + memset(static_cast<void *>(loaded_inputs_.data()), 0, loaded_inputs_.size() * sizeof(bool)); + + this->set_always_unused_graph_inputs(); + this->set_defaulted_graph_outputs(); + this->schedule_side_effect_nodes(current_task); + } + + this->schedule_newly_requested_outputs(current_task); + this->forward_newly_provided_inputs(current_task); + + /* Avoid using task pool when there is no parallel work to do. */ + while (!current_task.added_node_to_pool) { + if (current_task.next_node == nullptr) { + /* Nothing to do. */ + return; + } + const FunctionNode &node = *current_task.next_node; + current_task.next_node = nullptr; + this->run_node_task(node, current_task); + } + if (current_task.next_node != nullptr) { + this->add_node_to_task_pool(*current_task.next_node); + } + + BLI_task_pool_work_and_wait(task_pool_); + } + + private: + void initialize_node_states() + { + Span<const Node *> nodes = self_.graph_.nodes(); + node_states_.reinitialize(nodes.size()); + + /* Construct all node states in parallel. 
*/ + threading::parallel_for(nodes.index_range(), 256, [&](const IndexRange range) { + LinearAllocator<> &allocator = local_allocators_.local(); + for (const int i : range) { + const Node &node = *nodes[i]; + NodeState &node_state = *allocator.construct<NodeState>().release(); + node_states_[i] = &node_state; + this->construct_initial_node_state(allocator, node, node_state); + } + }); + } + + void construct_initial_node_state(LinearAllocator<> &allocator, + const Node &node, + NodeState &node_state) + { + const Span<const InputSocket *> node_inputs = node.inputs(); + const Span<const OutputSocket *> node_outputs = node.outputs(); + + node_state.inputs = allocator.construct_array<InputState>(node_inputs.size()); + node_state.outputs = allocator.construct_array<OutputState>(node_outputs.size()); + + for (const int i : node_outputs.index_range()) { + OutputState &output_state = node_state.outputs[i]; + const OutputSocket &output_socket = *node_outputs[i]; + output_state.potential_target_sockets = output_socket.targets().size(); + if (output_state.potential_target_sockets == 0) { + output_state.usage = ValueUsage::Unused; + } + } + } + + void destruct_node_state(const Node &node, NodeState &node_state) + { + if (node.is_function()) { + const LazyFunction &fn = static_cast<const FunctionNode &>(node).function(); + if (node_state.storage != nullptr) { + fn.destruct_storage(node_state.storage); + } + } + for (const int i : node.inputs().index_range()) { + InputState &input_state = node_state.inputs[i]; + const InputSocket &input_socket = node.input(i); + this->destruct_input_value_if_exists(input_state, input_socket.type()); + } + std::destroy_at(&node_state); + } + + void schedule_newly_requested_outputs(CurrentTask ¤t_task) + { + for (const int graph_output_index : self_.graph_outputs_.index_range()) { + if (params_->get_output_usage(graph_output_index) != ValueUsage::Used) { + continue; + } + if (params_->output_was_set(graph_output_index)) { + continue; + } + const 
InputSocket &socket = *self_.graph_outputs_[graph_output_index]; + const Node &node = socket.node(); + NodeState &node_state = *node_states_[node.index_in_graph()]; + this->with_locked_node(node, node_state, current_task, [&](LockedNode &locked_node) { + this->set_input_required(locked_node, socket); + }); + } + } + + void set_defaulted_graph_outputs() + { + for (const int graph_output_index : self_.graph_outputs_.index_range()) { + const InputSocket &socket = *self_.graph_outputs_[graph_output_index]; + if (socket.origin() != nullptr) { + continue; + } + const CPPType &type = socket.type(); + const void *default_value = socket.default_value(); + BLI_assert(default_value != nullptr); + + if (self_.logger_ != nullptr) { + self_.logger_->log_socket_value(socket, {type, default_value}, *context_); + } + + void *output_ptr = params_->get_output_data_ptr(graph_output_index); + type.copy_construct(default_value, output_ptr); + params_->output_set(graph_output_index); + } + } + + void set_always_unused_graph_inputs() + { + for (const int i : self_.graph_inputs_.index_range()) { + const OutputSocket &socket = *self_.graph_inputs_[i]; + const Node &node = socket.node(); + const NodeState &node_state = *node_states_[node.index_in_graph()]; + const OutputState &output_state = node_state.outputs[socket.index()]; + if (output_state.usage == ValueUsage::Unused) { + params_->set_input_unused(i); + } + } + } + + void schedule_side_effect_nodes(CurrentTask ¤t_task) + { + if (self_.side_effect_provider_ != nullptr) { + const Vector<const FunctionNode *> side_effect_nodes = + self_.side_effect_provider_->get_nodes_with_side_effects(*context_); + for (const FunctionNode *node : side_effect_nodes) { + NodeState &node_state = *node_states_[node->index_in_graph()]; + node_state.has_side_effects = true; + this->with_locked_node(*node, node_state, current_task, [&](LockedNode &locked_node) { + this->schedule_node(locked_node); + }); + } + } + } + + void 
forward_newly_provided_inputs(CurrentTask ¤t_task) + { + LinearAllocator<> &allocator = local_allocators_.local(); + for (const int graph_input_index : self_.graph_inputs_.index_range()) { + std::atomic<uint8_t> &was_loaded = loaded_inputs_[graph_input_index]; + if (was_loaded.load()) { + continue; + } + void *input_data = params_->try_get_input_data_ptr(graph_input_index); + if (input_data == nullptr) { + continue; + } + if (was_loaded.fetch_or(1)) { + /* The value was forwarded before. */ + continue; + } + this->forward_newly_provided_input(current_task, allocator, graph_input_index, input_data); + } + } + + void forward_newly_provided_input(CurrentTask ¤t_task, + LinearAllocator<> &allocator, + const int graph_input_index, + void *input_data) + { + const OutputSocket &socket = *self_.graph_inputs_[graph_input_index]; + const CPPType &type = socket.type(); + void *buffer = allocator.allocate(type.size(), type.alignment()); + type.move_construct(input_data, buffer); + this->forward_value_to_linked_inputs(socket, {type, buffer}, current_task); + } + + void notify_output_required(const OutputSocket &socket, CurrentTask ¤t_task) + { + const Node &node = socket.node(); + const int index_in_node = socket.index(); + NodeState &node_state = *node_states_[node.index_in_graph()]; + OutputState &output_state = node_state.outputs[index_in_node]; + + /* The notified output socket might be an input of the entire graph. In this case, notify the + * caller that the input is required. */ + if (node.is_dummy()) { + const int graph_input_index = self_.graph_inputs_.index_of(&socket); + std::atomic<uint8_t> &was_loaded = loaded_inputs_[graph_input_index]; + if (was_loaded.load()) { + return; + } + void *input_data = params_->try_get_input_data_ptr_or_request(graph_input_index); + if (input_data == nullptr) { + return; + } + if (was_loaded.fetch_or(1)) { + /* The value was forwarded already. 
*/ + return; + } + this->forward_newly_provided_input( + current_task, local_allocators_.local(), graph_input_index, input_data); + return; + } + + BLI_assert(node.is_function()); + this->with_locked_node(node, node_state, current_task, [&](LockedNode &locked_node) { + if (output_state.usage == ValueUsage::Used) { + return; + } + output_state.usage = ValueUsage::Used; + this->schedule_node(locked_node); + }); + } + + void notify_output_unused(const OutputSocket &socket, CurrentTask ¤t_task) + { + const Node &node = socket.node(); + const int index_in_node = socket.index(); + NodeState &node_state = *node_states_[node.index_in_graph()]; + OutputState &output_state = node_state.outputs[index_in_node]; + + this->with_locked_node(node, node_state, current_task, [&](LockedNode &locked_node) { + output_state.potential_target_sockets -= 1; + if (output_state.potential_target_sockets == 0) { + BLI_assert(output_state.usage != ValueUsage::Unused); + if (output_state.usage == ValueUsage::Maybe) { + output_state.usage = ValueUsage::Unused; + if (node.is_dummy()) { + const int graph_input_index = self_.graph_inputs_.index_of(&socket); + params_->set_input_unused(graph_input_index); + } + else { + this->schedule_node(locked_node); + } + } + } + }); + } + + void schedule_node(LockedNode &locked_node) + { + BLI_assert(locked_node.node.is_function()); + switch (locked_node.node_state.schedule_state) { + case NodeScheduleState::NotScheduled: { + /* Don't add the node to the task pool immeditately, because the task pool might start + * executing it immediatly (when Blender is started with a single thread). That would often + * result in a deadlock, because we are still holding the mutex of the current node. + * Also see comments in #LockedNode. 
*/ + locked_node.node_state.schedule_state = NodeScheduleState::Scheduled; + locked_node.delayed_scheduled_nodes.append( + &static_cast<const FunctionNode &>(locked_node.node)); + break; + } + case NodeScheduleState::Scheduled: { + break; + } + case NodeScheduleState::Running: { + locked_node.node_state.schedule_state = NodeScheduleState::RunningAndRescheduled; + break; + } + case NodeScheduleState::RunningAndRescheduled: { + break; + } + } + } + + void with_locked_node(const Node &node, + NodeState &node_state, + CurrentTask ¤t_task, + const FunctionRef<void(LockedNode &)> f) + { + BLI_assert(&node_state == node_states_[node.index_in_graph()]); + + LockedNode locked_node{node, node_state}; + { + std::lock_guard lock{node_state.mutex}; + threading::isolate_task([&]() { f(locked_node); }); + } + + this->send_output_required_notifications(locked_node.delayed_required_outputs, current_task); + this->send_output_unused_notifications(locked_node.delayed_unused_outputs, current_task); + this->schedule_new_nodes(locked_node.delayed_scheduled_nodes, current_task); + } + + void send_output_required_notifications(const Span<const OutputSocket *> sockets, + CurrentTask ¤t_task) + { + for (const OutputSocket *socket : sockets) { + this->notify_output_required(*socket, current_task); + } + } + + void send_output_unused_notifications(const Span<const OutputSocket *> sockets, + CurrentTask ¤t_task) + { + for (const OutputSocket *socket : sockets) { + this->notify_output_unused(*socket, current_task); + } + } + + void schedule_new_nodes(const Span<const FunctionNode *> nodes, CurrentTask ¤t_task) + { + for (const FunctionNode *node_to_schedule : nodes) { + /* Avoid a round trip through the task pool for the first node that is scheduled by the + * current node execution. Other nodes are added to the pool so that other threads can pick + * them up. 
*/ + const FunctionNode *expected = nullptr; + if (current_task.next_node.compare_exchange_strong( + expected, node_to_schedule, std::memory_order_relaxed)) { + continue; + } + this->add_node_to_task_pool(*node_to_schedule); + current_task.added_node_to_pool.store(true, std::memory_order_relaxed); + } + } + + void add_node_to_task_pool(const Node &node) + { + BLI_task_pool_push( + task_pool_, Executor::run_node_from_task_pool, (void *)&node, false, nullptr); + } + + static void run_node_from_task_pool(TaskPool *task_pool, void *task_data) + { + void *user_data = BLI_task_pool_user_data(task_pool); + Executor &executor = *static_cast<Executor *>(user_data); + const FunctionNode &node = *static_cast<const FunctionNode *>(task_data); + + /* This loop reduces the number of round trips through the task pool as long as the current + * node is scheduling more nodes. */ + CurrentTask current_task; + current_task.next_node = &node; + while (current_task.next_node != nullptr) { + const FunctionNode &node_to_run = *current_task.next_node; + current_task.next_node = nullptr; + executor.run_node_task(node_to_run, current_task); + } + } + + void run_node_task(const FunctionNode &node, CurrentTask ¤t_task) + { + NodeState &node_state = *node_states_[node.index_in_graph()]; + LinearAllocator<> &allocator = local_allocators_.local(); + const LazyFunction &fn = node.function(); + + bool node_needs_execution = false; + this->with_locked_node(node, node_state, current_task, [&](LockedNode &locked_node) { + BLI_assert(node_state.schedule_state == NodeScheduleState::Scheduled); + node_state.schedule_state = NodeScheduleState::Running; + + if (node_state.node_has_finished) { + return; + } + + bool required_uncomputed_output_exists = false; + for (OutputState &output_state : node_state.outputs) { + output_state.usage_for_execution = output_state.usage; + if (output_state.usage == ValueUsage::Used && !output_state.has_been_computed) { + required_uncomputed_output_exists = true; + } + } + 
if (!required_uncomputed_output_exists && !node_state.has_side_effects) { + return; + } + + if (node_state.had_initialization) { + /* Initialize storage. */ + node_state.storage = fn.init_storage(allocator); + + /* Load unlinked inputs. */ + for (const int input_index : node.inputs().index_range()) { + const InputSocket &input_socket = node.input(input_index); + if (input_socket.origin() != nullptr) { + continue; + } + InputState &input_state = node_state.inputs[input_index]; + const CPPType &type = input_socket.type(); + const void *default_value = input_socket.default_value(); + BLI_assert(default_value != nullptr); + if (self_.logger_ != nullptr) { + self_.logger_->log_socket_value(input_socket, {type, default_value}, *context_); + } + void *buffer = allocator.allocate(type.size(), type.alignment()); + type.copy_construct(default_value, buffer); + this->forward_value_to_input(locked_node, input_state, {type, buffer}); + } + + /* Request linked inputs that are always needed. */ + const Span<Input> fn_inputs = fn.inputs(); + for (const int input_index : fn_inputs.index_range()) { + const Input &fn_input = fn_inputs[input_index]; + if (fn_input.usage == ValueUsage::Used) { + const InputSocket &input_socket = node.input(input_index); + this->set_input_required(locked_node, input_socket); + } + } + + node_state.had_initialization = false; + } + + for (const int input_index : node_state.inputs.index_range()) { + InputState &input_state = node_state.inputs[input_index]; + if (input_state.was_ready_for_execution) { + continue; + } + if (input_state.value != nullptr) { + input_state.was_ready_for_execution = true; + continue; + } + if (input_state.usage == ValueUsage::Used) { + return; + } + } + + node_needs_execution = true; + }); + + if (node_needs_execution) { + /* Importantly, the node must not be locked when it is executed. 
That would result in locks + * being hold very long in some cases and results in multiple locks being hold by the same + * thread in the same graph which can lead to deadlocks. */ + this->execute_node(node, node_state, current_task); + } + + this->with_locked_node(node, node_state, current_task, [&](LockedNode &locked_node) { +#ifdef DEBUG + if (node_needs_execution) { + this->assert_expected_outputs_have_been_computed(locked_node); + } +#endif + this->finish_node_if_possible(locked_node); + const bool reschedule_requested = node_state.schedule_state == + NodeScheduleState::RunningAndRescheduled; + node_state.schedule_state = NodeScheduleState::NotScheduled; + if (reschedule_requested && !node_state.node_has_finished) { + this->schedule_node(locked_node); + } + }); + } + + void assert_expected_outputs_have_been_computed(LockedNode &locked_node) + { + const FunctionNode &node = static_cast<const FunctionNode &>(locked_node.node); + const NodeState &node_state = locked_node.node_state; + + if (node_state.missing_required_inputs > 0) { + return; + } + if (node_state.schedule_state == NodeScheduleState::RunningAndRescheduled) { + return; + } + Vector<const OutputSocket *> missing_outputs; + for (const int i : node_state.outputs.index_range()) { + const OutputState &output_state = node_state.outputs[i]; + if (output_state.usage_for_execution == ValueUsage::Used) { + if (!output_state.has_been_computed) { + missing_outputs.append(&node.output(i)); + } + } + } + if (!missing_outputs.is_empty()) { + if (self_.logger_ != nullptr) { + self_.logger_->dump_when_outputs_are_missing(node, missing_outputs, *context_); + } + BLI_assert_unreachable(); + } + } + + void finish_node_if_possible(LockedNode &locked_node) + { + const Node &node = locked_node.node; + NodeState &node_state = locked_node.node_state; + + if (node_state.node_has_finished) { + /* Was finished already. */ + return; + } + /* If there are outputs that may still be used, the node is not done yet. 
*/ + for (const OutputState &output_state : node_state.outputs) { + if (output_state.usage != ValueUsage::Unused && !output_state.has_been_computed) { + return; + } + } + /* If the node is still waiting for inputs, it is not done yet. */ + for (const InputState &input_state : node_state.inputs) { + if (input_state.usage == ValueUsage::Used && !input_state.was_ready_for_execution) { + return; + } + } + + node_state.node_has_finished = true; + + for (const int input_index : node_state.inputs.index_range()) { + const InputSocket &input_socket = node.input(input_index); + InputState &input_state = node_state.inputs[input_index]; + if (input_state.usage == ValueUsage::Maybe) { + this->set_input_unused(locked_node, input_socket); + } + else if (input_state.usage == ValueUsage::Used) { + this->destruct_input_value_if_exists(input_state, input_socket.type()); + } + } + + if (node_state.storage != nullptr) { + if (node.is_function()) { + const FunctionNode &fn_node = static_cast<const FunctionNode &>(node); + fn_node.function().destruct_storage(node_state.storage); + } + node_state.storage = nullptr; + } + } + + void destruct_input_value_if_exists(InputState &input_state, const CPPType &type) + { + if (input_state.value != nullptr) { + type.destruct(input_state.value); + input_state.value = nullptr; + } + } + + void execute_node(const FunctionNode &node, NodeState &node_state, CurrentTask ¤t_task); + + void set_input_unused_during_execution(const Node &node, + NodeState &node_state, + const int input_index, + CurrentTask ¤t_task) + { + const InputSocket &input_socket = node.input(input_index); + this->with_locked_node(node, node_state, current_task, [&](LockedNode &locked_node) { + this->set_input_unused(locked_node, input_socket); + }); + } + + void set_input_unused(LockedNode &locked_node, const InputSocket &input_socket) + { + NodeState &node_state = locked_node.node_state; + const int input_index = input_socket.index(); + InputState &input_state = 
node_state.inputs[input_index]; + + BLI_assert(input_state.usage != ValueUsage::Used); + if (input_state.usage == ValueUsage::Unused) { + return; + } + input_state.usage = ValueUsage::Unused; + + this->destruct_input_value_if_exists(input_state, input_socket.type()); + if (input_state.was_ready_for_execution) { + return; + } + const OutputSocket *origin = input_socket.origin(); + if (origin != nullptr) { + locked_node.delayed_unused_outputs.append(origin); + } + } + + void *set_input_required_during_execution(const Node &node, + NodeState &node_state, + const int input_index, + CurrentTask ¤t_task) + { + const InputSocket &input_socket = node.input(input_index); + void *result; + this->with_locked_node(node, node_state, current_task, [&](LockedNode &locked_node) { + result = this->set_input_required(locked_node, input_socket); + }); + return result; + } + + void *set_input_required(LockedNode &locked_node, const InputSocket &input_socket) + { + BLI_assert(&locked_node.node == &input_socket.node()); + NodeState &node_state = locked_node.node_state; + const int input_index = input_socket.index(); + InputState &input_state = node_state.inputs[input_index]; + + BLI_assert(input_state.usage != ValueUsage::Unused); + + if (input_state.value != nullptr) { + input_state.was_ready_for_execution = true; + return input_state.value; + } + if (input_state.usage == ValueUsage::Used) { + return nullptr; + } + input_state.usage = ValueUsage::Used; + node_state.missing_required_inputs += 1; + + const OutputSocket *origin_socket = input_socket.origin(); + /* Unlinked inputs are always loaded in advance. 
*/ + BLI_assert(origin_socket != nullptr); + locked_node.delayed_required_outputs.append(origin_socket); + return nullptr; + } + + void forward_value_to_linked_inputs(const OutputSocket &from_socket, + GMutablePointer value_to_forward, + CurrentTask ¤t_task) + { + BLI_assert(value_to_forward.get() != nullptr); + LinearAllocator<> &allocator = local_allocators_.local(); + const CPPType &type = *value_to_forward.type(); + + if (self_.logger_ != nullptr) { + self_.logger_->log_socket_value(from_socket, value_to_forward, *context_); + } + + const Span<const InputSocket *> targets = from_socket.targets(); + for (const InputSocket *target_socket : targets) { + const Node &target_node = target_socket->node(); + NodeState &node_state = *node_states_[target_node.index_in_graph()]; + const int input_index = target_socket->index(); + InputState &input_state = node_state.inputs[input_index]; + const bool is_last_target = target_socket == targets.last(); +#ifdef DEBUG + if (input_state.value != nullptr) { + if (self_.logger_ != nullptr) { + self_.logger_->dump_when_input_is_set_twice(*target_socket, from_socket, *context_); + } + BLI_assert_unreachable(); + } +#endif + BLI_assert(!input_state.was_ready_for_execution); + BLI_assert(target_socket->type() == type); + BLI_assert(target_socket->origin() == &from_socket); + + if (self_.logger_ != nullptr) { + self_.logger_->log_socket_value(*target_socket, value_to_forward, *context_); + } + if (target_node.is_dummy()) { + /* Forward the value to the outside of the graph. 
*/ + const int graph_output_index = self_.graph_outputs_.index_of_try(target_socket); + if (graph_output_index != -1 && + params_->get_output_usage(graph_output_index) != ValueUsage::Unused) { + void *dst_buffer = params_->get_output_data_ptr(graph_output_index); + if (is_last_target) { + type.move_construct(value_to_forward.get(), dst_buffer); + } + else { + type.copy_construct(value_to_forward.get(), dst_buffer); + } + params_->output_set(graph_output_index); + } + continue; + } + this->with_locked_node(target_node, node_state, current_task, [&](LockedNode &locked_node) { + if (input_state.usage == ValueUsage::Unused) { + return; + } + if (is_last_target) { + /* No need to make a copy if this is the last target. */ + this->forward_value_to_input(locked_node, input_state, value_to_forward); + value_to_forward = {}; + } + else { + void *buffer = allocator.allocate(type.size(), type.alignment()); + type.copy_construct(value_to_forward.get(), buffer); + this->forward_value_to_input(locked_node, input_state, {type, buffer}); + } + }); + } + if (value_to_forward.get() != nullptr) { + value_to_forward.destruct(); + } + } + + void forward_value_to_input(LockedNode &locked_node, + InputState &input_state, + GMutablePointer value) + { + NodeState &node_state = locked_node.node_state; + + BLI_assert(input_state.value == nullptr); + BLI_assert(!input_state.was_ready_for_execution); + input_state.value = value.get(); + + if (input_state.usage == ValueUsage::Used) { + node_state.missing_required_inputs -= 1; + if (node_state.missing_required_inputs == 0) { + this->schedule_node(locked_node); + } + } + } +}; + +class GraphExecutorLFParams final : public Params { + private: + Executor &executor_; + const Node &node_; + NodeState &node_state_; + CurrentTask ¤t_task_; + + public: + GraphExecutorLFParams(const LazyFunction &fn, + Executor &executor, + const Node &node, + NodeState &node_state, + CurrentTask ¤t_task) + : Params(fn), + executor_(executor), + node_(node), + 
node_state_(node_state), + current_task_(current_task) + { + } + + private: + void *try_get_input_data_ptr_impl(const int index) const override + { + const InputState &input_state = node_state_.inputs[index]; + if (input_state.was_ready_for_execution) { + return input_state.value; + } + return nullptr; + } + + void *try_get_input_data_ptr_or_request_impl(const int index) override + { + const InputState &input_state = node_state_.inputs[index]; + if (input_state.was_ready_for_execution) { + return input_state.value; + } + return executor_.set_input_required_during_execution(node_, node_state_, index, current_task_); + } + + void *get_output_data_ptr_impl(const int index) override + { + OutputState &output_state = node_state_.outputs[index]; + BLI_assert(!output_state.has_been_computed); + if (output_state.value == nullptr) { + LinearAllocator<> &allocator = executor_.local_allocators_.local(); + const CPPType &type = node_.output(index).type(); + output_state.value = allocator.allocate(type.size(), type.alignment()); + } + return output_state.value; + } + + void output_set_impl(const int index) override + { + OutputState &output_state = node_state_.outputs[index]; + BLI_assert(!output_state.has_been_computed); + BLI_assert(output_state.value != nullptr); + const OutputSocket &output_socket = node_.output(index); + executor_.forward_value_to_linked_inputs( + output_socket, {output_socket.type(), output_state.value}, current_task_); + output_state.value = nullptr; + output_state.has_been_computed = true; + } + + bool output_was_set_impl(const int index) const override + { + const OutputState &output_state = node_state_.outputs[index]; + return output_state.has_been_computed; + } + + ValueUsage get_output_usage_impl(const int index) const override + { + const OutputState &output_state = node_state_.outputs[index]; + return output_state.usage_for_execution; + } + + void set_input_unused_impl(const int index) override + { + 
executor_.set_input_unused_during_execution(node_, node_state_, index, current_task_); + } +}; + +/** + * Actually execute the node. + * + * Making this `inline` results in a simpler backtrace in release builds. + */ +inline void Executor::execute_node(const FunctionNode &node, + NodeState &node_state, + CurrentTask &current_task) +{ + const LazyFunction &fn = node.function(); + GraphExecutorLFParams node_params{fn, *this, node, node_state, current_task}; + BLI_assert(context_ != nullptr); + Context fn_context = *context_; + fn_context.storage = node_state.storage; + + if (self_.logger_ != nullptr) { + self_.logger_->log_before_node_execute(node, node_params, fn_context); + } + + fn.execute(node_params, fn_context); + + if (self_.logger_ != nullptr) { + self_.logger_->log_after_node_execute(node, node_params, fn_context); + } +} + +GraphExecutor::GraphExecutor(const Graph &graph, + const Span<const OutputSocket *> graph_inputs, + const Span<const InputSocket *> graph_outputs, + const Logger *logger, + const SideEffectProvider *side_effect_provider) + : graph_(graph), + graph_inputs_(graph_inputs), + graph_outputs_(graph_outputs), + logger_(logger), + side_effect_provider_(side_effect_provider) +{ + for (const OutputSocket *socket : graph_inputs_) { + BLI_assert(socket->node().is_dummy()); + inputs_.append({"In", socket->type(), ValueUsage::Maybe}); + } + for (const InputSocket *socket : graph_outputs_) { + BLI_assert(socket->node().is_dummy()); + outputs_.append({"Out", socket->type()}); + } +} + +void GraphExecutor::execute_impl(Params &params, const Context &context) const +{ + Executor &executor = *static_cast<Executor *>(context.storage); + executor.execute(params, context); +} + +void *GraphExecutor::init_storage(LinearAllocator<> &allocator) const +{ + Executor &executor = *allocator.construct<Executor>(*this).release(); + return &executor; +} + +void GraphExecutor::destruct_storage(void *storage) const +{ + std::destroy_at(static_cast<Executor *>(storage)); +} + +void 
GraphExecutorLogger::log_socket_value(const Socket &socket, + const GPointer value, + const Context &context) const +{ + UNUSED_VARS(socket, value, context); +} + +void GraphExecutorLogger::log_before_node_execute(const FunctionNode &node, + const Params &params, + const Context &context) const +{ + UNUSED_VARS(node, params, context); +} + +void GraphExecutorLogger::log_after_node_execute(const FunctionNode &node, + const Params &params, + const Context &context) const +{ + UNUSED_VARS(node, params, context); +} + +Vector<const FunctionNode *> GraphExecutorSideEffectProvider::get_nodes_with_side_effects( + const Context &context) const +{ + UNUSED_VARS(context); + return {}; +} + +void GraphExecutorLogger::dump_when_outputs_are_missing(const FunctionNode &node, + Span<const OutputSocket *> missing_sockets, + const Context &context) const +{ + UNUSED_VARS(node, missing_sockets, context); +} + +void GraphExecutorLogger::dump_when_input_is_set_twice(const InputSocket &target_socket, + const OutputSocket &from_socket, + const Context &context) const +{ + UNUSED_VARS(target_socket, from_socket, context); +} + +} // namespace blender::fn::lazy_function diff --git a/source/blender/functions/tests/FN_lazy_function_test.cc b/source/blender/functions/tests/FN_lazy_function_test.cc new file mode 100644 index 00000000000..8df064cd8a6 --- /dev/null +++ b/source/blender/functions/tests/FN_lazy_function_test.cc @@ -0,0 +1,115 @@ +/* SPDX-License-Identifier: Apache-2.0 */ + +#include "testing/testing.h" + +#include "FN_lazy_function_execute.hh" +#include "FN_lazy_function_graph.hh" +#include "FN_lazy_function_graph_executor.hh" + +#include "BLI_task.h" +#include "BLI_timeit.hh" + +namespace blender::fn::lazy_function::tests { + +class AddLazyFunction : public LazyFunction { + public: + AddLazyFunction() + { + debug_name_ = "Add"; + inputs_.append({"A", CPPType::get<int>()}); + inputs_.append({"B", CPPType::get<int>()}); + outputs_.append({"Result", CPPType::get<int>()}); + } + + void 
execute_impl(Params &params, const Context &UNUSED(context)) const override + { + const int a = params.get_input<int>(0); + const int b = params.get_input<int>(1); + params.set_output(0, a + b); + } +}; + +class StoreValueFunction : public LazyFunction { + private: + int *dst1_; + int *dst2_; + + public: + StoreValueFunction(int *dst1, int *dst2) : dst1_(dst1), dst2_(dst2) + { + debug_name_ = "Store Value"; + inputs_.append({"A", CPPType::get<int>()}); + inputs_.append({"B", CPPType::get<int>(), ValueUsage::Maybe}); + } + + void execute_impl(Params &params, const Context &UNUSED(context)) const override + { + *dst1_ = params.get_input<int>(0); + if (int *value = params.try_get_input_data_ptr_or_request<int>(1)) { + *dst2_ = *value; + } + } +}; + +class SimpleSideEffectProvider : public GraphExecutor::SideEffectProvider { + private: + Vector<const FunctionNode *> side_effect_nodes_; + + public: + SimpleSideEffectProvider(Span<const FunctionNode *> side_effect_nodes) + : side_effect_nodes_(side_effect_nodes) + { + } + + Vector<const FunctionNode *> get_nodes_with_side_effects( + const Context &UNUSED(context)) const override + { + return side_effect_nodes_; + } +}; + +TEST(lazy_function, SimpleAdd) +{ + const AddLazyFunction add_fn; + int result = 0; + execute_lazy_function_eagerly(add_fn, nullptr, std::make_tuple(30, 5), std::make_tuple(&result)); + EXPECT_EQ(result, 35); +} + +TEST(lazy_function, SideEffects) +{ + BLI_task_scheduler_init(); + int dst1 = 0; + int dst2 = 0; + + const AddLazyFunction add_fn; + const StoreValueFunction store_fn{&dst1, &dst2}; + + Graph graph; + FunctionNode &add_node_1 = graph.add_function(add_fn); + FunctionNode &add_node_2 = graph.add_function(add_fn); + FunctionNode &store_node = graph.add_function(store_fn); + DummyNode &input_node = graph.add_dummy({}, {&CPPType::get<int>()}); + + graph.add_link(input_node.output(0), add_node_1.input(0)); + graph.add_link(input_node.output(0), add_node_2.input(0)); + graph.add_link(add_node_1.output(0), 
store_node.input(0)); + graph.add_link(add_node_2.output(0), store_node.input(1)); + + const int value_10 = 10; + const int value_100 = 100; + add_node_1.input(1).set_default_value(&value_10); + add_node_2.input(1).set_default_value(&value_100); + + graph.update_node_indices(); + + SimpleSideEffectProvider side_effect_provider{{&store_node}}; + + GraphExecutor executor_fn{graph, {&input_node.output(0)}, {}, nullptr, &side_effect_provider}; + execute_lazy_function_eagerly(executor_fn, nullptr, std::make_tuple(5), std::make_tuple()); + + EXPECT_EQ(dst1, 15); + EXPECT_EQ(dst2, 105); +} + +} // namespace blender::fn::lazy_function::tests diff --git a/source/blender/makesdna/DNA_node_types.h b/source/blender/makesdna/DNA_node_types.h index b19210968d9..735f5c7b20a 100644 --- a/source/blender/makesdna/DNA_node_types.h +++ b/source/blender/makesdna/DNA_node_types.h @@ -637,6 +637,9 @@ typedef struct bNodeTree { /** A span containing all nodes in the node tree. */ blender::Span<bNode *> all_nodes(); blender::Span<const bNode *> all_nodes() const; + /** A span containing all group nodes in the node tree. */ + blender::Span<bNode *> group_nodes(); + blender::Span<const bNode *> group_nodes() const; /** A span containing all input sockets in the node tree. 
*/ blender::Span<bNodeSocket *> all_input_sockets(); blender::Span<const bNodeSocket *> all_input_sockets() const; diff --git a/source/blender/modifiers/CMakeLists.txt b/source/blender/modifiers/CMakeLists.txt index 73daabec9b3..8bace2e048c 100644 --- a/source/blender/modifiers/CMakeLists.txt +++ b/source/blender/modifiers/CMakeLists.txt @@ -65,7 +65,6 @@ set(SRC intern/MOD_mirror.c intern/MOD_multires.c intern/MOD_nodes.cc - intern/MOD_nodes_evaluator.cc intern/MOD_none.c intern/MOD_normal_edit.c intern/MOD_ocean.c @@ -105,7 +104,6 @@ set(SRC MOD_modifiertypes.h MOD_nodes.h intern/MOD_meshcache_util.h - intern/MOD_nodes_evaluator.hh intern/MOD_solidify_util.h intern/MOD_ui_common.h intern/MOD_util.h diff --git a/source/blender/modifiers/intern/MOD_nodes.cc b/source/blender/modifiers/intern/MOD_nodes.cc index 2908fbf5597..ffd78a90638 100644 --- a/source/blender/modifiers/intern/MOD_nodes.cc +++ b/source/blender/modifiers/intern/MOD_nodes.cc @@ -36,6 +36,7 @@ #include "DNA_windowmanager_types.h" #include "BKE_attribute_math.hh" +#include "BKE_compute_contexts.hh" #include "BKE_customdata.h" #include "BKE_geometry_fields.hh" #include "BKE_geometry_set_instances.hh" @@ -73,7 +74,6 @@ #include "MOD_modifiertypes.h" #include "MOD_nodes.h" -#include "MOD_nodes_evaluator.hh" #include "MOD_ui_common.h" #include "ED_object.h" @@ -81,15 +81,18 @@ #include "ED_spreadsheet.h" #include "ED_undo.h" -#include "NOD_derived_node_tree.hh" #include "NOD_geometry.h" -#include "NOD_geometry_nodes_eval_log.hh" +#include "NOD_geometry_nodes_lazy_function.hh" #include "NOD_node_declaration.hh" #include "FN_field.hh" #include "FN_field_cpp_type.hh" +#include "FN_lazy_function_execute.hh" +#include "FN_lazy_function_graph_executor.hh" #include "FN_multi_function.hh" +namespace lf = blender::fn::lazy_function; + using blender::Array; using blender::ColorGeometry4f; using blender::CPPType; @@ -106,6 +109,7 @@ using blender::MultiValueMap; using blender::MutableSpan; using blender::Set; using 
blender::Span; +using blender::Stack; using blender::StringRef; using blender::StringRefNull; using blender::Vector; @@ -117,11 +121,17 @@ using blender::fn::ValueOrFieldCPPType; using blender::nodes::FieldInferencingInterface; using blender::nodes::GeoNodeExecParams; using blender::nodes::InputSocketFieldType; +using blender::nodes::geo_eval_log::GeoModifierLog; using blender::threading::EnumerableThreadSpecific; using namespace blender::fn::multi_function_types; -using namespace blender::nodes::derived_node_tree_types; -using geo_log::eNamedAttrUsage; -using geo_log::GeometryAttributeInfo; +using blender::nodes::geo_eval_log::GeometryAttributeInfo; +using blender::nodes::geo_eval_log::GeometryInfoLog; +using blender::nodes::geo_eval_log::GeoNodeLog; +using blender::nodes::geo_eval_log::GeoTreeLog; +using blender::nodes::geo_eval_log::NamedAttributeUsage; +using blender::nodes::geo_eval_log::NodeWarning; +using blender::nodes::geo_eval_log::NodeWarningType; +using blender::nodes::geo_eval_log::ValueLog; static void initData(ModifierData *md) { @@ -756,36 +766,37 @@ void MOD_nodes_update_interface(Object *object, NodesModifierData *nmd) } static void initialize_group_input(NodesModifierData &nmd, - const bNodeSocket &socket, + const bNodeSocket &interface_socket, + const int input_index, void *r_value) { - const bNodeSocketType &socket_type = *socket.typeinfo; - const bNodeSocket &bsocket = socket; - const eNodeSocketDatatype socket_data_type = static_cast<eNodeSocketDatatype>(bsocket.type); + const bNodeSocketType &socket_type = *interface_socket.typeinfo; + const eNodeSocketDatatype socket_data_type = static_cast<eNodeSocketDatatype>( + interface_socket.type); if (nmd.settings.properties == nullptr) { - socket_type.get_geometry_nodes_cpp_value(bsocket, r_value); + socket_type.get_geometry_nodes_cpp_value(interface_socket, r_value); return; } const IDProperty *property = IDP_GetPropertyFromGroup(nmd.settings.properties, - socket.identifier); + 
interface_socket.identifier); if (property == nullptr) { - socket_type.get_geometry_nodes_cpp_value(bsocket, r_value); + socket_type.get_geometry_nodes_cpp_value(interface_socket, r_value); return; } - if (!id_property_type_matches_socket(bsocket, *property)) { - socket_type.get_geometry_nodes_cpp_value(bsocket, r_value); + if (!id_property_type_matches_socket(interface_socket, *property)) { + socket_type.get_geometry_nodes_cpp_value(interface_socket, r_value); return; } - if (!input_has_attribute_toggle(*nmd.node_group, socket.runtime->index_in_node)) { + if (!input_has_attribute_toggle(*nmd.node_group, input_index)) { init_socket_cpp_value_from_property(*property, socket_data_type, r_value); return; } const IDProperty *property_use_attribute = IDP_GetPropertyFromGroup( - nmd.settings.properties, (socket.identifier + use_attribute_suffix).c_str()); + nmd.settings.properties, (interface_socket.identifier + use_attribute_suffix).c_str()); const IDProperty *property_attribute_name = IDP_GetPropertyFromGroup( - nmd.settings.properties, (socket.identifier + attribute_name_suffix).c_str()); + nmd.settings.properties, (interface_socket.identifier + attribute_name_suffix).c_str()); if (property_use_attribute == nullptr || property_attribute_name == nullptr) { init_socket_cpp_value_from_property(*property, socket_data_type, r_value); return; @@ -831,13 +842,25 @@ static Vector<SpaceSpreadsheet *> find_spreadsheet_editors(Main *bmain) return spreadsheets; } -static void find_sockets_to_preview_for_spreadsheet(SpaceSpreadsheet *sspreadsheet, - NodesModifierData *nmd, - const ModifierEvalContext *ctx, - const DerivedNodeTree &tree, - Set<DSocket> &r_sockets_to_preview) +static const lf::FunctionNode &find_viewer_lf_node(const bNode &viewer_bnode) +{ + return *blender::nodes::ensure_geometry_nodes_lazy_function_graph(viewer_bnode.owner_tree()) + ->mapping.viewer_node_map.lookup(&viewer_bnode); +} +static const lf::FunctionNode &find_group_lf_node(const bNode &group_bnode) +{ + 
return *blender::nodes::ensure_geometry_nodes_lazy_function_graph(group_bnode.owner_tree()) + ->mapping.group_node_map.lookup(&group_bnode); +} + +static void find_side_effect_nodes_for_spreadsheet( + const SpaceSpreadsheet &sspreadsheet, + const NodesModifierData &nmd, + const ModifierEvalContext &ctx, + const bNodeTree &root_tree, + MultiValueMap<blender::ComputeContextHash, const lf::FunctionNode *> &r_side_effect_nodes) { - Vector<SpreadsheetContext *> context_path = sspreadsheet->context_path; + Vector<SpreadsheetContext *> context_path = sspreadsheet.context_path; if (context_path.size() < 3) { return; } @@ -848,11 +871,11 @@ static void find_sockets_to_preview_for_spreadsheet(SpaceSpreadsheet *sspreadshe return; } SpreadsheetContextObject *object_context = (SpreadsheetContextObject *)context_path[0]; - if (object_context->object != DEG_get_original_object(ctx->object)) { + if (object_context->object != DEG_get_original_object(ctx.object)) { return; } SpreadsheetContextModifier *modifier_context = (SpreadsheetContextModifier *)context_path[1]; - if (StringRef(modifier_context->modifier_name) != nmd->modifier.name) { + if (StringRef(modifier_context->modifier_name) != nmd.modifier.name) { return; } for (SpreadsheetContext *context : context_path.as_span().drop_front(2)) { @@ -861,61 +884,77 @@ static void find_sockets_to_preview_for_spreadsheet(SpaceSpreadsheet *sspreadshe } } - Span<SpreadsheetContextNode *> nested_group_contexts = + blender::ComputeContextBuilder compute_context_builder; + compute_context_builder.push<blender::bke::ModifierComputeContext>(nmd.modifier.name); + + const Span<SpreadsheetContextNode *> nested_group_contexts = context_path.as_span().drop_front(2).drop_back(1).cast<SpreadsheetContextNode *>(); - SpreadsheetContextNode *last_context = (SpreadsheetContextNode *)context_path.last(); + const SpreadsheetContextNode *last_context = (SpreadsheetContextNode *)context_path.last(); - const DTreeContext *context = &tree.root_context(); + 
Stack<const bNode *> group_node_stack; + const bNodeTree *group = &root_tree; for (SpreadsheetContextNode *node_context : nested_group_contexts) { - const bNodeTree &btree = context->btree(); const bNode *found_node = nullptr; - for (const bNode *bnode : btree.all_nodes()) { - if (STREQ(bnode->name, node_context->node_name)) { - found_node = bnode; + for (const bNode *node : group->group_nodes()) { + if (STREQ(node->name, node_context->node_name)) { + found_node = node; break; } } if (found_node == nullptr) { return; } - context = context->child_context(*found_node); - if (context == nullptr) { + if (found_node->id == nullptr) { return; } + group_node_stack.push(found_node); + group = reinterpret_cast<const bNodeTree *>(found_node->id); + compute_context_builder.push<blender::bke::NodeGroupComputeContext>(node_context->node_name); } - const bNodeTree &btree = context->btree(); - for (const bNode *bnode : btree.nodes_by_type("GeometryNodeViewer")) { - if (STREQ(bnode->name, last_context->node_name)) { - const DNode viewer_node{context, bnode}; - for (const bNodeSocket *input_socket : bnode->input_sockets()) { - if (input_socket->is_available() && input_socket->is_logically_linked()) { - r_sockets_to_preview.add(DSocket{context, input_socket}); - } - } + const bNode *found_viewer_node = nullptr; + for (const bNode *viewer_node : group->nodes_by_type("GeometryNodeViewer")) { + if (STREQ(viewer_node->name, last_context->node_name)) { + found_viewer_node = viewer_node; + break; } } + if (found_viewer_node == nullptr) { + return; + } + + /* Not only mark the viewer node as having side effects, but also all group nodes it is contained + * in. 
*/ + r_side_effect_nodes.add(compute_context_builder.hash(), + &find_viewer_lf_node(*found_viewer_node)); + compute_context_builder.pop(); + while (!compute_context_builder.is_empty()) { + r_side_effect_nodes.add(compute_context_builder.hash(), + &find_group_lf_node(*group_node_stack.pop())); + compute_context_builder.pop(); + } } -static void find_sockets_to_preview(NodesModifierData *nmd, - const ModifierEvalContext *ctx, - const DerivedNodeTree &tree, - Set<DSocket> &r_sockets_to_preview) +static void find_side_effect_nodes( + const NodesModifierData &nmd, + const ModifierEvalContext &ctx, + const bNodeTree &tree, + MultiValueMap<blender::ComputeContextHash, const lf::FunctionNode *> &r_side_effect_nodes) { - Main *bmain = DEG_get_bmain(ctx->depsgraph); + Main *bmain = DEG_get_bmain(ctx.depsgraph); /* Based on every visible spreadsheet context path, get a list of sockets that need to have their * intermediate geometries cached for display. */ Vector<SpaceSpreadsheet *> spreadsheets = find_spreadsheet_editors(bmain); for (SpaceSpreadsheet *sspreadsheet : spreadsheets) { - find_sockets_to_preview_for_spreadsheet(sspreadsheet, nmd, ctx, tree, r_sockets_to_preview); + find_side_effect_nodes_for_spreadsheet(*sspreadsheet, nmd, ctx, tree, r_side_effect_nodes); } } static void clear_runtime_data(NodesModifierData *nmd) { if (nmd->runtime_eval_log != nullptr) { - delete (geo_log::ModifierLog *)nmd->runtime_eval_log; + delete static_cast<GeoModifierLog *>(nmd->runtime_eval_log); nmd->runtime_eval_log = nullptr; } } @@ -1079,92 +1118,104 @@ static void store_output_attributes(GeometrySet &geometry, /** * Evaluate a node group to compute the output geometry. 
*/ -static GeometrySet compute_geometry(const DerivedNodeTree &tree, - Span<const bNode *> group_input_nodes, - const bNode &output_node, - GeometrySet input_geometry_set, - NodesModifierData *nmd, - const ModifierEvalContext *ctx) +static GeometrySet compute_geometry( + const bNodeTree &btree, + const blender::nodes::GeometryNodesLazyFunctionGraphInfo &lf_graph_info, + const bNode &output_node, + GeometrySet input_geometry_set, + NodesModifierData *nmd, + const ModifierEvalContext *ctx) { - blender::ResourceScope scope; - blender::LinearAllocator<> &allocator = scope.linear_allocator(); - blender::nodes::NodeMultiFunctions mf_by_node{tree}; + const blender::nodes::GeometryNodeLazyFunctionGraphMapping &mapping = lf_graph_info.mapping; + + Span<const lf::OutputSocket *> graph_inputs = mapping.group_input_sockets; + Vector<const lf::InputSocket *> graph_outputs; + for (const bNodeSocket *bsocket : output_node.input_sockets().drop_back(1)) { + const lf::InputSocket &socket = mapping.dummy_socket_map.lookup(bsocket)->as_input(); + graph_outputs.append(&socket); + } - Map<DOutputSocket, GMutablePointer> group_inputs; + Array<GMutablePointer> param_inputs(graph_inputs.size()); + Array<GMutablePointer> param_outputs(graph_outputs.size()); + Array<std::optional<lf::ValueUsage>> param_input_usages(graph_inputs.size()); + Array<lf::ValueUsage> param_output_usages(graph_outputs.size(), lf::ValueUsage::Used); + Array<bool> param_set_outputs(graph_outputs.size(), false); - const DTreeContext *root_context = &tree.root_context(); - for (const bNode *group_input_node : group_input_nodes) { - Span<const bNodeSocket *> group_input_sockets = group_input_node->output_sockets().drop_back( - 1); - if (group_input_sockets.is_empty()) { - continue; - } + blender::nodes::GeometryNodesLazyFunctionLogger lf_logger(lf_graph_info); + blender::nodes::GeometryNodesLazyFunctionSideEffectProvider lf_side_effect_provider( + lf_graph_info); - Span<const bNodeSocket *> remaining_input_sockets = 
group_input_sockets; + lf::GraphExecutor graph_executor{ + lf_graph_info.graph, graph_inputs, graph_outputs, &lf_logger, &lf_side_effect_provider}; - /* If the group expects a geometry as first input, use the geometry that has been passed to - * modifier. */ - const bNodeSocket *first_input_socket = group_input_sockets[0]; - if (first_input_socket->type == SOCK_GEOMETRY) { - GeometrySet *geometry_set_in = - allocator.construct<GeometrySet>(input_geometry_set).release(); - group_inputs.add_new({root_context, first_input_socket}, geometry_set_in); - remaining_input_sockets = remaining_input_sockets.drop_front(1); + blender::nodes::GeoNodesModifierData geo_nodes_modifier_data; + geo_nodes_modifier_data.depsgraph = ctx->depsgraph; + geo_nodes_modifier_data.self_object = ctx->object; + auto eval_log = std::make_unique<GeoModifierLog>(); + if (logging_enabled(ctx)) { + geo_nodes_modifier_data.eval_log = eval_log.get(); + } + MultiValueMap<blender::ComputeContextHash, const lf::FunctionNode *> r_side_effect_nodes; + find_side_effect_nodes(*nmd, *ctx, btree, r_side_effect_nodes); + geo_nodes_modifier_data.side_effect_nodes = &r_side_effect_nodes; + blender::nodes::GeoNodesLFUserData user_data; + user_data.modifier_data = &geo_nodes_modifier_data; + blender::bke::ModifierComputeContext modifier_compute_context{nullptr, nmd->modifier.name}; + user_data.compute_context = &modifier_compute_context; + + blender::LinearAllocator<> allocator; + Vector<GMutablePointer> inputs_to_destruct; + + int input_index; + LISTBASE_FOREACH_INDEX (bNodeSocket *, interface_socket, &btree.inputs, input_index) { + if (interface_socket->type == SOCK_GEOMETRY && input_index == 0) { + param_inputs[input_index] = &input_geometry_set; + continue; } - /* Initialize remaining group inputs. 
*/ - for (const bNodeSocket *socket : remaining_input_sockets) { - const CPPType &cpp_type = *socket->typeinfo->geometry_nodes_cpp_type; - void *value_in = allocator.allocate(cpp_type.size(), cpp_type.alignment()); - initialize_group_input(*nmd, *socket, value_in); - group_inputs.add_new({root_context, socket}, {cpp_type, value_in}); - } + const CPPType *type = interface_socket->typeinfo->geometry_nodes_cpp_type; + BLI_assert(type != nullptr); + void *value = allocator.allocate(type->size(), type->alignment()); + initialize_group_input(*nmd, *interface_socket, input_index, value); + param_inputs[input_index] = {type, value}; + inputs_to_destruct.append({type, value}); } - Vector<DInputSocket> group_outputs; - for (const bNodeSocket *socket_ref : output_node.input_sockets().drop_back(1)) { - group_outputs.append({root_context, socket_ref}); + for (const int i : graph_outputs.index_range()) { + const lf::InputSocket &socket = *graph_outputs[i]; + const CPPType &type = socket.type(); + void *buffer = allocator.allocate(type.size(), type.alignment()); + param_outputs[i] = {type, buffer}; } - std::optional<geo_log::GeoLogger> geo_logger; - - blender::modifiers::geometry_nodes::GeometryNodesEvaluationParams eval_params; - - if (logging_enabled(ctx)) { - Set<DSocket> preview_sockets; - find_sockets_to_preview(nmd, ctx, tree, preview_sockets); - eval_params.force_compute_sockets.extend(preview_sockets.begin(), preview_sockets.end()); - geo_logger.emplace(std::move(preview_sockets)); + lf::Context lf_context; + lf_context.storage = graph_executor.init_storage(allocator); + lf_context.user_data = &user_data; + lf::BasicParams lf_params{graph_executor, + param_inputs, + param_outputs, + param_input_usages, + param_output_usages, + param_set_outputs}; + graph_executor.execute(lf_params, lf_context); + graph_executor.destruct_storage(lf_context.storage); - geo_logger->log_input_geometry(input_geometry_set); + for (GMutablePointer &ptr : inputs_to_destruct) { + ptr.destruct(); } 
- /* Don't keep a reference to the input geometry components to avoid copies during evaluation. */ - input_geometry_set.clear(); - - eval_params.input_values = group_inputs; - eval_params.output_sockets = group_outputs; - eval_params.mf_by_node = &mf_by_node; - eval_params.modifier_ = nmd; - eval_params.depsgraph = ctx->depsgraph; - eval_params.self_object = ctx->object; - eval_params.geo_logger = geo_logger.has_value() ? &*geo_logger : nullptr; - blender::modifiers::geometry_nodes::evaluate_geometry_nodes(eval_params); + GeometrySet output_geometry_set = std::move(*static_cast<GeometrySet *>(param_outputs[0].get())); + store_output_attributes(output_geometry_set, *nmd, output_node, param_outputs); - GeometrySet output_geometry_set = std::move(*eval_params.r_output_values[0].get<GeometrySet>()); - - if (geo_logger.has_value()) { - geo_logger->log_output_geometry(output_geometry_set); - NodesModifierData *nmd_orig = (NodesModifierData *)BKE_modifier_get_original(ctx->object, - &nmd->modifier); - clear_runtime_data(nmd_orig); - nmd_orig->runtime_eval_log = new geo_log::ModifierLog(*geo_logger); + for (GMutablePointer &ptr : param_outputs) { + ptr.destruct(); } - store_output_attributes(output_geometry_set, *nmd, output_node, eval_params.r_output_values); - - for (GMutablePointer value : eval_params.r_output_values) { - value.destruct(); + if (logging_enabled(ctx)) { + NodesModifierData *nmd_orig = reinterpret_cast<NodesModifierData *>( + BKE_modifier_get_original(ctx->object, &nmd->modifier)); + delete static_cast<GeoModifierLog *>(nmd_orig->runtime_eval_log); + nmd_orig->runtime_eval_log = eval_log.release(); } return output_geometry_set; @@ -1225,27 +1276,18 @@ static void modifyGeometry(ModifierData *md, return; } + const bNodeTree &tree = *nmd->node_group; + tree.ensure_topology_cache(); check_property_socket_sync(ctx->object, md); - const bNodeTree &root_tree_ref = *nmd->node_group; - DerivedNodeTree tree{root_tree_ref}; - - if (tree.has_link_cycles()) { - 
BKE_modifier_set_error(ctx->object, md, "Node group has cycles"); - geometry_set.clear(); - return; - } - - Span<const bNode *> input_nodes = root_tree_ref.nodes_by_type("NodeGroupInput"); - Span<const bNode *> output_nodes = root_tree_ref.nodes_by_type("NodeGroupOutput"); - if (output_nodes.size() != 1) { - BKE_modifier_set_error(ctx->object, md, "Node group must have a single output node"); + const bNode *output_node = tree.group_output_node(); + if (output_node == nullptr) { + BKE_modifier_set_error(ctx->object, md, "Node group must have a group output node"); geometry_set.clear(); return; } - const bNode &output_node = *output_nodes[0]; - Span<const bNodeSocket *> group_outputs = output_node.input_sockets().drop_back(1); + Span<const bNodeSocket *> group_outputs = output_node->input_sockets().drop_back(1); if (group_outputs.is_empty()) { BKE_modifier_set_error(ctx->object, md, "Node group must have an output socket"); geometry_set.clear(); @@ -1259,6 +1301,14 @@ static void modifyGeometry(ModifierData *md, return; } + const blender::nodes::GeometryNodesLazyFunctionGraphInfo *lf_graph_info = + blender::nodes::ensure_geometry_nodes_lazy_function_graph(tree); + if (lf_graph_info == nullptr) { + BKE_modifier_set_error(ctx->object, md, "Cannot evaluate node group"); + geometry_set.clear(); + return; + } + bool use_orig_index_verts = false; bool use_orig_index_edges = false; bool use_orig_index_polys = false; @@ -1270,7 +1320,7 @@ static void modifyGeometry(ModifierData *md, } geometry_set = compute_geometry( - tree, input_nodes, output_node, std::move(geometry_set), nmd, ctx); + tree, *lf_graph_info, *output_node, std::move(geometry_set), nmd, ctx); if (geometry_set.has_mesh()) { /* Add #CD_ORIGINDEX layers if they don't exist already. 
This is required because the @@ -1342,6 +1392,16 @@ static NodesModifierData *get_modifier_data(Main &bmain, return reinterpret_cast<NodesModifierData *>(md); } +static GeoTreeLog *get_root_tree_log(const NodesModifierData &nmd) +{ + if (nmd.runtime_eval_log == nullptr) { + return nullptr; + } + GeoModifierLog &modifier_log = *static_cast<GeoModifierLog *>(nmd.runtime_eval_log); + blender::bke::ModifierComputeContext compute_context{nullptr, nmd.modifier.name}; + return &modifier_log.get_tree_log(compute_context.hash()); +} + static void attribute_search_update_fn( const bContext *C, void *arg, const char *str, uiSearchItems *items, const bool is_first) { @@ -1350,27 +1410,52 @@ static void attribute_search_update_fn( if (nmd == nullptr) { return; } - const geo_log::ModifierLog *modifier_log = static_cast<const geo_log::ModifierLog *>( - nmd->runtime_eval_log); - if (modifier_log == nullptr) { + if (nmd->node_group == nullptr) { return; } - const geo_log::GeometryValueLog *geometry_log = data.is_output ? - modifier_log->output_geometry_log() : - modifier_log->input_geometry_log(); - if (geometry_log == nullptr) { + GeoTreeLog *tree_log = get_root_tree_log(*nmd); + if (tree_log == nullptr) { return; } + tree_log->ensure_existing_attributes(); + nmd->node_group->ensure_topology_cache(); - Span<GeometryAttributeInfo> infos = geometry_log->attributes(); - - /* The shared attribute search code expects a span of pointers, so convert to that. 
*/ - Array<const GeometryAttributeInfo *> info_ptrs(infos.size()); - for (const int i : infos.index_range()) { - info_ptrs[i] = &infos[i]; + Vector<const bNodeSocket *> sockets_to_check; + if (data.is_output) { + for (const bNode *node : nmd->node_group->nodes_by_type("NodeGroupOutput")) { + for (const bNodeSocket *socket : node->input_sockets()) { + if (socket->type == SOCK_GEOMETRY) { + sockets_to_check.append(socket); + } + } + } + } + else { + for (const bNode *node : nmd->node_group->nodes_by_type("NodeGroupInput")) { + for (const bNodeSocket *socket : node->output_sockets()) { + if (socket->type == SOCK_GEOMETRY) { + sockets_to_check.append(socket); + } + } + } + } + Set<StringRef> names; + Vector<const GeometryAttributeInfo *> attributes; + for (const bNodeSocket *socket : sockets_to_check) { + const ValueLog *value_log = tree_log->find_socket_value_log(*socket); + if (value_log == nullptr) { + continue; + } + if (const GeometryInfoLog *geo_log = dynamic_cast<const GeometryInfoLog *>(value_log)) { + for (const GeometryAttributeInfo &attribute : geo_log->attributes) { + if (names.add(attribute.name)) { + attributes.append(&attribute); + } + } + } } blender::ui::attribute_search_add_items( - str, data.is_output, info_ptrs.as_span(), items, is_first); + str, data.is_output, attributes.as_span(), items, is_first); } static void attribute_search_exec_fn(bContext *C, void *data_v, void *item_v) @@ -1401,8 +1486,7 @@ static void add_attribute_search_button(const bContext &C, const bNodeSocket &socket, const bool is_output) { - const geo_log::ModifierLog *log = static_cast<geo_log::ModifierLog *>(nmd.runtime_eval_log); - if (log == nullptr) { + if (nmd.runtime_eval_log == nullptr) { uiItemR(layout, md_ptr, rna_path_attribute_name.c_str(), 0, "", ICON_NONE); return; } @@ -1627,15 +1711,14 @@ static void panel_draw(const bContext *C, Panel *panel) } /* Draw node warnings. 
*/ - if (nmd->runtime_eval_log != nullptr) { - const geo_log::ModifierLog &log = *static_cast<geo_log::ModifierLog *>(nmd->runtime_eval_log); - log.foreach_node_log([&](const geo_log::NodeLog &node_log) { - for (const geo_log::NodeWarning &warning : node_log.warnings()) { - if (warning.type != geo_log::NodeWarningType::Info) { - uiItemL(layout, warning.message.c_str(), ICON_ERROR); - } + GeoTreeLog *tree_log = get_root_tree_log(*nmd); + if (tree_log != nullptr) { + tree_log->ensure_node_warnings(); + for (const NodeWarning &warning : tree_log->all_warnings) { + if (warning.type != NodeWarningType::Info) { + uiItemL(layout, warning.message.c_str(), ICON_ERROR); } - }); + } } modifier_panel_end(layout, ptr); @@ -1672,17 +1755,14 @@ static void internal_dependencies_panel_draw(const bContext *UNUSED(C), Panel *p PointerRNA *ptr = modifier_panel_get_property_pointers(panel, nullptr); NodesModifierData *nmd = static_cast<NodesModifierData *>(ptr->data); - if (nmd->runtime_eval_log == nullptr) { + GeoTreeLog *tree_log = get_root_tree_log(*nmd); + if (tree_log == nullptr) { return; } - const geo_log::ModifierLog &log = *static_cast<geo_log::ModifierLog *>(nmd->runtime_eval_log); - Map<std::string, eNamedAttrUsage> usage_by_attribute; - log.foreach_node_log([&](const geo_log::NodeLog &node_log) { - for (const geo_log::UsedNamedAttribute &used_attribute : node_log.used_named_attributes()) { - usage_by_attribute.lookup_or_add_as(used_attribute.name, - used_attribute.usage) |= used_attribute.usage; - } - }); + + tree_log->ensure_used_named_attributes(); + const Map<std::string, NamedAttributeUsage> &usage_by_attribute = + tree_log->used_named_attributes; if (usage_by_attribute.is_empty()) { uiItemL(layout, IFACE_("No named attributes used"), ICON_INFO); @@ -1691,7 +1771,7 @@ static void internal_dependencies_panel_draw(const bContext *UNUSED(C), Panel *p struct NameWithUsage { StringRefNull name; - eNamedAttrUsage usage; + NamedAttributeUsage usage; }; Vector<NameWithUsage> 
sorted_used_attribute; @@ -1706,20 +1786,20 @@ static void internal_dependencies_panel_draw(const bContext *UNUSED(C), Panel *p for (const NameWithUsage &attribute : sorted_used_attribute) { const StringRefNull attribute_name = attribute.name; - const eNamedAttrUsage usage = attribute.usage; + const NamedAttributeUsage usage = attribute.usage; /* #uiLayoutRowWithHeading doesn't seem to work in this case. */ uiLayout *split = uiLayoutSplit(layout, 0.4f, false); std::stringstream ss; Vector<std::string> usages; - if ((usage & eNamedAttrUsage::Read) != eNamedAttrUsage::None) { + if ((usage & NamedAttributeUsage::Read) != NamedAttributeUsage::None) { usages.append(TIP_("Read")); } - if ((usage & eNamedAttrUsage::Write) != eNamedAttrUsage::None) { + if ((usage & NamedAttributeUsage::Write) != NamedAttributeUsage::None) { usages.append(TIP_("Write")); } - if ((usage & eNamedAttrUsage::Remove) != eNamedAttrUsage::None) { + if ((usage & NamedAttributeUsage::Remove) != NamedAttributeUsage::None) { usages.append(TIP_("Remove")); } for (const int i : usages.index_range()) { diff --git a/source/blender/modifiers/intern/MOD_nodes_evaluator.cc b/source/blender/modifiers/intern/MOD_nodes_evaluator.cc deleted file mode 100644 index dd7c87ca499..00000000000 --- a/source/blender/modifiers/intern/MOD_nodes_evaluator.cc +++ /dev/null @@ -1,1929 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ - -#include "MOD_nodes_evaluator.hh" - -#include "BKE_node.h" -#include "BKE_type_conversions.hh" - -#include "NOD_geometry_exec.hh" -#include "NOD_socket_declarations.hh" - -#include "DEG_depsgraph_query.h" - -#include "FN_field.hh" -#include "FN_field_cpp_type.hh" -#include "FN_multi_function.hh" - -#include "BLT_translation.h" - -#include "BLI_enumerable_thread_specific.hh" -#include "BLI_generic_value_map.hh" -#include "BLI_stack.hh" -#include "BLI_task.h" -#include "BLI_task.hh" -#include "BLI_vector_set.hh" - -#include <chrono> - -namespace blender::modifiers::geometry_nodes { - 
-using fn::Field; -using fn::GField; -using fn::ValueOrField; -using fn::ValueOrFieldCPPType; -using nodes::GeoNodeExecParams; -using namespace fn::multi_function_types; - -enum class ValueUsage : uint8_t { - /* The value is definitely used. */ - Required, - /* The value may be used. */ - Maybe, - /* The value will definitely not be used. */ - Unused, -}; - -struct SingleInputValue { - /** - * Points either to null or to a value of the type of input. - */ - void *value = nullptr; -}; - -struct MultiInputValue { - /** - * Ordered sockets connected to this multi-input. - */ - Vector<DSocket> origins; - /** - * A value for every origin socket. The order is determined by #origins. - * Note, the same origin can occur multiple times. However, it is guaranteed that values coming - * from the same origin have the same value (the pointer is different, but they point to values - * that would compare equal). - */ - Vector<void *> values; - /** - * Number of non-null values. - */ - int provided_value_count = 0; - - bool all_values_available() const - { - return this->missing_values() == 0; - } - - int missing_values() const - { - return this->values.size() - this->provided_value_count; - } - - void add_value(const DSocket origin, void *value) - { - const int index = this->find_available_index(origin); - this->values[index] = value; - this->provided_value_count++; - } - - private: - int find_available_index(DSocket origin) const - { - for (const int i : origins.index_range()) { - if (values[i] != nullptr) { - continue; - } - if (origins[i] != origin) { - continue; - } - return i; - } - BLI_assert_unreachable(); - return -1; - } -}; - -struct InputState { - - /** - * Type of the socket. If this is null, the socket should just be ignored. - */ - const CPPType *type = nullptr; - - /** - * Value of this input socket. By default, the value is empty. When other nodes are done - * computing their outputs, the computed values will be forwarded to linked input sockets. 
- * The value will then live here until it is consumed by the node or it was found that the value - * is not needed anymore. - * Whether the `single` or `multi` value is used depends on the socket. - */ - union { - SingleInputValue *single; - MultiInputValue *multi; - } value; - - /** - * How the node intends to use this input. By default all inputs may be used. Based on which - * outputs are used, a node can tell the evaluator that an input will definitely be used or is - * never used. This allows the evaluator to free values early, avoid copies and other unnecessary - * computations. - */ - ValueUsage usage = ValueUsage::Maybe; - - /** - * True when this input is/was used for an execution. While a node is running, only the inputs - * that have this set to true are allowed to be used. This makes sure that inputs created while - * the node is running correctly trigger the node to run again. Furthermore, it gives the node a - * consistent view of which inputs are available that does not change unexpectedly. - * - * While the node is running, this can be checked without a lock, because no one is writing to - * it. If this is true, the value can be read without a lock as well, because the value is not - * changed by others anymore. - */ - bool was_ready_for_execution = false; - - /** - * True when this input has to be computed for logging/debugging purposes, regardless of whether - * it is needed for some output. - */ - bool force_compute = false; -}; - -struct OutputState { - /** - * If this output has been computed and forwarded already. If this is true, the value is not - * computed/forwarded again. - */ - bool has_been_computed = false; - - /** - * Keeps track of how the output value is used. If a connected input becomes required, this - * output has to become required as well. The output becomes ignored when it has zero potential - * users that are counted below. 
- */ - ValueUsage output_usage = ValueUsage::Maybe; - - /** - * This is a copy of `output_usage` that is done right before node execution starts. This is - * done so that the node gets a consistent view of what outputs are used, even when this changes - * while the node is running (the node might be reevaluated in that case). - * - * While the node is running, this can be checked without a lock, because no one is writing to - * it. - */ - ValueUsage output_usage_for_execution = ValueUsage::Maybe; - - /** - * Counts how many times the value from this output might be used. If this number reaches zero, - * the output is not needed anymore. - */ - int potential_users = 0; -}; - -enum class NodeScheduleState { - /** - * Default state of every node. - */ - NotScheduled, - /** - * The node has been added to the task group and will be executed by it in the future. - */ - Scheduled, - /** - * The node is currently running. - */ - Running, - /** - * The node is running and has been rescheduled while running. In this case the node will run - * again. However, we don't add it to the task group immediately, because then the node might run - * twice at the same time, which is not allowed. Instead, once the node is done running, it will - * reschedule itself. - */ - RunningAndRescheduled, -}; - -struct NodeState { - /** - * Needs to be locked when any data in this state is accessed that is not explicitly marked as - * otherwise. - */ - std::mutex mutex; - - /** - * States of the individual input and output sockets. One can index into these arrays without - * locking. However, to access the data inside a lock is generally necessary. - * - * These spans have to be indexed with the socket index. Unavailable sockets have a state as - * well. Maybe we can handle unavailable sockets differently in Blender in general, so I did not - * want to add complexity around it here. 
- */ - MutableSpan<InputState> inputs; - MutableSpan<OutputState> outputs; - - /** - * Most nodes have inputs that are always required. Those have special handling to avoid an extra - * call to the node execution function. - */ - bool non_lazy_inputs_handled = false; - - /** - * Used to check that nodes that don't support laziness do not run more than once. - */ - bool has_been_executed = false; - - /** - * Becomes true when the node will never be executed again and its inputs are destructed. - * Generally, a node has finished once all of its outputs with (potential) users have been - * computed. - */ - bool node_has_finished = false; - - /** - * Counts the number of values that still have to be forwarded to this node until it should run - * again. It counts values from a multi input socket separately. - * This is used as an optimization so that nodes are not scheduled unnecessarily in many cases. - */ - int missing_required_inputs = 0; - - /** - * A node is always in one specific schedule state. This helps to ensure that the same node does - * not run twice at the same time accidentally. - */ - NodeScheduleState schedule_state = NodeScheduleState::NotScheduled; -}; - -/** - * Container for a node and its state. Packing them into a single struct allows the use of - * `VectorSet` instead of a `Map` for `node_states_` which simplifies parallel loops over all - * states. - * - * Equality operators and a hash function for `DNode` are provided so that one can lookup this type - * in `node_states_` just with a `DNode`. - */ -struct NodeWithState { - DNode node; - /* Store a pointer instead of `NodeState` directly to keep it small and movable. 
*/ - NodeState *state = nullptr; - - friend bool operator==(const NodeWithState &a, const NodeWithState &b) - { - return a.node == b.node; - } - - friend bool operator==(const NodeWithState &a, const DNode &b) - { - return a.node == b; - } - - friend bool operator==(const DNode &a, const NodeWithState &b) - { - return a == b.node; - } - - uint64_t hash() const - { - return node.hash(); - } - - static uint64_t hash_as(const DNode &node) - { - return node.hash(); - } -}; - -class GeometryNodesEvaluator; - -/** - * Utility class that wraps a node whose state is locked. Having this is a separate class is useful - * because it allows methods to communicate that they expect the node to be locked. - */ -class LockedNode : NonCopyable, NonMovable { - public: - /** - * This is the node that is currently locked. - */ - const DNode node; - NodeState &node_state; - - /** - * Used to delay notifying (and therefore locking) other nodes until the current node is not - * locked anymore. This might not be strictly necessary to avoid deadlocks in the current code, - * but it is a good measure to avoid accidentally adding a deadlock later on. By not locking - * more than one node per thread at a time, deadlocks are avoided. - * - * The notifications will be send right after the node is not locked anymore. - */ - Vector<DOutputSocket> delayed_required_outputs; - Vector<DOutputSocket> delayed_unused_outputs; - Vector<DNode> delayed_scheduled_nodes; - - LockedNode(const DNode node, NodeState &node_state) : node(node), node_state(node_state) - { - } -}; - -static const CPPType *get_socket_cpp_type(const bNodeSocket &socket) -{ - const bNodeSocketType *typeinfo = socket.typeinfo; - if (typeinfo->geometry_nodes_cpp_type == nullptr) { - return nullptr; - } - const CPPType *type = typeinfo->geometry_nodes_cpp_type; - if (type == nullptr) { - return nullptr; - } - /* The evaluator only supports types that have special member functions. 
*/ - if (!type->has_special_member_functions()) { - return nullptr; - } - return type; -} - -static const CPPType *get_socket_cpp_type(const DSocket socket) -{ - return get_socket_cpp_type(*socket); -} - -/** - * \note This is not supposed to be a long term solution. Eventually we want that nodes can - * specify more complex defaults (other than just single values) in their socket declarations. - */ -static bool get_implicit_socket_input(const bNodeSocket &socket, void *r_value) -{ - const bNode &node = socket.owner_node(); - const nodes::NodeDeclaration *node_declaration = node.runtime->declaration; - if (node_declaration == nullptr) { - return false; - } - const nodes::SocketDeclaration &socket_declaration = *node_declaration->inputs()[socket.index()]; - if (socket_declaration.input_field_type() == nodes::InputSocketFieldType::Implicit) { - const bNode &bnode = socket.owner_node(); - if (socket.typeinfo->type == SOCK_VECTOR) { - if (bnode.type == GEO_NODE_SET_CURVE_HANDLES) { - StringRef side = ((NodeGeometrySetCurveHandlePositions *)bnode.storage)->mode == - GEO_NODE_CURVE_HANDLE_LEFT ? 
- "handle_left" : - "handle_right"; - new (r_value) ValueOrField<float3>(bke::AttributeFieldInput::Create<float3>(side)); - return true; - } - if (bnode.type == GEO_NODE_EXTRUDE_MESH) { - new (r_value) - ValueOrField<float3>(Field<float3>(std::make_shared<bke::NormalFieldInput>())); - return true; - } - new (r_value) ValueOrField<float3>(bke::AttributeFieldInput::Create<float3>("position")); - return true; - } - if (socket.typeinfo->type == SOCK_INT) { - if (ELEM(bnode.type, FN_NODE_RANDOM_VALUE, GEO_NODE_INSTANCE_ON_POINTS)) { - new (r_value) - ValueOrField<int>(Field<int>(std::make_shared<bke::IDAttributeFieldInput>())); - return true; - } - new (r_value) ValueOrField<int>(Field<int>(std::make_shared<fn::IndexFieldInput>())); - return true; - } - } - return false; -} - -static void get_socket_value(const bNodeSocket &socket, void *r_value) -{ - if (get_implicit_socket_input(socket, r_value)) { - return; - } - - const bNodeSocketType *typeinfo = socket.typeinfo; - typeinfo->get_geometry_nodes_cpp_value(socket, r_value); -} - -static bool node_supports_laziness(const DNode node) -{ - return node->typeinfo->geometry_node_execute_supports_laziness; -} - -struct NodeTaskRunState { - /** The node that should be run on the same thread after the current node finished. */ - DNode next_node_to_run; -}; - -/** Implements the callbacks that might be called when a node is executed. 
*/ -class NodeParamsProvider : public nodes::GeoNodeExecParamsProvider { - private: - GeometryNodesEvaluator &evaluator_; - NodeState &node_state_; - NodeTaskRunState *run_state_; - - public: - NodeParamsProvider(GeometryNodesEvaluator &evaluator, - DNode dnode, - NodeState &node_state, - NodeTaskRunState *run_state); - - bool can_get_input(StringRef identifier) const override; - bool can_set_output(StringRef identifier) const override; - GMutablePointer extract_input(StringRef identifier) override; - Vector<GMutablePointer> extract_multi_input(StringRef identifier) override; - GPointer get_input(StringRef identifier) const override; - GMutablePointer alloc_output_value(const CPPType &type) override; - void set_output(StringRef identifier, GMutablePointer value) override; - void set_input_unused(StringRef identifier) override; - bool output_is_required(StringRef identifier) const override; - - bool lazy_require_input(StringRef identifier) override; - bool lazy_output_is_required(StringRef identifier) const override; - - void set_default_remaining_outputs() override; -}; - -class GeometryNodesEvaluator { - private: - /** - * This allocator lives on after the evaluator has been destructed. Therefore outputs of the - * entire evaluator should be allocated here. - */ - LinearAllocator<> &outer_allocator_; - /** - * A local linear allocator for each thread. Only use this for values that do not need to live - * longer than the lifetime of the evaluator itself. Considerations for the future: - * - We could use an allocator that can free here, some temporary values don't live long. - * - If we ever run into false sharing bottlenecks, we could use local allocators that allocate - * on cache line boundaries. Note, just because a value is allocated in one specific thread, - * does not mean that it will only be used by that thread. 
- */ - threading::EnumerableThreadSpecific<LinearAllocator<>> local_allocators_; - - /** - * Every node that is reachable from the output gets its own state. Once all states have been - * constructed, this map can be used for lookups from multiple threads. - */ - VectorSet<NodeWithState> node_states_; - - /** - * Contains all the tasks for the nodes that are currently scheduled. - */ - TaskPool *task_pool_ = nullptr; - - GeometryNodesEvaluationParams ¶ms_; - const blender::bke::DataTypeConversions &conversions_; - - friend NodeParamsProvider; - - public: - GeometryNodesEvaluator(GeometryNodesEvaluationParams ¶ms) - : outer_allocator_(params.allocator), - params_(params), - conversions_(blender::bke::get_implicit_type_conversions()) - { - } - - void execute() - { - task_pool_ = BLI_task_pool_create(this, TASK_PRIORITY_HIGH); - - this->create_states_for_reachable_nodes(); - this->forward_group_inputs(); - this->schedule_initial_nodes(); - - /* This runs until all initially requested inputs have been computed. */ - BLI_task_pool_work_and_wait(task_pool_); - BLI_task_pool_free(task_pool_); - - this->extract_group_outputs(); - this->destruct_node_states(); - } - - void create_states_for_reachable_nodes() - { - /* This does a depth first search for all the nodes that are reachable from the group - * outputs. This finds all nodes that are relevant. */ - Stack<DNode> nodes_to_check; - /* Start at the output sockets. */ - for (const DInputSocket &socket : params_.output_sockets) { - nodes_to_check.push(socket.node()); - } - for (const DSocket &socket : params_.force_compute_sockets) { - nodes_to_check.push(socket.node()); - } - /* Use the local allocator because the states do not need to outlive the evaluator. */ - LinearAllocator<> &allocator = local_allocators_.local(); - while (!nodes_to_check.is_empty()) { - const DNode node = nodes_to_check.pop(); - if (node_states_.contains_as(node)) { - /* This node has been handled already. 
*/ - continue; - } - /* Create a new state for the node. */ - NodeState &node_state = *allocator.construct<NodeState>().release(); - node_states_.add_new({node, &node_state}); - - /* Push all linked origins on the stack. */ - for (const bNodeSocket *input : node->input_sockets()) { - const DInputSocket dinput{node.context(), input}; - dinput.foreach_origin_socket( - [&](const DSocket origin) { nodes_to_check.push(origin.node()); }); - } - } - - /* Initialize the more complex parts of the node states in parallel. At this point no new - * node states are added anymore, so it is safe to lookup states from `node_states_` from - * multiple threads. */ - threading::parallel_for( - IndexRange(node_states_.size()), 50, [&, this](const IndexRange range) { - LinearAllocator<> &allocator = this->local_allocators_.local(); - for (const NodeWithState &item : node_states_.as_span().slice(range)) { - this->initialize_node_state(item.node, *item.state, allocator); - } - }); - - /* Mark input sockets that have to be computed. */ - for (const DSocket &socket : params_.force_compute_sockets) { - NodeState &node_state = *node_states_.lookup_key_as(socket.node()).state; - if (socket->is_input()) { - node_state.inputs[socket->index()].force_compute = true; - } - } - } - - void initialize_node_state(const DNode node, NodeState &node_state, LinearAllocator<> &allocator) - { - /* Construct arrays of the correct size. */ - node_state.inputs = allocator.construct_array<InputState>(node->input_sockets().size()); - node_state.outputs = allocator.construct_array<OutputState>(node->output_sockets().size()); - - /* Initialize input states. */ - for (const int i : node->input_sockets().index_range()) { - InputState &input_state = node_state.inputs[i]; - const DInputSocket socket = node.input(i); - if (!socket->is_available()) { - /* Unavailable sockets should never be used. 
*/ - input_state.type = nullptr; - input_state.usage = ValueUsage::Unused; - continue; - } - const CPPType *type = get_socket_cpp_type(socket); - input_state.type = type; - if (type == nullptr) { - /* This is not a known data socket, it shouldn't be used. */ - input_state.usage = ValueUsage::Unused; - continue; - } - /* Construct the correct struct that can hold the input(s). */ - if (socket->is_multi_input()) { - input_state.value.multi = allocator.construct<MultiInputValue>().release(); - MultiInputValue &multi_value = *input_state.value.multi; - /* Count how many values should be added until the socket is complete. */ - socket.foreach_origin_socket([&](DSocket origin) { multi_value.origins.append(origin); }); - /* If no links are connected, we do read the value from socket itself. */ - if (multi_value.origins.is_empty()) { - multi_value.origins.append(socket); - } - multi_value.values.resize(multi_value.origins.size(), nullptr); - } - else { - input_state.value.single = allocator.construct<SingleInputValue>().release(); - } - } - /* Initialize output states. */ - for (const int i : node->output_sockets().index_range()) { - OutputState &output_state = node_state.outputs[i]; - const DOutputSocket socket = node.output(i); - if (!socket->is_available()) { - /* Unavailable outputs should never be used. */ - output_state.output_usage = ValueUsage::Unused; - continue; - } - const CPPType *type = get_socket_cpp_type(socket); - if (type == nullptr) { - /* Non data sockets should never be used. */ - output_state.output_usage = ValueUsage::Unused; - continue; - } - /* Count the number of potential users for this socket. */ - socket.foreach_target_socket( - [&, this](const DInputSocket target_socket, - const DOutputSocket::TargetSocketPathInfo &UNUSED(path_info)) { - const DNode target_node = target_socket.node(); - if (!this->node_states_.contains_as(target_node)) { - /* The target node is not computed because it is not computed to the output. 
*/ - return; - } - output_state.potential_users += 1; - }); - if (output_state.potential_users == 0) { - /* If it does not have any potential users, it is unused. It might become required again in - * `schedule_initial_nodes`. */ - output_state.output_usage = ValueUsage::Unused; - } - } - } - - void destruct_node_states() - { - threading::parallel_for( - IndexRange(node_states_.size()), 50, [&, this](const IndexRange range) { - for (const NodeWithState &item : node_states_.as_span().slice(range)) { - this->destruct_node_state(item.node, *item.state); - } - }); - } - - void destruct_node_state(const DNode node, NodeState &node_state) - { - /* Need to destruct stuff manually, because it's allocated by a custom allocator. */ - for (const int i : node->input_sockets().index_range()) { - InputState &input_state = node_state.inputs[i]; - if (input_state.type == nullptr) { - continue; - } - const bNodeSocket &bsocket = node->input_socket(i); - if (bsocket.is_multi_input()) { - MultiInputValue &multi_value = *input_state.value.multi; - for (void *value : multi_value.values) { - if (value != nullptr) { - input_state.type->destruct(value); - } - } - multi_value.~MultiInputValue(); - } - else { - SingleInputValue &single_value = *input_state.value.single; - void *value = single_value.value; - if (value != nullptr) { - input_state.type->destruct(value); - } - single_value.~SingleInputValue(); - } - } - - destruct_n(node_state.inputs.data(), node_state.inputs.size()); - destruct_n(node_state.outputs.data(), node_state.outputs.size()); - - node_state.~NodeState(); - } - - void forward_group_inputs() - { - for (auto &&item : params_.input_values.items()) { - const DOutputSocket socket = item.key; - GMutablePointer value = item.value; - - const DNode node = socket.node(); - if (!node_states_.contains_as(node)) { - /* The socket is not connected to any output. 
*/ - this->log_socket_value({socket}, value); - value.destruct(); - continue; - } - this->forward_output(socket, value, nullptr); - } - } - - void schedule_initial_nodes() - { - for (const DInputSocket &socket : params_.output_sockets) { - const DNode node = socket.node(); - NodeState &node_state = this->get_node_state(node); - this->with_locked_node(node, node_state, nullptr, [&](LockedNode &locked_node) { - /* Setting an input as required will schedule any linked node. */ - this->set_input_required(locked_node, socket); - }); - } - for (const DSocket socket : params_.force_compute_sockets) { - const DNode node = socket.node(); - NodeState &node_state = this->get_node_state(node); - this->with_locked_node(node, node_state, nullptr, [&](LockedNode &locked_node) { - if (socket->is_input()) { - this->set_input_required(locked_node, DInputSocket(socket)); - } - else { - OutputState &output_state = node_state.outputs[socket->index()]; - output_state.output_usage = ValueUsage::Required; - this->schedule_node(locked_node); - } - }); - } - } - - void schedule_node(LockedNode &locked_node) - { - switch (locked_node.node_state.schedule_state) { - case NodeScheduleState::NotScheduled: { - /* The node will be scheduled once it is not locked anymore. We could schedule the node - * right here, but that would result in a deadlock if the task pool decides to run the task - * immediately (this only happens when Blender is started with a single thread). */ - locked_node.node_state.schedule_state = NodeScheduleState::Scheduled; - locked_node.delayed_scheduled_nodes.append(locked_node.node); - break; - } - case NodeScheduleState::Scheduled: { - /* Scheduled already, nothing to do. */ - break; - } - case NodeScheduleState::Running: { - /* Reschedule node while it is running. - * The node will reschedule itself when it is done. 
*/ - locked_node.node_state.schedule_state = NodeScheduleState::RunningAndRescheduled; - break; - } - case NodeScheduleState::RunningAndRescheduled: { - /* Scheduled already, nothing to do. */ - break; - } - } - } - - static void run_node_from_task_pool(TaskPool *task_pool, void *task_data) - { - void *user_data = BLI_task_pool_user_data(task_pool); - GeometryNodesEvaluator &evaluator = *(GeometryNodesEvaluator *)user_data; - const NodeWithState *root_node_with_state = (const NodeWithState *)task_data; - - /* First, the node provided by the task pool is executed. During the execution other nodes - * might be scheduled. One of those nodes is not added to the task pool but is executed in the - * loop below directly. This has two main benefits: - * - Fewer round trips through the task pool which add threading overhead. - * - Helps with cpu cache efficiency, because a thread is more likely to process data that it - * has processed shortly before. - */ - DNode next_node_to_run = root_node_with_state->node; - while (next_node_to_run) { - NodeTaskRunState run_state; - evaluator.node_task_run(next_node_to_run, &run_state); - next_node_to_run = run_state.next_node_to_run; - } - } - - void node_task_run(const DNode node, NodeTaskRunState *run_state) - { - /* These nodes are sometimes scheduled. We could also check for them in other places, but - * it's the easiest to do it here. */ - if (ELEM(node->type, NODE_GROUP_INPUT, NODE_GROUP_OUTPUT)) { - return; - } - - NodeState &node_state = *node_states_.lookup_key_as(node).state; - - const bool do_execute_node = this->node_task_preprocessing(node, node_state, run_state); - - /* Only execute the node if all prerequisites are met. There has to be an output that is - * required and all required inputs have to be provided already. 
*/ - if (do_execute_node) { - this->execute_node(node, node_state, run_state); - } - - this->node_task_postprocessing(node, node_state, do_execute_node, run_state); - } - - bool node_task_preprocessing(const DNode node, - NodeState &node_state, - NodeTaskRunState *run_state) - { - bool do_execute_node = false; - this->with_locked_node(node, node_state, run_state, [&](LockedNode &locked_node) { - BLI_assert(node_state.schedule_state == NodeScheduleState::Scheduled); - node_state.schedule_state = NodeScheduleState::Running; - - /* Early return if the node has finished already. */ - if (locked_node.node_state.node_has_finished) { - return; - } - /* Prepare outputs and check if actually any new outputs have to be computed. */ - if (!this->prepare_node_outputs_for_execution(locked_node)) { - return; - } - /* Initialize inputs that don't support laziness. This is done after at least one output is - * required and before we check that all required inputs are provided. This reduces the - * number of "round-trips" through the task pool by one for most nodes. */ - if (!node_state.non_lazy_inputs_handled) { - this->require_non_lazy_inputs(locked_node); - node_state.non_lazy_inputs_handled = true; - } - /* Prepare inputs and check if all required inputs are provided. */ - if (!this->prepare_node_inputs_for_execution(locked_node)) { - return; - } - do_execute_node = true; - }); - return do_execute_node; - } - - /* A node is finished when it has computed all outputs that may be used have been computed and - * when no input is still forced to be computed. */ - bool finish_node_if_possible(LockedNode &locked_node) - { - if (locked_node.node_state.node_has_finished) { - /* Early return in case this node is known to have finished already. */ - return true; - } - - /* Check if there is any output that might be used but has not been computed yet. 
*/ - for (OutputState &output_state : locked_node.node_state.outputs) { - if (output_state.has_been_computed) { - continue; - } - if (output_state.output_usage != ValueUsage::Unused) { - return false; - } - } - - /* Check if there is an input that still has to be computed. */ - for (InputState &input_state : locked_node.node_state.inputs) { - if (input_state.force_compute) { - if (!input_state.was_ready_for_execution) { - return false; - } - } - } - - /* If there are no remaining outputs, all the inputs can be destructed and/or can become - * unused. This can also trigger a chain reaction where nodes to the left become finished - * too. */ - for (const int i : locked_node.node->input_sockets().index_range()) { - const DInputSocket socket = locked_node.node.input(i); - InputState &input_state = locked_node.node_state.inputs[i]; - if (input_state.usage == ValueUsage::Maybe) { - this->set_input_unused(locked_node, socket); - } - else if (input_state.usage == ValueUsage::Required) { - /* The value was required, so it cannot become unused. However, we can destruct the - * value. */ - this->destruct_input_value_if_exists(locked_node, socket); - } - } - locked_node.node_state.node_has_finished = true; - return true; - } - - bool prepare_node_outputs_for_execution(LockedNode &locked_node) - { - bool execution_is_necessary = false; - for (OutputState &output_state : locked_node.node_state.outputs) { - /* Update the output usage for execution to the latest value. */ - output_state.output_usage_for_execution = output_state.output_usage; - if (!output_state.has_been_computed) { - if (output_state.output_usage == ValueUsage::Required) { - /* Only evaluate when there is an output that is required but has not been computed. 
*/ - execution_is_necessary = true; - } - } - } - return execution_is_necessary; - } - - void require_non_lazy_inputs(LockedNode &locked_node) - { - this->foreach_non_lazy_input(locked_node, [&](const DInputSocket socket) { - this->set_input_required(locked_node, socket); - }); - } - - void foreach_non_lazy_input(LockedNode &locked_node, FunctionRef<void(DInputSocket socket)> fn) - { - if (node_supports_laziness(locked_node.node)) { - /* In the future only some of the inputs may support laziness. */ - return; - } - /* Nodes that don't support laziness require all inputs. */ - for (const int i : locked_node.node->input_sockets().index_range()) { - InputState &input_state = locked_node.node_state.inputs[i]; - if (input_state.type == nullptr) { - /* Ignore unavailable/non-data sockets. */ - continue; - } - fn(locked_node.node.input(i)); - } - } - - /** - * Checks if requested inputs are available and "marks" all the inputs that are available - * during the node execution. Inputs that are provided after this function ends but before the - * node is executed, cannot be read by the node in the execution (note that this only affects - * nodes that support lazy inputs). - */ - bool prepare_node_inputs_for_execution(LockedNode &locked_node) - { - for (const int i : locked_node.node_state.inputs.index_range()) { - InputState &input_state = locked_node.node_state.inputs[i]; - if (input_state.type == nullptr) { - /* Ignore unavailable and non-data sockets. */ - continue; - } - const DInputSocket socket = locked_node.node.input(i); - const bool is_required = input_state.usage == ValueUsage::Required; - - /* No need to check this socket again. */ - if (input_state.was_ready_for_execution) { - continue; - } - - if (socket->is_multi_input()) { - MultiInputValue &multi_value = *input_state.value.multi; - /* Checks if all the linked sockets have been provided already. 
*/ - if (multi_value.all_values_available()) { - input_state.was_ready_for_execution = true; - } - else if (is_required) { - /* The input is required but is not fully provided yet. Therefore the node cannot be - * executed yet. */ - return false; - } - } - else { - SingleInputValue &single_value = *input_state.value.single; - if (single_value.value != nullptr) { - input_state.was_ready_for_execution = true; - } - else if (is_required) { - /* The input is required but has not been provided yet. Therefore the node cannot be - * executed yet. */ - return false; - } - } - } - /* All required inputs have been provided. */ - return true; - } - - /** - * Actually execute the node. All the required inputs are available and at least one output is - * required. - */ - void execute_node(const DNode node, NodeState &node_state, NodeTaskRunState *run_state) - { - const bNode &bnode = *node; - - if (node_state.has_been_executed) { - if (!node_supports_laziness(node)) { - /* Nodes that don't support laziness must not be executed more than once. */ - BLI_assert_unreachable(); - } - } - node_state.has_been_executed = true; - - /* Use the geometry node execute callback if it exists. */ - if (bnode.typeinfo->geometry_node_execute != nullptr) { - this->execute_geometry_node(node, node_state, run_state); - return; - } - - /* Use the multi-function implementation if it exists. 
*/ - const nodes::NodeMultiFunctions::Item &fn_item = params_.mf_by_node->try_get(node); - if (fn_item.fn != nullptr) { - this->execute_multi_function_node(node, fn_item, node_state, run_state); - return; - } - - this->execute_unknown_node(node, node_state, run_state); - } - - void execute_geometry_node(const DNode node, NodeState &node_state, NodeTaskRunState *run_state) - { - using Clock = std::chrono::steady_clock; - const bNode &bnode = *node; - - NodeParamsProvider params_provider{*this, node, node_state, run_state}; - GeoNodeExecParams params{params_provider}; - Clock::time_point begin = Clock::now(); - bnode.typeinfo->geometry_node_execute(params); - Clock::time_point end = Clock::now(); - const std::chrono::microseconds duration = - std::chrono::duration_cast<std::chrono::microseconds>(end - begin); - if (params_.geo_logger != nullptr) { - params_.geo_logger->local().log_execution_time(node, duration); - } - } - - void execute_multi_function_node(const DNode node, - const nodes::NodeMultiFunctions::Item &fn_item, - NodeState &node_state, - NodeTaskRunState *run_state) - { - LinearAllocator<> &allocator = local_allocators_.local(); - - bool any_input_is_field = false; - Vector<const void *, 16> input_values; - Vector<const ValueOrFieldCPPType *, 16> input_types; - for (const int i : node->input_sockets().index_range()) { - const bNodeSocket &bsocket = node->input_socket(i); - if (!bsocket.is_available()) { - continue; - } - BLI_assert(!bsocket.is_multi_input()); - InputState &input_state = node_state.inputs[i]; - BLI_assert(input_state.was_ready_for_execution); - SingleInputValue &single_value = *input_state.value.single; - BLI_assert(single_value.value != nullptr); - const ValueOrFieldCPPType &field_cpp_type = static_cast<const ValueOrFieldCPPType &>( - *input_state.type); - input_values.append(single_value.value); - input_types.append(&field_cpp_type); - if (field_cpp_type.is_field(single_value.value)) { - any_input_is_field = true; - } - } - - if 
(any_input_is_field) { - this->execute_multi_function_node__field( - node, fn_item, node_state, allocator, input_values, input_types, run_state); - } - else { - this->execute_multi_function_node__value( - node, *fn_item.fn, node_state, allocator, input_values, input_types, run_state); - } - } - - void execute_multi_function_node__field(const DNode node, - const nodes::NodeMultiFunctions::Item &fn_item, - NodeState &node_state, - LinearAllocator<> &allocator, - Span<const void *> input_values, - Span<const ValueOrFieldCPPType *> input_types, - NodeTaskRunState *run_state) - { - Vector<GField> input_fields; - for (const int i : input_values.index_range()) { - const void *input_value_or_field = input_values[i]; - const ValueOrFieldCPPType &field_cpp_type = *input_types[i]; - input_fields.append(field_cpp_type.as_field(input_value_or_field)); - } - - std::shared_ptr<fn::FieldOperation> operation; - if (fn_item.owned_fn) { - operation = std::make_shared<fn::FieldOperation>(fn_item.owned_fn, std::move(input_fields)); - } - else { - operation = std::make_shared<fn::FieldOperation>(*fn_item.fn, std::move(input_fields)); - } - - int output_index = 0; - for (const int i : node->output_sockets().index_range()) { - const bNodeSocket &bsocket = node->output_socket(i); - if (!bsocket.is_available()) { - continue; - } - OutputState &output_state = node_state.outputs[i]; - const DOutputSocket socket{node.context(), &bsocket}; - const ValueOrFieldCPPType *cpp_type = static_cast<const ValueOrFieldCPPType *>( - get_socket_cpp_type(bsocket)); - GField new_field{operation, output_index}; - void *buffer = allocator.allocate(cpp_type->size(), cpp_type->alignment()); - cpp_type->construct_from_field(buffer, std::move(new_field)); - this->forward_output(socket, {cpp_type, buffer}, run_state); - output_state.has_been_computed = true; - output_index++; - } - } - - void execute_multi_function_node__value(const DNode node, - const MultiFunction &fn, - NodeState &node_state, - LinearAllocator<> 
&allocator, - Span<const void *> input_values, - Span<const ValueOrFieldCPPType *> input_types, - NodeTaskRunState *run_state) - { - MFParamsBuilder params{fn, 1}; - for (const int i : input_values.index_range()) { - const void *input_value_or_field = input_values[i]; - const ValueOrFieldCPPType &field_cpp_type = *input_types[i]; - const CPPType &base_type = field_cpp_type.base_type(); - const void *input_value = field_cpp_type.get_value_ptr(input_value_or_field); - params.add_readonly_single_input(GVArray::ForSingleRef(base_type, 1, input_value)); - } - - Vector<GMutablePointer, 16> output_buffers; - for (const int i : node->output_sockets().index_range()) { - const DOutputSocket socket = node.output(i); - if (!socket->is_available()) { - output_buffers.append({}); - continue; - } - const ValueOrFieldCPPType *value_or_field_type = static_cast<const ValueOrFieldCPPType *>( - get_socket_cpp_type(socket)); - const CPPType &base_type = value_or_field_type->base_type(); - void *value_or_field_buffer = allocator.allocate(value_or_field_type->size(), - value_or_field_type->alignment()); - value_or_field_type->default_construct(value_or_field_buffer); - void *value_buffer = value_or_field_type->get_value_ptr(value_or_field_buffer); - base_type.destruct(value_buffer); - params.add_uninitialized_single_output(GMutableSpan{base_type, value_buffer, 1}); - output_buffers.append({value_or_field_type, value_or_field_buffer}); - } - - MFContextBuilder context; - fn.call(IndexRange(1), params, context); - - for (const int i : output_buffers.index_range()) { - GMutablePointer buffer = output_buffers[i]; - if (buffer.get() == nullptr) { - continue; - } - const DOutputSocket socket = node.output(i); - this->forward_output(socket, buffer, run_state); - - OutputState &output_state = node_state.outputs[i]; - output_state.has_been_computed = true; - } - } - - void execute_unknown_node(const DNode node, NodeState &node_state, NodeTaskRunState *run_state) - { - LinearAllocator<> &allocator 
= local_allocators_.local(); - for (const bNodeSocket *socket : node->output_sockets()) { - if (!socket->is_available()) { - continue; - } - const CPPType *type = get_socket_cpp_type(*socket); - if (type == nullptr) { - continue; - } - /* Just forward the default value of the type as a fallback. That's typically better than - * crashing or doing nothing. */ - OutputState &output_state = node_state.outputs[socket->index()]; - output_state.has_been_computed = true; - void *buffer = allocator.allocate(type->size(), type->alignment()); - this->construct_default_value(*type, buffer); - this->forward_output({node.context(), socket}, {*type, buffer}, run_state); - } - } - - void node_task_postprocessing(const DNode node, - NodeState &node_state, - bool was_executed, - NodeTaskRunState *run_state) - { - this->with_locked_node(node, node_state, run_state, [&](LockedNode &locked_node) { - const bool node_has_finished = this->finish_node_if_possible(locked_node); - const bool reschedule_requested = node_state.schedule_state == - NodeScheduleState::RunningAndRescheduled; - node_state.schedule_state = NodeScheduleState::NotScheduled; - if (reschedule_requested && !node_has_finished) { - /* Either the node rescheduled itself or another node tried to schedule it while it ran. */ - this->schedule_node(locked_node); - } - if (was_executed) { - this->assert_expected_outputs_have_been_computed(locked_node); - } - }); - } - - void assert_expected_outputs_have_been_computed(LockedNode &locked_node) - { -#ifdef DEBUG - /* Outputs can only be computed when all required inputs have been provided. */ - if (locked_node.node_state.missing_required_inputs > 0) { - return; - } - /* If the node is still scheduled, it is not necessary that all its expected outputs are - * computed yet. 
*/ - if (locked_node.node_state.schedule_state == NodeScheduleState::Scheduled) { - return; - } - - const bool supports_laziness = node_supports_laziness(locked_node.node); - /* Iterating over sockets instead of the states directly, because that makes it easier to - * figure out which socket is missing when one of the asserts is hit. */ - for (const bNodeSocket *bsocket : locked_node.node->output_sockets()) { - OutputState &output_state = locked_node.node_state.outputs[bsocket->index()]; - if (supports_laziness) { - /* Expected that at least all required sockets have been computed. If more outputs become - * required later, the node will be executed again. */ - if (output_state.output_usage_for_execution == ValueUsage::Required) { - BLI_assert(output_state.has_been_computed); - } - } - else { - /* Expect that all outputs that may be used have been computed, because the node cannot - * be executed again. */ - if (output_state.output_usage_for_execution != ValueUsage::Unused) { - BLI_assert(output_state.has_been_computed); - } - } - } -#else - UNUSED_VARS(locked_node); -#endif - } - - void extract_group_outputs() - { - for (const DInputSocket &socket : params_.output_sockets) { - BLI_assert(socket->is_available()); - BLI_assert(!socket->is_multi_input()); - - const DNode node = socket.node(); - NodeState &node_state = this->get_node_state(node); - InputState &input_state = node_state.inputs[socket->index()]; - - SingleInputValue &single_value = *input_state.value.single; - void *value = single_value.value; - - /* The value should have been computed by now. If this assert is hit, it means that there - * was some scheduling issue before. */ - BLI_assert(value != nullptr); - - /* Move value into memory owned by the outer allocator. 
*/ - const CPPType &type = *input_state.type; - void *buffer = outer_allocator_.allocate(type.size(), type.alignment()); - type.move_construct(value, buffer); - - params_.r_output_values.append({type, buffer}); - } - } - - /** - * Load the required input from the socket or trigger nodes to the left to compute the value. - * \return True when the node will be triggered by another node again when the value is computed. - */ - bool set_input_required(LockedNode &locked_node, const DInputSocket input_socket) - { - BLI_assert(locked_node.node == input_socket.node()); - InputState &input_state = locked_node.node_state.inputs[input_socket->index()]; - - /* Value set as unused cannot become used again. */ - BLI_assert(input_state.usage != ValueUsage::Unused); - - if (input_state.was_ready_for_execution) { - return false; - } - - if (input_state.usage == ValueUsage::Required) { - /* If the input was not ready for execution but is required, the node will be triggered again - * once the input has been computed. */ - return true; - } - input_state.usage = ValueUsage::Required; - - /* Count how many values still have to be added to this input until it is "complete". */ - int missing_values = 0; - if (input_socket->is_multi_input()) { - MultiInputValue &multi_value = *input_state.value.multi; - missing_values = multi_value.missing_values(); - } - else { - SingleInputValue &single_value = *input_state.value.single; - if (single_value.value == nullptr) { - missing_values = 1; - } - } - if (missing_values == 0) { - return false; - } - /* Increase the total number of missing required inputs. This ensures that the node will be - * scheduled correctly when all inputs have been provided. */ - locked_node.node_state.missing_required_inputs += missing_values; - - /* Get all origin sockets, because we have to tag those as required as well. 
*/ - Vector<DSocket> origin_sockets; - input_socket.foreach_origin_socket( - [&](const DSocket origin_socket) { origin_sockets.append(origin_socket); }); - - if (origin_sockets.is_empty()) { - /* If there are no origin sockets, just load the value from the socket directly. */ - this->load_unlinked_input_value(locked_node, input_socket, input_state, input_socket); - locked_node.node_state.missing_required_inputs -= 1; - return false; - } - bool requested_from_other_node = false; - for (const DSocket &origin_socket : origin_sockets) { - if (origin_socket->is_input()) { - /* Load the value directly from the origin socket. In most cases this is an unlinked - * group input. */ - this->load_unlinked_input_value(locked_node, input_socket, input_state, origin_socket); - locked_node.node_state.missing_required_inputs -= 1; - } - else { - /* The value has not been computed yet, so when it will be forwarded by another node, this - * node will be triggered. */ - requested_from_other_node = true; - locked_node.delayed_required_outputs.append(DOutputSocket(origin_socket)); - } - } - /* If this node will be triggered by another node, we don't have to schedule it now. */ - if (requested_from_other_node) { - return true; - } - return false; - } - - void set_input_unused(LockedNode &locked_node, const DInputSocket socket) - { - InputState &input_state = locked_node.node_state.inputs[socket->index()]; - - /* A required socket cannot become unused. */ - BLI_assert(input_state.usage != ValueUsage::Required); - - if (input_state.usage == ValueUsage::Unused) { - /* Nothing to do in this case. */ - return; - } - input_state.usage = ValueUsage::Unused; - - /* If the input is unused, its value can be destructed now. */ - this->destruct_input_value_if_exists(locked_node, socket); - - if (input_state.was_ready_for_execution) { - /* If the value was already computed, we don't need to notify origin nodes. 
*/ - return; - } - - /* Notify origin nodes that might want to set its inputs as unused as well. */ - socket.foreach_origin_socket([&](const DSocket origin_socket) { - if (origin_socket->is_input()) { - /* Values from these sockets are loaded directly from the sockets, so there is no node to - * notify. */ - return; - } - /* Delay notification of the other node until this node is not locked anymore. */ - locked_node.delayed_unused_outputs.append(DOutputSocket(origin_socket)); - }); - } - - void send_output_required_notification(const DOutputSocket socket, NodeTaskRunState *run_state) - { - const DNode node = socket.node(); - NodeState &node_state = this->get_node_state(node); - OutputState &output_state = node_state.outputs[socket->index()]; - - this->with_locked_node(node, node_state, run_state, [&](LockedNode &locked_node) { - if (output_state.output_usage == ValueUsage::Required) { - /* Output is marked as required already. So the node is scheduled already. */ - return; - } - /* The origin node needs to be scheduled so that it provides the requested input - * eventually. */ - output_state.output_usage = ValueUsage::Required; - this->schedule_node(locked_node); - }); - } - - void send_output_unused_notification(const DOutputSocket socket, NodeTaskRunState *run_state) - { - const DNode node = socket.node(); - NodeState &node_state = this->get_node_state(node); - OutputState &output_state = node_state.outputs[socket->index()]; - - this->with_locked_node(node, node_state, run_state, [&](LockedNode &locked_node) { - output_state.potential_users -= 1; - if (output_state.potential_users == 0) { - /* The socket might be required even though the output is not used by other sockets. That - * can happen when the socket is forced to be computed. */ - if (output_state.output_usage != ValueUsage::Required) { - /* The output socket has no users anymore. 
*/ - output_state.output_usage = ValueUsage::Unused; - /* Schedule the origin node in case it wants to set its inputs as unused as well. */ - this->schedule_node(locked_node); - } - } - }); - } - - void add_node_to_task_pool(const DNode node) - { - /* Push the task to the pool while it is not locked to avoid a deadlock in case when the task - * is executed immediately. */ - const NodeWithState *node_with_state = node_states_.lookup_key_ptr_as(node); - BLI_task_pool_push( - task_pool_, run_node_from_task_pool, (void *)node_with_state, false, nullptr); - } - - /** - * Moves a newly computed value from an output socket to all the inputs that might need it. - * Takes ownership of the value and destructs if it is unused. - */ - void forward_output(const DOutputSocket from_socket, - GMutablePointer value_to_forward, - NodeTaskRunState *run_state) - { - BLI_assert(value_to_forward.get() != nullptr); - - LinearAllocator<> &allocator = local_allocators_.local(); - - Vector<DSocket> log_original_value_sockets; - Vector<DInputSocket> forward_original_value_sockets; - log_original_value_sockets.append(from_socket); - - from_socket.foreach_target_socket([&](const DInputSocket to_socket, - const DOutputSocket::TargetSocketPathInfo &path_info) { - if (!this->should_forward_to_socket(to_socket)) { - return; - } - BLI_assert(to_socket == path_info.sockets.last()); - GMutablePointer current_value = value_to_forward; - for (const DSocket &next_socket : path_info.sockets) { - const DNode next_node = next_socket.node(); - const bool is_last_socket = to_socket == next_socket; - const bool do_conversion_if_necessary = is_last_socket || - next_node->type == NODE_GROUP_OUTPUT || - (next_node->is_group() && !next_node->is_muted()); - if (do_conversion_if_necessary) { - const CPPType &next_type = *get_socket_cpp_type(next_socket); - if (*current_value.type() != next_type) { - void *buffer = allocator.allocate(next_type.size(), next_type.alignment()); - 
this->convert_value(*current_value.type(), next_type, current_value.get(), buffer); - if (current_value.get() != value_to_forward.get()) { - current_value.destruct(); - } - current_value = {next_type, buffer}; - } - } - if (current_value.get() == value_to_forward.get()) { - /* Log the original value at the current socket. */ - log_original_value_sockets.append(next_socket); - } - else { - /* Multi-input sockets are logged when all values are available. */ - if (!(next_socket->is_input() && next_socket->is_multi_input())) { - /* Log the converted value at the socket. */ - this->log_socket_value({next_socket}, current_value); - } - } - } - if (current_value.get() == value_to_forward.get()) { - /* The value has not been converted, so forward the original value. */ - forward_original_value_sockets.append(to_socket); - } - else { - /* The value has been converted. */ - this->add_value_to_input_socket(to_socket, from_socket, current_value, run_state); - } - }); - this->log_socket_value(log_original_value_sockets, value_to_forward); - this->forward_to_sockets_with_same_type( - allocator, forward_original_value_sockets, value_to_forward, from_socket, run_state); - } - - bool should_forward_to_socket(const DInputSocket socket) - { - const DNode to_node = socket.node(); - const NodeWithState *target_node_with_state = node_states_.lookup_key_ptr_as(to_node); - if (target_node_with_state == nullptr) { - /* If the socket belongs to a node that has no state, the entire node is not used. */ - return false; - } - NodeState &target_node_state = *target_node_with_state->state; - InputState &target_input_state = target_node_state.inputs[socket->index()]; - - std::lock_guard lock{target_node_state.mutex}; - /* Do not forward to an input socket whose value won't be used. 
*/ - return target_input_state.usage != ValueUsage::Unused; - } - - void forward_to_sockets_with_same_type(LinearAllocator<> &allocator, - Span<DInputSocket> to_sockets, - GMutablePointer value_to_forward, - const DOutputSocket from_socket, - NodeTaskRunState *run_state) - { - if (to_sockets.is_empty()) { - /* Value is not used anymore, so it can be destructed. */ - value_to_forward.destruct(); - } - else if (to_sockets.size() == 1) { - /* Value is only used by one input socket, no need to copy it. */ - const DInputSocket to_socket = to_sockets[0]; - this->add_value_to_input_socket(to_socket, from_socket, value_to_forward, run_state); - } - else { - /* Multiple inputs use the value, make a copy for every input except for one. */ - /* First make the copies, so that the next node does not start modifying the value while we - * are still making copies. */ - const CPPType &type = *value_to_forward.type(); - for (const DInputSocket &to_socket : to_sockets.drop_front(1)) { - void *buffer = allocator.allocate(type.size(), type.alignment()); - type.copy_construct(value_to_forward.get(), buffer); - this->add_value_to_input_socket(to_socket, from_socket, {type, buffer}, run_state); - } - /* Forward the original value to one of the targets. */ - const DInputSocket to_socket = to_sockets[0]; - this->add_value_to_input_socket(to_socket, from_socket, value_to_forward, run_state); - } - } - - void add_value_to_input_socket(const DInputSocket socket, - const DOutputSocket origin, - GMutablePointer value, - NodeTaskRunState *run_state) - { - BLI_assert(socket->is_available()); - - const DNode node = socket.node(); - NodeState &node_state = this->get_node_state(node); - InputState &input_state = node_state.inputs[socket->index()]; - - this->with_locked_node(node, node_state, run_state, [&](LockedNode &locked_node) { - if (socket->is_multi_input()) { - /* Add a new value to the multi-input. 
*/ - MultiInputValue &multi_value = *input_state.value.multi; - multi_value.add_value(origin, value.get()); - - if (multi_value.all_values_available()) { - this->log_socket_value({socket}, input_state, multi_value.values); - } - } - else { - /* Assign the value to the input. */ - SingleInputValue &single_value = *input_state.value.single; - BLI_assert(single_value.value == nullptr); - single_value.value = value.get(); - } - - if (input_state.usage == ValueUsage::Required) { - node_state.missing_required_inputs--; - if (node_state.missing_required_inputs == 0) { - /* Schedule node if all the required inputs have been provided. */ - this->schedule_node(locked_node); - } - } - }); - } - - /** - * Loads the value of a socket that is not computed by another node. Note that the socket may - * still be linked to e.g. a Group Input node, but the socket on the outside is not connected to - * anything. - * - * \param input_socket: The socket of the node that wants to use the value. - * \param origin_socket: The socket that we want to load the value from. - */ - void load_unlinked_input_value(LockedNode &locked_node, - const DInputSocket input_socket, - InputState &input_state, - const DSocket origin_socket) - { - /* Only takes locked node as parameter, because the node needs to be locked. 
*/ - UNUSED_VARS(locked_node); - - GMutablePointer value = this->get_value_from_socket(origin_socket, *input_state.type); - if (input_socket->is_multi_input()) { - MultiInputValue &multi_value = *input_state.value.multi; - multi_value.add_value(origin_socket, value.get()); - if (multi_value.all_values_available()) { - this->log_socket_value({input_socket}, input_state, multi_value.values); - } - } - else { - SingleInputValue &single_value = *input_state.value.single; - single_value.value = value.get(); - Vector<DSocket> sockets_to_log_to = {input_socket}; - if (origin_socket != input_socket) { - /* This might log the socket value for the #origin_socket more than once, but this is - * handled by the logging system gracefully. */ - sockets_to_log_to.append(origin_socket); - } - /* TODO: Log to the intermediate sockets between the group input and where the value is - * actually used as well. */ - this->log_socket_value(sockets_to_log_to, value); - } - } - - void destruct_input_value_if_exists(LockedNode &locked_node, const DInputSocket socket) - { - InputState &input_state = locked_node.node_state.inputs[socket->index()]; - if (socket->is_multi_input()) { - MultiInputValue &multi_value = *input_state.value.multi; - for (void *&value : multi_value.values) { - if (value != nullptr) { - input_state.type->destruct(value); - value = nullptr; - } - } - multi_value.provided_value_count = 0; - } - else { - SingleInputValue &single_value = *input_state.value.single; - if (single_value.value != nullptr) { - input_state.type->destruct(single_value.value); - single_value.value = nullptr; - } - } - } - - GMutablePointer get_value_from_socket(const DSocket socket, const CPPType &required_type) - { - LinearAllocator<> &allocator = local_allocators_.local(); - - const CPPType &type = *get_socket_cpp_type(socket); - void *buffer = allocator.allocate(type.size(), type.alignment()); - get_socket_value(*socket.bsocket(), buffer); - - if (type == required_type) { - return {type, buffer}; 
- } - void *converted_buffer = allocator.allocate(required_type.size(), required_type.alignment()); - this->convert_value(type, required_type, buffer, converted_buffer); - type.destruct(buffer); - return {required_type, converted_buffer}; - } - - void convert_value(const CPPType &from_type, - const CPPType &to_type, - const void *from_value, - void *to_value) - { - if (from_type == to_type) { - from_type.copy_construct(from_value, to_value); - return; - } - const ValueOrFieldCPPType *from_field_type = dynamic_cast<const ValueOrFieldCPPType *>( - &from_type); - const ValueOrFieldCPPType *to_field_type = dynamic_cast<const ValueOrFieldCPPType *>(&to_type); - - if (from_field_type != nullptr && to_field_type != nullptr) { - const CPPType &from_base_type = from_field_type->base_type(); - const CPPType &to_base_type = to_field_type->base_type(); - if (conversions_.is_convertible(from_base_type, to_base_type)) { - if (from_field_type->is_field(from_value)) { - const GField &from_field = *from_field_type->get_field_ptr(from_value); - to_field_type->construct_from_field(to_value, - conversions_.try_convert(from_field, to_base_type)); - } - else { - to_field_type->default_construct(to_value); - const void *from_value_ptr = from_field_type->get_value_ptr(from_value); - void *to_value_ptr = to_field_type->get_value_ptr(to_value); - conversions_.get_conversion_functions(from_base_type, to_base_type) - ->convert_single_to_initialized(from_value_ptr, to_value_ptr); - } - return; - } - } - if (conversions_.is_convertible(from_type, to_type)) { - /* Do the conversion if possible. */ - conversions_.convert_to_uninitialized(from_type, to_type, from_value, to_value); - } - else { - /* Cannot convert, use default value instead. 
*/ - this->construct_default_value(to_type, to_value); - } - } - - void construct_default_value(const CPPType &type, void *r_value) - { - type.value_initialize(r_value); - } - - NodeState &get_node_state(const DNode node) - { - return *node_states_.lookup_key_as(node).state; - } - - void log_socket_value(DSocket socket, InputState &input_state, Span<void *> values) - { - if (params_.geo_logger == nullptr) { - return; - } - - Vector<GPointer, 16> value_pointers; - value_pointers.reserve(values.size()); - const CPPType &type = *input_state.type; - for (const void *value : values) { - value_pointers.append({type, value}); - } - params_.geo_logger->local().log_multi_value_socket(socket, value_pointers); - } - - void log_socket_value(Span<DSocket> sockets, GPointer value) - { - if (params_.geo_logger == nullptr) { - return; - } - params_.geo_logger->local().log_value_for_sockets(sockets, value); - } - - void log_debug_message(DNode node, std::string message) - { - if (params_.geo_logger == nullptr) { - return; - } - params_.geo_logger->local().log_debug_message(node, std::move(message)); - } - - /* In most cases when `NodeState` is accessed, the node has to be locked first to avoid race - * conditions. */ - template<typename Function> - void with_locked_node(const DNode node, - NodeState &node_state, - NodeTaskRunState *run_state, - const Function &function) - { - LockedNode locked_node{node, node_state}; - - node_state.mutex.lock(); - /* Isolate this thread because we don't want it to start executing another node. This other - * node might want to lock the same mutex leading to a deadlock. */ - threading::isolate_task([&] { function(locked_node); }); - node_state.mutex.unlock(); - - /* Then send notifications to the other nodes after the node state is unlocked. This avoids - * locking two nodes at the same time on this thread and helps to prevent deadlocks. 
*/ - for (const DOutputSocket &socket : locked_node.delayed_required_outputs) { - this->send_output_required_notification(socket, run_state); - } - for (const DOutputSocket &socket : locked_node.delayed_unused_outputs) { - this->send_output_unused_notification(socket, run_state); - } - for (const DNode &node_to_schedule : locked_node.delayed_scheduled_nodes) { - if (run_state != nullptr && !run_state->next_node_to_run) { - /* Execute the node on the same thread after the current node finished. */ - /* Currently, this assumes that it is always best to run the first node that is scheduled - * on the same thread. That is usually correct, because the geometry socket which carries - * the most data usually comes first in nodes. */ - run_state->next_node_to_run = node_to_schedule; - } - else { - /* Push the node to the task pool so that another thread can start working on it. */ - this->add_node_to_task_pool(node_to_schedule); - } - } - } -}; - -NodeParamsProvider::NodeParamsProvider(GeometryNodesEvaluator &evaluator, - DNode dnode, - NodeState &node_state, - NodeTaskRunState *run_state) - : evaluator_(evaluator), node_state_(node_state), run_state_(run_state) -{ - this->dnode = dnode; - this->self_object = evaluator.params_.self_object; - this->modifier = &evaluator.params_.modifier_->modifier; - this->depsgraph = evaluator.params_.depsgraph; - this->logger = evaluator.params_.geo_logger; -} - -bool NodeParamsProvider::can_get_input(StringRef identifier) const -{ - const DInputSocket socket = this->dnode.input_by_identifier(identifier); - BLI_assert(socket); - - InputState &input_state = node_state_.inputs[socket->index()]; - if (!input_state.was_ready_for_execution) { - return false; - } - - if (socket->is_multi_input()) { - MultiInputValue &multi_value = *input_state.value.multi; - return multi_value.all_values_available(); - } - SingleInputValue &single_value = *input_state.value.single; - return single_value.value != nullptr; -} - -bool 
NodeParamsProvider::can_set_output(StringRef identifier) const -{ - const DOutputSocket socket = this->dnode.output_by_identifier(identifier); - BLI_assert(socket); - - OutputState &output_state = node_state_.outputs[socket->index()]; - return !output_state.has_been_computed; -} - -GMutablePointer NodeParamsProvider::extract_input(StringRef identifier) -{ - const DInputSocket socket = this->dnode.input_by_identifier(identifier); - BLI_assert(socket); - BLI_assert(!socket->is_multi_input()); - BLI_assert(this->can_get_input(identifier)); - - InputState &input_state = node_state_.inputs[socket->index()]; - SingleInputValue &single_value = *input_state.value.single; - void *value = single_value.value; - single_value.value = nullptr; - return {*input_state.type, value}; -} - -Vector<GMutablePointer> NodeParamsProvider::extract_multi_input(StringRef identifier) -{ - const DInputSocket socket = this->dnode.input_by_identifier(identifier); - BLI_assert(socket); - BLI_assert(socket->is_multi_input()); - BLI_assert(this->can_get_input(identifier)); - - InputState &input_state = node_state_.inputs[socket->index()]; - MultiInputValue &multi_value = *input_state.value.multi; - - Vector<GMutablePointer> ret_values; - for (void *&value : multi_value.values) { - BLI_assert(value != nullptr); - ret_values.append({*input_state.type, value}); - value = nullptr; - } - return ret_values; -} - -GPointer NodeParamsProvider::get_input(StringRef identifier) const -{ - const DInputSocket socket = this->dnode.input_by_identifier(identifier); - BLI_assert(socket); - BLI_assert(!socket->is_multi_input()); - BLI_assert(this->can_get_input(identifier)); - - InputState &input_state = node_state_.inputs[socket->index()]; - SingleInputValue &single_value = *input_state.value.single; - return {*input_state.type, single_value.value}; -} - -GMutablePointer NodeParamsProvider::alloc_output_value(const CPPType &type) -{ - LinearAllocator<> &allocator = evaluator_.local_allocators_.local(); - return 
{type, allocator.allocate(type.size(), type.alignment())}; -} - -void NodeParamsProvider::set_output(StringRef identifier, GMutablePointer value) -{ - const DOutputSocket socket = this->dnode.output_by_identifier(identifier); - BLI_assert(socket); - - OutputState &output_state = node_state_.outputs[socket->index()]; - BLI_assert(!output_state.has_been_computed); - evaluator_.forward_output(socket, value, run_state_); - output_state.has_been_computed = true; -} - -bool NodeParamsProvider::lazy_require_input(StringRef identifier) -{ - BLI_assert(node_supports_laziness(this->dnode)); - const DInputSocket socket = this->dnode.input_by_identifier(identifier); - BLI_assert(socket); - - InputState &input_state = node_state_.inputs[socket->index()]; - if (input_state.was_ready_for_execution) { - return false; - } - evaluator_.with_locked_node(this->dnode, node_state_, run_state_, [&](LockedNode &locked_node) { - if (!evaluator_.set_input_required(locked_node, socket)) { - /* Schedule the currently executed node again because the value is available now but was not - * ready for the current execution. 
*/ - evaluator_.schedule_node(locked_node); - } - }); - return true; -} - -void NodeParamsProvider::set_input_unused(StringRef identifier) -{ - BLI_assert(node_supports_laziness(this->dnode)); - const DInputSocket socket = this->dnode.input_by_identifier(identifier); - BLI_assert(socket); - - evaluator_.with_locked_node(this->dnode, node_state_, run_state_, [&](LockedNode &locked_node) { - evaluator_.set_input_unused(locked_node, socket); - }); -} - -bool NodeParamsProvider::output_is_required(StringRef identifier) const -{ - const DOutputSocket socket = this->dnode.output_by_identifier(identifier); - BLI_assert(socket); - - OutputState &output_state = node_state_.outputs[socket->index()]; - if (output_state.has_been_computed) { - return false; - } - return output_state.output_usage_for_execution != ValueUsage::Unused; -} - -bool NodeParamsProvider::lazy_output_is_required(StringRef identifier) const -{ - BLI_assert(node_supports_laziness(this->dnode)); - const DOutputSocket socket = this->dnode.output_by_identifier(identifier); - BLI_assert(socket); - - OutputState &output_state = node_state_.outputs[socket->index()]; - if (output_state.has_been_computed) { - return false; - } - return output_state.output_usage_for_execution == ValueUsage::Required; -} - -void NodeParamsProvider::set_default_remaining_outputs() -{ - LinearAllocator<> &allocator = evaluator_.local_allocators_.local(); - - for (const int i : this->dnode->output_sockets().index_range()) { - OutputState &output_state = node_state_.outputs[i]; - if (output_state.has_been_computed) { - continue; - } - if (output_state.output_usage_for_execution == ValueUsage::Unused) { - continue; - } - - const DOutputSocket socket = this->dnode.output(i); - const CPPType *type = get_socket_cpp_type(socket); - BLI_assert(type != nullptr); - void *buffer = allocator.allocate(type->size(), type->alignment()); - type->value_initialize(buffer); - evaluator_.forward_output(socket, {type, buffer}, run_state_); - 
output_state.has_been_computed = true; - } -} - -void evaluate_geometry_nodes(GeometryNodesEvaluationParams ¶ms) -{ - GeometryNodesEvaluator evaluator{params}; - evaluator.execute(); -} - -} // namespace blender::modifiers::geometry_nodes diff --git a/source/blender/modifiers/intern/MOD_nodes_evaluator.hh b/source/blender/modifiers/intern/MOD_nodes_evaluator.hh deleted file mode 100644 index cbcbcab5679..00000000000 --- a/source/blender/modifiers/intern/MOD_nodes_evaluator.hh +++ /dev/null @@ -1,44 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ - -#pragma once - -#include "BLI_generic_pointer.hh" -#include "BLI_map.hh" - -#include "NOD_derived_node_tree.hh" -#include "NOD_geometry_nodes_eval_log.hh" -#include "NOD_multi_function.hh" - -#include "DNA_modifier_types.h" - -#include "FN_multi_function.hh" - -namespace geo_log = blender::nodes::geometry_nodes_eval_log; - -namespace blender::modifiers::geometry_nodes { - -using namespace nodes::derived_node_tree_types; - -struct GeometryNodesEvaluationParams { - blender::LinearAllocator<> allocator; - - Map<DOutputSocket, GMutablePointer> input_values; - Vector<DInputSocket> output_sockets; - /* These sockets will be computed but are not part of the output. Their value can be retrieved in - * `log_socket_value_fn`. These sockets are not part of `output_sockets` because then the - * evaluator would have to keep the socket values in memory until the end, which might not be - * necessary in all cases. Sometimes `log_socket_value_fn` might just want to look at the value - * and then it can be freed. 
*/ - Vector<DSocket> force_compute_sockets; - nodes::NodeMultiFunctions *mf_by_node; - const NodesModifierData *modifier_; - Depsgraph *depsgraph; - Object *self_object; - geo_log::GeoLogger *geo_logger; - - Vector<GMutablePointer> r_output_values; -}; - -void evaluate_geometry_nodes(GeometryNodesEvaluationParams ¶ms); - -} // namespace blender::modifiers::geometry_nodes diff --git a/source/blender/nodes/CMakeLists.txt b/source/blender/nodes/CMakeLists.txt index ff8bd27f8d7..e042458ca19 100644 --- a/source/blender/nodes/CMakeLists.txt +++ b/source/blender/nodes/CMakeLists.txt @@ -40,7 +40,8 @@ set(INC set(SRC intern/derived_node_tree.cc - intern/geometry_nodes_eval_log.cc + intern/geometry_nodes_lazy_function.cc + intern/geometry_nodes_log.cc intern/math_functions.cc intern/node_common.cc intern/node_declaration.cc @@ -58,7 +59,7 @@ set(SRC NOD_function.h NOD_geometry.h NOD_geometry_exec.hh - NOD_geometry_nodes_eval_log.hh + NOD_geometry_nodes_lazy_function.hh NOD_math_functions.hh NOD_multi_function.hh NOD_node_declaration.hh diff --git a/source/blender/nodes/NOD_geometry_exec.hh b/source/blender/nodes/NOD_geometry_exec.hh index b5ffd3a317c..16669f7cfce 100644 --- a/source/blender/nodes/NOD_geometry_exec.hh +++ b/source/blender/nodes/NOD_geometry_exec.hh @@ -3,6 +3,7 @@ #pragma once #include "FN_field.hh" +#include "FN_lazy_function.hh" #include "FN_multi_function_builder.hh" #include "BKE_geometry_fields.hh" @@ -11,9 +12,8 @@ #include "DNA_node_types.h" #include "NOD_derived_node_tree.hh" -#include "NOD_geometry_nodes_eval_log.hh" +#include "NOD_geometry_nodes_lazy_function.hh" -struct Depsgraph; struct ModifierData; namespace blender::nodes { @@ -40,75 +40,18 @@ using fn::FieldInput; using fn::FieldOperation; using fn::GField; using fn::ValueOrField; -using geometry_nodes_eval_log::eNamedAttrUsage; -using geometry_nodes_eval_log::NodeWarningType; - -/** - * This class exists to separate the memory management details of the geometry nodes evaluator - * from the 
node execution functions and related utilities. - */ -class GeoNodeExecParamsProvider { - public: - DNode dnode; - const Object *self_object = nullptr; - const ModifierData *modifier = nullptr; - Depsgraph *depsgraph = nullptr; - geometry_nodes_eval_log::GeoLogger *logger = nullptr; - - /** - * Returns true when the node is allowed to get/extract the input value. The identifier is - * expected to be valid. This may return false if the input value has been consumed already. - */ - virtual bool can_get_input(StringRef identifier) const = 0; - - /** - * Returns true when the node is allowed to set the output value. The identifier is expected to - * be valid. This may return false if the output value has been set already. - */ - virtual bool can_set_output(StringRef identifier) const = 0; - - /** - * Take ownership of an input value. The caller is responsible for destructing the value. It does - * not have to be freed, because the memory is managed by the geometry nodes evaluator. - */ - virtual GMutablePointer extract_input(StringRef identifier) = 0; - - /** - * Similar to #extract_input, but has to be used for multi-input sockets. - */ - virtual Vector<GMutablePointer> extract_multi_input(StringRef identifier) = 0; - - /** - * Get the input value for the identifier without taking ownership of it. - */ - virtual GPointer get_input(StringRef identifier) const = 0; - - /** - * Prepare a memory buffer for an output value of the node. The returned memory has to be - * initialized by the caller. The identifier and type are expected to be correct. - */ - virtual GMutablePointer alloc_output_value(const CPPType &type) = 0; - - /** - * The value has been allocated with #alloc_output_value. - */ - virtual void set_output(StringRef identifier, GMutablePointer value) = 0; - - /* A description for these methods is provided in GeoNodeExecParams. 
*/ - virtual void set_input_unused(StringRef identifier) = 0; - virtual bool output_is_required(StringRef identifier) const = 0; - virtual bool lazy_require_input(StringRef identifier) = 0; - virtual bool lazy_output_is_required(StringRef identifier) const = 0; - - virtual void set_default_remaining_outputs() = 0; -}; +using geo_eval_log::NamedAttributeUsage; +using geo_eval_log::NodeWarningType; class GeoNodeExecParams { private: - GeoNodeExecParamsProvider *provider_; + const bNode &node_; + lf::Params ¶ms_; + const lf::Context &lf_context_; public: - GeoNodeExecParams(GeoNodeExecParamsProvider &provider) : provider_(&provider) + GeoNodeExecParams(const bNode &node, lf::Params ¶ms, const lf::Context &lf_context) + : node_(node), params_(params), lf_context_(lf_context) { } @@ -119,20 +62,6 @@ class GeoNodeExecParams { /** * Get the input value for the input socket with the given identifier. * - * The node calling becomes responsible for destructing the value before it is done - * executing. This method can only be called once for each identifier. - */ - GMutablePointer extract_input(StringRef identifier) - { -#ifdef DEBUG - this->check_input_access(identifier); -#endif - return provider_->extract_input(identifier); - } - - /** - * Get the input value for the input socket with the given identifier. - * * This method can only be called once for each identifier. 
*/ template<typename T> T extract_input(StringRef identifier) @@ -151,8 +80,8 @@ class GeoNodeExecParams { #ifdef DEBUG this->check_input_access(identifier, &CPPType::get<T>()); #endif - GMutablePointer gvalue = this->extract_input(identifier); - T value = gvalue.relocate_out<T>(); + const int index = this->get_input_index(identifier); + T value = params_.extract_input<T>(index); if constexpr (std::is_same_v<T, GeometrySet>) { this->check_input_geometry_set(identifier, value); } @@ -164,27 +93,6 @@ class GeoNodeExecParams { void check_output_geometry_set(const GeometrySet &geometry_set) const; /** - * Get input as vector for multi input socket with the given identifier. - * - * This method can only be called once for each identifier. - */ - template<typename T> Vector<T> extract_multi_input(StringRef identifier) - { - Vector<GMutablePointer> gvalues = provider_->extract_multi_input(identifier); - Vector<T> values; - for (GMutablePointer gvalue : gvalues) { - if constexpr (is_field_base_type_v<T>) { - const ValueOrField<T> value_or_field = gvalue.relocate_out<ValueOrField<T>>(); - values.append(value_or_field.as_value()); - } - else { - values.append(gvalue.relocate_out<T>()); - } - } - return values; - } - - /** * Get the input value for the input socket with the given identifier. 
*/ template<typename T> T get_input(StringRef identifier) const @@ -202,9 +110,8 @@ class GeoNodeExecParams { #ifdef DEBUG this->check_input_access(identifier, &CPPType::get<T>()); #endif - GPointer gvalue = provider_->get_input(identifier); - BLI_assert(gvalue.is_type<T>()); - const T &value = *(const T *)gvalue.get(); + const int index = this->get_input_index(identifier); + const T &value = params_.get_input<T>(index); if constexpr (std::is_same_v<T, GeometrySet>) { this->check_input_geometry_set(identifier, value); } @@ -226,17 +133,28 @@ class GeoNodeExecParams { this->set_output(identifier, ValueOrField<BaseType>(std::forward<T>(value))); } else { - const CPPType &type = CPPType::get<StoredT>(); #ifdef DEBUG + const CPPType &type = CPPType::get<StoredT>(); this->check_output_access(identifier, type); #endif if constexpr (std::is_same_v<StoredT, GeometrySet>) { this->check_output_geometry_set(value); } - GMutablePointer gvalue = provider_->alloc_output_value(type); - new (gvalue.get()) StoredT(std::forward<T>(value)); - provider_->set_output(identifier, gvalue); + const int index = this->get_output_index(identifier); + params_.set_output(index, std::forward<T>(value)); + } + } + + geo_eval_log::GeoTreeLogger *get_local_tree_logger() const + { + GeoNodesLFUserData *user_data = this->user_data(); + BLI_assert(user_data != nullptr); + const ComputeContext *compute_context = user_data->compute_context; + BLI_assert(compute_context != nullptr); + if (user_data->modifier_data->eval_log == nullptr) { + return nullptr; } + return &user_data->modifier_data->eval_log->get_local_tree_logger(*compute_context); } /** @@ -244,7 +162,8 @@ class GeoNodeExecParams { */ void set_input_unused(StringRef identifier) { - provider_->set_input_unused(identifier); + const int index = this->get_input_index(identifier); + params_.set_input_unused(index); } /** @@ -254,7 +173,8 @@ class GeoNodeExecParams { */ bool output_is_required(StringRef identifier) const { - return 
provider_->output_is_required(identifier); + const int index = this->get_output_index(identifier); + return params_.get_output_usage(index) != lf::ValueUsage::Unused; } /** @@ -265,7 +185,8 @@ class GeoNodeExecParams { */ bool lazy_require_input(StringRef identifier) { - return provider_->lazy_require_input(identifier); + const int index = this->get_input_index(identifier); + return params_.try_get_input_data_ptr_or_request(index) == nullptr; } /** @@ -275,7 +196,8 @@ class GeoNodeExecParams { */ bool lazy_output_is_required(StringRef identifier) { - return provider_->lazy_output_is_required(identifier); + const int index = this->get_output_index(identifier); + return params_.get_output_usage(index) == lf::ValueUsage::Used; } /** @@ -283,17 +205,32 @@ class GeoNodeExecParams { */ const bNode &node() const { - return *provider_->dnode; + return node_; } const Object *self_object() const { - return provider_->self_object; + if (const auto *data = this->user_data()) { + if (data->modifier_data) { + return data->modifier_data->self_object; + } + } + return nullptr; } Depsgraph *depsgraph() const { - return provider_->depsgraph; + if (const auto *data = this->user_data()) { + if (data->modifier_data) { + return data->modifier_data->depsgraph; + } + } + return nullptr; + } + + GeoNodesLFUserData *user_data() const + { + return dynamic_cast<GeoNodesLFUserData *>(lf_context_.user_data); } /** @@ -306,7 +243,7 @@ class GeoNodeExecParams { void set_default_remaining_outputs(); - void used_named_attribute(std::string attribute_name, eNamedAttrUsage usage); + void used_named_attribute(std::string attribute_name, NamedAttributeUsage usage); private: /* Utilities for detecting common errors at when using this class. */ @@ -315,6 +252,38 @@ class GeoNodeExecParams { /* Find the active socket with the input name (not the identifier). 
*/ const bNodeSocket *find_available_socket(const StringRef name) const; + + int get_input_index(const StringRef identifier) const + { + int counter = 0; + for (const bNodeSocket *socket : node_.input_sockets()) { + if (!socket->is_available()) { + continue; + } + if (socket->identifier == identifier) { + return counter; + } + counter++; + } + BLI_assert_unreachable(); + return -1; + } + + int get_output_index(const StringRef identifier) const + { + int counter = 0; + for (const bNodeSocket *socket : node_.output_sockets()) { + if (!socket->is_available()) { + continue; + } + if (socket->identifier == identifier) { + return counter; + } + counter++; + } + BLI_assert_unreachable(); + return -1; + } }; } // namespace blender::nodes diff --git a/source/blender/nodes/NOD_geometry_nodes_eval_log.hh b/source/blender/nodes/NOD_geometry_nodes_eval_log.hh deleted file mode 100644 index 46ba72d14d8..00000000000 --- a/source/blender/nodes/NOD_geometry_nodes_eval_log.hh +++ /dev/null @@ -1,411 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ - -#pragma once - -/** - * Many geometry nodes related UI features need access to data produced during evaluation. Not only - * is the final output required but also the intermediate results. Those features include - * attribute search, node warnings, socket inspection and the viewer node. - * - * This file provides the framework for logging data during evaluation and accessing the data after - * evaluation. - * - * During logging every thread gets its own local logger to avoid too much locking (logging - * generally happens for every socket). After geometry nodes evaluation is done, the thread-local - * logging information is combined and post-processed to make it easier for the UI to lookup. - * necessary information. 
- */ - -#include "BLI_enumerable_thread_specific.hh" -#include "BLI_function_ref.hh" -#include "BLI_generic_pointer.hh" -#include "BLI_linear_allocator.hh" -#include "BLI_map.hh" - -#include "BKE_geometry_set.hh" - -#include "NOD_derived_node_tree.hh" - -#include "FN_field.hh" - -#include <chrono> - -struct SpaceNode; -struct SpaceSpreadsheet; - -namespace blender::nodes::geometry_nodes_eval_log { - -/** Contains information about a value that has been computed during geometry nodes evaluation. */ -class ValueLog { - public: - virtual ~ValueLog() = default; -}; - -/** Contains an owned copy of a value of a generic type. */ -class GenericValueLog : public ValueLog { - private: - GMutablePointer data_; - - public: - GenericValueLog(GMutablePointer data) : data_(data) - { - } - - ~GenericValueLog() - { - data_.destruct(); - } - - GPointer value() const - { - return data_; - } -}; - -class GFieldValueLog : public ValueLog { - private: - fn::GField field_; - const CPPType &type_; - Vector<std::string> input_tooltips_; - - public: - GFieldValueLog(fn::GField field, bool log_full_field); - - const fn::GField &field() const - { - return field_; - } - - Span<std::string> input_tooltips() const - { - return input_tooltips_; - } - - const CPPType &type() const - { - return type_; - } -}; - -struct GeometryAttributeInfo { - std::string name; - /** Can be empty when #name does not actually exist on a geometry yet. */ - std::optional<eAttrDomain> domain; - std::optional<eCustomDataType> data_type; -}; - -/** Contains information about a geometry set. In most cases this does not store the entire - * geometry set as this would require too much memory. 
*/ -class GeometryValueLog : public ValueLog { - private: - Vector<GeometryAttributeInfo> attributes_; - Vector<GeometryComponentType> component_types_; - std::unique_ptr<GeometrySet> full_geometry_; - - public: - struct MeshInfo { - int verts_num, edges_num, faces_num; - }; - struct CurveInfo { - int splines_num; - }; - struct PointCloudInfo { - int points_num; - }; - struct InstancesInfo { - int instances_num; - }; - struct EditDataInfo { - bool has_deformed_positions; - bool has_deform_matrices; - }; - - std::optional<MeshInfo> mesh_info; - std::optional<CurveInfo> curve_info; - std::optional<PointCloudInfo> pointcloud_info; - std::optional<InstancesInfo> instances_info; - std::optional<EditDataInfo> edit_data_info; - - GeometryValueLog(const GeometrySet &geometry_set, bool log_full_geometry = false); - - Span<GeometryAttributeInfo> attributes() const - { - return attributes_; - } - - Span<GeometryComponentType> component_types() const - { - return component_types_; - } - - const GeometrySet *full_geometry() const - { - return full_geometry_.get(); - } -}; - -enum class NodeWarningType { - Error, - Warning, - Info, -}; - -struct NodeWarning { - NodeWarningType type; - std::string message; -}; - -struct NodeWithWarning { - DNode node; - NodeWarning warning; -}; - -struct NodeWithExecutionTime { - DNode node; - std::chrono::microseconds exec_time; -}; - -struct NodeWithDebugMessage { - DNode node; - std::string message; -}; - -/** The same value can be referenced by multiple sockets when they are linked. 
*/ -struct ValueOfSockets { - Span<DSocket> sockets; - destruct_ptr<ValueLog> value; -}; - -enum class eNamedAttrUsage { - None = 0, - Read = 1 << 0, - Write = 1 << 1, - Remove = 1 << 2, -}; -ENUM_OPERATORS(eNamedAttrUsage, eNamedAttrUsage::Remove); - -struct UsedNamedAttribute { - std::string name; - eNamedAttrUsage usage; -}; - -struct NodeWithUsedNamedAttribute { - DNode node; - UsedNamedAttribute attribute; -}; - -class GeoLogger; -class ModifierLog; - -/** Every thread has its own local logger to avoid having to communicate between threads during - * evaluation. After evaluation the individual logs are combined. */ -class LocalGeoLogger { - private: - /* Back pointer to the owner of this local logger. */ - GeoLogger *main_logger_; - /* Allocator for the many small allocations during logging. This is in a `unique_ptr` so that - * ownership can be transferred later on. */ - std::unique_ptr<LinearAllocator<>> allocator_; - Vector<ValueOfSockets> values_; - Vector<NodeWithWarning> node_warnings_; - Vector<NodeWithExecutionTime> node_exec_times_; - Vector<NodeWithDebugMessage> node_debug_messages_; - Vector<NodeWithUsedNamedAttribute> used_named_attributes_; - - friend ModifierLog; - - public: - LocalGeoLogger(GeoLogger &main_logger) : main_logger_(&main_logger) - { - this->allocator_ = std::make_unique<LinearAllocator<>>(); - } - - void log_value_for_sockets(Span<DSocket> sockets, GPointer value); - void log_multi_value_socket(DSocket socket, Span<GPointer> values); - void log_node_warning(DNode node, NodeWarningType type, std::string message); - void log_execution_time(DNode node, std::chrono::microseconds exec_time); - void log_used_named_attribute(DNode node, std::string attribute_name, eNamedAttrUsage usage); - /** - * Log a message that will be displayed in the node editor next to the node. - * This should only be used for debugging purposes and not to display information to users. 
- */ - void log_debug_message(DNode node, std::string message); -}; - -/** The root logger class. */ -class GeoLogger { - private: - /** - * Log the entire value for these sockets, because they may be inspected afterwards. - * We don't log everything, because that would take up too much memory and cause significant - * slowdowns. - */ - Set<DSocket> log_full_sockets_; - threading::EnumerableThreadSpecific<LocalGeoLogger> threadlocals_; - - /* These are only optional since they don't have a default constructor. */ - std::unique_ptr<GeometryValueLog> input_geometry_log_; - std::unique_ptr<GeometryValueLog> output_geometry_log_; - - friend LocalGeoLogger; - friend ModifierLog; - - public: - GeoLogger(Set<DSocket> log_full_sockets) - : log_full_sockets_(std::move(log_full_sockets)), - threadlocals_([this]() { return LocalGeoLogger(*this); }) - { - } - - void log_input_geometry(const GeometrySet &geometry) - { - input_geometry_log_ = std::make_unique<GeometryValueLog>(geometry); - } - - void log_output_geometry(const GeometrySet &geometry) - { - output_geometry_log_ = std::make_unique<GeometryValueLog>(geometry); - } - - LocalGeoLogger &local() - { - return threadlocals_.local(); - } - - auto begin() - { - return threadlocals_.begin(); - } - - auto end() - { - return threadlocals_.end(); - } -}; - -/** Contains information that has been logged for one specific socket. */ -class SocketLog { - private: - ValueLog *value_ = nullptr; - - friend ModifierLog; - - public: - const ValueLog *value() const - { - return value_; - } -}; - -/** Contains information that has been logged for one specific node. 
*/ -class NodeLog { - private: - Vector<SocketLog> input_logs_; - Vector<SocketLog> output_logs_; - Vector<NodeWarning, 0> warnings_; - Vector<std::string, 0> debug_messages_; - Vector<UsedNamedAttribute, 0> used_named_attributes_; - std::chrono::microseconds exec_time_; - - friend ModifierLog; - - public: - const SocketLog *lookup_socket_log(eNodeSocketInOut in_out, int index) const; - const SocketLog *lookup_socket_log(const bNode &node, const bNodeSocket &socket) const; - void execution_time(std::chrono::microseconds exec_time); - - Span<SocketLog> input_logs() const - { - return input_logs_; - } - - Span<SocketLog> output_logs() const - { - return output_logs_; - } - - Span<NodeWarning> warnings() const - { - return warnings_; - } - - Span<std::string> debug_messages() const - { - return debug_messages_; - } - - Span<UsedNamedAttribute> used_named_attributes() const - { - return used_named_attributes_; - } - - std::chrono::microseconds execution_time() const - { - return exec_time_; - } - - Vector<const GeometryAttributeInfo *> lookup_available_attributes() const; -}; - -/** Contains information that has been logged for one specific tree. */ -class TreeLog { - private: - Map<std::string, destruct_ptr<NodeLog>> node_logs_; - Map<std::string, destruct_ptr<TreeLog>> child_logs_; - - friend ModifierLog; - - public: - const NodeLog *lookup_node_log(StringRef node_name) const; - const NodeLog *lookup_node_log(const bNode &node) const; - const TreeLog *lookup_child_log(StringRef node_name) const; - void foreach_node_log(FunctionRef<void(const NodeLog &)> fn) const; -}; - -/** Contains information about an entire geometry nodes evaluation. */ -class ModifierLog { - private: - LinearAllocator<> allocator_; - /* Allocators of the individual loggers. 
*/ - Vector<std::unique_ptr<LinearAllocator<>>> logger_allocators_; - destruct_ptr<TreeLog> root_tree_logs_; - Vector<destruct_ptr<ValueLog>> logged_values_; - - std::unique_ptr<GeometryValueLog> input_geometry_log_; - std::unique_ptr<GeometryValueLog> output_geometry_log_; - - public: - ModifierLog(GeoLogger &logger); - - const TreeLog &root_tree() const - { - return *root_tree_logs_; - } - - /* Utilities to find logged information for a specific context. */ - static const ModifierLog *find_root_by_node_editor_context(const SpaceNode &snode); - static const TreeLog *find_tree_by_node_editor_context(const SpaceNode &snode); - static const NodeLog *find_node_by_node_editor_context(const SpaceNode &snode, - const bNode &node); - static const NodeLog *find_node_by_node_editor_context(const SpaceNode &snode, - const StringRef node_name); - static const SocketLog *find_socket_by_node_editor_context(const SpaceNode &snode, - const bNode &node, - const bNodeSocket &socket); - static const NodeLog *find_node_by_spreadsheet_editor_context( - const SpaceSpreadsheet &sspreadsheet); - void foreach_node_log(FunctionRef<void(const NodeLog &)> fn) const; - - const GeometryValueLog *input_geometry_log() const; - const GeometryValueLog *output_geometry_log() const; - - private: - using LogByTreeContext = Map<const DTreeContext *, TreeLog *>; - - TreeLog &lookup_or_add_tree_log(LogByTreeContext &log_by_tree_context, - const DTreeContext &tree_context); - NodeLog &lookup_or_add_node_log(LogByTreeContext &log_by_tree_context, DNode node); - SocketLog &lookup_or_add_socket_log(LogByTreeContext &log_by_tree_context, DSocket socket); -}; - -} // namespace blender::nodes::geometry_nodes_eval_log diff --git a/source/blender/nodes/NOD_geometry_nodes_lazy_function.hh b/source/blender/nodes/NOD_geometry_nodes_lazy_function.hh new file mode 100644 index 00000000000..3137dc41857 --- /dev/null +++ b/source/blender/nodes/NOD_geometry_nodes_lazy_function.hh @@ -0,0 +1,178 @@ +/* 
SPDX-License-Identifier: GPL-2.0-or-later */ + +#pragma once + +/** + * For evaluation, geometry node groups are converted to a lazy-function graph. The generated graph + * is cached per node group, so it only has to be generated once after a change. + * + * Node groups are *not* inlined into the lazy-function graph. This could be added in the future as + * it might improve performance in some cases, but generally does not seem necessary. Inlining node + * groups also has disadvantages like making per-node-group caches less useful, resulting in more + * overhead. + * + * Instead, group nodes are just like all other nodes in the lazy-function graph. What makes them + * special is that they reference the lazy-function graph of the group they reference. + * + * During lazy-function graph generation, a mapping between the #bNodeTree and + * #lazy_function::Graph is build that can be used when evaluating the graph (e.g. for logging). + */ + +#include "FN_lazy_function_graph.hh" +#include "FN_lazy_function_graph_executor.hh" + +#include "NOD_geometry_nodes_log.hh" +#include "NOD_multi_function.hh" + +#include "BLI_compute_context.hh" + +struct Object; +struct Depsgraph; + +namespace blender::nodes { + +namespace lf = fn::lazy_function; +using lf::LazyFunction; + +/** + * Data that is passed into geometry nodes evaluation from the modifier. + */ +struct GeoNodesModifierData { + /** Object that is currently evaluated. */ + const Object *self_object = nullptr; + /** Depsgraph that is evaluating the modifier. */ + Depsgraph *depsgraph = nullptr; + /** Optional logger. */ + geo_eval_log::GeoModifierLog *eval_log = nullptr; + /** + * Some nodes should be executed even when their output is not used (e.g. active viewer nodes and + * the node groups they are contained in). + */ + const MultiValueMap<ComputeContextHash, const lf::FunctionNode *> *side_effect_nodes; +}; + +/** + * Custom user data that is passed to every geometry nodes related lazy-function evaluation. 
+ */ +struct GeoNodesLFUserData : public lf::UserData { + /** + * Data from the modifier that is being evaluated. + */ + GeoNodesModifierData *modifier_data = nullptr; + /** + * Current compute context. This is different depending in the (nested) node group that is being + * evaluated. + */ + const ComputeContext *compute_context = nullptr; +}; + +/** + * Contains the mapping between the #bNodeTree and the corresponding lazy-function graph. + * This is *not* a one-to-one mapping. + */ +struct GeometryNodeLazyFunctionGraphMapping { + /** + * Contains mapping of sockets for special nodes like group input and group output. + */ + Map<const bNodeSocket *, lf::Socket *> dummy_socket_map; + /** + * The inputs sockets in the graph. Multiple group input nodes are combined into one in the + * lazy-function graph. + */ + Vector<lf::OutputSocket *> group_input_sockets; + /** + * A mapping used for logging intermediate values. + */ + MultiValueMap<const lf::Socket *, const bNodeSocket *> bsockets_by_lf_socket_map; + /** + * Mappings for some special node types. Generally, this mapping does not exist for all node + * types, so better have more specialized mappings for now. + */ + Map<const bNode *, const lf::FunctionNode *> group_node_map; + Map<const bNode *, const lf::FunctionNode *> viewer_node_map; +}; + +/** + * Data that is cached for every #bNodeTree. + */ +struct GeometryNodesLazyFunctionGraphInfo { + /** + * Allocator used for many things contained in this struct. + */ + LinearAllocator<> allocator; + /** + * Many nodes are implemented as multi-functions. So this contains a mapping from nodes to their + * corresponding multi-functions. + */ + std::unique_ptr<NodeMultiFunctions> node_multi_functions; + /** + * Many lazy-functions are build for the lazy-function graph. Since the graph does not own them, + * we have to keep track of them separately. + */ + Vector<std::unique_ptr<LazyFunction>> functions; + /** + * Many sockets have default values. 
Since those are not owned by the lazy-function graph, we + * have to keep track of them separately. This only owns the values, the memory is owned by the + * allocator above. + */ + Vector<GMutablePointer> values_to_destruct; + /** + * The actual lazy-function graph. + */ + lf::Graph graph; + /** + * Mappings between the lazy-function graph and the #bNodeTree. + */ + GeometryNodeLazyFunctionGraphMapping mapping; + + GeometryNodesLazyFunctionGraphInfo(); + ~GeometryNodesLazyFunctionGraphInfo(); +}; + +/** + * Logs intermediate values from the lazy-function graph evaluation into #GeoModifierLog based on + * the mapping between the lazy-function graph and the corresponding #bNodeTree. + */ +class GeometryNodesLazyFunctionLogger : public fn::lazy_function::GraphExecutor::Logger { + private: + const GeometryNodesLazyFunctionGraphInfo &lf_graph_info_; + + public: + GeometryNodesLazyFunctionLogger(const GeometryNodesLazyFunctionGraphInfo &lf_graph_info); + void log_socket_value(const fn::lazy_function::Socket &lf_socket, + GPointer value, + const fn::lazy_function::Context &context) const override; + void dump_when_outputs_are_missing(const lf::FunctionNode &node, + Span<const lf::OutputSocket *> missing_sockets, + const lf::Context &context) const override; + void dump_when_input_is_set_twice(const lf::InputSocket &target_socket, + const lf::OutputSocket &from_socket, + const lf::Context &context) const override; +}; + +/** + * Tells the lazy-function graph evaluator which nodes have side effects based on the current + * context. For example, the same viewer node can have side effects in one context, but not in + * another (depending on e.g. which tree path is currently viewed in the node editor). 
+ */ +class GeometryNodesLazyFunctionSideEffectProvider + : public fn::lazy_function::GraphExecutor::SideEffectProvider { + private: + const GeometryNodesLazyFunctionGraphInfo &lf_graph_info_; + + public: + GeometryNodesLazyFunctionSideEffectProvider( + const GeometryNodesLazyFunctionGraphInfo &lf_graph_info); + Vector<const lf::FunctionNode *> get_nodes_with_side_effects( + const lf::Context &context) const override; +}; + +/** + * Main function that converts a #bNodeTree into a lazy-function graph. If the graph has been + * generated already, nothing is done. Under some circumstances a valid graph cannot be created. In + * those cases null is returned. + */ +const GeometryNodesLazyFunctionGraphInfo *ensure_geometry_nodes_lazy_function_graph( + const bNodeTree &btree); + +} // namespace blender::nodes diff --git a/source/blender/nodes/NOD_geometry_nodes_log.hh b/source/blender/nodes/NOD_geometry_nodes_log.hh new file mode 100644 index 00000000000..f48d38ecbbf --- /dev/null +++ b/source/blender/nodes/NOD_geometry_nodes_log.hh @@ -0,0 +1,340 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ + +#pragma once + +/** + * Many geometry nodes related UI features need access to data produced during evaluation. Not only + * is the final output required but also the intermediate results. Those features include attribute + * search, node warnings, socket inspection and the viewer node. + * + * This file provides the system for logging data during evaluation and accessing the data after + * evaluation. Geometry nodes is executed by a modifier, therefore the "root" of logging is + * #GeoModifierLog which will contain all data generated in a modifier. + * + * The system makes a distinction between "loggers" and the "log": + * - Logger (#GeoTreeLogger): Is used during geometry nodes evaluation. Each thread logs data + * independently to avoid communication between threads. Logging should generally be fast. + * Generally, the logged data is just dumped into simple containers. 
Any processing of the data + * happens later if necessary. This is important for performance, because in practice, most of + * the logged data is never used again. So any processing of the data is likely to be a waste of + * resources. + * - Log (#GeoTreeLog, #GeoNodeLog): Those are used when accessing logged data in UI code. They + * contain and cache preprocessed data produced during logging. The log combines data from all + * threadlocal loggers to provide simple access. Importantly, the (preprocessed) log is only + * created when it is actually used by UI code. + */ + +#include <chrono> + +#include "BLI_compute_context.hh" +#include "BLI_enumerable_thread_specific.hh" +#include "BLI_generic_pointer.hh" +#include "BLI_multi_value_map.hh" + +#include "BKE_attribute.h" +#include "BKE_geometry_set.hh" + +#include "FN_field.hh" + +#include "DNA_node_types.h" + +struct SpaceNode; +struct SpaceSpreadsheet; +struct NodesModifierData; + +namespace blender::nodes::geo_eval_log { + +using fn::GField; + +enum class NodeWarningType { + Error, + Warning, + Info, +}; + +struct NodeWarning { + NodeWarningType type; + std::string message; +}; + +enum class NamedAttributeUsage { + None = 0, + Read = 1 << 0, + Write = 1 << 1, + Remove = 1 << 2, +}; +ENUM_OPERATORS(NamedAttributeUsage, NamedAttributeUsage::Remove); + +/** + * Values of different types are logged differently. This is necesary because some types are so + * simple that we can log them entirely (e.g. `int`), while we don't want to log all intermediate + * geometries in their entirety. + * + * #ValueLog is a base class for the different ways we log values. + */ +class ValueLog { + public: + virtual ~ValueLog() = default; +}; + +/** + * Simplest logger. It just stores a copy of the entire value. This is used for most simple types + * like `int`. + */ +class GenericValueLog : public ValueLog { + public: + /** + * This is owning the value, but not the memory. 
+ */ + GMutablePointer value; + + GenericValueLog(const GMutablePointer value) : value(value) + { + } + + ~GenericValueLog(); +}; + +/** + * Fields are not logged entirely, because they might contain arbitrarily large data (e.g. + * geometries that are sampled). Instead, only the data needed for ui features is logged. + */ +class FieldInfoLog : public ValueLog { + public: + const CPPType &type; + Vector<std::string> input_tooltips; + + FieldInfoLog(const GField &field); +}; + +struct GeometryAttributeInfo { + std::string name; + /** Can be empty when #name does not actually exist on a geometry yet. */ + std::optional<eAttrDomain> domain; + std::optional<eCustomDataType> data_type; +}; + +/** + * Geometries are not logged entirely, because that would result in a lot of time and memory + * overhead. Instead, only the data needed for ui features is logged. + */ +class GeometryInfoLog : public ValueLog { + public: + Vector<GeometryAttributeInfo> attributes; + Vector<GeometryComponentType> component_types; + + struct MeshInfo { + int verts_num, edges_num, faces_num; + }; + struct CurveInfo { + int splines_num; + }; + struct PointCloudInfo { + int points_num; + }; + struct InstancesInfo { + int instances_num; + }; + struct EditDataInfo { + bool has_deformed_positions; + bool has_deform_matrices; + }; + + std::optional<MeshInfo> mesh_info; + std::optional<CurveInfo> curve_info; + std::optional<PointCloudInfo> pointcloud_info; + std::optional<InstancesInfo> instances_info; + std::optional<EditDataInfo> edit_data_info; + + GeometryInfoLog(const GeometrySet &geometry_set); +}; + +/** + * Data logged by a viewer node when it is executed. In this case, we do want to log the entire + * geometry. + */ +class ViewerNodeLog { + public: + GeometrySet geometry; + GField field; +}; + +using Clock = std::chrono::steady_clock; +using TimePoint = Clock::time_point; + +/** + * Logs all data for a specific geometry node tree in a specific context. 
When the same node group + * is used in multiple times each instantiation will have a separate logger. + */ +class GeoTreeLogger { + public: + std::optional<ComputeContextHash> parent_hash; + std::optional<std::string> group_node_name; + Vector<ComputeContextHash> children_hashes; + + LinearAllocator<> *allocator = nullptr; + + struct WarningWithNode { + std::string node_name; + NodeWarning warning; + }; + struct SocketValueLog { + std::string node_name; + std::string socket_identifier; + destruct_ptr<ValueLog> value; + }; + struct NodeExecutionTime { + std::string node_name; + TimePoint start; + TimePoint end; + }; + struct ViewerNodeLogWithNode { + std::string node_name; + destruct_ptr<ViewerNodeLog> viewer_log; + }; + struct AttributeUsageWithNode { + std::string node_name; + std::string attribute_name; + NamedAttributeUsage usage; + }; + struct DebugMessage { + std::string node_name; + std::string message; + }; + + Vector<WarningWithNode> node_warnings; + Vector<SocketValueLog> input_socket_values; + Vector<SocketValueLog> output_socket_values; + Vector<NodeExecutionTime> node_execution_times; + Vector<ViewerNodeLogWithNode, 0> viewer_node_logs; + Vector<AttributeUsageWithNode, 0> used_named_attributes; + Vector<DebugMessage, 0> debug_messages; + + GeoTreeLogger(); + ~GeoTreeLogger(); + + void log_value(const bNode &node, const bNodeSocket &socket, GPointer value); + void log_viewer_node(const bNode &viewer_node, const GeometrySet &geometry, const GField &field); +}; + +/** + * Contains data that has been logged for a specific node in a context. So when the node is in a + * node group that is used multiple times, there will be a different #GeoNodeLog for every + * instance. + * + * By default, not all of the info below is valid. A #GeoTreeLog::ensure_* method has to be called + * first. + */ +class GeoNodeLog { + public: + /** Warnings generated for that node. */ + Vector<NodeWarning> warnings; + /** + * Time spend in that node. 
For node groups this is the sum of the run times of the nodes + * inside. + */ + std::chrono::nanoseconds run_time{0}; + /** Maps from socket identifiers to their values. */ + Map<std::string, ValueLog *> input_values_; + Map<std::string, ValueLog *> output_values_; + /** Maps from attribute name to their usage flags. */ + Map<std::string, NamedAttributeUsage> used_named_attributes; + /** Messages that are used for debugging purposes during development. */ + Vector<std::string> debug_messages; + + GeoNodeLog(); + ~GeoNodeLog(); +}; + +class GeoModifierLog; + +/** + * Contains data that has been logged for a specific node group in a context. If the same node + * group is used multiple times, there will be a different #GeoTreeLog for every instance. + * + * This contains lazily evaluated data. Call the corresponding `ensure_*` methods before accessing + * data. + */ +class GeoTreeLog { + private: + GeoModifierLog *modifier_log_; + Vector<GeoTreeLogger *> tree_loggers_; + VectorSet<ComputeContextHash> children_hashes_; + bool reduced_node_warnings_ = false; + bool reduced_node_run_times_ = false; + bool reduced_socket_values_ = false; + bool reduced_viewer_node_logs_ = false; + bool reduced_existing_attributes_ = false; + bool reduced_used_named_attributes_ = false; + bool reduced_debug_messages_ = false; + + public: + Map<std::string, GeoNodeLog> nodes; + Map<std::string, ViewerNodeLog *, 0> viewer_node_logs; + Vector<NodeWarning> all_warnings; + std::chrono::nanoseconds run_time_sum{0}; + Vector<const GeometryAttributeInfo *> existing_attributes; + Map<std::string, NamedAttributeUsage> used_named_attributes; + + GeoTreeLog(GeoModifierLog *modifier_log, Vector<GeoTreeLogger *> tree_loggers); + ~GeoTreeLog(); + + void ensure_node_warnings(); + void ensure_node_run_time(); + void ensure_socket_values(); + void ensure_viewer_node_logs(); + void ensure_existing_attributes(); + void ensure_used_named_attributes(); + void ensure_debug_messages(); + + ValueLog 
*find_socket_value_log(const bNodeSocket &query_socket); +}; + +/** + * There is one #GeoModifierLog for every modifier that evaluates geometry nodes. It contains all + * the loggers that are used during evaluation as well as the preprocessed logs that are used by UI + * code. + */ +class GeoModifierLog { + private: + /** Data that is stored for each thread. */ + struct LocalData { + /** Each thread has its own allocator. */ + LinearAllocator<> allocator; + /** + * Store a separate #GeoTreeLogger for each instance of the corresponding node group (e.g. + * when the same node group is used multiple times). + */ + Map<ComputeContextHash, destruct_ptr<GeoTreeLogger>> tree_logger_by_context; + }; + + /** Container for all threadlocal data. */ + threading::EnumerableThreadSpecific<LocalData> data_per_thread_; + /** + * A #GeoTreeLog for every compute context. Those are created lazily when requested by UI code. + */ + Map<ComputeContextHash, std::unique_ptr<GeoTreeLog>> tree_logs_; + + public: + GeoModifierLog(); + ~GeoModifierLog(); + + /** + * Get a threadlocal logger for the current node tree. + */ + GeoTreeLogger &get_local_tree_logger(const ComputeContext &compute_context); + + /** + * Get a log a specific node tree instance. + */ + GeoTreeLog &get_tree_log(const ComputeContextHash &compute_context_hash); + + /** + * Utility accessor to logged data. 
+ */ + static GeoTreeLog *get_tree_log_for_node_editor(const SpaceNode &snode); + static const ViewerNodeLog *find_viewer_node_log_for_spreadsheet( + const SpaceSpreadsheet &sspreadsheet); +}; + +} // namespace blender::nodes::geo_eval_log diff --git a/source/blender/nodes/NOD_multi_function.hh b/source/blender/nodes/NOD_multi_function.hh index 21a94d9192b..676bf03927e 100644 --- a/source/blender/nodes/NOD_multi_function.hh +++ b/source/blender/nodes/NOD_multi_function.hh @@ -6,8 +6,6 @@ #include "DNA_node_types.h" -#include "NOD_derived_node_tree.hh" - namespace blender::nodes { using namespace fn::multi_function_types; @@ -60,9 +58,9 @@ class NodeMultiFunctions { Map<const bNode *, Item> map_; public: - NodeMultiFunctions(const DerivedNodeTree &tree); + NodeMultiFunctions(const bNodeTree &tree); - const Item &try_get(const DNode &node) const; + const Item &try_get(const bNode &node) const; }; /* -------------------------------------------------------------------- */ @@ -107,10 +105,10 @@ inline void NodeMultiFunctionBuilder::construct_and_set_matching_fn(Args &&...ar /** \name #NodeMultiFunctions Inline Methods * \{ */ -inline const NodeMultiFunctions::Item &NodeMultiFunctions::try_get(const DNode &node) const +inline const NodeMultiFunctions::Item &NodeMultiFunctions::try_get(const bNode &node) const { static Item empty_item; - const Item *item = map_.lookup_ptr(node.bnode()); + const Item *item = map_.lookup_ptr(&node); if (item == nullptr) { return empty_item; } diff --git a/source/blender/nodes/geometry/node_geometry_exec.cc b/source/blender/nodes/geometry/node_geometry_exec.cc index 58ded7aadd2..ef4daf94bbe 100644 --- a/source/blender/nodes/geometry/node_geometry_exec.cc +++ b/source/blender/nodes/geometry/node_geometry_exec.cc @@ -4,3 +4,4 @@ #include "NOD_geometry_exec.hh" BLI_CPP_TYPE_MAKE(GeometrySet, GeometrySet, CPPTypeFlags::Printable); +BLI_CPP_TYPE_MAKE(GeometrySetVector, blender::Vector<GeometrySet>, CPPTypeFlags::None); diff --git 
a/source/blender/nodes/geometry/nodes/node_geo_boolean.cc b/source/blender/nodes/geometry/nodes/node_geo_boolean.cc index a6af74645b6..c8c58945bce 100644 --- a/source/blender/nodes/geometry/nodes/node_geo_boolean.cc +++ b/source/blender/nodes/geometry/nodes/node_geo_boolean.cc @@ -93,7 +93,7 @@ static void node_geo_exec(GeoNodeExecParams params) /* The instance transform matrices are owned by the instance group, so we have to * keep all of them around for use during the boolean operation. */ Vector<bke::GeometryInstanceGroup> set_groups; - Vector<GeometrySet> geometry_sets = params.extract_multi_input<GeometrySet>("Mesh 2"); + Vector<GeometrySet> geometry_sets = params.extract_input<Vector<GeometrySet>>("Mesh 2"); for (const GeometrySet &geometry_set : geometry_sets) { bke::geometry_set_gather_instances(geometry_set, set_groups); } diff --git a/source/blender/nodes/geometry/nodes/node_geo_geometry_to_instance.cc b/source/blender/nodes/geometry/nodes/node_geo_geometry_to_instance.cc index 1f84f8f288d..8e64209a418 100644 --- a/source/blender/nodes/geometry/nodes/node_geo_geometry_to_instance.cc +++ b/source/blender/nodes/geometry/nodes/node_geo_geometry_to_instance.cc @@ -12,7 +12,7 @@ static void node_declare(NodeDeclarationBuilder &b) static void node_geo_exec(GeoNodeExecParams params) { - Vector<GeometrySet> geometries = params.extract_multi_input<GeometrySet>("Geometry"); + Vector<GeometrySet> geometries = params.extract_input<Vector<GeometrySet>>("Geometry"); GeometrySet instances_geometry; InstancesComponent &instances_component = instances_geometry.get_component_for_write<InstancesComponent>(); diff --git a/source/blender/nodes/geometry/nodes/node_geo_input_named_attribute.cc b/source/blender/nodes/geometry/nodes/node_geo_input_named_attribute.cc index 122c7b352c7..da09d3650e3 100644 --- a/source/blender/nodes/geometry/nodes/node_geo_input_named_attribute.cc +++ b/source/blender/nodes/geometry/nodes/node_geo_input_named_attribute.cc @@ -88,7 +88,7 @@ static 
void node_geo_exec(GeoNodeExecParams params) return; } - params.used_named_attribute(name, eNamedAttrUsage::Read); + params.used_named_attribute(name, NamedAttributeUsage::Read); switch (data_type) { case CD_PROP_FLOAT: diff --git a/source/blender/nodes/geometry/nodes/node_geo_join_geometry.cc b/source/blender/nodes/geometry/nodes/node_geo_join_geometry.cc index 023d7a32a61..9fdf7fe7d31 100644 --- a/source/blender/nodes/geometry/nodes/node_geo_join_geometry.cc +++ b/source/blender/nodes/geometry/nodes/node_geo_join_geometry.cc @@ -177,7 +177,7 @@ static void join_component_type(Span<GeometrySet> src_geometry_sets, GeometrySet static void node_geo_exec(GeoNodeExecParams params) { - Vector<GeometrySet> geometry_sets = params.extract_multi_input<GeometrySet>("Geometry"); + Vector<GeometrySet> geometry_sets = params.extract_input<Vector<GeometrySet>>("Geometry"); GeometrySet geometry_set_result; join_component_type<MeshComponent>(geometry_sets, geometry_set_result); diff --git a/source/blender/nodes/geometry/nodes/node_geo_remove_attribute.cc b/source/blender/nodes/geometry/nodes/node_geo_remove_attribute.cc index ee279ba58f9..1b398f63691 100644 --- a/source/blender/nodes/geometry/nodes/node_geo_remove_attribute.cc +++ b/source/blender/nodes/geometry/nodes/node_geo_remove_attribute.cc @@ -55,7 +55,7 @@ static void node_geo_exec(GeoNodeExecParams params) }); if (attribute_exists && !cannot_delete) { - params.used_named_attribute(name, eNamedAttrUsage::Remove); + params.used_named_attribute(name, NamedAttributeUsage::Remove); } if (!attribute_exists) { diff --git a/source/blender/nodes/geometry/nodes/node_geo_store_named_attribute.cc b/source/blender/nodes/geometry/nodes/node_geo_store_named_attribute.cc index c2d6f57ce8a..2a590f5bf4a 100644 --- a/source/blender/nodes/geometry/nodes/node_geo_store_named_attribute.cc +++ b/source/blender/nodes/geometry/nodes/node_geo_store_named_attribute.cc @@ -149,7 +149,7 @@ static void node_geo_exec(GeoNodeExecParams params) return; } 
- params.used_named_attribute(name, eNamedAttrUsage::Write); + params.used_named_attribute(name, NamedAttributeUsage::Write); const NodeGeometryStoreNamedAttribute &storage = node_storage(params.node()); const eCustomDataType data_type = static_cast<eCustomDataType>(storage.data_type); diff --git a/source/blender/nodes/geometry/nodes/node_geo_string_join.cc b/source/blender/nodes/geometry/nodes/node_geo_string_join.cc index bb33430a02f..09c01b8c627 100644 --- a/source/blender/nodes/geometry/nodes/node_geo_string_join.cc +++ b/source/blender/nodes/geometry/nodes/node_geo_string_join.cc @@ -13,12 +13,13 @@ static void node_declare(NodeDeclarationBuilder &b) static void node_geo_exec(GeoNodeExecParams params) { - Vector<std::string> strings = params.extract_multi_input<std::string>("Strings"); + Vector<fn::ValueOrField<std::string>> strings = + params.extract_input<Vector<fn::ValueOrField<std::string>>>("Strings"); const std::string delim = params.extract_input<std::string>("Delimiter"); std::string output; for (const int i : strings.index_range()) { - output += strings[i]; + output += strings[i].as_value(); if (i < (strings.size() - 1)) { output += delim; } diff --git a/source/blender/nodes/intern/geometry_nodes_eval_log.cc b/source/blender/nodes/intern/geometry_nodes_eval_log.cc deleted file mode 100644 index 89bfa5834e8..00000000000 --- a/source/blender/nodes/intern/geometry_nodes_eval_log.cc +++ /dev/null @@ -1,520 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ - -#include "NOD_geometry_nodes_eval_log.hh" - -#include "BKE_curves.hh" -#include "BKE_geometry_set_instances.hh" - -#include "DNA_modifier_types.h" -#include "DNA_space_types.h" - -#include "FN_field_cpp_type.hh" - -#include "BLT_translation.h" - -#include <chrono> - -namespace blender::nodes::geometry_nodes_eval_log { - -using fn::FieldCPPType; -using fn::FieldInput; -using fn::GField; -using fn::ValueOrFieldCPPType; - -ModifierLog::ModifierLog(GeoLogger &logger) - : 
input_geometry_log_(std::move(logger.input_geometry_log_)), - output_geometry_log_(std::move(logger.output_geometry_log_)) -{ - root_tree_logs_ = allocator_.construct<TreeLog>(); - - LogByTreeContext log_by_tree_context; - - /* Combine all the local loggers that have been used by separate threads. */ - for (LocalGeoLogger &local_logger : logger) { - /* Take ownership of the allocator. */ - logger_allocators_.append(std::move(local_logger.allocator_)); - - for (ValueOfSockets &value_of_sockets : local_logger.values_) { - ValueLog *value_log = value_of_sockets.value.get(); - - /* Take centralized ownership of the logged value. It might be referenced by multiple - * sockets. */ - logged_values_.append(std::move(value_of_sockets.value)); - - for (const DSocket &socket : value_of_sockets.sockets) { - SocketLog &socket_log = this->lookup_or_add_socket_log(log_by_tree_context, socket); - socket_log.value_ = value_log; - } - } - - for (NodeWithWarning &node_with_warning : local_logger.node_warnings_) { - NodeLog &node_log = this->lookup_or_add_node_log(log_by_tree_context, - node_with_warning.node); - node_log.warnings_.append(node_with_warning.warning); - } - - for (NodeWithExecutionTime &node_with_exec_time : local_logger.node_exec_times_) { - NodeLog &node_log = this->lookup_or_add_node_log(log_by_tree_context, - node_with_exec_time.node); - node_log.exec_time_ = node_with_exec_time.exec_time; - } - - for (NodeWithDebugMessage &debug_message : local_logger.node_debug_messages_) { - NodeLog &node_log = this->lookup_or_add_node_log(log_by_tree_context, debug_message.node); - node_log.debug_messages_.append(debug_message.message); - } - - for (NodeWithUsedNamedAttribute &node_with_attribute_name : - local_logger.used_named_attributes_) { - NodeLog &node_log = this->lookup_or_add_node_log(log_by_tree_context, - node_with_attribute_name.node); - node_log.used_named_attributes_.append(std::move(node_with_attribute_name.attribute)); - } - } -} - -TreeLog 
&ModifierLog::lookup_or_add_tree_log(LogByTreeContext &log_by_tree_context, - const DTreeContext &tree_context) -{ - TreeLog *tree_log = log_by_tree_context.lookup_default(&tree_context, nullptr); - if (tree_log != nullptr) { - return *tree_log; - } - - const DTreeContext *parent_context = tree_context.parent_context(); - if (parent_context == nullptr) { - return *root_tree_logs_.get(); - } - TreeLog &parent_log = this->lookup_or_add_tree_log(log_by_tree_context, *parent_context); - destruct_ptr<TreeLog> owned_tree_log = allocator_.construct<TreeLog>(); - tree_log = owned_tree_log.get(); - log_by_tree_context.add_new(&tree_context, tree_log); - parent_log.child_logs_.add_new(tree_context.parent_node()->name, std::move(owned_tree_log)); - return *tree_log; -} - -NodeLog &ModifierLog::lookup_or_add_node_log(LogByTreeContext &log_by_tree_context, DNode node) -{ - TreeLog &tree_log = this->lookup_or_add_tree_log(log_by_tree_context, *node.context()); - NodeLog &node_log = *tree_log.node_logs_.lookup_or_add_cb(node->name, [&]() { - destruct_ptr<NodeLog> node_log = allocator_.construct<NodeLog>(); - node_log->input_logs_.resize(node->input_sockets().size()); - node_log->output_logs_.resize(node->output_sockets().size()); - return node_log; - }); - return node_log; -} - -SocketLog &ModifierLog::lookup_or_add_socket_log(LogByTreeContext &log_by_tree_context, - DSocket socket) -{ - NodeLog &node_log = this->lookup_or_add_node_log(log_by_tree_context, socket.node()); - MutableSpan<SocketLog> socket_logs = socket->is_input() ? 
node_log.input_logs_ : - node_log.output_logs_; - SocketLog &socket_log = socket_logs[socket->index()]; - return socket_log; -} - -void ModifierLog::foreach_node_log(FunctionRef<void(const NodeLog &)> fn) const -{ - if (root_tree_logs_) { - root_tree_logs_->foreach_node_log(fn); - } -} - -const GeometryValueLog *ModifierLog::input_geometry_log() const -{ - return input_geometry_log_.get(); -} -const GeometryValueLog *ModifierLog::output_geometry_log() const -{ - return output_geometry_log_.get(); -} - -const NodeLog *TreeLog::lookup_node_log(StringRef node_name) const -{ - const destruct_ptr<NodeLog> *node_log = node_logs_.lookup_ptr_as(node_name); - if (node_log == nullptr) { - return nullptr; - } - return node_log->get(); -} - -const NodeLog *TreeLog::lookup_node_log(const bNode &node) const -{ - return this->lookup_node_log(node.name); -} - -const TreeLog *TreeLog::lookup_child_log(StringRef node_name) const -{ - const destruct_ptr<TreeLog> *tree_log = child_logs_.lookup_ptr_as(node_name); - if (tree_log == nullptr) { - return nullptr; - } - return tree_log->get(); -} - -void TreeLog::foreach_node_log(FunctionRef<void(const NodeLog &)> fn) const -{ - for (auto node_log : node_logs_.items()) { - fn(*node_log.value); - } - - for (auto child : child_logs_.items()) { - child.value->foreach_node_log(fn); - } -} - -const SocketLog *NodeLog::lookup_socket_log(eNodeSocketInOut in_out, int index) const -{ - BLI_assert(index >= 0); - Span<SocketLog> socket_logs = (in_out == SOCK_IN) ? input_logs_ : output_logs_; - if (index >= socket_logs.size()) { - return nullptr; - } - return &socket_logs[index]; -} - -const SocketLog *NodeLog::lookup_socket_log(const bNode &node, const bNodeSocket &socket) const -{ - ListBase sockets = socket.in_out == SOCK_IN ? 
node.inputs : node.outputs; - int index = BLI_findindex(&sockets, &socket); - return this->lookup_socket_log((eNodeSocketInOut)socket.in_out, index); -} - -GFieldValueLog::GFieldValueLog(fn::GField field, bool log_full_field) : type_(field.cpp_type()) -{ - const std::shared_ptr<const fn::FieldInputs> &field_input_nodes = field.node().field_inputs(); - - /* Put the deduplicated field inputs into a vector so that they can be sorted below. */ - Vector<std::reference_wrapper<const FieldInput>> field_inputs; - if (field_input_nodes) { - field_inputs.extend(field_input_nodes->deduplicated_nodes.begin(), - field_input_nodes->deduplicated_nodes.end()); - } - - std::sort( - field_inputs.begin(), field_inputs.end(), [](const FieldInput &a, const FieldInput &b) { - const int index_a = (int)a.category(); - const int index_b = (int)b.category(); - if (index_a == index_b) { - return a.socket_inspection_name().size() < b.socket_inspection_name().size(); - } - return index_a < index_b; - }); - - for (const FieldInput &field_input : field_inputs) { - input_tooltips_.append(field_input.socket_inspection_name()); - } - - if (log_full_field) { - field_ = std::move(field); - } -} - -GeometryValueLog::GeometryValueLog(const GeometrySet &geometry_set, bool log_full_geometry) -{ - static std::array all_component_types = {GEO_COMPONENT_TYPE_CURVE, - GEO_COMPONENT_TYPE_INSTANCES, - GEO_COMPONENT_TYPE_MESH, - GEO_COMPONENT_TYPE_POINT_CLOUD, - GEO_COMPONENT_TYPE_VOLUME}; - - /* Keep track handled attribute names to make sure that we do not return the same name twice. - * Currently #GeometrySet::attribute_foreach does not do that. Note that this will merge - * attributes with the same name but different domains or data types on separate components. 
*/ - Set<StringRef> names; - - geometry_set.attribute_foreach( - all_component_types, - true, - [&](const bke::AttributeIDRef &attribute_id, - const bke::AttributeMetaData &meta_data, - const GeometryComponent &UNUSED(component)) { - if (attribute_id.is_named() && names.add(attribute_id.name())) { - this->attributes_.append({attribute_id.name(), meta_data.domain, meta_data.data_type}); - } - }); - - for (const GeometryComponent *component : geometry_set.get_components_for_read()) { - component_types_.append(component->type()); - switch (component->type()) { - case GEO_COMPONENT_TYPE_MESH: { - const MeshComponent &mesh_component = *(const MeshComponent *)component; - MeshInfo &info = this->mesh_info.emplace(); - info.verts_num = mesh_component.attribute_domain_size(ATTR_DOMAIN_POINT); - info.edges_num = mesh_component.attribute_domain_size(ATTR_DOMAIN_EDGE); - info.faces_num = mesh_component.attribute_domain_size(ATTR_DOMAIN_FACE); - break; - } - case GEO_COMPONENT_TYPE_CURVE: { - const CurveComponent &curve_component = *(const CurveComponent *)component; - CurveInfo &info = this->curve_info.emplace(); - info.splines_num = curve_component.attribute_domain_size(ATTR_DOMAIN_CURVE); - break; - } - case GEO_COMPONENT_TYPE_POINT_CLOUD: { - const PointCloudComponent &pointcloud_component = *(const PointCloudComponent *)component; - PointCloudInfo &info = this->pointcloud_info.emplace(); - info.points_num = pointcloud_component.attribute_domain_size(ATTR_DOMAIN_POINT); - break; - } - case GEO_COMPONENT_TYPE_INSTANCES: { - const InstancesComponent &instances_component = *(const InstancesComponent *)component; - InstancesInfo &info = this->instances_info.emplace(); - info.instances_num = instances_component.instances_num(); - break; - } - case GEO_COMPONENT_TYPE_EDIT: { - const GeometryComponentEditData &edit_component = *( - const GeometryComponentEditData *)component; - if (const bke::CurvesEditHints *curve_edit_hints = - edit_component.curves_edit_hints_.get()) { - 
EditDataInfo &info = this->edit_data_info.emplace(); - info.has_deform_matrices = curve_edit_hints->deform_mats.has_value(); - info.has_deformed_positions = curve_edit_hints->positions.has_value(); - } - break; - } - case GEO_COMPONENT_TYPE_VOLUME: { - break; - } - } - } - if (log_full_geometry) { - full_geometry_ = std::make_unique<GeometrySet>(geometry_set); - full_geometry_->ensure_owns_direct_data(); - } -} - -Vector<const GeometryAttributeInfo *> NodeLog::lookup_available_attributes() const -{ - Vector<const GeometryAttributeInfo *> attributes; - Set<StringRef> names; - for (const SocketLog &socket_log : input_logs_) { - const ValueLog *value_log = socket_log.value(); - if (const GeometryValueLog *geo_value_log = dynamic_cast<const GeometryValueLog *>( - value_log)) { - for (const GeometryAttributeInfo &attribute : geo_value_log->attributes()) { - if (names.add(attribute.name)) { - attributes.append(&attribute); - } - } - } - } - return attributes; -} - -const ModifierLog *ModifierLog::find_root_by_node_editor_context(const SpaceNode &snode) -{ - if (snode.id == nullptr) { - return nullptr; - } - if (GS(snode.id->name) != ID_OB) { - return nullptr; - } - Object *object = (Object *)snode.id; - LISTBASE_FOREACH (ModifierData *, md, &object->modifiers) { - if (md->type == eModifierType_Nodes) { - NodesModifierData *nmd = (NodesModifierData *)md; - if (nmd->node_group == snode.nodetree) { - return (ModifierLog *)nmd->runtime_eval_log; - } - } - } - return nullptr; -} - -const TreeLog *ModifierLog::find_tree_by_node_editor_context(const SpaceNode &snode) -{ - const ModifierLog *eval_log = ModifierLog::find_root_by_node_editor_context(snode); - if (eval_log == nullptr) { - return nullptr; - } - Vector<bNodeTreePath *> tree_path_vec = snode.treepath; - if (tree_path_vec.is_empty()) { - return nullptr; - } - TreeLog *current = eval_log->root_tree_logs_.get(); - for (bNodeTreePath *path : tree_path_vec.as_span().drop_front(1)) { - destruct_ptr<TreeLog> *tree_log = 
current->child_logs_.lookup_ptr_as(path->node_name); - if (tree_log == nullptr) { - return nullptr; - } - current = tree_log->get(); - } - return current; -} - -const NodeLog *ModifierLog::find_node_by_node_editor_context(const SpaceNode &snode, - const bNode &node) -{ - const TreeLog *tree_log = ModifierLog::find_tree_by_node_editor_context(snode); - if (tree_log == nullptr) { - return nullptr; - } - return tree_log->lookup_node_log(node); -} - -const NodeLog *ModifierLog::find_node_by_node_editor_context(const SpaceNode &snode, - const StringRef node_name) -{ - const TreeLog *tree_log = ModifierLog::find_tree_by_node_editor_context(snode); - if (tree_log == nullptr) { - return nullptr; - } - return tree_log->lookup_node_log(node_name); -} - -const SocketLog *ModifierLog::find_socket_by_node_editor_context(const SpaceNode &snode, - const bNode &node, - const bNodeSocket &socket) -{ - const NodeLog *node_log = ModifierLog::find_node_by_node_editor_context(snode, node); - if (node_log == nullptr) { - return nullptr; - } - return node_log->lookup_socket_log(node, socket); -} - -const NodeLog *ModifierLog::find_node_by_spreadsheet_editor_context( - const SpaceSpreadsheet &sspreadsheet) -{ - Vector<SpreadsheetContext *> context_path = sspreadsheet.context_path; - if (context_path.size() <= 2) { - return nullptr; - } - if (context_path[0]->type != SPREADSHEET_CONTEXT_OBJECT) { - return nullptr; - } - if (context_path[1]->type != SPREADSHEET_CONTEXT_MODIFIER) { - return nullptr; - } - for (SpreadsheetContext *context : context_path.as_span().drop_front(2)) { - if (context->type != SPREADSHEET_CONTEXT_NODE) { - return nullptr; - } - } - Span<SpreadsheetContextNode *> node_contexts = - context_path.as_span().drop_front(2).cast<SpreadsheetContextNode *>(); - - Object *object = ((SpreadsheetContextObject *)context_path[0])->object; - StringRefNull modifier_name = ((SpreadsheetContextModifier *)context_path[1])->modifier_name; - if (object == nullptr) { - return nullptr; - } 
- - const ModifierLog *eval_log = nullptr; - LISTBASE_FOREACH (ModifierData *, md, &object->modifiers) { - if (md->type == eModifierType_Nodes) { - if (md->name == modifier_name) { - NodesModifierData *nmd = (NodesModifierData *)md; - eval_log = (const ModifierLog *)nmd->runtime_eval_log; - break; - } - } - } - if (eval_log == nullptr) { - return nullptr; - } - - const TreeLog *tree_log = &eval_log->root_tree(); - for (SpreadsheetContextNode *context : node_contexts.drop_back(1)) { - tree_log = tree_log->lookup_child_log(context->node_name); - if (tree_log == nullptr) { - return nullptr; - } - } - const NodeLog *node_log = tree_log->lookup_node_log(node_contexts.last()->node_name); - return node_log; -} - -void LocalGeoLogger::log_value_for_sockets(Span<DSocket> sockets, GPointer value) -{ - const CPPType &type = *value.type(); - Span<DSocket> copied_sockets = allocator_->construct_array_copy(sockets); - if (type.is<GeometrySet>()) { - bool log_full_geometry = false; - for (const DSocket &socket : sockets) { - if (main_logger_->log_full_sockets_.contains(socket)) { - log_full_geometry = true; - break; - } - } - - const GeometrySet &geometry_set = *value.get<GeometrySet>(); - destruct_ptr<GeometryValueLog> value_log = allocator_->construct<GeometryValueLog>( - geometry_set, log_full_geometry); - values_.append({copied_sockets, std::move(value_log)}); - } - else if (const ValueOrFieldCPPType *value_or_field_type = - dynamic_cast<const ValueOrFieldCPPType *>(&type)) { - const void *value_or_field = value.get(); - if (value_or_field_type->is_field(value_or_field)) { - GField field = *value_or_field_type->get_field_ptr(value_or_field); - bool log_full_field = false; - if (!field.node().depends_on_input()) { - /* Always log constant fields so that their value can be shown in socket inspection. - * In the future we can also evaluate the field here and only store the value. 
*/ - log_full_field = true; - } - if (!log_full_field) { - for (const DSocket &socket : sockets) { - if (main_logger_->log_full_sockets_.contains(socket)) { - log_full_field = true; - break; - } - } - } - destruct_ptr<GFieldValueLog> value_log = allocator_->construct<GFieldValueLog>( - std::move(field), log_full_field); - values_.append({copied_sockets, std::move(value_log)}); - } - else { - const CPPType &base_type = value_or_field_type->base_type(); - const void *value = value_or_field_type->get_value_ptr(value_or_field); - void *buffer = allocator_->allocate(base_type.size(), base_type.alignment()); - base_type.copy_construct(value, buffer); - destruct_ptr<GenericValueLog> value_log = allocator_->construct<GenericValueLog>( - GMutablePointer{base_type, buffer}); - values_.append({copied_sockets, std::move(value_log)}); - } - } - else { - void *buffer = allocator_->allocate(type.size(), type.alignment()); - type.copy_construct(value.get(), buffer); - destruct_ptr<GenericValueLog> value_log = allocator_->construct<GenericValueLog>( - GMutablePointer{type, buffer}); - values_.append({copied_sockets, std::move(value_log)}); - } -} - -void LocalGeoLogger::log_multi_value_socket(DSocket socket, Span<GPointer> values) -{ - /* Doesn't have to be logged currently. 
*/ - UNUSED_VARS(socket, values); -} - -void LocalGeoLogger::log_node_warning(DNode node, NodeWarningType type, std::string message) -{ - node_warnings_.append({node, {type, std::move(message)}}); -} - -void LocalGeoLogger::log_execution_time(DNode node, std::chrono::microseconds exec_time) -{ - node_exec_times_.append({node, exec_time}); -} - -void LocalGeoLogger::log_used_named_attribute(DNode node, - std::string attribute_name, - eNamedAttrUsage usage) -{ - used_named_attributes_.append({node, {std::move(attribute_name), usage}}); -} - -void LocalGeoLogger::log_debug_message(DNode node, std::string message) -{ - node_debug_messages_.append({node, std::move(message)}); -} - -} // namespace blender::nodes::geometry_nodes_eval_log diff --git a/source/blender/nodes/intern/geometry_nodes_lazy_function.cc b/source/blender/nodes/intern/geometry_nodes_lazy_function.cc new file mode 100644 index 00000000000..442b17c6962 --- /dev/null +++ b/source/blender/nodes/intern/geometry_nodes_lazy_function.cc @@ -0,0 +1,1327 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ + +/** + * This file mainly converts a #bNodeTree into a lazy-function graph. This generally works by + * creating a lazy-function for every node, which is then put into the lazy-function graph. Then + * the nodes in the new graph are linked based on links in the original #bNodeTree. Some additional + * nodes are inserted for things like type conversions and multi-input sockets. + * + * Currently, lazy-functions are even created for nodes that don't strictly require it, like + * reroutes or muted nodes. In the future we could avoid that at the cost of additional code + * complexity. So far, this does not seem to be a performance issue. 
+ */ + +#include "NOD_geometry_exec.hh" +#include "NOD_geometry_nodes_lazy_function.hh" +#include "NOD_multi_function.hh" +#include "NOD_node_declaration.hh" + +#include "BLI_map.hh" + +#include "DNA_ID.h" + +#include "BKE_compute_contexts.hh" +#include "BKE_geometry_set.hh" +#include "BKE_type_conversions.hh" + +#include "FN_field_cpp_type.hh" +#include "FN_lazy_function_graph_executor.hh" + +namespace blender::nodes { + +using fn::ValueOrField; +using fn::ValueOrFieldCPPType; +using namespace fn::multi_function_types; + +static const CPPType *get_socket_cpp_type(const bNodeSocketType &typeinfo) +{ + const CPPType *type = typeinfo.geometry_nodes_cpp_type; + if (type == nullptr) { + return nullptr; + } + BLI_assert(type->has_special_member_functions()); + return type; +} + +static const CPPType *get_socket_cpp_type(const bNodeSocket &socket) +{ + return get_socket_cpp_type(*socket.typeinfo); +} + +static const CPPType *get_vector_type(const CPPType &type) +{ + /* This could be generalized in the future. For now we only support a small set of vectors. */ + if (type.is<GeometrySet>()) { + return &CPPType::get<Vector<GeometrySet>>(); + } + if (type.is<ValueOrField<std::string>>()) { + return &CPPType::get<Vector<ValueOrField<std::string>>>(); + } + return nullptr; +} + +/** + * Checks which sockets of the node are available and creates corresponding inputs/outputs on the + * lazy-function. + */ +static void lazy_function_interface_from_node(const bNode &node, + Vector<const bNodeSocket *> &r_used_inputs, + Vector<const bNodeSocket *> &r_used_outputs, + Vector<lf::Input> &r_inputs, + Vector<lf::Output> &r_outputs) +{ + const bool is_muted = node.is_muted(); + const bool supports_lazyness = node.typeinfo->geometry_node_execute_supports_laziness || + node.is_group(); + const lf::ValueUsage input_usage = supports_lazyness ? 
lf::ValueUsage::Maybe : + lf::ValueUsage::Used; + for (const bNodeSocket *socket : node.input_sockets()) { + if (!socket->is_available()) { + continue; + } + const CPPType *type = get_socket_cpp_type(*socket); + if (type == nullptr) { + continue; + } + if (socket->is_multi_input() && !is_muted) { + type = get_vector_type(*type); + } + r_inputs.append({socket->identifier, *type, input_usage}); + r_used_inputs.append(socket); + } + for (const bNodeSocket *socket : node.output_sockets()) { + if (!socket->is_available()) { + continue; + } + const CPPType *type = get_socket_cpp_type(*socket); + if (type == nullptr) { + continue; + } + r_outputs.append({socket->identifier, *type}); + r_used_outputs.append(socket); + } +} + +/** + * Used for most normal geometry nodes like Subdivision Surface and Set Position. + */ +class LazyFunctionForGeometryNode : public LazyFunction { + private: + const bNode &node_; + + public: + LazyFunctionForGeometryNode(const bNode &node, + Vector<const bNodeSocket *> &r_used_inputs, + Vector<const bNodeSocket *> &r_used_outputs) + : node_(node) + { + BLI_assert(node.typeinfo->geometry_node_execute != nullptr); + debug_name_ = node.name; + lazy_function_interface_from_node(node, r_used_inputs, r_used_outputs, inputs_, outputs_); + } + + void execute_impl(lf::Params ¶ms, const lf::Context &context) const override + { + GeoNodesLFUserData *user_data = dynamic_cast<GeoNodesLFUserData *>(context.user_data); + BLI_assert(user_data != nullptr); + + GeoNodeExecParams geo_params{node_, params, context}; + + geo_eval_log::TimePoint start_time = geo_eval_log::Clock::now(); + node_.typeinfo->geometry_node_execute(geo_params); + geo_eval_log::TimePoint end_time = geo_eval_log::Clock::now(); + + if (geo_eval_log::GeoModifierLog *modifier_log = user_data->modifier_data->eval_log) { + geo_eval_log::GeoTreeLogger &tree_logger = modifier_log->get_local_tree_logger( + *user_data->compute_context); + tree_logger.node_execution_times.append({node_.name, start_time, 
end_time}); + } + } +}; + +/** + * Used to gather all inputs of a multi-input socket. A separate node is necessary, because + * multi-inputs are not supported in lazy-function graphs. + */ +class LazyFunctionForMultiInput : public LazyFunction { + private: + const CPPType *base_type_; + + public: + LazyFunctionForMultiInput(const bNodeSocket &socket) + { + debug_name_ = "Multi Input"; + base_type_ = get_socket_cpp_type(socket); + BLI_assert(base_type_ != nullptr); + BLI_assert(socket.is_multi_input()); + for (const bNodeLink *link : socket.directly_linked_links()) { + if (!link->is_muted()) { + inputs_.append({"Input", *base_type_}); + } + } + const CPPType *vector_type = get_vector_type(*base_type_); + BLI_assert(vector_type != nullptr); + outputs_.append({"Output", *vector_type}); + } + + void execute_impl(lf::Params ¶ms, const lf::Context &UNUSED(context)) const override + { + /* Currently we only have multi-inputs for geometry and string sockets. This could be + * generalized in the future. */ + base_type_->to_static_type_tag<GeometrySet, ValueOrField<std::string>>([&](auto type_tag) { + using T = typename decltype(type_tag)::type; + if constexpr (std::is_void_v<T>) { + /* This type is not support in this node for now. */ + BLI_assert_unreachable(); + } + else { + void *output_ptr = params.get_output_data_ptr(0); + Vector<T> &values = *new (output_ptr) Vector<T>(); + for (const int i : inputs_.index_range()) { + values.append(params.extract_input<T>(i)); + } + params.output_set(0); + } + }); + } +}; + +/** + * Simple lazy-function that just forwards the input. 
+ */ +class LazyFunctionForRerouteNode : public LazyFunction { + public: + LazyFunctionForRerouteNode(const CPPType &type) + { + debug_name_ = "Reroute"; + inputs_.append({"Input", type}); + outputs_.append({"Output", type}); + } + + void execute_impl(lf::Params ¶ms, const lf::Context &UNUSED(context)) const override + { + void *input_value = params.try_get_input_data_ptr(0); + void *output_value = params.get_output_data_ptr(0); + BLI_assert(input_value != nullptr); + BLI_assert(output_value != nullptr); + const CPPType &type = *inputs_[0].type; + type.move_construct(input_value, output_value); + params.output_set(0); + } +}; + +/** + * Executes a multi-function. If all inputs are single values, the results will also be single + * values. If any input is a field, the outputs will also be fields. + */ +static void execute_multi_function_on_value_or_field( + const MultiFunction &fn, + const std::shared_ptr<MultiFunction> &owned_fn, + const Span<const ValueOrFieldCPPType *> input_types, + const Span<const ValueOrFieldCPPType *> output_types, + const Span<const void *> input_values, + const Span<void *> output_values) +{ + BLI_assert(fn.param_amount() == input_types.size() + output_types.size()); + BLI_assert(input_types.size() == input_values.size()); + BLI_assert(output_types.size() == output_values.size()); + + /* Check if any input is a field. */ + bool any_input_is_field = false; + for (const int i : input_types.index_range()) { + const ValueOrFieldCPPType &type = *input_types[i]; + const void *value_or_field = input_values[i]; + if (type.is_field(value_or_field)) { + any_input_is_field = true; + break; + } + } + + if (any_input_is_field) { + /* Convert all inputs into fields, so that they can be used as input in the new field. 
*/ + Vector<GField> input_fields; + for (const int i : input_types.index_range()) { + const ValueOrFieldCPPType &type = *input_types[i]; + const void *value_or_field = input_values[i]; + input_fields.append(type.as_field(value_or_field)); + } + + /* Construct the new field node. */ + std::shared_ptr<fn::FieldOperation> operation; + if (owned_fn) { + operation = std::make_shared<fn::FieldOperation>(owned_fn, std::move(input_fields)); + } + else { + operation = std::make_shared<fn::FieldOperation>(fn, std::move(input_fields)); + } + + /* Store the new fields in the output. */ + for (const int i : output_types.index_range()) { + const ValueOrFieldCPPType &type = *output_types[i]; + void *value_or_field = output_values[i]; + type.construct_from_field(value_or_field, GField{operation, i}); + } + } + else { + /* In this case, the multi-function is evaluated directly. */ + MFParamsBuilder params{fn, 1}; + MFContextBuilder context; + + for (const int i : input_types.index_range()) { + const ValueOrFieldCPPType &type = *input_types[i]; + const CPPType &base_type = type.base_type(); + const void *value_or_field = input_values[i]; + const void *value = type.get_value_ptr(value_or_field); + params.add_readonly_single_input(GVArray::ForSingleRef(base_type, 1, value)); + } + for (const int i : output_types.index_range()) { + const ValueOrFieldCPPType &type = *output_types[i]; + const CPPType &base_type = type.base_type(); + void *value_or_field = output_values[i]; + type.default_construct(value_or_field); + void *value = type.get_value_ptr(value_or_field); + base_type.destruct(value); + params.add_uninitialized_single_output(GMutableSpan{base_type, value, 1}); + } + fn.call(IndexRange(1), params, context); + } +} + +/** + * Behavior of muted nodes: + * - Some inputs are forwarded to outputs without changes. + * - Some inputs are converted to a different type which becomes the output. + * - Some outputs are value initialized because they don't have a corresponding input. 
+ */ +class LazyFunctionForMutedNode : public LazyFunction { + private: + Array<int> input_by_output_index_; + + public: + LazyFunctionForMutedNode(const bNode &node, + Vector<const bNodeSocket *> &r_used_inputs, + Vector<const bNodeSocket *> &r_used_outputs) + { + debug_name_ = "Muted"; + lazy_function_interface_from_node(node, r_used_inputs, r_used_outputs, inputs_, outputs_); + for (lf::Input &fn_input : inputs_) { + fn_input.usage = lf::ValueUsage::Maybe; + } + + for (lf::Input &fn_input : inputs_) { + fn_input.usage = lf::ValueUsage::Unused; + } + + input_by_output_index_.reinitialize(outputs_.size()); + input_by_output_index_.fill(-1); + for (const bNodeLink *internal_link : node.internal_links_span()) { + const int input_i = r_used_inputs.first_index_of_try(internal_link->fromsock); + const int output_i = r_used_outputs.first_index_of_try(internal_link->tosock); + if (ELEM(-1, input_i, output_i)) { + continue; + } + input_by_output_index_[output_i] = input_i; + inputs_[input_i].usage = lf::ValueUsage::Maybe; + } + } + + void execute_impl(lf::Params ¶ms, const lf::Context &UNUSED(context)) const override + { + for (const int output_i : outputs_.index_range()) { + if (params.output_was_set(output_i)) { + continue; + } + const CPPType &output_type = *outputs_[output_i].type; + void *output_value = params.get_output_data_ptr(output_i); + const int input_i = input_by_output_index_[output_i]; + if (input_i == -1) { + /* The output does not have a corresponding input. */ + output_type.value_initialize(output_value); + params.output_set(output_i); + continue; + } + const void *input_value = params.try_get_input_data_ptr_or_request(input_i); + if (input_value == nullptr) { + continue; + } + const CPPType &input_type = *inputs_[input_i].type; + if (input_type == output_type) { + /* Forward the value as is. */ + input_type.copy_construct(input_value, output_value); + params.output_set(output_i); + continue; + } + /* Perform a type conversion and then format the value. 
*/ + const bke::DataTypeConversions &conversions = bke::get_implicit_type_conversions(); + const auto *from_field_type = dynamic_cast<const ValueOrFieldCPPType *>(&input_type); + const auto *to_field_type = dynamic_cast<const ValueOrFieldCPPType *>(&output_type); + if (from_field_type != nullptr && to_field_type != nullptr) { + const CPPType &from_base_type = from_field_type->base_type(); + const CPPType &to_base_type = to_field_type->base_type(); + if (conversions.is_convertible(from_base_type, to_base_type)) { + const MultiFunction &multi_fn = *conversions.get_conversion_multi_function( + MFDataType::ForSingle(from_base_type), MFDataType::ForSingle(to_base_type)); + execute_multi_function_on_value_or_field( + multi_fn, {}, {from_field_type}, {to_field_type}, {input_value}, {output_value}); + } + params.output_set(output_i); + continue; + } + /* Use a value initialization if the conversion does not work. */ + output_type.value_initialize(output_value); + params.output_set(output_i); + } + } +}; + +/** + * Type conversions are generally implemented as multi-functions. This node checks if the input is + * a field or single value and outputs a field or single value respectively. 
+ */ +class LazyFunctionForMultiFunctionConversion : public LazyFunction { + private: + const MultiFunction &fn_; + const ValueOrFieldCPPType &from_type_; + const ValueOrFieldCPPType &to_type_; + const Vector<const bNodeSocket *> target_sockets_; + + public: + LazyFunctionForMultiFunctionConversion(const MultiFunction &fn, + const ValueOrFieldCPPType &from, + const ValueOrFieldCPPType &to, + Vector<const bNodeSocket *> &&target_sockets) + : fn_(fn), from_type_(from), to_type_(to), target_sockets_(std::move(target_sockets)) + { + debug_name_ = "Convert"; + inputs_.append({"From", from}); + outputs_.append({"To", to}); + } + + void execute_impl(lf::Params ¶ms, const lf::Context &UNUSED(context)) const override + { + const void *from_value = params.try_get_input_data_ptr(0); + void *to_value = params.get_output_data_ptr(0); + BLI_assert(from_value != nullptr); + BLI_assert(to_value != nullptr); + + execute_multi_function_on_value_or_field( + fn_, {}, {&from_type_}, {&to_type_}, {from_value}, {to_value}); + + params.output_set(0); + } +}; + +/** + * This lazy-function wraps nodes that are implemented as multi-function (mostly math nodes). 
+ */ +class LazyFunctionForMultiFunctionNode : public LazyFunction { + private: + const bNode &node_; + const NodeMultiFunctions::Item fn_item_; + Vector<const ValueOrFieldCPPType *> input_types_; + Vector<const ValueOrFieldCPPType *> output_types_; + Vector<const bNodeSocket *> output_sockets_; + + public: + LazyFunctionForMultiFunctionNode(const bNode &node, + NodeMultiFunctions::Item fn_item, + Vector<const bNodeSocket *> &r_used_inputs, + Vector<const bNodeSocket *> &r_used_outputs) + : node_(node), fn_item_(std::move(fn_item)) + { + BLI_assert(fn_item_.fn != nullptr); + debug_name_ = node.name; + lazy_function_interface_from_node(node, r_used_inputs, r_used_outputs, inputs_, outputs_); + for (const lf::Input &fn_input : inputs_) { + input_types_.append(dynamic_cast<const ValueOrFieldCPPType *>(fn_input.type)); + } + for (const lf::Output &fn_output : outputs_) { + output_types_.append(dynamic_cast<const ValueOrFieldCPPType *>(fn_output.type)); + } + output_sockets_ = r_used_outputs; + } + + void execute_impl(lf::Params ¶ms, const lf::Context &UNUSED(context)) const override + { + Vector<const void *> input_values(inputs_.size()); + Vector<void *> output_values(outputs_.size()); + for (const int i : inputs_.index_range()) { + input_values[i] = params.try_get_input_data_ptr(i); + } + for (const int i : outputs_.index_range()) { + output_values[i] = params.get_output_data_ptr(i); + } + execute_multi_function_on_value_or_field( + *fn_item_.fn, fn_item_.owned_fn, input_types_, output_types_, input_values, output_values); + for (const int i : outputs_.index_range()) { + params.output_set(i); + } + } +}; + +/** + * Some sockets have non-trivial implicit inputs (e.g. the Position input of the Set Position + * node). Those are implemented as a separate node that outputs the value. + */ +class LazyFunctionForImplicitInput : public LazyFunction { + private: + /** + * The function that generates the implicit input. The passed in memory is uninitialized. 
+ */ + std::function<void(void *)> init_fn_; + + public: + LazyFunctionForImplicitInput(const CPPType &type, std::function<void(void *)> init_fn) + : init_fn_(std::move(init_fn)) + { + debug_name_ = "Input"; + outputs_.append({"Output", type}); + } + + void execute_impl(lf::Params ¶ms, const lf::Context &UNUSED(context)) const override + { + void *value = params.get_output_data_ptr(0); + init_fn_(value); + params.output_set(0); + } +}; + +/** + * The viewer node does not have outputs. Instead it is executed because the executor knows that it + * has side effects. The side effect is that the inputs to the viewer are logged. + */ +class LazyFunctionForViewerNode : public LazyFunction { + private: + const bNode &bnode_; + /** The field is only logged when it is linked. */ + bool use_field_input_ = true; + + public: + LazyFunctionForViewerNode(const bNode &bnode, Vector<const bNodeSocket *> &r_used_inputs) + : bnode_(bnode) + { + debug_name_ = "Viewer"; + Vector<const bNodeSocket *> dummy_used_outputs; + lazy_function_interface_from_node(bnode, r_used_inputs, dummy_used_outputs, inputs_, outputs_); + if (!r_used_inputs[1]->is_directly_linked()) { + use_field_input_ = false; + r_used_inputs.pop_last(); + inputs_.pop_last(); + } + } + + void execute_impl(lf::Params ¶ms, const lf::Context &context) const override + { + GeoNodesLFUserData *user_data = dynamic_cast<GeoNodesLFUserData *>(context.user_data); + BLI_assert(user_data != nullptr); + + GeometrySet geometry = params.extract_input<GeometrySet>(0); + + GField field; + if (use_field_input_) { + const void *value_or_field = params.try_get_input_data_ptr(1); + BLI_assert(value_or_field != nullptr); + const ValueOrFieldCPPType &value_or_field_type = static_cast<const ValueOrFieldCPPType &>( + *inputs_[1].type); + field = value_or_field_type.as_field(value_or_field); + } + + geo_eval_log::GeoTreeLogger &tree_logger = + user_data->modifier_data->eval_log->get_local_tree_logger(*user_data->compute_context); + 
tree_logger.log_viewer_node(bnode_, geometry, field); + } +}; + +/** + * This lazy-function wraps a group node. Internally it just executes the lazy-function graph of + * the referenced group. + */ +class LazyFunctionForGroupNode : public LazyFunction { + private: + const bNode &group_node_; + std::optional<GeometryNodesLazyFunctionLogger> lf_logger_; + std::optional<GeometryNodesLazyFunctionSideEffectProvider> lf_side_effect_provider_; + std::optional<lf::GraphExecutor> graph_executor_; + + public: + LazyFunctionForGroupNode(const bNode &group_node, + const GeometryNodesLazyFunctionGraphInfo &lf_graph_info, + Vector<const bNodeSocket *> &r_used_inputs, + Vector<const bNodeSocket *> &r_used_outputs) + : group_node_(group_node) + { + debug_name_ = group_node.name; + lazy_function_interface_from_node( + group_node, r_used_inputs, r_used_outputs, inputs_, outputs_); + + bNodeTree *group_btree = reinterpret_cast<bNodeTree *>(group_node_.id); + BLI_assert(group_btree != nullptr); + + Vector<const lf::OutputSocket *> graph_inputs; + for (const lf::OutputSocket *socket : lf_graph_info.mapping.group_input_sockets) { + if (socket != nullptr) { + graph_inputs.append(socket); + } + } + Vector<const lf::InputSocket *> graph_outputs; + if (const bNode *group_output_bnode = group_btree->group_output_node()) { + for (const bNodeSocket *bsocket : group_output_bnode->input_sockets().drop_back(1)) { + const lf::Socket *socket = lf_graph_info.mapping.dummy_socket_map.lookup_default(bsocket, + nullptr); + if (socket != nullptr) { + graph_outputs.append(&socket->as_input()); + } + } + } + + lf_logger_.emplace(lf_graph_info); + lf_side_effect_provider_.emplace(lf_graph_info); + graph_executor_.emplace(lf_graph_info.graph, + std::move(graph_inputs), + std::move(graph_outputs), + &*lf_logger_, + &*lf_side_effect_provider_); + } + + void execute_impl(lf::Params ¶ms, const lf::Context &context) const override + { + GeoNodesLFUserData *user_data = dynamic_cast<GeoNodesLFUserData 
*>(context.user_data); + BLI_assert(user_data != nullptr); + + /* The compute context changes when entering a node group. */ + bke::NodeGroupComputeContext compute_context{user_data->compute_context, group_node_.name}; + GeoNodesLFUserData group_user_data = *user_data; + group_user_data.compute_context = &compute_context; + + lf::Context group_context = context; + group_context.user_data = &group_user_data; + + graph_executor_->execute(params, group_context); + } + + void *init_storage(LinearAllocator<> &allocator) const + { + return graph_executor_->init_storage(allocator); + } + + void destruct_storage(void *storage) const + { + graph_executor_->destruct_storage(storage); + } +}; + +static GMutablePointer get_socket_default_value(LinearAllocator<> &allocator, + const bNodeSocket &bsocket) +{ + const bNodeSocketType &typeinfo = *bsocket.typeinfo; + const CPPType *type = get_socket_cpp_type(typeinfo); + if (type == nullptr) { + return {}; + } + void *buffer = allocator.allocate(type->size(), type->alignment()); + typeinfo.get_geometry_nodes_cpp_value(bsocket, buffer); + return {type, buffer}; +} + +/** + * Utility class to build a lazy-function graph based on a geometry nodes tree. + * This is mainly a separate class because it makes it easier to have variables that can be + * accessed by many functions. + */ +struct GeometryNodesLazyFunctionGraphBuilder { + private: + const bNodeTree &btree_; + GeometryNodesLazyFunctionGraphInfo *lf_graph_info_; + lf::Graph *lf_graph_; + GeometryNodeLazyFunctionGraphMapping *mapping_; + MultiValueMap<const bNodeSocket *, lf::InputSocket *> input_socket_map_; + Map<const bNodeSocket *, lf::OutputSocket *> output_socket_map_; + Map<const bNodeSocket *, lf::Node *> multi_input_socket_nodes_; + const bke::DataTypeConversions *conversions_; + + /** + * All group input nodes are combined into one dummy node in the lazy-function graph. + * If some input has an invalid type, it is ignored in the new graph. 
In this case null and -1 is + * used in the vectors below. + */ + Vector<const CPPType *> group_input_types_; + Vector<int> group_input_indices_; + lf::DummyNode *group_input_lf_node_; + + /** + * The output types or null if an output is invalid. Each group output node gets a separate + * corresponding dummy node in the new graph. + */ + Vector<const CPPType *> group_output_types_; + Vector<int> group_output_indices_; + + public: + GeometryNodesLazyFunctionGraphBuilder(const bNodeTree &btree, + GeometryNodesLazyFunctionGraphInfo &lf_graph_info) + : btree_(btree), lf_graph_info_(&lf_graph_info) + { + } + + void build() + { + btree_.ensure_topology_cache(); + + lf_graph_ = &lf_graph_info_->graph; + mapping_ = &lf_graph_info_->mapping; + conversions_ = &bke::get_implicit_type_conversions(); + + this->prepare_node_multi_functions(); + this->prepare_group_inputs(); + this->prepare_group_outputs(); + this->build_group_input_node(); + this->handle_nodes(); + this->handle_links(); + this->add_default_inputs(); + + lf_graph_->update_node_indices(); + } + + private: + void prepare_node_multi_functions() + { + lf_graph_info_->node_multi_functions = std::make_unique<NodeMultiFunctions>(btree_); + } + + void prepare_group_inputs() + { + LISTBASE_FOREACH (const bNodeSocket *, interface_bsocket, &btree_.inputs) { + const CPPType *type = get_socket_cpp_type(*interface_bsocket->typeinfo); + if (type != nullptr) { + const int index = group_input_types_.append_and_get_index(type); + group_input_indices_.append(index); + } + else { + group_input_indices_.append(-1); + } + } + } + + void prepare_group_outputs() + { + LISTBASE_FOREACH (const bNodeSocket *, interface_bsocket, &btree_.outputs) { + const CPPType *type = get_socket_cpp_type(*interface_bsocket->typeinfo); + if (type != nullptr) { + const int index = group_output_types_.append_and_get_index(type); + group_output_indices_.append(index); + } + else { + group_output_indices_.append(-1); + } + } + } + + void 
build_group_input_node() + { + /* Create a dummy node for the group inputs. */ + group_input_lf_node_ = &lf_graph_->add_dummy({}, group_input_types_); + for (const int group_input_index : group_input_indices_) { + if (group_input_index == -1) { + mapping_->group_input_sockets.append(nullptr); + } + else { + mapping_->group_input_sockets.append(&group_input_lf_node_->output(group_input_index)); + } + } + } + + void handle_nodes() + { + /* Insert all nodes into the lazy function graph. */ + for (const bNode *bnode : btree_.all_nodes()) { + const bNodeType *node_type = bnode->typeinfo; + if (node_type == nullptr) { + continue; + } + if (bnode->is_muted()) { + this->handle_muted_node(*bnode); + continue; + } + switch (node_type->type) { + case NODE_FRAME: { + /* Ignored. */ + break; + } + case NODE_REROUTE: { + this->handle_reroute_node(*bnode); + break; + } + case NODE_GROUP_INPUT: { + this->handle_group_input_node(*bnode); + break; + } + case NODE_GROUP_OUTPUT: { + this->handle_group_output_node(*bnode); + break; + } + case NODE_CUSTOM_GROUP: + case NODE_GROUP: { + this->handle_group_node(*bnode); + break; + } + case GEO_NODE_VIEWER: { + this->handle_viewer_node(*bnode); + break; + } + default: { + if (node_type->geometry_node_execute) { + this->handle_geometry_node(*bnode); + break; + } + const NodeMultiFunctions::Item &fn_item = lf_graph_info_->node_multi_functions->try_get( + *bnode); + if (fn_item.fn != nullptr) { + this->handle_multi_function_node(*bnode, fn_item); + } + /* Nodes that don't match any of the criteria above are just ignored. 
*/ + break; + } + } + } + } + + void handle_muted_node(const bNode &bnode) + { + Vector<const bNodeSocket *> used_inputs; + Vector<const bNodeSocket *> used_outputs; + auto lazy_function = std::make_unique<LazyFunctionForMutedNode>( + bnode, used_inputs, used_outputs); + lf::Node &lf_node = lf_graph_->add_function(*lazy_function); + lf_graph_info_->functions.append(std::move(lazy_function)); + for (const int i : used_inputs.index_range()) { + const bNodeSocket &bsocket = *used_inputs[i]; + lf::InputSocket &lf_socket = lf_node.input(i); + input_socket_map_.add(&bsocket, &lf_socket); + mapping_->bsockets_by_lf_socket_map.add(&lf_socket, &bsocket); + } + for (const int i : used_outputs.index_range()) { + const bNodeSocket &bsocket = *used_outputs[i]; + lf::OutputSocket &lf_socket = lf_node.output(i); + output_socket_map_.add_new(&bsocket, &lf_socket); + mapping_->bsockets_by_lf_socket_map.add(&lf_socket, &bsocket); + } + } + + void handle_reroute_node(const bNode &bnode) + { + const bNodeSocket &input_bsocket = bnode.input_socket(0); + const bNodeSocket &output_bsocket = bnode.output_socket(0); + const CPPType *type = get_socket_cpp_type(input_bsocket); + if (type == nullptr) { + return; + } + + auto lazy_function = std::make_unique<LazyFunctionForRerouteNode>(*type); + lf::Node &lf_node = lf_graph_->add_function(*lazy_function); + lf_graph_info_->functions.append(std::move(lazy_function)); + + lf::InputSocket &lf_input = lf_node.input(0); + lf::OutputSocket &lf_output = lf_node.output(0); + input_socket_map_.add(&input_bsocket, &lf_input); + output_socket_map_.add_new(&output_bsocket, &lf_output); + mapping_->bsockets_by_lf_socket_map.add(&lf_input, &input_bsocket); + mapping_->bsockets_by_lf_socket_map.add(&lf_output, &output_bsocket); + } + + void handle_group_input_node(const bNode &bnode) + { + for (const int btree_index : group_input_indices_.index_range()) { + const int lf_index = group_input_indices_[btree_index]; + if (lf_index == -1) { + continue; + } + 
const bNodeSocket &bsocket = bnode.output_socket(btree_index); + lf::OutputSocket &lf_socket = group_input_lf_node_->output(lf_index); + output_socket_map_.add_new(&bsocket, &lf_socket); + mapping_->dummy_socket_map.add_new(&bsocket, &lf_socket); + mapping_->bsockets_by_lf_socket_map.add(&lf_socket, &bsocket); + } + } + + void handle_group_output_node(const bNode &bnode) + { + lf::DummyNode &group_output_lf_node = lf_graph_->add_dummy(group_output_types_, {}); + for (const int btree_index : group_output_indices_.index_range()) { + const int lf_index = group_output_indices_[btree_index]; + if (lf_index == -1) { + continue; + } + const bNodeSocket &bsocket = bnode.input_socket(btree_index); + lf::InputSocket &lf_socket = group_output_lf_node.input(lf_index); + input_socket_map_.add(&bsocket, &lf_socket); + mapping_->dummy_socket_map.add(&bsocket, &lf_socket); + mapping_->bsockets_by_lf_socket_map.add(&lf_socket, &bsocket); + } + } + + void handle_group_node(const bNode &bnode) + { + const bNodeTree *group_btree = reinterpret_cast<bNodeTree *>(bnode.id); + if (group_btree == nullptr) { + return; + } + const GeometryNodesLazyFunctionGraphInfo *group_lf_graph_info = + ensure_geometry_nodes_lazy_function_graph(*group_btree); + if (group_lf_graph_info == nullptr) { + return; + } + + Vector<const bNodeSocket *> used_inputs; + Vector<const bNodeSocket *> used_outputs; + auto lazy_function = std::make_unique<LazyFunctionForGroupNode>( + bnode, *group_lf_graph_info, used_inputs, used_outputs); + lf::FunctionNode &lf_node = lf_graph_->add_function(*lazy_function); + lf_graph_info_->functions.append(std::move(lazy_function)); + for (const int i : used_inputs.index_range()) { + const bNodeSocket &bsocket = *used_inputs[i]; + BLI_assert(!bsocket.is_multi_input()); + lf::InputSocket &lf_socket = lf_node.input(i); + input_socket_map_.add(&bsocket, &lf_socket); + mapping_->bsockets_by_lf_socket_map.add(&lf_socket, &bsocket); + } + for (const int i : used_outputs.index_range()) { + 
const bNodeSocket &bsocket = *used_outputs[i]; + lf::OutputSocket &lf_socket = lf_node.output(i); + output_socket_map_.add_new(&bsocket, &lf_socket); + mapping_->bsockets_by_lf_socket_map.add(&lf_socket, &bsocket); + } + mapping_->group_node_map.add(&bnode, &lf_node); + } + + void handle_geometry_node(const bNode &bnode) + { + Vector<const bNodeSocket *> used_inputs; + Vector<const bNodeSocket *> used_outputs; + auto lazy_function = std::make_unique<LazyFunctionForGeometryNode>( + bnode, used_inputs, used_outputs); + lf::Node &lf_node = lf_graph_->add_function(*lazy_function); + lf_graph_info_->functions.append(std::move(lazy_function)); + + for (const int i : used_inputs.index_range()) { + const bNodeSocket &bsocket = *used_inputs[i]; + lf::InputSocket &lf_socket = lf_node.input(i); + + if (bsocket.is_multi_input()) { + auto multi_input_lazy_function = std::make_unique<LazyFunctionForMultiInput>(bsocket); + lf::Node &lf_multi_input_node = lf_graph_->add_function(*multi_input_lazy_function); + lf_graph_info_->functions.append(std::move(multi_input_lazy_function)); + lf_graph_->add_link(lf_multi_input_node.output(0), lf_socket); + multi_input_socket_nodes_.add_new(&bsocket, &lf_multi_input_node); + for (lf::InputSocket *lf_multi_input_socket : lf_multi_input_node.inputs()) { + mapping_->bsockets_by_lf_socket_map.add(lf_multi_input_socket, &bsocket); + } + } + else { + input_socket_map_.add(&bsocket, &lf_socket); + mapping_->bsockets_by_lf_socket_map.add(&lf_socket, &bsocket); + } + } + for (const int i : used_outputs.index_range()) { + const bNodeSocket &bsocket = *used_outputs[i]; + lf::OutputSocket &lf_socket = lf_node.output(i); + output_socket_map_.add_new(&bsocket, &lf_socket); + mapping_->bsockets_by_lf_socket_map.add(&lf_socket, &bsocket); + } + } + + void handle_multi_function_node(const bNode &bnode, const NodeMultiFunctions::Item &fn_item) + { + Vector<const bNodeSocket *> used_inputs; + Vector<const bNodeSocket *> used_outputs; + auto lazy_function = 
std::make_unique<LazyFunctionForMultiFunctionNode>( + bnode, fn_item, used_inputs, used_outputs); + lf::Node &lf_node = lf_graph_->add_function(*lazy_function); + lf_graph_info_->functions.append(std::move(lazy_function)); + + for (const int i : used_inputs.index_range()) { + const bNodeSocket &bsocket = *used_inputs[i]; + BLI_assert(!bsocket.is_multi_input()); + lf::InputSocket &lf_socket = lf_node.input(i); + input_socket_map_.add(&bsocket, &lf_socket); + mapping_->bsockets_by_lf_socket_map.add(&lf_socket, &bsocket); + } + for (const int i : used_outputs.index_range()) { + const bNodeSocket &bsocket = *used_outputs[i]; + lf::OutputSocket &lf_socket = lf_node.output(i); + output_socket_map_.add(&bsocket, &lf_socket); + mapping_->bsockets_by_lf_socket_map.add(&lf_socket, &bsocket); + } + } + + void handle_viewer_node(const bNode &bnode) + { + Vector<const bNodeSocket *> used_inputs; + auto lazy_function = std::make_unique<LazyFunctionForViewerNode>(bnode, used_inputs); + lf::FunctionNode &lf_node = lf_graph_->add_function(*lazy_function); + lf_graph_info_->functions.append(std::move(lazy_function)); + + for (const int i : used_inputs.index_range()) { + const bNodeSocket &bsocket = *used_inputs[i]; + lf::InputSocket &lf_socket = lf_node.input(i); + input_socket_map_.add(&bsocket, &lf_socket); + mapping_->bsockets_by_lf_socket_map.add(&lf_socket, &bsocket); + } + + mapping_->viewer_node_map.add(&bnode, &lf_node); + } + + void handle_links() + { + for (const auto item : output_socket_map_.items()) { + this->insert_links_from_socket(*item.key, *item.value); + } + } + + void insert_links_from_socket(const bNodeSocket &from_bsocket, lf::OutputSocket &from_lf_socket) + { + const Span<const bNodeLink *> links_from_bsocket = from_bsocket.directly_linked_links(); + + struct TypeWithLinks { + const CPPType *type; + Vector<const bNodeLink *> links; + }; + + /* Group available target sockets by type so that they can be handled together. 
*/ + Vector<TypeWithLinks> types_with_links; + for (const bNodeLink *link : links_from_bsocket) { + if (link->is_muted()) { + continue; + } + const bNodeSocket &to_bsocket = *link->tosock; + if (!to_bsocket.is_available()) { + continue; + } + const CPPType *to_type = get_socket_cpp_type(to_bsocket); + if (to_type == nullptr) { + continue; + } + bool inserted = false; + for (TypeWithLinks &types_with_links : types_with_links) { + if (types_with_links.type == to_type) { + types_with_links.links.append(link); + inserted = true; + break; + } + } + if (inserted) { + continue; + } + types_with_links.append({to_type, {link}}); + } + + for (const TypeWithLinks &type_with_links : types_with_links) { + const CPPType &to_type = *type_with_links.type; + const Span<const bNodeLink *> links = type_with_links.links; + + Vector<const bNodeSocket *> target_bsockets; + for (const bNodeLink *link : links) { + target_bsockets.append(link->tosock); + } + + lf::OutputSocket *converted_from_lf_socket = this->insert_type_conversion_if_necessary( + from_lf_socket, to_type, std::move(target_bsockets)); + + auto make_input_link_or_set_default = [&](lf::InputSocket &to_lf_socket) { + if (converted_from_lf_socket == nullptr) { + const void *default_value = to_type.default_value(); + to_lf_socket.set_default_value(default_value); + } + else { + lf_graph_->add_link(*converted_from_lf_socket, to_lf_socket); + } + }; + + for (const bNodeLink *link : links) { + const bNodeSocket &to_bsocket = *link->tosock; + if (to_bsocket.is_multi_input()) { + /* TODO: Cache this index on the link. 
*/ + int link_index = 0; + for (const bNodeLink *multi_input_link : to_bsocket.directly_linked_links()) { + if (multi_input_link == link) { + break; + } + if (!multi_input_link->is_muted()) { + link_index++; + } + } + if (to_bsocket.owner_node().is_muted()) { + if (link_index == 0) { + for (lf::InputSocket *to_lf_socket : input_socket_map_.lookup(&to_bsocket)) { + make_input_link_or_set_default(*to_lf_socket); + } + } + } + else { + lf::Node *multi_input_lf_node = multi_input_socket_nodes_.lookup_default(&to_bsocket, + nullptr); + if (multi_input_lf_node == nullptr) { + continue; + } + make_input_link_or_set_default(multi_input_lf_node->input(link_index)); + } + } + else { + for (lf::InputSocket *to_lf_socket : input_socket_map_.lookup(&to_bsocket)) { + make_input_link_or_set_default(*to_lf_socket); + } + } + } + } + } + + lf::OutputSocket *insert_type_conversion_if_necessary( + lf::OutputSocket &from_socket, + const CPPType &to_type, + Vector<const bNodeSocket *> &&target_sockets) + { + const CPPType &from_type = from_socket.type(); + if (from_type == to_type) { + return &from_socket; + } + const auto *from_field_type = dynamic_cast<const ValueOrFieldCPPType *>(&from_type); + const auto *to_field_type = dynamic_cast<const ValueOrFieldCPPType *>(&to_type); + if (from_field_type != nullptr && to_field_type != nullptr) { + const CPPType &from_base_type = from_field_type->base_type(); + const CPPType &to_base_type = to_field_type->base_type(); + if (conversions_->is_convertible(from_base_type, to_base_type)) { + const MultiFunction &multi_fn = *conversions_->get_conversion_multi_function( + MFDataType::ForSingle(from_base_type), MFDataType::ForSingle(to_base_type)); + auto fn = std::make_unique<LazyFunctionForMultiFunctionConversion>( + multi_fn, *from_field_type, *to_field_type, std::move(target_sockets)); + lf::Node &conversion_node = lf_graph_->add_function(*fn); + lf_graph_info_->functions.append(std::move(fn)); + lf_graph_->add_link(from_socket, 
conversion_node.input(0)); + return &conversion_node.output(0); + } + } + return nullptr; + } + + void add_default_inputs() + { + for (auto item : input_socket_map_.items()) { + const bNodeSocket &bsocket = *item.key; + const Span<lf::InputSocket *> lf_sockets = item.value; + for (lf::InputSocket *lf_socket : lf_sockets) { + if (lf_socket->origin() != nullptr) { + /* Is linked already. */ + continue; + } + this->add_default_input(bsocket, *lf_socket); + } + } + } + + void add_default_input(const bNodeSocket &input_bsocket, lf::InputSocket &input_lf_socket) + { + if (this->try_add_implicit_input(input_bsocket, input_lf_socket)) { + return; + } + GMutablePointer value = get_socket_default_value(lf_graph_info_->allocator, input_bsocket); + if (value.get() == nullptr) { + /* Not possible to add a default value. */ + return; + } + input_lf_socket.set_default_value(value.get()); + if (!value.type()->is_trivially_destructible()) { + lf_graph_info_->values_to_destruct.append(value); + } + } + + bool try_add_implicit_input(const bNodeSocket &input_bsocket, lf::InputSocket &input_lf_socket) + { + const bNode &bnode = input_bsocket.owner_node(); + const NodeDeclaration *node_declaration = bnode.declaration(); + if (node_declaration == nullptr) { + return false; + } + const SocketDeclaration &socket_declaration = + *node_declaration->inputs()[input_bsocket.index()]; + if (socket_declaration.input_field_type() != InputSocketFieldType::Implicit) { + return false; + } + const CPPType &type = input_lf_socket.type(); + std::function<void(void *)> init_fn = this->get_implicit_input_init_function(bnode, + input_bsocket); + if (!init_fn) { + return false; + } + + auto lazy_function = std::make_unique<LazyFunctionForImplicitInput>(type, std::move(init_fn)); + lf::Node &lf_node = lf_graph_->add_function(*lazy_function); + lf_graph_info_->functions.append(std::move(lazy_function)); + lf_graph_->add_link(lf_node.output(0), input_lf_socket); + return true; + } + + std::function<void(void 
*)> get_implicit_input_init_function(const bNode &bnode, + const bNodeSocket &bsocket) + { + const bNodeSocketType &socket_type = *bsocket.typeinfo; + if (socket_type.type == SOCK_VECTOR) { + if (bnode.type == GEO_NODE_SET_CURVE_HANDLES) { + StringRef side = ((NodeGeometrySetCurveHandlePositions *)bnode.storage)->mode == + GEO_NODE_CURVE_HANDLE_LEFT ? + "handle_left" : + "handle_right"; + return [side](void *r_value) { + new (r_value) ValueOrField<float3>(bke::AttributeFieldInput::Create<float3>(side)); + }; + } + else if (bnode.type == GEO_NODE_EXTRUDE_MESH) { + return [](void *r_value) { + new (r_value) + ValueOrField<float3>(Field<float3>(std::make_shared<bke::NormalFieldInput>())); + }; + } + else { + return [](void *r_value) { + new (r_value) ValueOrField<float3>(bke::AttributeFieldInput::Create<float3>("position")); + }; + } + } + else if (socket_type.type == SOCK_INT) { + if (ELEM(bnode.type, FN_NODE_RANDOM_VALUE, GEO_NODE_INSTANCE_ON_POINTS)) { + return [](void *r_value) { + new (r_value) + ValueOrField<int>(Field<int>(std::make_shared<bke::IDAttributeFieldInput>())); + }; + } + else { + return [](void *r_value) { + new (r_value) ValueOrField<int>(Field<int>(std::make_shared<fn::IndexFieldInput>())); + }; + } + } + return {}; + } +}; + +const GeometryNodesLazyFunctionGraphInfo *ensure_geometry_nodes_lazy_function_graph( + const bNodeTree &btree) +{ + btree.ensure_topology_cache(); + if (btree.has_link_cycle()) { + return nullptr; + } + + std::unique_ptr<GeometryNodesLazyFunctionGraphInfo> &lf_graph_info_ptr = + btree.runtime->geometry_nodes_lazy_function_graph_info; + + if (lf_graph_info_ptr) { + return lf_graph_info_ptr.get(); + } + std::lock_guard lock{btree.runtime->geometry_nodes_lazy_function_graph_info_mutex}; + if (lf_graph_info_ptr) { + return lf_graph_info_ptr.get(); + } + + auto lf_graph_info = std::make_unique<GeometryNodesLazyFunctionGraphInfo>(); + GeometryNodesLazyFunctionGraphBuilder builder{btree, *lf_graph_info}; + builder.build(); + + 
lf_graph_info_ptr = std::move(lf_graph_info); + return lf_graph_info_ptr.get(); +} + +GeometryNodesLazyFunctionLogger::GeometryNodesLazyFunctionLogger( + const GeometryNodesLazyFunctionGraphInfo &lf_graph_info) + : lf_graph_info_(lf_graph_info) +{ +} + +void GeometryNodesLazyFunctionLogger::log_socket_value( + const fn::lazy_function::Socket &lf_socket, + const GPointer value, + const fn::lazy_function::Context &context) const +{ + const Span<const bNodeSocket *> bsockets = + lf_graph_info_.mapping.bsockets_by_lf_socket_map.lookup(&lf_socket); + if (bsockets.is_empty()) { + return; + } + + GeoNodesLFUserData *user_data = dynamic_cast<GeoNodesLFUserData *>(context.user_data); + BLI_assert(user_data != nullptr); + if (user_data->modifier_data->eval_log == nullptr) { + return; + } + geo_eval_log::GeoTreeLogger &tree_logger = + user_data->modifier_data->eval_log->get_local_tree_logger(*user_data->compute_context); + for (const bNodeSocket *bsocket : bsockets) { + /* Avoid logging to some sockets when the same value will also be logged to a linked socket. + * This reduces the number of logged values without losing information. 
*/ + if (bsocket->is_input() && bsocket->is_directly_linked()) { + continue; + } + const bNode &bnode = bsocket->owner_node(); + if (bnode.is_reroute()) { + continue; + } + tree_logger.log_value(bsocket->owner_node(), *bsocket, value); + } +} + +static std::mutex dump_error_context_mutex; + +void GeometryNodesLazyFunctionLogger::dump_when_outputs_are_missing( + const lf::FunctionNode &node, + Span<const lf::OutputSocket *> missing_sockets, + const lf::Context &context) const +{ + std::lock_guard lock{dump_error_context_mutex}; + + GeoNodesLFUserData *user_data = dynamic_cast<GeoNodesLFUserData *>(context.user_data); + BLI_assert(user_data != nullptr); + user_data->compute_context->print_stack(std::cout, node.name()); + std::cout << "Missing outputs:\n"; + for (const lf::OutputSocket *socket : missing_sockets) { + std::cout << " " << socket->name() << "\n"; + } +} + +void GeometryNodesLazyFunctionLogger::dump_when_input_is_set_twice( + const lf::InputSocket &target_socket, + const lf::OutputSocket &from_socket, + const lf::Context &context) const +{ + std::lock_guard lock{dump_error_context_mutex}; + + std::stringstream ss; + ss << from_socket.node().name() << ":" << from_socket.name() << " -> " + << target_socket.node().name() << ":" << target_socket.name(); + + GeoNodesLFUserData *user_data = dynamic_cast<GeoNodesLFUserData *>(context.user_data); + BLI_assert(user_data != nullptr); + user_data->compute_context->print_stack(std::cout, ss.str()); +} + +GeometryNodesLazyFunctionSideEffectProvider::GeometryNodesLazyFunctionSideEffectProvider( + const GeometryNodesLazyFunctionGraphInfo &lf_graph_info) + : lf_graph_info_(lf_graph_info) +{ +} + +Vector<const lf::FunctionNode *> GeometryNodesLazyFunctionSideEffectProvider:: + get_nodes_with_side_effects(const lf::Context &context) const +{ + GeoNodesLFUserData *user_data = dynamic_cast<GeoNodesLFUserData *>(context.user_data); + BLI_assert(user_data != nullptr); + const ComputeContextHash &context_hash = 
user_data->compute_context->hash(); + const GeoNodesModifierData &modifier_data = *user_data->modifier_data; + return modifier_data.side_effect_nodes->lookup(context_hash); +} + +GeometryNodesLazyFunctionGraphInfo::GeometryNodesLazyFunctionGraphInfo() = default; +GeometryNodesLazyFunctionGraphInfo::~GeometryNodesLazyFunctionGraphInfo() +{ + for (GMutablePointer &p : this->values_to_destruct) { + p.destruct(); + } +} + +} // namespace blender::nodes diff --git a/source/blender/nodes/intern/geometry_nodes_log.cc b/source/blender/nodes/intern/geometry_nodes_log.cc new file mode 100644 index 00000000000..2e0ac746ac0 --- /dev/null +++ b/source/blender/nodes/intern/geometry_nodes_log.cc @@ -0,0 +1,607 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ + +#include "NOD_geometry_nodes_lazy_function.hh" +#include "NOD_geometry_nodes_log.hh" + +#include "BKE_compute_contexts.hh" +#include "BKE_curves.hh" +#include "BKE_node_runtime.hh" + +#include "FN_field_cpp_type.hh" + +#include "DNA_modifier_types.h" +#include "DNA_space_types.h" + +namespace blender::nodes::geo_eval_log { + +using fn::FieldInput; +using fn::FieldInputs; + +GenericValueLog::~GenericValueLog() +{ + this->value.destruct(); +} + +FieldInfoLog::FieldInfoLog(const GField &field) : type(field.cpp_type()) +{ + const std::shared_ptr<const fn::FieldInputs> &field_input_nodes = field.node().field_inputs(); + + /* Put the deduplicated field inputs into a vector so that they can be sorted below. 
*/ + Vector<std::reference_wrapper<const FieldInput>> field_inputs; + if (field_input_nodes) { + field_inputs.extend(field_input_nodes->deduplicated_nodes.begin(), + field_input_nodes->deduplicated_nodes.end()); + } + + std::sort( + field_inputs.begin(), field_inputs.end(), [](const FieldInput &a, const FieldInput &b) { + const int index_a = (int)a.category(); + const int index_b = (int)b.category(); + if (index_a == index_b) { + return a.socket_inspection_name().size() < b.socket_inspection_name().size(); + } + return index_a < index_b; + }); + + for (const FieldInput &field_input : field_inputs) { + this->input_tooltips.append(field_input.socket_inspection_name()); + } +} + +GeometryInfoLog::GeometryInfoLog(const GeometrySet &geometry_set) +{ + static std::array all_component_types = {GEO_COMPONENT_TYPE_CURVE, + GEO_COMPONENT_TYPE_INSTANCES, + GEO_COMPONENT_TYPE_MESH, + GEO_COMPONENT_TYPE_POINT_CLOUD, + GEO_COMPONENT_TYPE_VOLUME}; + + /* Keep track handled attribute names to make sure that we do not return the same name twice. + * Currently #GeometrySet::attribute_foreach does not do that. Note that this will merge + * attributes with the same name but different domains or data types on separate components. 
*/ + Set<StringRef> names; + + geometry_set.attribute_foreach( + all_component_types, + true, + [&](const bke::AttributeIDRef &attribute_id, + const bke::AttributeMetaData &meta_data, + const GeometryComponent &UNUSED(component)) { + if (attribute_id.is_named() && names.add(attribute_id.name())) { + this->attributes.append({attribute_id.name(), meta_data.domain, meta_data.data_type}); + } + }); + + for (const GeometryComponent *component : geometry_set.get_components_for_read()) { + this->component_types.append(component->type()); + switch (component->type()) { + case GEO_COMPONENT_TYPE_MESH: { + const MeshComponent &mesh_component = *(const MeshComponent *)component; + MeshInfo &info = this->mesh_info.emplace(); + info.verts_num = mesh_component.attribute_domain_size(ATTR_DOMAIN_POINT); + info.edges_num = mesh_component.attribute_domain_size(ATTR_DOMAIN_EDGE); + info.faces_num = mesh_component.attribute_domain_size(ATTR_DOMAIN_FACE); + break; + } + case GEO_COMPONENT_TYPE_CURVE: { + const CurveComponent &curve_component = *(const CurveComponent *)component; + CurveInfo &info = this->curve_info.emplace(); + info.splines_num = curve_component.attribute_domain_size(ATTR_DOMAIN_CURVE); + break; + } + case GEO_COMPONENT_TYPE_POINT_CLOUD: { + const PointCloudComponent &pointcloud_component = *(const PointCloudComponent *)component; + PointCloudInfo &info = this->pointcloud_info.emplace(); + info.points_num = pointcloud_component.attribute_domain_size(ATTR_DOMAIN_POINT); + break; + } + case GEO_COMPONENT_TYPE_INSTANCES: { + const InstancesComponent &instances_component = *(const InstancesComponent *)component; + InstancesInfo &info = this->instances_info.emplace(); + info.instances_num = instances_component.instances_num(); + break; + } + case GEO_COMPONENT_TYPE_EDIT: { + const GeometryComponentEditData &edit_component = *( + const GeometryComponentEditData *)component; + if (const bke::CurvesEditHints *curve_edit_hints = + edit_component.curves_edit_hints_.get()) { + 
EditDataInfo &info = this->edit_data_info.emplace(); + info.has_deform_matrices = curve_edit_hints->deform_mats.has_value(); + info.has_deformed_positions = curve_edit_hints->positions.has_value(); + } + break; + } + case GEO_COMPONENT_TYPE_VOLUME: { + break; + } + } + } +} + +/* Avoid generating these in every translation unit. */ +GeoModifierLog::GeoModifierLog() = default; +GeoModifierLog::~GeoModifierLog() = default; + +GeoTreeLogger::GeoTreeLogger() = default; +GeoTreeLogger::~GeoTreeLogger() = default; + +GeoNodeLog::GeoNodeLog() = default; +GeoNodeLog::~GeoNodeLog() = default; + +GeoTreeLog::GeoTreeLog(GeoModifierLog *modifier_log, Vector<GeoTreeLogger *> tree_loggers) + : modifier_log_(modifier_log), tree_loggers_(std::move(tree_loggers)) +{ + for (GeoTreeLogger *tree_logger : tree_loggers_) { + for (const ComputeContextHash &hash : tree_logger->children_hashes) { + children_hashes_.add(hash); + } + } +} + +GeoTreeLog::~GeoTreeLog() = default; + +void GeoTreeLogger::log_value(const bNode &node, const bNodeSocket &socket, const GPointer value) +{ + const CPPType &type = *value.type(); + + auto store_logged_value = [&](destruct_ptr<ValueLog> value_log) { + auto &socket_values = socket.in_out == SOCK_IN ? 
this->input_socket_values : + this->output_socket_values; + socket_values.append({node.name, socket.identifier, std::move(value_log)}); + }; + + auto log_generic_value = [&](const CPPType &type, const void *value) { + void *buffer = this->allocator->allocate(type.size(), type.alignment()); + type.copy_construct(value, buffer); + store_logged_value(this->allocator->construct<GenericValueLog>(GMutablePointer{type, buffer})); + }; + + if (type.is<GeometrySet>()) { + const GeometrySet &geometry = *value.get<GeometrySet>(); + store_logged_value(this->allocator->construct<GeometryInfoLog>(geometry)); + } + else if (const auto *value_or_field_type = dynamic_cast<const fn::ValueOrFieldCPPType *>( + &type)) { + const void *value_or_field = value.get(); + const CPPType &base_type = value_or_field_type->base_type(); + if (value_or_field_type->is_field(value_or_field)) { + const GField *field = value_or_field_type->get_field_ptr(value_or_field); + if (field->node().depends_on_input()) { + store_logged_value(this->allocator->construct<FieldInfoLog>(*field)); + } + else { + BUFFER_FOR_CPP_TYPE_VALUE(base_type, value); + fn::evaluate_constant_field(*field, value); + log_generic_value(base_type, value); + } + } + else { + const void *value = value_or_field_type->get_value_ptr(value_or_field); + log_generic_value(base_type, value); + } + } + else { + log_generic_value(type, value.get()); + } +} + +void GeoTreeLogger::log_viewer_node(const bNode &viewer_node, + const GeometrySet &geometry, + const GField &field) +{ + destruct_ptr<ViewerNodeLog> log = this->allocator->construct<ViewerNodeLog>(); + log->geometry = geometry; + log->field = field; + log->geometry.ensure_owns_direct_data(); + this->viewer_node_logs.append({viewer_node.name, std::move(log)}); +} + +void GeoTreeLog::ensure_node_warnings() +{ + if (reduced_node_warnings_) { + return; + } + for (GeoTreeLogger *tree_logger : tree_loggers_) { + for (const GeoTreeLogger::WarningWithNode &warnings : tree_logger->node_warnings) { 
+ this->nodes.lookup_or_add_default(warnings.node_name).warnings.append(warnings.warning); + this->all_warnings.append(warnings.warning); + } + } + for (const ComputeContextHash &child_hash : children_hashes_) { + GeoTreeLog &child_log = modifier_log_->get_tree_log(child_hash); + child_log.ensure_node_warnings(); + const std::optional<std::string> &group_node_name = + child_log.tree_loggers_[0]->group_node_name; + if (group_node_name.has_value()) { + this->nodes.lookup_or_add_default(*group_node_name).warnings.extend(child_log.all_warnings); + } + this->all_warnings.extend(child_log.all_warnings); + } + reduced_node_warnings_ = true; +} + +void GeoTreeLog::ensure_node_run_time() +{ + if (reduced_node_run_times_) { + return; + } + for (GeoTreeLogger *tree_logger : tree_loggers_) { + for (const GeoTreeLogger::NodeExecutionTime &timings : tree_logger->node_execution_times) { + const std::chrono::nanoseconds duration = timings.end - timings.start; + this->nodes.lookup_or_add_default_as(timings.node_name).run_time += duration; + this->run_time_sum += duration; + } + } + for (const ComputeContextHash &child_hash : children_hashes_) { + GeoTreeLog &child_log = modifier_log_->get_tree_log(child_hash); + child_log.ensure_node_run_time(); + const std::optional<std::string> &group_node_name = + child_log.tree_loggers_[0]->group_node_name; + if (group_node_name.has_value()) { + this->nodes.lookup_or_add_default(*group_node_name).run_time += child_log.run_time_sum; + } + this->run_time_sum += child_log.run_time_sum; + } + reduced_node_run_times_ = true; +} + +void GeoTreeLog::ensure_socket_values() +{ + if (reduced_socket_values_) { + return; + } + for (GeoTreeLogger *tree_logger : tree_loggers_) { + for (const GeoTreeLogger::SocketValueLog &value_log_data : tree_logger->input_socket_values) { + this->nodes.lookup_or_add_as(value_log_data.node_name) + .input_values_.add(value_log_data.socket_identifier, value_log_data.value.get()); + } + for (const GeoTreeLogger::SocketValueLog 
&value_log_data : tree_logger->output_socket_values) { + this->nodes.lookup_or_add_as(value_log_data.node_name) + .output_values_.add(value_log_data.socket_identifier, value_log_data.value.get()); + } + } + reduced_socket_values_ = true; +} + +void GeoTreeLog::ensure_viewer_node_logs() +{ + if (reduced_viewer_node_logs_) { + return; + } + for (GeoTreeLogger *tree_logger : tree_loggers_) { + for (const GeoTreeLogger::ViewerNodeLogWithNode &viewer_log : tree_logger->viewer_node_logs) { + this->viewer_node_logs.add(viewer_log.node_name, viewer_log.viewer_log.get()); + } + } + reduced_viewer_node_logs_ = true; +} + +void GeoTreeLog::ensure_existing_attributes() +{ + if (reduced_existing_attributes_) { + return; + } + this->ensure_socket_values(); + + Set<StringRef> names; + + auto handle_value_log = [&](const ValueLog &value_log) { + const GeometryInfoLog *geo_log = dynamic_cast<const GeometryInfoLog *>(&value_log); + if (geo_log == nullptr) { + return; + } + for (const GeometryAttributeInfo &attribute : geo_log->attributes) { + if (names.add(attribute.name)) { + this->existing_attributes.append(&attribute); + } + } + }; + + for (const GeoNodeLog &node_log : this->nodes.values()) { + for (const ValueLog *value_log : node_log.input_values_.values()) { + handle_value_log(*value_log); + } + for (const ValueLog *value_log : node_log.output_values_.values()) { + handle_value_log(*value_log); + } + } + reduced_existing_attributes_ = true; +} + +void GeoTreeLog::ensure_used_named_attributes() +{ + if (reduced_used_named_attributes_) { + return; + } + + auto add_attribute = [&](const StringRef node_name, + const StringRef attribute_name, + const NamedAttributeUsage &usage) { + this->nodes.lookup_or_add_as(node_name).used_named_attributes.lookup_or_add_as(attribute_name, + usage) |= usage; + this->used_named_attributes.lookup_or_add_as(attribute_name, usage) |= usage; + }; + + for (GeoTreeLogger *tree_logger : tree_loggers_) { + for (const GeoTreeLogger::AttributeUsageWithNode 
&item : tree_logger->used_named_attributes) { + add_attribute(item.node_name, item.attribute_name, item.usage); + } + } + for (const ComputeContextHash &child_hash : children_hashes_) { + GeoTreeLog &child_log = modifier_log_->get_tree_log(child_hash); + child_log.ensure_used_named_attributes(); + if (const std::optional<std::string> &group_node_name = + child_log.tree_loggers_[0]->group_node_name) { + for (const auto &item : child_log.used_named_attributes.items()) { + add_attribute(*group_node_name, item.key, item.value); + } + } + } + reduced_used_named_attributes_ = true; +} + +void GeoTreeLog::ensure_debug_messages() +{ + if (reduced_debug_messages_) { + return; + } + for (GeoTreeLogger *tree_logger : tree_loggers_) { + for (const GeoTreeLogger::DebugMessage &debug_message : tree_logger->debug_messages) { + this->nodes.lookup_or_add_as(debug_message.node_name) + .debug_messages.append(debug_message.message); + } + } + reduced_debug_messages_ = true; +} + +ValueLog *GeoTreeLog::find_socket_value_log(const bNodeSocket &query_socket) +{ + /** + * Geometry nodes does not log values for every socket. That would produce a lot of redundant + * data,because often many linked sockets have the same value. To find the logged value for a + * socket one might have to look at linked sockets as well. + */ + + BLI_assert(reduced_socket_values_); + if (query_socket.is_multi_input()) { + /* Not supported currently. */ + return nullptr; + } + + Set<const bNodeSocket *> added_sockets; + Stack<const bNodeSocket *> sockets_to_check; + sockets_to_check.push(&query_socket); + added_sockets.add(&query_socket); + + while (!sockets_to_check.is_empty()) { + const bNodeSocket &socket = *sockets_to_check.pop(); + const bNode &node = socket.owner_node(); + if (GeoNodeLog *node_log = this->nodes.lookup_ptr(node.name)) { + ValueLog *value_log = socket.is_input() ? 
+ node_log->input_values_.lookup_default(socket.identifier, + nullptr) : + node_log->output_values_.lookup_default(socket.identifier, + nullptr); + if (value_log != nullptr) { + return value_log; + } + } + + if (socket.is_input()) { + const Span<const bNodeLink *> links = socket.directly_linked_links(); + for (const bNodeLink *link : links) { + const bNodeSocket &from_socket = *link->fromsock; + if (added_sockets.add(&from_socket)) { + sockets_to_check.push(&from_socket); + } + } + } + else { + if (node.is_reroute()) { + const bNodeSocket &input_socket = node.input_socket(0); + if (added_sockets.add(&input_socket)) { + sockets_to_check.push(&input_socket); + } + const Span<const bNodeLink *> links = input_socket.directly_linked_links(); + for (const bNodeLink *link : links) { + const bNodeSocket &from_socket = *link->fromsock; + if (added_sockets.add(&from_socket)) { + sockets_to_check.push(&from_socket); + } + } + } + else if (node.is_muted()) { + if (const bNodeSocket *input_socket = socket.internal_link_input()) { + if (added_sockets.add(input_socket)) { + sockets_to_check.push(input_socket); + } + const Span<const bNodeLink *> links = input_socket->directly_linked_links(); + for (const bNodeLink *link : links) { + const bNodeSocket &from_socket = *link->fromsock; + if (added_sockets.add(&from_socket)) { + sockets_to_check.push(&from_socket); + } + } + } + } + } + } + + return nullptr; +} + +GeoTreeLogger &GeoModifierLog::get_local_tree_logger(const ComputeContext &compute_context) +{ + LocalData &local_data = data_per_thread_.local(); + Map<ComputeContextHash, destruct_ptr<GeoTreeLogger>> &local_tree_loggers = + local_data.tree_logger_by_context; + destruct_ptr<GeoTreeLogger> &tree_logger_ptr = local_tree_loggers.lookup_or_add_default( + compute_context.hash()); + if (tree_logger_ptr) { + return *tree_logger_ptr; + } + tree_logger_ptr = local_data.allocator.construct<GeoTreeLogger>(); + GeoTreeLogger &tree_logger = *tree_logger_ptr; + tree_logger.allocator = 
&local_data.allocator; + const ComputeContext *parent_compute_context = compute_context.parent(); + if (parent_compute_context != nullptr) { + tree_logger.parent_hash = parent_compute_context->hash(); + GeoTreeLogger &parent_logger = this->get_local_tree_logger(*parent_compute_context); + parent_logger.children_hashes.append(compute_context.hash()); + } + if (const bke::NodeGroupComputeContext *node_group_compute_context = + dynamic_cast<const bke::NodeGroupComputeContext *>(&compute_context)) { + tree_logger.group_node_name.emplace(node_group_compute_context->node_name()); + } + return tree_logger; +} + +GeoTreeLog &GeoModifierLog::get_tree_log(const ComputeContextHash &compute_context_hash) +{ + GeoTreeLog &reduced_tree_log = *tree_logs_.lookup_or_add_cb(compute_context_hash, [&]() { + Vector<GeoTreeLogger *> tree_logs; + for (LocalData &local_data : data_per_thread_) { + destruct_ptr<GeoTreeLogger> *tree_log = local_data.tree_logger_by_context.lookup_ptr( + compute_context_hash); + if (tree_log != nullptr) { + tree_logs.append(tree_log->get()); + } + } + return std::make_unique<GeoTreeLog>(this, std::move(tree_logs)); + }); + return reduced_tree_log; +} + +struct ObjectAndModifier { + const Object *object; + const NodesModifierData *nmd; +}; + +static std::optional<ObjectAndModifier> get_modifier_for_node_editor(const SpaceNode &snode) +{ + if (snode.id == nullptr) { + return std::nullopt; + } + if (GS(snode.id->name) != ID_OB) { + return std::nullopt; + } + const Object *object = reinterpret_cast<Object *>(snode.id); + const NodesModifierData *used_modifier = nullptr; + if (snode.flag & SNODE_PIN) { + LISTBASE_FOREACH (const ModifierData *, md, &object->modifiers) { + if (md->type == eModifierType_Nodes) { + const NodesModifierData *nmd = reinterpret_cast<const NodesModifierData *>(md); + /* Would be good to store the name of the pinned modifier in the node editor. 
*/ + if (nmd->node_group == snode.nodetree) { + used_modifier = nmd; + break; + } + } + } + } + else { + LISTBASE_FOREACH (const ModifierData *, md, &object->modifiers) { + if (md->type == eModifierType_Nodes) { + const NodesModifierData *nmd = reinterpret_cast<const NodesModifierData *>(md); + if (nmd->node_group == snode.nodetree) { + if (md->flag & eModifierFlag_Active) { + used_modifier = nmd; + break; + } + } + } + } + } + if (used_modifier == nullptr) { + return std::nullopt; + } + return ObjectAndModifier{object, used_modifier}; +} + +GeoTreeLog *GeoModifierLog::get_tree_log_for_node_editor(const SpaceNode &snode) +{ + std::optional<ObjectAndModifier> object_and_modifier = get_modifier_for_node_editor(snode); + if (!object_and_modifier) { + return nullptr; + } + GeoModifierLog *modifier_log = static_cast<GeoModifierLog *>( + object_and_modifier->nmd->runtime_eval_log); + if (modifier_log == nullptr) { + return nullptr; + } + Vector<const bNodeTreePath *> tree_path = snode.treepath; + if (tree_path.is_empty()) { + return nullptr; + } + ComputeContextBuilder compute_context_builder; + compute_context_builder.push<bke::ModifierComputeContext>( + object_and_modifier->nmd->modifier.name); + for (const bNodeTreePath *path_item : tree_path.as_span().drop_front(1)) { + compute_context_builder.push<bke::NodeGroupComputeContext>(path_item->node_name); + } + return &modifier_log->get_tree_log(compute_context_builder.hash()); +} + +const ViewerNodeLog *GeoModifierLog::find_viewer_node_log_for_spreadsheet( + const SpaceSpreadsheet &sspreadsheet) +{ + Vector<const SpreadsheetContext *> context_path = sspreadsheet.context_path; + if (context_path.size() < 3) { + return nullptr; + } + if (context_path[0]->type != SPREADSHEET_CONTEXT_OBJECT) { + return nullptr; + } + if (context_path[1]->type != SPREADSHEET_CONTEXT_MODIFIER) { + return nullptr; + } + const SpreadsheetContextObject *object_context = + reinterpret_cast<const SpreadsheetContextObject *>(context_path[0]); + 
const SpreadsheetContextModifier *modifier_context = + reinterpret_cast<const SpreadsheetContextModifier *>(context_path[1]); + if (object_context->object == nullptr) { + return nullptr; + } + NodesModifierData *nmd = nullptr; + LISTBASE_FOREACH (ModifierData *, md, &object_context->object->modifiers) { + if (STREQ(md->name, modifier_context->modifier_name)) { + if (md->type == eModifierType_Nodes) { + nmd = reinterpret_cast<NodesModifierData *>(md); + } + } + } + if (nmd == nullptr) { + return nullptr; + } + if (nmd->runtime_eval_log == nullptr) { + return nullptr; + } + nodes::geo_eval_log::GeoModifierLog *modifier_log = + static_cast<nodes::geo_eval_log::GeoModifierLog *>(nmd->runtime_eval_log); + + ComputeContextBuilder compute_context_builder; + compute_context_builder.push<bke::ModifierComputeContext>(modifier_context->modifier_name); + for (const SpreadsheetContext *context : context_path.as_span().drop_front(2).drop_back(1)) { + if (context->type != SPREADSHEET_CONTEXT_NODE) { + return nullptr; + } + const SpreadsheetContextNode &node_context = *reinterpret_cast<const SpreadsheetContextNode *>( + context); + compute_context_builder.push<bke::NodeGroupComputeContext>(node_context.node_name); + } + const ComputeContextHash context_hash = compute_context_builder.hash(); + nodes::geo_eval_log::GeoTreeLog &tree_log = modifier_log->get_tree_log(context_hash); + tree_log.ensure_viewer_node_logs(); + + const SpreadsheetContext *last_context = context_path.last(); + if (last_context->type != SPREADSHEET_CONTEXT_NODE) { + return nullptr; + } + const SpreadsheetContextNode &last_node_context = + *reinterpret_cast<const SpreadsheetContextNode *>(last_context); + const ViewerNodeLog *viewer_log = tree_log.viewer_node_logs.lookup(last_node_context.node_name); + return viewer_log; +} + +} // namespace blender::nodes::geo_eval_log diff --git a/source/blender/nodes/intern/node_geometry_exec.cc b/source/blender/nodes/intern/node_geometry_exec.cc index 
953dce035c2..1833774fe33 100644 --- a/source/blender/nodes/intern/node_geometry_exec.cc +++ b/source/blender/nodes/intern/node_geometry_exec.cc @@ -11,34 +11,27 @@ #include "node_geometry_util.hh" -using blender::nodes::geometry_nodes_eval_log::LocalGeoLogger; - namespace blender::nodes { void GeoNodeExecParams::error_message_add(const NodeWarningType type, std::string message) const { - if (provider_->logger == nullptr) { - return; + if (geo_eval_log::GeoTreeLogger *tree_logger = this->get_local_tree_logger()) { + tree_logger->node_warnings.append({node_.name, {type, std::move(message)}}); } - LocalGeoLogger &local_logger = provider_->logger->local(); - local_logger.log_node_warning(provider_->dnode, type, std::move(message)); } void GeoNodeExecParams::used_named_attribute(std::string attribute_name, - const eNamedAttrUsage usage) + const NamedAttributeUsage usage) { - if (provider_->logger == nullptr) { - return; + if (geo_eval_log::GeoTreeLogger *tree_logger = this->get_local_tree_logger()) { + tree_logger->used_named_attributes.append({node_.name, std::move(attribute_name), usage}); } - LocalGeoLogger &local_logger = provider_->logger->local(); - local_logger.log_used_named_attribute(provider_->dnode, std::move(attribute_name), usage); } void GeoNodeExecParams::check_input_geometry_set(StringRef identifier, const GeometrySet &geometry_set) const { - const SocketDeclaration &decl = - *provider_->dnode->input_by_identifier(identifier).runtime->declaration; + const SocketDeclaration &decl = *node_.input_by_identifier(identifier).runtime->declaration; const decl::Geometry *geo_decl = dynamic_cast<const decl::Geometry *>(&decl); if (geo_decl == nullptr) { return; @@ -118,7 +111,7 @@ void GeoNodeExecParams::check_output_geometry_set(const GeometrySet &geometry_se const bNodeSocket *GeoNodeExecParams::find_available_socket(const StringRef name) const { - for (const bNodeSocket *socket : provider_->dnode->runtime->inputs) { + for (const bNodeSocket *socket : 
node_.input_sockets()) { if (socket->is_available() && socket->name == name) { return socket; } @@ -129,19 +122,19 @@ const bNodeSocket *GeoNodeExecParams::find_available_socket(const StringRef name std::string GeoNodeExecParams::attribute_producer_name() const { - return provider_->dnode->label_or_name() + TIP_(" node"); + return node_.label_or_name() + TIP_(" node"); } void GeoNodeExecParams::set_default_remaining_outputs() { - provider_->set_default_remaining_outputs(); + params_.set_default_remaining_outputs(); } void GeoNodeExecParams::check_input_access(StringRef identifier, const CPPType *requested_type) const { const bNodeSocket *found_socket = nullptr; - for (const bNodeSocket *socket : provider_->dnode->input_sockets()) { + for (const bNodeSocket *socket : node_.input_sockets()) { if (socket->identifier == identifier) { found_socket = socket; break; @@ -151,7 +144,7 @@ void GeoNodeExecParams::check_input_access(StringRef identifier, if (found_socket == nullptr) { std::cout << "Did not find an input socket with the identifier '" << identifier << "'.\n"; std::cout << "Possible identifiers are: "; - for (const bNodeSocket *socket : provider_->dnode->input_sockets()) { + for (const bNodeSocket *socket : node_.input_sockets()) { if (socket->is_available()) { std::cout << "'" << socket->identifier << "', "; } @@ -164,13 +157,7 @@ void GeoNodeExecParams::check_input_access(StringRef identifier, << "' is disabled.\n"; BLI_assert_unreachable(); } - else if (!provider_->can_get_input(identifier)) { - std::cout << "The identifier '" << identifier - << "' is valid, but there is no value for it anymore.\n"; - std::cout << "Most likely it has been extracted before.\n"; - BLI_assert_unreachable(); - } - else if (requested_type != nullptr) { + else if (requested_type != nullptr && (found_socket->flag & SOCK_MULTI_INPUT) == 0) { const CPPType &expected_type = *found_socket->typeinfo->geometry_nodes_cpp_type; if (*requested_type != expected_type) { std::cout << "The 
requested type '" << requested_type->name() << "' is incorrect. Expected '" @@ -183,7 +170,7 @@ void GeoNodeExecParams::check_input_access(StringRef identifier, void GeoNodeExecParams::check_output_access(StringRef identifier, const CPPType &value_type) const { const bNodeSocket *found_socket = nullptr; - for (const bNodeSocket *socket : provider_->dnode->output_sockets()) { + for (const bNodeSocket *socket : node_.output_sockets()) { if (socket->identifier == identifier) { found_socket = socket; break; @@ -193,8 +180,8 @@ void GeoNodeExecParams::check_output_access(StringRef identifier, const CPPType if (found_socket == nullptr) { std::cout << "Did not find an output socket with the identifier '" << identifier << "'.\n"; std::cout << "Possible identifiers are: "; - for (const bNodeSocket *socket : provider_->dnode->output_sockets()) { - if (!(socket->flag & SOCK_UNAVAIL)) { + for (const bNodeSocket *socket : node_.output_sockets()) { + if (socket->is_available()) { std::cout << "'" << socket->identifier << "', "; } } @@ -206,7 +193,7 @@ void GeoNodeExecParams::check_output_access(StringRef identifier, const CPPType << "' is disabled.\n"; BLI_assert_unreachable(); } - else if (!provider_->can_set_output(identifier)) { + else if (params_.output_was_set(this->get_output_index(identifier))) { std::cout << "The identifier '" << identifier << "' has been set already.\n"; BLI_assert_unreachable(); } diff --git a/source/blender/nodes/intern/node_multi_function.cc b/source/blender/nodes/intern/node_multi_function.cc index 1f8397923e9..d731fe8f877 100644 --- a/source/blender/nodes/intern/node_multi_function.cc +++ b/source/blender/nodes/intern/node_multi_function.cc @@ -3,21 +3,21 @@ #include "NOD_multi_function.hh" #include "BKE_node.h" +#include "BKE_node_runtime.hh" namespace blender::nodes { -NodeMultiFunctions::NodeMultiFunctions(const DerivedNodeTree &tree) +NodeMultiFunctions::NodeMultiFunctions(const bNodeTree &tree) { - for (const bNodeTree *btree : 
tree.used_btrees()) { - for (const bNode *bnode : btree->all_nodes()) { - if (bnode->typeinfo->build_multi_function == nullptr) { - continue; - } - NodeMultiFunctionBuilder builder{*bnode, *btree}; - bnode->typeinfo->build_multi_function(builder); - if (builder.built_fn_ != nullptr) { - map_.add_new(bnode, {builder.built_fn_, std::move(builder.owned_built_fn_)}); - } + tree.ensure_topology_cache(); + for (const bNode *bnode : tree.all_nodes()) { + if (bnode->typeinfo->build_multi_function == nullptr) { + continue; + } + NodeMultiFunctionBuilder builder{*bnode, tree}; + bnode->typeinfo->build_multi_function(builder); + if (builder.built_fn_ != nullptr) { + map_.add_new(bnode, {builder.built_fn_, std::move(builder.owned_built_fn_)}); } } } |