diff options
author | Hans Goudey <h.goudey@me.com> | 2022-08-11 18:03:47 +0300 |
---|---|---|
committer | Hans Goudey <h.goudey@me.com> | 2022-08-11 18:03:47 +0300 |
commit | 9b01fc7f2ab998f460dd7ac5bbdd593dca832c38 (patch) | |
tree | 2f43a3ae60daa97bcdd677753c3c561018e13aed /source/blender | |
parent | 96f1e44119ceff2d4f8a00e998910eed479eab03 (diff) | |
parent | 4cbd799373d07498813ad84f8cee5f3a8c269631 (diff) |
Merge branch 'master' into refactor-mesh-hide-generic
Diffstat (limited to 'source/blender')
258 files changed, 12443 insertions, 727 deletions
diff --git a/source/blender/blenfont/intern/blf_font.c b/source/blender/blenfont/intern/blf_font.c index 5f904d86b03..b958f3c2336 100644 --- a/source/blender/blenfont/intern/blf_font.c +++ b/source/blender/blenfont/intern/blf_font.c @@ -70,7 +70,7 @@ static ft_pix blf_font_width_max_ft_pix(struct FontBLF *font); /* Return glyph id from charcode. */ uint blf_get_char_index(struct FontBLF *font, uint charcode) { - return FT_Get_Char_Index(font->face, charcode); + return blf_ensure_face(font) ? FT_Get_Char_Index(font->face, charcode) : 0; } /* -------------------------------------------------------------------- */ diff --git a/source/blender/blenkernel/BKE_idtype.h b/source/blender/blenkernel/BKE_idtype.h index 30f7ed45859..ec8d6f76a0b 100644 --- a/source/blender/blenkernel/BKE_idtype.h +++ b/source/blender/blenkernel/BKE_idtype.h @@ -109,7 +109,7 @@ typedef struct IDTypeInfo { */ short id_code; /** - * Bitflag matching id_code, used for filtering (e.g. in file browser), see DNA_ID.h's + * Bit-flag matching id_code, used for filtering (e.g. in file browser), see DNA_ID.h's * FILTER_ID_XX enums. */ uint64_t id_filter; diff --git a/source/blender/blenkernel/BKE_key.h b/source/blender/blenkernel/BKE_key.h index 37b30d63722..9f506ded8e9 100644 --- a/source/blender/blenkernel/BKE_key.h +++ b/source/blender/blenkernel/BKE_key.h @@ -47,7 +47,7 @@ void key_curve_normal_weights(float t, float data[4], int type); /** * Returns key coordinates (+ tilt) when key applied, NULL otherwise. * - * \param obdata if given, also update that geometry with the result of the shape keys evaluation. + * \param obdata: if given, also update that geometry with the result of the shape keys evaluation. 
*/ float *BKE_key_evaluate_object_ex( struct Object *ob, int *r_totelem, float *arr, size_t arr_size, struct ID *obdata); diff --git a/source/blender/blenkernel/BKE_lib_id.h b/source/blender/blenkernel/BKE_lib_id.h index 148e6ec2791..febdad2ca0d 100644 --- a/source/blender/blenkernel/BKE_lib_id.h +++ b/source/blender/blenkernel/BKE_lib_id.h @@ -444,18 +444,20 @@ struct ID *BKE_id_copy(struct Main *bmain, const struct ID *id); * Currently, it only handles the given ID, and their shape keys and actions if any, according to * the given `duplicate_flags`. * - * \param duplicate_flags is of type #eDupli_ID_Flags, see #UserDef.dupflag. Currently only + * \param duplicate_flags: is of type #eDupli_ID_Flags, see #UserDef.dupflag. Currently only * `USER_DUP_LINKED_ID` and `USER_DUP_ACT` have an effect here. - * \param copy_flags flags passed to #BKE_id_copy_ex. + * \param copy_flags: flags passed to #BKE_id_copy_ex. */ struct ID *BKE_id_copy_for_duplicate(struct Main *bmain, struct ID *id, uint duplicate_flags, int copy_flags); -/* Special version of BKE_id_copy which is safe from using evaluated id as source with a copy +/** + * Special version of #BKE_id_copy which is safe from using evaluated id as source with a copy * result appearing in the main database. - * Takes care of the referenced data-blocks consistency. */ + * Takes care of the referenced data-blocks consistency. + */ struct ID *BKE_id_copy_for_use_in_bmain(struct Main *bmain, const struct ID *id); /** diff --git a/source/blender/blenkernel/BKE_lib_override.h b/source/blender/blenkernel/BKE_lib_override.h index f933946164c..d1eb6e01ae3 100644 --- a/source/blender/blenkernel/BKE_lib_override.h +++ b/source/blender/blenkernel/BKE_lib_override.h @@ -66,7 +66,7 @@ void BKE_lib_override_library_free(struct IDOverrideLibrary **override, bool do_ * \note This is especially useful when `id` is a non-real override (e.g. embedded ID like a master * collection or root node tree, or a shape key). 
* - * \param r_owner_id If given, will be set with the actual ID owning the return liboverride data. + * \param r_owner_id: If given, will be set with the actual ID owning the return liboverride data. */ IDOverrideLibrary *BKE_lib_override_library_get(struct Main *bmain, struct ID *id, diff --git a/source/blender/blenkernel/BKE_mesh_mapping.h b/source/blender/blenkernel/BKE_mesh_mapping.h index 67f1bbbfd72..5bc6e8a02af 100644 --- a/source/blender/blenkernel/BKE_mesh_mapping.h +++ b/source/blender/blenkernel/BKE_mesh_mapping.h @@ -20,7 +20,7 @@ struct MVert; /* UvVertMap */ #define STD_UV_CONNECT_LIMIT 0.0001f -/* Map from uv vertex to face. Used by select linked, uv subsurf and obj exporter. */ +/* Map from uv vertex to face. Used by select linked, uv subdivision-surface and obj exporter. */ typedef struct UvVertMap { struct UvMapVert **vert; struct UvMapVert *buf; @@ -68,13 +68,20 @@ typedef struct UvElementMap { /** Total number of unique UVs. */ int total_unique_uvs; - /* If Non-NULL, address UvElements by `BM_elem_index_get(BMVert*)`. */ + /** If Non-NULL, address UvElements by `BM_elem_index_get(BMVert*)`. */ struct UvElement **vertex; - /* Number of Islands in the mesh */ - int totalIslands; - /* Stores the starting index in buf where each island begins */ - int *islandIndices; + /** If Non-NULL, pointer to local head of each unique UV. */ + struct UvElement **head_table; + + /** Number of islands, or zero if not calculated. */ + int total_islands; + /** Array of starting index in #storage where each island begins. */ + int *island_indices; + /** Array of number of UVs in each island. */ + int *island_total_uvs; + /** Array of number of unique UVs in each island. 
*/ + int *island_total_unique_uvs; } UvElementMap; /* Connectivity data */ diff --git a/source/blender/blenkernel/BKE_node.h b/source/blender/blenkernel/BKE_node.h index 02e3cefe6a5..b42b9df510d 100644 --- a/source/blender/blenkernel/BKE_node.h +++ b/source/blender/blenkernel/BKE_node.h @@ -101,6 +101,7 @@ typedef struct bNodeSocketTemplate { namespace blender { class CPPType; namespace nodes { +class DNode; class NodeMultiFunctionBuilder; class GeoNodeExecParams; class NodeDeclarationBuilder; @@ -109,6 +110,11 @@ class GatherLinkSearchOpParams; namespace fn { class MFDataType; } // namespace fn +namespace realtime_compositor { +class Context; +class NodeOperation; +class ShaderNode; +} // namespace realtime_compositor } // namespace blender using CPPTypeHandle = blender::CPPType; @@ -123,7 +129,14 @@ using SocketGetGeometryNodesCPPValueFunction = void (*)(const struct bNodeSocket using NodeGatherSocketLinkOperationsFunction = void (*)(blender::nodes::GatherLinkSearchOpParams ¶ms); +using NodeGetCompositorOperationFunction = blender::realtime_compositor::NodeOperation + *(*)(blender::realtime_compositor::Context &context, blender::nodes::DNode node); +using NodeGetCompositorShaderNodeFunction = + blender::realtime_compositor::ShaderNode *(*)(blender::nodes::DNode node); + #else +typedef void *NodeGetCompositorOperationFunction; +typedef void *NodeGetCompositorShaderNodeFunction; typedef void *NodeMultiFunctionBuildFunction; typedef void *NodeGeometryExecFunction; typedef void *NodeDeclareFunction; @@ -309,6 +322,14 @@ typedef struct bNodeType { /* gpu */ NodeGPUExecFunction gpu_fn; + /* Get an instance of this node's compositor operation. Freeing the instance is the + * responsibility of the caller. */ + NodeGetCompositorOperationFunction get_compositor_operation; + + /* Get an instance of this node's compositor shader node. Freeing the instance is the + * responsibility of the caller. 
*/ + NodeGetCompositorShaderNodeFunction get_compositor_shader_node; + /* Build a multi-function for this node. */ NodeMultiFunctionBuildFunction build_multi_function; diff --git a/source/blender/blenkernel/intern/cryptomatte_test.cc b/source/blender/blenkernel/intern/cryptomatte_test.cc index 2f15242b4a4..bb09b276645 100644 --- a/source/blender/blenkernel/intern/cryptomatte_test.cc +++ b/source/blender/blenkernel/intern/cryptomatte_test.cc @@ -163,7 +163,7 @@ TEST(cryptomatte, session_from_stamp_data) * best as possible. */ TEST(cryptomatte, parsing_malformed_manifests) { - /* Manifest from multilayer.exr in the cryptomatte git-repository. */ + /* Manifest from `multilayer.exr` in the cryptomatte git-repository. */ test_cryptomatte_manifest( R"({"/obj/instance1:instances:0":"0d54c6cc","/obj/instance1:instances:1":"293d9340","/obj/instance1:instances:110":"ccb9e1f2","/obj/instance1:instances:111":"f8dd3a48","/obj/instance1:instances:112":"a99e07a8","/obj/instance1:instances:113":"e75599a4","/obj/instance1:instances:114":"794200f3","/obj/instance1:instances:115":"2a3a1728","/obj/instance1:instances:116":"478544a1","/obj/instance1:instances:117":"b2bd969a","/obj/instance1:instances:10":"3a0c8681","/obj/instance1:instances:11":"01e5970d","/obj/box:polygons:1":"9d416418","/obj/instance1:instances:100":"2dcd2966","/obj/instance1:instances:101":"9331cd82","/obj/instance1:instances:102":"df50fccb","/obj/instance1:instances:103":"97f8590d","/obj/instance1:instances:104":"bbcd220d","/obj/instance1:instances:105":"4ae06139","/obj/instance1:instances:106":"8873d5ea","/obj/instance1:instances:107":"39d8af8d","/obj/instance1:instances:108":"bb11bd4e","/obj/instance1:instances:109":"a32bba35"})", 
R"({"\/obj\/box:polygons:1":"9d416418","\/obj\/instance1:instances:0":"0d54c6cc","\/obj\/instance1:instances:1":"293d9340","\/obj\/instance1:instances:10":"3a0c8681","\/obj\/instance1:instances:100":"2dcd2966","\/obj\/instance1:instances:101":"9331cd82","\/obj\/instance1:instances:102":"df50fccb","\/obj\/instance1:instances:103":"97f8590d","\/obj\/instance1:instances:104":"bbcd220d","\/obj\/instance1:instances:105":"4ae06139","\/obj\/instance1:instances:106":"8873d5ea","\/obj\/instance1:instances:107":"39d8af8d","\/obj\/instance1:instances:108":"bb11bd4e","\/obj\/instance1:instances:109":"a32bba35","\/obj\/instance1:instances:11":"01e5970d","\/obj\/instance1:instances:110":"ccb9e1f2","\/obj\/instance1:instances:111":"f8dd3a48","\/obj\/instance1:instances:112":"a99e07a8","\/obj\/instance1:instances:113":"e75599a4","\/obj\/instance1:instances:114":"794200f3","\/obj\/instance1:instances:115":"2a3a1728","\/obj\/instance1:instances:116":"478544a1","\/obj\/instance1:instances:117":"b2bd969a","\/obj\/instance1:instance)"); diff --git a/source/blender/blenkernel/intern/gpencil_curve.c b/source/blender/blenkernel/intern/gpencil_curve.c index d68b322e4c5..bf224a9613e 100644 --- a/source/blender/blenkernel/intern/gpencil_curve.c +++ b/source/blender/blenkernel/intern/gpencil_curve.c @@ -337,7 +337,6 @@ static void gpencil_convert_spline(Main *bmain, /* Add stroke to frame. 
*/ BLI_addtail(&gpf->strokes, gps); - float *coord_array = NULL; float init_co[3]; switch (nu->type) { @@ -376,8 +375,7 @@ static void gpencil_convert_spline(Main *bmain, BezTriple *bezt = &nu->bezt[inext]; bool last = (bool)(s == segments - 1); - coord_array = MEM_callocN((size_t)3 * resolu * sizeof(float), __func__); - + float *coord_array = MEM_callocN(sizeof(float[3]) * resolu, __func__); for (int j = 0; j < 3; j++) { BKE_curve_forward_diff_bezier(prevbezt->vec[1][j], prevbezt->vec[2][j], @@ -397,8 +395,9 @@ static void gpencil_convert_spline(Main *bmain, gpencil_add_new_points( gps, coord_array, radius_start, radius_end, init, resolu, init_co, last); + /* Free memory. */ - MEM_SAFE_FREE(coord_array); + MEM_freeN(coord_array); /* As the last point of segment is the first point of next segment, back one array * element to avoid duplicated points on the same location. @@ -419,7 +418,7 @@ static void gpencil_convert_spline(Main *bmain, nurb_points = (nu->pntsu - 1) * resolu; } /* Get all curve points. */ - coord_array = MEM_callocN(sizeof(float[3]) * nurb_points, __func__); + float *coord_array = MEM_callocN(sizeof(float[3]) * nurb_points, __func__); BKE_nurb_makeCurve(nu, coord_array, NULL, NULL, NULL, resolu, sizeof(float[3])); /* Allocate memory for storage points. */ @@ -429,7 +428,7 @@ static void gpencil_convert_spline(Main *bmain, /* Add points. */ gpencil_add_new_points(gps, coord_array, 1.0f, 1.0f, 0, gps->totpoints, init_co, false); - MEM_SAFE_FREE(coord_array); + MEM_freeN(coord_array); } break; } diff --git a/source/blender/compositor/CMakeLists.txt b/source/blender/compositor/CMakeLists.txt index c555dbafbc5..f49a9694ab3 100644 --- a/source/blender/compositor/CMakeLists.txt +++ b/source/blender/compositor/CMakeLists.txt @@ -1,8 +1,9 @@ # SPDX-License-Identifier: GPL-2.0-or-later # Copyright 2011 Blender Foundation. All rights reserved. -if(WITH_COMPOSITOR_CPU) +add_subdirectory(realtime_compositor) +if(WITH_COMPOSITOR_CPU) set(INC . 
intern diff --git a/source/blender/compositor/operations/COM_MovieClipOperation.cc b/source/blender/compositor/operations/COM_MovieClipOperation.cc index d7cf41cf422..b62d972e807 100644 --- a/source/blender/compositor/operations/COM_MovieClipOperation.cc +++ b/source/blender/compositor/operations/COM_MovieClipOperation.cc @@ -74,7 +74,7 @@ void MovieClipBaseOperation::execute_pixel_sampled(float output[4], zero_v4(output); } else if (ibuf->rect == nullptr && ibuf->rect_float == nullptr) { - /* Happens for multilayer exr, i.e. */ + /* Happens for multi-layer EXR, i.e. */ zero_v4(output); } else { diff --git a/source/blender/compositor/realtime_compositor/CMakeLists.txt b/source/blender/compositor/realtime_compositor/CMakeLists.txt new file mode 100644 index 00000000000..9fe156c3ef2 --- /dev/null +++ b/source/blender/compositor/realtime_compositor/CMakeLists.txt @@ -0,0 +1,66 @@ +# SPDX-License-Identifier: GPL-2.0-or-later + +set(INC + . + ../../gpu + ../../nodes + ../../imbuf + ../../blenlib + ../../makesdna + ../../makesrna + ../../blenkernel + ../../gpu/intern + ../../../../intern/guardedalloc +) + + +set(SRC + intern/compile_state.cc + intern/context.cc + intern/conversion_operation.cc + intern/domain.cc + intern/evaluator.cc + intern/input_single_value_operation.cc + intern/node_operation.cc + intern/operation.cc + intern/realize_on_domain_operation.cc + intern/reduce_to_single_value_operation.cc + intern/result.cc + intern/scheduler.cc + intern/shader_node.cc + intern/shader_operation.cc + intern/simple_operation.cc + intern/static_shader_manager.cc + intern/texture_pool.cc + intern/utilities.cc + + COM_compile_state.hh + COM_context.hh + COM_conversion_operation.hh + COM_domain.hh + COM_evaluator.hh + COM_input_descriptor.hh + COM_input_single_value_operation.hh + COM_node_operation.hh + COM_operation.hh + COM_realize_on_domain_operation.hh + COM_reduce_to_single_value_operation.hh + COM_result.hh + COM_scheduler.hh + COM_shader_node.hh + 
COM_shader_operation.hh + COM_simple_operation.hh + COM_static_shader_manager.hh + COM_texture_pool.hh + COM_utilities.hh +) + +set(LIB + bf_gpu + bf_nodes + bf_imbuf + bf_blenlib + bf_blenkernel +) + +blender_add_lib(bf_realtime_compositor "${SRC}" "${INC}" "${INC_SYS}" "${LIB}") diff --git a/source/blender/compositor/realtime_compositor/COM_compile_state.hh b/source/blender/compositor/realtime_compositor/COM_compile_state.hh new file mode 100644 index 00000000000..ed6ad414e3b --- /dev/null +++ b/source/blender/compositor/realtime_compositor/COM_compile_state.hh @@ -0,0 +1,170 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ + +#pragma once + +#include "BLI_map.hh" + +#include "NOD_derived_node_tree.hh" + +#include "COM_domain.hh" +#include "COM_node_operation.hh" +#include "COM_scheduler.hh" +#include "COM_shader_operation.hh" + +namespace blender::realtime_compositor { + +using namespace nodes::derived_node_tree_types; + +/* ------------------------------------------------------------------------------------------------ + * Compile State + * + * The compile state is a utility class used to track the state of compilation when compiling the + * node tree. In particular, it tracks two important pieces of information, each of which is + * described in one of the following sections. + * + * First, it stores a mapping between all nodes and the operations they were compiled into. The + * mapping are stored independently depending on the type of the operation in the node_operations_ + * and shader_operations_ maps. So those two maps are mutually exclusive. The compiler should call + * the map_node_to_node_operation and map_node_to_shader_operation methods to populate those maps + * as soon as it compiles a node or multiple nodes into an operation. Those maps are used to + * retrieve the results of outputs linked to the inputs of operations. For more details, see the + * get_result_from_output_socket method. 
For the node tree shown below, nodes 1, 2, and 6 are + * mapped to their compiled operations in the node_operation_ map. While nodes 3 and 4 are both + * mapped to the first shader operation, and node 5 is mapped to the second shader operation in the + * shader_operations_ map. + * + * Shader Operation 1 Shader Operation 2 + * +-----------------------------------+ +------------------+ + * .------------. | .------------. .------------. | | .------------. | .------------. + * | Node 1 | | | Node 3 | | Node 4 | | | | Node 5 | | | Node 6 | + * | |----|--| |--| |---|-----|--| |--|--| | + * | | .-|--| | | | | .--|--| | | | | + * '------------' | | '------------' '------------' | | | '------------' | '------------' + * | +-----------------------------------+ | +------------------+ + * .------------. | | + * | Node 2 | | | + * | |--'----------------------------------------' + * | | + * '------------' + * + * Second, it stores the shader compile unit as well as its domain. One should first go over the + * discussion in COM_evaluator.hh for a high level description of the mechanism of the compile + * unit. The one important detail in this class is the should_compile_shader_compile_unit method, + * which implements the criteria of whether the compile unit should be compiled given the node + * currently being processed as an argument. Those criteria are described as follows. If the + * compile unit is empty as is the case when processing nodes 1, 2, and 3, then it plainly + * shouldn't be compiled. If the given node is not a shader node, then it can't be added to the + * compile unit and the unit is considered complete and should be compiled, as is the case when + * processing node 6. If the computed domain of the given node is not compatible with the domain of + * the compiled unit, then it can't be added to the unit and the unit is considered complete and + * should be compiled, as is the case when processing node 5, more on this in the next section. 
+ * Otherwise, the given node is compatible with the compile unit and can be added to it, so the + * unit shouldn't be compiled just yet, as is the case when processing node 4. + * + * Special attention should be given to the aforementioned domain compatibility criterion. One + * should first go over the discussion in COM_domain.hh for more information on domains. When a + * compile unit gets eventually compiled to a shader operation, that operation will have a certain + * operation domain, and any node that gets added to the compile unit should itself have a computed + * node domain that is compatible with that operation domain, otherwise, had the node been compiled + * into its own operation separately, the result would have been be different. For instance, + * consider the above node tree where node 1 outputs a 100x100 result, node 2 outputs a 50x50 + * result, the first input in node 3 has the highest domain priority, and the second input in node + * 5 has the highest domain priority. In this case, shader operation 1 will output a 100x100 + * result, and shader operation 2 will output a 50x50 result, because that's the computed operation + * domain for each of them. So node 6 will get a 50x50 result. Now consider the same node tree, but + * where all three nodes 3, 4, and 5 were compiled into a single shader operation as shown the node + * tree below. In that case, shader operation 1 will output a 100x100 result, because that's its + * computed operation domain. So node 6 will get a 100x100 result. As can be seen, the final result + * is different even though the node tree is the same. That's why the compiler can decide to + * compile the compile unit early even though further nodes can still be technically added to it. + * + * Shader Operation 1 + * +------------------------------------------------------+ + * .------------. | .------------. .------------. .------------. | .------------. 
+ * | Node 1 | | | Node 3 | | Node 4 | | Node 5 | | | Node 6 | + * | |----|--| |--| |------| |--|--| | + * | | .-|--| | | | .---| | | | | + * '------------' | | '------------' '------------' | '------------' | '------------' + * | +----------------------------------|-------------------+ + * .------------. | | + * | Node 2 | | | + * | |--'------------------------------------' + * | | + * '------------' + * + * To check for the domain compatibility between the compile unit and the node being processed, + * the domain of the compile unit is assumed to be the domain of the first node whose computed + * domain is not an identity domain. Identity domains corresponds to single value results, so those + * are always compatible with any domain. The domain of the compile unit is computed and set in + * the add_node_to_shader_compile_unit method. When processing a node, the computed domain of node + * is compared to the compile unit domain in the should_compile_shader_compile_unit method, noting + * that identity domains are always compatible. Node domains are computed in the + * compute_shader_node_domain method, which is analogous to Operation::compute_domain for nodes + * that are not yet compiled. */ +class CompileState { + private: + /* A reference to the node execution schedule that is being compiled. */ + const Schedule &schedule_; + /* Those two maps associate each node with the operation it was compiled into. Each node is + * either compiled into a node operation and added to node_operations, or compiled into a shader + * operation and added to shader_operations. Those maps are used to retrieve the results of + * outputs linked to the inputs of operations. See the get_result_from_output_socket method for + * more information. 
*/ + Map<DNode, NodeOperation *> node_operations_; + Map<DNode, ShaderOperation *> shader_operations_; + /* A contiguous subset of the node execution schedule that contains the group of nodes that will + * be compiled together into a Shader Operation. See the discussion in COM_evaluator.hh for + * more information. */ + ShaderCompileUnit shader_compile_unit_; + /* The domain of the shader compile unit. */ + Domain shader_compile_unit_domain_ = Domain::identity(); + + public: + /* Construct a compile state from the node execution schedule being compiled. */ + CompileState(const Schedule &schedule); + + /* Get a reference to the node execution schedule being compiled. */ + const Schedule &get_schedule(); + + /* Add an association between the given node and the give node operation that the node was + * compiled into in the node_operations_ map. */ + void map_node_to_node_operation(DNode node, NodeOperation *operation); + + /* Add an association between the given node and the give shader operation that the node was + * compiled into in the shader_operations_ map. */ + void map_node_to_shader_operation(DNode node, ShaderOperation *operation); + + /* Returns a reference to the result of the operation corresponding to the given output that the + * given output's node was compiled to. */ + Result &get_result_from_output_socket(DOutputSocket output); + + /* Add the given node to the compile unit. And if the domain of the compile unit is not yet + * determined or was determined to be an identity domain, update it to the computed domain for + * the give node. */ + void add_node_to_shader_compile_unit(DNode node); + + /* Get a reference to the shader compile unit. */ + ShaderCompileUnit &get_shader_compile_unit(); + + /* Clear the compile unit. This should be called once the compile unit is compiled to ready it to + * track the next potential compile unit. 
*/ + void reset_shader_compile_unit(); + + /* Determines if the compile unit should be compiled based on a number of criteria give the node + * currently being processed. Those criteria are as follows: + * - If compile unit is empty, then it can't and shouldn't be compiled. + * - If the given node is not a shader node, then it can't be added to the compile unit + * and the unit is considered complete and should be compiled. + * - If the computed domain of the given node is not compatible with the domain of the compile + * unit, then it can't be added to it and the unit is considered complete and should be + * compiled. */ + bool should_compile_shader_compile_unit(DNode node); + + private: + /* Compute the node domain of the given shader node. This is analogous to the + * Operation::compute_domain method, except it is computed from the node itself as opposed to a + * compiled operation. See the discussion in COM_domain.hh for more information. */ + Domain compute_shader_node_domain(DNode node); +}; + +} // namespace blender::realtime_compositor diff --git a/source/blender/compositor/realtime_compositor/COM_context.hh b/source/blender/compositor/realtime_compositor/COM_context.hh new file mode 100644 index 00000000000..b5c8cea641f --- /dev/null +++ b/source/blender/compositor/realtime_compositor/COM_context.hh @@ -0,0 +1,72 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ + +#pragma once + +#include "BLI_math_vec_types.hh" +#include "BLI_string_ref.hh" + +#include "DNA_scene_types.h" + +#include "GPU_texture.h" + +#include "COM_static_shader_manager.hh" +#include "COM_texture_pool.hh" + +namespace blender::realtime_compositor { + +/* ------------------------------------------------------------------------------------------------ + * Context + * + * A Context is an abstract class that is implemented by the caller of the evaluator to provide the + * necessary data and functionalities for the correct operation of the evaluator. 
This includes + * providing input data like render passes and the active scene, as well as references to the data + * where the output of the evaluator will be written. The class also provides a reference to the + * texture pool which should be implemented by the caller and provided during construction. + * Finally, the class have an instance of a static shader manager for convenient shader + * acquisition. */ +class Context { + private: + /* A texture pool that can be used to allocate textures for the compositor efficiently. */ + TexturePool &texture_pool_; + /* A static shader manager that can be used to acquire shaders for the compositor efficiently. */ + StaticShaderManager shader_manager_; + + public: + Context(TexturePool &texture_pool); + + /* Get the active compositing scene. */ + virtual const Scene *get_scene() const = 0; + + /* Get the dimensions of the output. */ + virtual int2 get_output_size() = 0; + + /* Get the texture representing the output where the result of the compositor should be + * written. This should be called by output nodes to get their target texture. */ + virtual GPUTexture *get_output_texture() = 0; + + /* Get the texture where the given render pass is stored. This should be called by the Render + * Layer node to populate its outputs. */ + virtual GPUTexture *get_input_texture(int view_layer, eScenePassType pass_type) = 0; + + /* Get the name of the view currently being rendered. */ + virtual StringRef get_view_name() = 0; + + /* Set an info message. This is called by the compositor evaluator to inform or warn the user + * about something, typically an error. The implementation should display the message in an + * appropriate place, which can be directly in the UI or just logged to the output stream. */ + virtual void set_info_message(StringRef message) const = 0; + + /* Get the current frame number of the active scene. */ + int get_frame_number() const; + + /* Get the current time in seconds of the active scene. 
*/ + float get_time() const; + + /* Get a reference to the texture pool of this context. */ + TexturePool &texture_pool(); + + /* Get a reference to the static shader manager of this context. */ + StaticShaderManager &shader_manager(); +}; + +} // namespace blender::realtime_compositor diff --git a/source/blender/compositor/realtime_compositor/COM_conversion_operation.hh b/source/blender/compositor/realtime_compositor/COM_conversion_operation.hh new file mode 100644 index 00000000000..15e1d0722ea --- /dev/null +++ b/source/blender/compositor/realtime_compositor/COM_conversion_operation.hh @@ -0,0 +1,126 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ + +#pragma once + +#include "GPU_shader.h" + +#include "COM_context.hh" +#include "COM_input_descriptor.hh" +#include "COM_result.hh" +#include "COM_simple_operation.hh" + +namespace blender::realtime_compositor { + +/* ------------------------------------------------------------------------------------------------- + * Conversion Operation + * + * A simple operation that converts a result from a certain type to another. See the derived + * classes for more details. */ +class ConversionOperation : public SimpleOperation { + public: + using SimpleOperation::SimpleOperation; + + /* If the input result is a single value, execute_single is called. Otherwise, the shader + * provided by get_conversion_shader is dispatched. */ + void execute() override; + + /* Determine if a conversion operation is needed for the input with the given result and + * descriptor. If it is not needed, return a null pointer. If it is needed, return an instance of + * the appropriate conversion operation. */ + static SimpleOperation *construct_if_needed(Context &context, + const Result &input_result, + const InputDescriptor &input_descriptor); + + protected: + /* Convert the input single value result to the output single value result. 
*/ + virtual void execute_single(const Result &input, Result &output) = 0; + + /* Get the shader the will be used for conversion. */ + virtual GPUShader *get_conversion_shader() const = 0; +}; + +/* ------------------------------------------------------------------------------------------------- + * Convert Float To Vector Operation + * + * Takes a float result and outputs a vector result. All three components of the output are filled + * with the input float. */ +class ConvertFloatToVectorOperation : public ConversionOperation { + public: + ConvertFloatToVectorOperation(Context &context); + + void execute_single(const Result &input, Result &output) override; + + GPUShader *get_conversion_shader() const override; +}; + +/* ------------------------------------------------------------------------------------------------- + * Convert Float To Color Operation + * + * Takes a float result and outputs a color result. All three color channels of the output are + * filled with the input float and the alpha channel is set to 1. */ +class ConvertFloatToColorOperation : public ConversionOperation { + public: + ConvertFloatToColorOperation(Context &context); + + void execute_single(const Result &input, Result &output) override; + + GPUShader *get_conversion_shader() const override; +}; + +/* ------------------------------------------------------------------------------------------------- + * Convert Color To Float Operation + * + * Takes a color result and outputs a float result. The output is the average of the three color + * channels, the alpha channel is ignored. 
*/ +class ConvertColorToFloatOperation : public ConversionOperation { + public: + ConvertColorToFloatOperation(Context &context); + + void execute_single(const Result &input, Result &output) override; + + GPUShader *get_conversion_shader() const override; +}; + +/* ------------------------------------------------------------------------------------------------- + * Convert Color To Vector Operation + * + * Takes a color result and outputs a vector result. The output is a copy of the three color + * channels to the three vector components. */ +class ConvertColorToVectorOperation : public ConversionOperation { + public: + ConvertColorToVectorOperation(Context &context); + + void execute_single(const Result &input, Result &output) override; + + GPUShader *get_conversion_shader() const override; +}; + +/* ------------------------------------------------------------------------------------------------- + * Convert Vector To Float Operation + * + * Takes a vector result and outputs a float result. The output is the average of the three + * components. */ +class ConvertVectorToFloatOperation : public ConversionOperation { + public: + ConvertVectorToFloatOperation(Context &context); + + void execute_single(const Result &input, Result &output) override; + + GPUShader *get_conversion_shader() const override; +}; + +/* ------------------------------------------------------------------------------------------------- + * Convert Vector To Color Operation + * + * Takes a vector result and outputs a color result. The output is a copy of the three vector + * components to the three color channels with the alpha channel set to 1. 
*/ +class ConvertVectorToColorOperation : public ConversionOperation { + public: + ConvertVectorToColorOperation(Context &context); + + void execute_single(const Result &input, Result &output) override; + + GPUShader *get_conversion_shader() const override; +}; + +} // namespace blender::realtime_compositor diff --git a/source/blender/compositor/realtime_compositor/COM_domain.hh b/source/blender/compositor/realtime_compositor/COM_domain.hh new file mode 100644 index 00000000000..a4f9eb68db4 --- /dev/null +++ b/source/blender/compositor/realtime_compositor/COM_domain.hh @@ -0,0 +1,166 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ + +#pragma once + +#include <cstdint> + +#include "BLI_float3x3.hh" +#include "BLI_math_vec_types.hh" + +namespace blender::realtime_compositor { + +/* Possible interpolations to use when realizing an input result of some domain on another domain. + * See the RealizationOptions struct for more information. */ +enum class Interpolation : uint8_t { + Nearest, + Bilinear, + Bicubic, +}; + +/* ------------------------------------------------------------------------------------------------ + * Realization Options + * + * The options that describe how an input result prefer to be realized on some other domain. This + * is used by the Realize On Domain Operation to identify the appropriate method of realization. + * See the Domain class for more information. */ +struct RealizationOptions { + /* The interpolation method that should be used when performing realization. Since realizing a + * result involves projecting it on a different domain, which in turn, involves sampling the + * result at arbitrary locations, the interpolation identifies the method used for computing the + * value at those arbitrary locations. */ + Interpolation interpolation = Interpolation::Nearest; + /* If true, the result will be repeated infinitely along the horizontal axis when realizing the + * result. 
If false, regions outside of bounds of the result along the horizontal axis will be + * filled with zeros. */ + bool repeat_x = false; + /* If true, the result will be repeated infinitely along the vertical axis when realizing the + * result. If false, regions outside of bounds of the result along the vertical axis will be + * filled with zeros. */ + bool repeat_y = false; +}; + +/* ------------------------------------------------------------------------------------------------ + * Domain + * + * The compositor is designed in such a way as to allow compositing in an infinite virtual + * compositing space. Consequently, any result of an operation is not only represented by its image + * output, but also by its transformation in that virtual space. The transformation of the result + * together with the dimension of its image is stored and represented by a Domain. In the figure + * below, two results of different domains are illustrated on the virtual compositing space. One of + * the results is centered in space with an image dimension of 800px × 600px, and the other result + * is scaled down and translated such that it lies in the upper right quadrant of the space with an + * image dimension of 800px × 400px. The position of the domain is in pixel space, and the domain + * is considered centered if it has an identity transformation. Note that both results have the + * same resolution, but occupy different areas of the virtual compositing space. + * + * y + * ^ + * 800px x 600px | + * .---------------------|---------------------. + * | | 800px x 600px | + * | | .-------------. 
| + * | | | | | + * | | '-------------' | + * ------|---------------------|---------------------|------> x + * | | | + * | | | + * | | | + * | | | + * '---------------------|---------------------' + * | + * + * By default, results have domains of identity transformations, that is, they are centered in + * space, but a transformation operation like the rotate, translate, or transform operations will + * adjust the transformation to make the result reside somewhere different in space. The domain of + * a single value result is irrelevant and always set to an identity domain. + * + * An operation is typically only concerned about a subset of the virtual compositing space, this + * subset is represented by a domain which is called the Operation Domain. It follows that before + * the operation itself is executed, inputs will typically be realized on the operation domain to + * be in the same domain and have the same dimension as that of the operation domain. This process + * is called Domain Realization and is implemented using an operation called the Realize On Domain + * Operation. Realization involves projecting the result onto the target domain, copying the area + * of the result that intersects the target domain, and filling the rest with zeros or repetitions + * of the result depending on the realization options that can be set by the user. Consequently, + * operations can generally expect their inputs to have the same dimension and can operate on them + * directly and transparently. For instance, if an operation takes both results illustrated in + * the figure above, and the operation has an operation domain that matches the bigger domain, the + * result with the bigger domain will not need to be realized because it already has a domain that + * matches that of the operation domain, but the result with the smaller domain will have to be + * realized into a new result that has the same domain as the domain of the bigger result. 
Assuming + * no repetition, the output of the realization will be an all zeros image with dimension 800px × + * 600px with a small scaled version of the smaller result copied into the upper right quadrant of + * the image. The following figure illustrates the realization process on a different set of + * results + * + * Realized Result + * +-------------+ +-------------+ + * | Operation | | | + * | Domain | | Zeros | + * | | ----> | | + * +-----|-----+ | |-----+ | + * | | C | | | C | | + * | +-----|-------+ +-----'-------+ + * | Domain Of | + * | Input | + * +-----------+ + * + * An operation can operate in an arbitrary operation domain, but in most cases, the operation + * domain is inferred from the inputs of the operation. In particular, one of the inputs is said to + * be the Domain Input of the operation and the operation domain is inferred from its domain. It + * follows that this particular input will not need realization, because it already has the correct + * domain. The domain input selection mechanism is as follows. Each of the inputs are assigned a + * value by the developer called the Domain Priority, the domain input is then chosen as the + * non-single value input with the highest domain priority, zero being the highest priority. See + * Operation::compute_domain for more information. + * + * The aforementioned logic for operation domain computation is only a default that works for most + * cases, but an operation can override the compute_domain method to implement a different logic. + * For instance, output nodes have an operation domain the same size as the viewport and with an + * identity transformation, their operation domain doesn't depend on the inputs at all. + * + * For instance, a filter operation has two inputs, a factor and a color, the latter of which is + * assigned a domain priority of 0 and the former is assigned a domain priority of 1. 
If the color + * input is not a single value input, then the color input is considered to be the domain input of + * the operation and the operation domain is computed to be the same domain as the color input, + * because it has the highest priority. It follows that if the factor input has a different domain + * than the computed domain of the operation, it will be projected and realized on it to have the + * same domain as described above. On the other hand, if the color input is a single value input, + * then the factor input is considered to be the domain input and the operation domain will be the + * same as the domain of the factor input, because it has the second highest domain priority. + * Finally, if both inputs are single value inputs, the operation domain will be an identity domain + * and is irrelevant, because the output will be a domain-less single value. */ +class Domain { + public: + /* The size of the domain in pixels. */ + int2 size; + /* The 2D transformation of the domain defining its translation in pixels, rotation, and scale in + * the virtual compositing space. */ + float3x3 transformation; + /* The options that describe how this domain prefer to be realized on some other domain. See the + * RealizationOptions struct for more information. */ + RealizationOptions realization_options; + + public: + /* A size only constructor that sets the transformation to identity. */ + Domain(int2 size); + + Domain(int2 size, float3x3 transformation); + + /* Transform the domain by the given transformation. This effectively pre-multiply the given + * transformation by the current transformation of the domain. */ + void transform(const float3x3 &transformation); + + /* Returns a domain of size 1x1 and an identity transformation. */ + static Domain identity(); +}; + +/* Compare the size and transformation of the domain. 
The realization_options are not compared + * because they only describe the method of realization on another domain, which is not technically + * a property of the domain itself. */ +bool operator==(const Domain &a, const Domain &b); + +/* Inverse of the above equality operator. */ +bool operator!=(const Domain &a, const Domain &b); + +} // namespace blender::realtime_compositor diff --git a/source/blender/compositor/realtime_compositor/COM_evaluator.hh b/source/blender/compositor/realtime_compositor/COM_evaluator.hh new file mode 100644 index 00000000000..fd6feb0948b --- /dev/null +++ b/source/blender/compositor/realtime_compositor/COM_evaluator.hh @@ -0,0 +1,173 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ + +#pragma once + +#include <memory> + +#include "BLI_vector.hh" + +#include "DNA_node_types.h" + +#include "NOD_derived_node_tree.hh" + +#include "COM_compile_state.hh" +#include "COM_context.hh" +#include "COM_node_operation.hh" +#include "COM_operation.hh" +#include "COM_shader_operation.hh" + +namespace blender::realtime_compositor { + +using namespace nodes::derived_node_tree_types; + +/* ------------------------------------------------------------------------------------------------ + * Evaluator + * + * The evaluator is the main class of the compositor and the entry point of its execution. The + * evaluator compiles the compositor node tree and evaluates it to compute its output. It is + * constructed from a compositor node tree and a compositor context. Upon calling the evaluate + * method, the evaluator will check if the node tree is already compiled into an operations stream, + * and if it is, it will go over it and evaluate the operations in order. It is then the + * responsibility of the caller to call the reset method when the node tree changes to invalidate + * the operations stream. A reset is also required if the resources used by the node tree change, + * for instances, when the dimensions of an image used by the node tree changes. 
This is necessary + * because the evaluator compiles the node tree into an operations stream that is specifically + * optimized for the structure of the resources used by the node tree. + * + * Otherwise, if the node tree is not yet compiled, the evaluator will compile it into an + * operations stream, evaluating the operations in the process. It should be noted that operations + * are evaluated as soon as they are compiled, as opposed to compiling the whole operations stream + * and then evaluating it in a separate step. This is important because, as mentioned before, the + * operations stream is optimized specifically for the structure of the resources used by the node + * tree, which is only known after the operations are evaluated. In other words, the evaluator uses + * the evaluated results of previously compiled operations to compile the operations that follow + * them in an optimized manner. + * + * Compilation starts by computing an optimized node execution schedule by calling the + * compute_schedule function, see the discussion in COM_scheduler.hh for more details. For the node + * tree shown below, the execution schedule is denoted by the node numbers. The compiler then goes + * over the execution schedule in order and compiles each node into either a Node Operation or a + * Shader Operation, depending on the node type, see the is_shader_node function. A Shader + * operation is constructed from a group of nodes forming a contiguous subset of the node execution + * schedule. For instance, in the node tree shown below, nodes 3 and 4 are compiled together into a + * shader operation and node 5 is compiled into its own shader operation, both of which are + * contiguous subsets of the node execution schedule. This process is described in details in the + * following section. + * + * Shader Operation 1 Shader Operation 2 + * +-----------------------------------+ +------------------+ + * .------------. | .------------. .------------. | | .------------. 
| .------------. + * | Node 1 | | | Node 3 | | Node 4 | | | | Node 5 | | | Node 6 | + * | |----|--| |--| |---|-----|--| |--|--| | + * | | .-|--| | | | | .--|--| | | | | + * '------------' | | '------------' '------------' | | | '------------' | '------------' + * | +-----------------------------------+ | +------------------+ + * .------------. | | + * | Node 2 | | | + * | |--'----------------------------------------' + * | | + * '------------' + * + * For non shader nodes, the compilation process is straightforward, the compiler instantiates a + * node operation from the node, maps its inputs to the results of the outputs they are linked to, + * and evaluates the operations. However, for shader nodes, since a group of nodes can be compiled + * together into a shader operation, the compilation process is a bit involved. The compiler uses + * an instance of the Compile State class to keep track of the compilation process. The compiler + * state stores the so called "shader compile unit", which is the current group of nodes that will + * eventually be compiled together into a shader operation. While going over the schedule, the + * compiler adds the shader nodes to the compile unit until it decides that the compile unit is + * complete and should be compiled. This is typically decided when the current node is not + * compatible with the compile unit and can't be added to it, only then it compiles the compile + * unit into a shader operation and resets it to ready it to track the next potential group of + * nodes that will form a shader operation. This decision is made based on various criteria in the + * should_compile_shader_compile_unit function. See the discussion in COM_compile_state.hh for more + * details of those criteria, but perhaps the most evident of which is whether the node is actually + * a shader node, if it isn't, then it evidently can't be added to the compile unit and the compile + * unit should be compiled. 
+ + * For the node tree above, the compilation process is as follows. The compiler goes over the node + * execution schedule in order considering each node. Nodes 1 and 2 are not shader nodes so they are + * compiled into node operations and added to the operations stream. The current compile unit is + * empty, so it is not compiled. Node 3 is a shader node, and since the compile unit is currently + * empty, it is unconditionally added to it. Node 4 is a shader node, it was decided---for the sake + * of the demonstration---that it is compatible with the compile unit and can be added to it. Node + * 5 is a shader node, but it was decided---for the sake of the demonstration---that it is not + * compatible with the compile unit, so the compile unit is considered complete and is compiled + * first, adding the first shader operation to the operations stream and resetting the compile + * unit. Node 5 is then added to the now empty compile unit similar to node 3. Node 6 is not a + * shader node, so the compile unit is considered complete and is compiled first, adding the second + * shader operation to the operations stream and resetting the compile unit. Finally, node 6 is + * compiled into a node operation similar to nodes 1 and 2 and added to the operations stream. */ +class Evaluator { + private: + /* A reference to the compositor context. */ + Context &context_; + /* A reference to the compositor node tree. */ + bNodeTree &node_tree_; + /* The derived and reference node trees representing the compositor node tree. Those are + * initialized when the node tree is compiled and freed when the evaluator resets. */ + NodeTreeRefMap node_tree_reference_map_; + std::unique_ptr<DerivedNodeTree> derived_node_tree_; + /* The compiled operations stream. This contains ordered pointers to the operations that were + * compiled. This is initialized when the node tree is compiled and freed when the evaluator + * resets. 
The is_compiled_ member indicates whether the operation stream can be used or needs to + * be compiled first. Note that the operations stream can be empty even when compiled, this can + * happen when the node tree is empty or invalid for instance. */ + Vector<std::unique_ptr<Operation>> operations_stream_; + /* True if the node tree is already compiled into an operations stream that can be evaluated + * directly. False if the node tree is not compiled yet and needs to be compiled. */ + bool is_compiled_ = false; + + public: + /* Construct an evaluator from a compositor node tree and a context. */ + Evaluator(Context &context, bNodeTree &node_tree); + + /* Evaluate the compositor node tree. If the node tree is already compiled into an operations + * stream, that stream will be evaluated directly. Otherwise, the node tree will be compiled and + * evaluated. */ + void evaluate(); + + /* Invalidate the operations stream that was compiled for the node tree. This should be called + * when the node tree changes or the structure of any of the resources used by it changes. By + * structure, we mean things like the dimensions of the used images, while changes to their + * contents do not necessitate a reset. */ + void reset(); + + private: + /* Check if the compositor node tree is valid by checking if it has: + * - Cyclic links. + * - Undefined nodes or sockets. + * - Unsupported nodes. + * If the node tree is valid, true is returned. Otherwise, false is returned, and an appropriate + * error message is set by calling the context's set_info_message method. */ + bool validate_node_tree(); + + /* Compile the node tree into an operations stream and evaluate it. */ + void compile_and_evaluate(); + + /* Compile the given node into a node operation, map each input to the result of the output + * linked to it, update the compile state, add the newly created operation to the operations + * stream, and evaluate the operation. 
*/ + void compile_and_evaluate_node(DNode node, CompileState &compile_state); + + /* Map each input of the node operation to the result of the output linked to it. Unlinked inputs + * are mapped to the result of a newly created Input Single Value Operation, which is added to + * the operations stream and evaluated. Since this method might add operations to the operations + * stream, the actual node operation should only be added to the stream once this method is + * called. */ + void map_node_operation_inputs_to_their_results(DNode node, + NodeOperation *operation, + CompileState &compile_state); + + /* Compile the shader compile unit into a shader operation, map each input of the operation to + * the result of the output linked to it, update the compile state, add the newly created + * operation to the operations stream, evaluate the operation, and finally reset the shader + * compile unit. */ + void compile_and_evaluate_shader_compile_unit(CompileState &compile_state); + + /* Map each input of the shader operation to the result of the output linked to it. */ + void map_shader_operation_inputs_to_their_results(ShaderOperation *operation, + CompileState &compile_state); +}; + +} // namespace blender::realtime_compositor diff --git a/source/blender/compositor/realtime_compositor/COM_input_descriptor.hh b/source/blender/compositor/realtime_compositor/COM_input_descriptor.hh new file mode 100644 index 00000000000..215a92ab3b4 --- /dev/null +++ b/source/blender/compositor/realtime_compositor/COM_input_descriptor.hh @@ -0,0 +1,34 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ + +#pragma once + +#include "COM_result.hh" + +namespace blender::realtime_compositor { + +/* ------------------------------------------------------------------------------------------------ + * Input Descriptor + * + * A class that describes an input of an operation. */ +class InputDescriptor { + public: + /* The type of input. 
This may be different than the type of result that the operation will + * receive for the input, in which case, an implicit conversion operation will be added as an + * input processor to convert it to the required type. */ + ResultType type; + /* If true, then the input does not need to be realized on the domain of the operation before its + * execution. See the discussion in COM_domain.hh for more information. */ + bool skip_realization = false; + /* The priority of the input for determining the operation domain. The non-single value input + * with the highest priority will be used to infer the operation domain, the highest priority + * being zero. See the discussion in COM_domain.hh for more information. */ + int domain_priority = 0; + /* If true, the input expects a single value, and if a non-single value is provided, a default + * single value will be used instead, see the get_<type>_value_default methods in the Result + * class. It follows that this also implies skip_realization, because we don't need to realize a + * result that will be discarded anyway. If false, the input can work with both single and + * non-single values. 
*/ + bool expects_single_value = false; +}; + +} // namespace blender::realtime_compositor diff --git a/source/blender/compositor/realtime_compositor/COM_input_single_value_operation.hh b/source/blender/compositor/realtime_compositor/COM_input_single_value_operation.hh new file mode 100644 index 00000000000..bbcd336ee11 --- /dev/null +++ b/source/blender/compositor/realtime_compositor/COM_input_single_value_operation.hh @@ -0,0 +1,46 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ + +#pragma once + +#include "BLI_string_ref.hh" + +#include "NOD_derived_node_tree.hh" + +#include "COM_context.hh" +#include "COM_operation.hh" +#include "COM_result.hh" + +namespace blender::realtime_compositor { + +using namespace nodes::derived_node_tree_types; + +/* ------------------------------------------------------------------------------------------------ + * Input Single Value Operation + * + * An input single value operation is an operation that outputs a single value result whose value + * is the value of an unlinked input socket. This is typically used to initialize the values of + * unlinked node input sockets. */ +class InputSingleValueOperation : public Operation { + private: + /* The identifier of the output. */ + static const StringRef output_identifier_; + /* The input socket whose value will be computed as the operation's result. */ + DInputSocket input_socket_; + + public: + InputSingleValueOperation(Context &context, DInputSocket input_socket); + + /* Allocate a single value result and set its value to the default value of the input socket. */ + void execute() override; + + /* Get a reference to the output result of the operation, this essentially calls the super + * get_result with the output identifier of the operation. */ + Result &get_result(); + + private: + /* Populate the result of the operation, this essentially calls the super populate_result method + * with the output identifier of the operation. 
*/ + void populate_result(Result result); +}; + +} // namespace blender::realtime_compositor diff --git a/source/blender/compositor/realtime_compositor/COM_node_operation.hh b/source/blender/compositor/realtime_compositor/COM_node_operation.hh new file mode 100644 index 00000000000..94bc4dfd39d --- /dev/null +++ b/source/blender/compositor/realtime_compositor/COM_node_operation.hh @@ -0,0 +1,56 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ + +#pragma once + +#include "BLI_string_ref.hh" + +#include "DNA_node_types.h" + +#include "NOD_derived_node_tree.hh" + +#include "COM_context.hh" +#include "COM_operation.hh" +#include "COM_scheduler.hh" + +namespace blender::realtime_compositor { + +using namespace nodes::derived_node_tree_types; + +/* ------------------------------------------------------------------------------------------------ + * Node Operation + * + * A node operation is a subclass of operation that nodes should implement and instantiate in the + * get_compositor_operation function of bNodeType, passing the inputs given to that function to the + * constructor. This class essentially just implements a default constructor that populates output + * results for all outputs of the node as well as input descriptors for all inputs of the nodes + * based on their socket declaration. The class also provides some utility methods for easier + * implementation of nodes. */ +class NodeOperation : public Operation { + private: + /* The node that this operation represents. */ + DNode node_; + + public: + /* Populate the output results based on the node outputs and populate the input descriptors based + * on the node inputs. */ + NodeOperation(Context &context, DNode node); + + /* Compute and set the initial reference counts of all the results of the operation. 
The + * reference counts of the results are the number of operations that use those results, which is + * computed as the number of inputs whose node is part of the schedule and is linked to the + * output corresponding to each result. The node execution schedule is given as an input. */ + void compute_results_reference_counts(const Schedule &schedule); + + protected: + /* Returns a reference to the derived node that this operation represents. */ + const DNode &node() const; + + /* Returns a reference to the node that this operation represents. */ + const bNode &bnode() const; + + /* Returns true if the output identified by the given identifier is needed and should be + * computed, otherwise returns false. */ + bool should_compute_output(StringRef identifier); +}; + +} // namespace blender::realtime_compositor diff --git a/source/blender/compositor/realtime_compositor/COM_operation.hh b/source/blender/compositor/realtime_compositor/COM_operation.hh new file mode 100644 index 00000000000..38518c00c04 --- /dev/null +++ b/source/blender/compositor/realtime_compositor/COM_operation.hh @@ -0,0 +1,175 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ + +#pragma once + +#include <memory> +#include <string> + +#include "BLI_map.hh" +#include "BLI_string_ref.hh" +#include "BLI_vector.hh" + +#include "COM_context.hh" +#include "COM_domain.hh" +#include "COM_input_descriptor.hh" +#include "COM_result.hh" +#include "COM_static_shader_manager.hh" +#include "COM_texture_pool.hh" + +namespace blender::realtime_compositor { + +class SimpleOperation; + +/* A type representing a vector of simple operations that store the input processors for a + * particular input. */ +using ProcessorsVector = Vector<std::unique_ptr<SimpleOperation>>; + +/* ------------------------------------------------------------------------------------------------ + * Operation + * + * The operation is the basic unit of the compositor. 
The evaluator compiles the compositor node + * tree into an ordered stream of operations which are then executed in order during evaluation. + * The operation class can be sub-classed to implement a new operation. Operations have a number of + * inputs and outputs that are declared during construction and are identified by string + * identifiers. Inputs are declared by calling declare_input_descriptor providing an appropriate + * descriptor. Those inputs are mapped to the results computed by other operations whose outputs + * are linked to the inputs. Such mappings are established by the compiler during compilation by + * calling the map_input_to_result method. Outputs are populated by calling the populate_result + * method, providing a result of an appropriate type. Upon execution, the operation allocates a + * result for each of its outputs and computes their value based on its inputs and options. + * + * Each input may have one or more input processors, which are simple operations that process the + * inputs before the operation is executed, see the discussion in COM_simple_operation.hh for more + * information. And thus the effective input of the operation is the result of the last input + * processor if one exists. Input processors are added and evaluated by calling the + * add_and_evaluate_input_processors method, which provides a default implementation that does + * things like implicit conversion, domain realization, and more. This default implementation can, + * however, be overridden, extended, or removed. Once the input processors are added and evaluated + * for the first time, they are stored in the operation and future evaluations can evaluate them + * directly without having to add them again. 
+ * + * The operation is evaluated by calling the evaluate method, which first adds the input processors + * if they weren't added already and evaluates them, then it resets the results of the operation, + * then it calls the execute method of the operation, and finally it releases the results mapped to + * the inputs to declare that they are no longer needed. */ +class Operation { + private: + /* A reference to the compositor context. This member references the same object in all + * operations but is included in the class for convenience. */ + Context &context_; + /* A mapping between each output of the operation identified by its identifier and the result for + * that output. A result for each output of the operation should be constructed and added to the + * map during operation construction by calling the populate_result method. The results should be + * allocated and their contents should be computed in the execute method. */ + Map<std::string, Result> results_; + /* A mapping between each input of the operation identified by its identifier and its input + * descriptor. Those descriptors should be declared during operation construction by calling the + * declare_input_descriptor method. */ + Map<std::string, InputDescriptor> input_descriptors_; + /* A mapping between each input of the operation identified by its identifier and a pointer to + * the computed result providing its data. The mapped result is either one that was computed by + * another operation or one that was internally computed in the operation by the last input + * processor for that input. It is the responsibility of the evaluator to map the inputs to their + * linked results before evaluating the operation by calling the map_input_to_result method. */ + Map<StringRef, Result *> results_mapped_to_inputs_; + /* A mapping between each input of the operation identified by its identifier and an ordered list + * of simple operations to process that input. 
This is initialized the first time the input + * processors are evaluated by calling the add_and_evaluate_input_processors method. Further + * evaluations will evaluate the processors directly without the need to add them again. The + * input_processors_added_ member indicates whether the processors were already added and can be + * evaluated directly or need to be added and evaluated. */ + Map<StringRef, ProcessorsVector> input_processors_; + /* True if the input processors were already added and can be evaluated directly. False if the + * input processors are not yet added and needs to be added. */ + bool input_processors_added_ = false; + + public: + Operation(Context &context); + + virtual ~Operation(); + + /* Evaluate the operation by: + * 1. Evaluating the input processors. + * 2. Resetting the results of the operation. + * 3. Calling the execute method of the operation. + * 4. Releasing the results mapped to the inputs. */ + void evaluate(); + + /* Get a reference to the output result identified by the given identifier. */ + Result &get_result(StringRef identifier); + + /* Map the input identified by the given identifier to the result providing its data. See + * results_mapped_to_inputs_ for more details. This should be called by the evaluator to + * establish links between different operations. */ + void map_input_to_result(StringRef identifier, Result *result); + + protected: + /* Compute the operation domain of this operation. By default, this implements a default logic + * that infers the operation domain from the inputs, which may be overridden for a different + * logic. See the discussion in COM_domain.hh for the inference logic and more information. */ + virtual Domain compute_domain(); + + /* Add and evaluate any needed input processors, which essentially just involves calling the + * add_and_evaluate_input_processor method with the needed processors. This is called before + * executing the operation to prepare its inputs. 
The class defines a default implementation + * which adds typically needed processors, but derived classes can override the method to have + * a different implementation, extend the implementation, or remove it entirely. */ + virtual void add_and_evaluate_input_processors(); + + /* Given the identifier of an input of the operation and a processor operation: + * - Add the given processor to the list of input processors for the input. + * - Map the input of the processor to be the result of the last input processor or the result + * mapped to the input if no previous processors exists. + * - Switch the result mapped to the input to be the output result of the processor. + * - Evaluate the processor. */ + void add_and_evaluate_input_processor(StringRef identifier, SimpleOperation *processor); + + /* This method should allocate the operation results, execute the operation, and compute the + * output results. */ + virtual void execute() = 0; + + /* Get a reference to the result connected to the input identified by the given identifier. */ + Result &get_input(StringRef identifier) const; + + /* Switch the result mapped to the input identified by the given identifier with the given + * result. */ + void switch_result_mapped_to_input(StringRef identifier, Result *result); + + /* Add the given result to the results_ map identified by the given output identifier. This + * should be called during operation construction for all outputs. The provided result shouldn't + * be allocated or initialized, this will happen later during execution. */ + void populate_result(StringRef identifier, Result result); + + /* Declare the descriptor of the input identified by the given identifier to be the given + * descriptor. Adds the given descriptor to the input_descriptors_ map identified by the given + * input identifier. This should be called during operation constructor for all inputs. 
*/ + void declare_input_descriptor(StringRef identifier, InputDescriptor descriptor); + + /* Get a reference to the descriptor of the input identified by the given identified. */ + InputDescriptor &get_input_descriptor(StringRef identifier); + + /* Returns a reference to the compositor context. */ + Context &context(); + + /* Returns a reference to the texture pool of the compositor context. */ + TexturePool &texture_pool() const; + + /* Returns a reference to the shader manager of the compositor context. */ + StaticShaderManager &shader_manager() const; + + private: + /* Evaluate the input processors. If the input processors were already added they will be + * evaluated directly. Otherwise, the input processors will be added and evaluated. */ + void evaluate_input_processors(); + + /* Resets the results of the operation. See the reset method in the Result class for more + * information. */ + void reset_results(); + + /* Release the results that are mapped to the inputs of the operation. This is called after the + * evaluation of the operation to declare that the results are no longer needed by this + * operation. 
*/ + void release_inputs(); +}; + +} // namespace blender::realtime_compositor diff --git a/source/blender/compositor/realtime_compositor/COM_realize_on_domain_operation.hh b/source/blender/compositor/realtime_compositor/COM_realize_on_domain_operation.hh new file mode 100644 index 00000000000..5a842e16008 --- /dev/null +++ b/source/blender/compositor/realtime_compositor/COM_realize_on_domain_operation.hh @@ -0,0 +1,49 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ + +#pragma once + +#include "GPU_shader.h" + +#include "COM_context.hh" +#include "COM_domain.hh" +#include "COM_input_descriptor.hh" +#include "COM_result.hh" +#include "COM_simple_operation.hh" + +namespace blender::realtime_compositor { + +/* ------------------------------------------------------------------------------------------------ + * Realize On Domain Operation + * + * A simple operation that projects the input on a certain target domain, copies the area of the + * input that intersects the target domain, and fill the rest with zeros or repetitions of the + * input depending on the realization options of the target domain. See the discussion in + * COM_domain.hh for more information. */ +class RealizeOnDomainOperation : public SimpleOperation { + private: + /* The target domain to realize the input on. */ + Domain domain_; + + public: + RealizeOnDomainOperation(Context &context, Domain domain, ResultType type); + + void execute() override; + + /* Determine if a realize on domain operation is needed for the input with the given result and + * descriptor in an operation with the given operation domain. If it is not needed, return a null + * pointer. If it is needed, return an instance of the operation. */ + static SimpleOperation *construct_if_needed(Context &context, + const Result &input_result, + const InputDescriptor &input_descriptor, + const Domain &operation_domain); + + protected: + /* The operation domain is just the target domain. 
*/ + Domain compute_domain() override; + + private: + /* Get the realization shader of the appropriate type. */ + GPUShader *get_realization_shader(); +}; + +} // namespace blender::realtime_compositor diff --git a/source/blender/compositor/realtime_compositor/COM_reduce_to_single_value_operation.hh b/source/blender/compositor/realtime_compositor/COM_reduce_to_single_value_operation.hh new file mode 100644 index 00000000000..2f5a82c79b7 --- /dev/null +++ b/source/blender/compositor/realtime_compositor/COM_reduce_to_single_value_operation.hh @@ -0,0 +1,31 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ + +#pragma once + +#include "COM_context.hh" +#include "COM_result.hh" +#include "COM_simple_operation.hh" + +namespace blender::realtime_compositor { + +/* ------------------------------------------------------------------------------------------------ + * Reduce To Single Value Operation + * + * A simple operation that reduces its input result into a single value output result. The input is + * assumed to be a texture result of size 1x1, that is, a texture composed of a single pixel, the + * value of which shall serve as the single value of the output result. */ +class ReduceToSingleValueOperation : public SimpleOperation { + public: + ReduceToSingleValueOperation(Context &context, ResultType type); + + /* Download the input pixel from the GPU texture and set its value to the value of the allocated + * single value output result. */ + void execute() override; + + /* Determine if a reduce to single value operation is needed for the input with the + * given result. If it is not needed, return a null pointer. If it is needed, return an instance + * of the operation. 
*/ + static SimpleOperation *construct_if_needed(Context &context, const Result &input_result); +}; + +} // namespace blender::realtime_compositor diff --git a/source/blender/compositor/realtime_compositor/COM_result.hh b/source/blender/compositor/realtime_compositor/COM_result.hh new file mode 100644 index 00000000000..a16d68bb92d --- /dev/null +++ b/source/blender/compositor/realtime_compositor/COM_result.hh @@ -0,0 +1,234 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ + +#pragma once + +#include "BLI_float3x3.hh" +#include "BLI_math_vec_types.hh" + +#include "GPU_shader.h" +#include "GPU_texture.h" + +#include "COM_domain.hh" +#include "COM_texture_pool.hh" + +namespace blender::realtime_compositor { + +/* Possible data types that operations can operate on. They either represent the base type of the + * result texture or a single value result. */ +enum class ResultType : uint8_t { + Float, + Vector, + Color, +}; + +/* ------------------------------------------------------------------------------------------------ + * Result + * + * A result represents the computed value of an output of an operation. A result can either + * represent an image or a single value. A result is typed, and can be of type color, vector, or + * float. Single value results are stored in 1x1 textures to make them easily accessible in + * shaders. But the same value is also stored in the value union member of the result for any + * host-side processing. The texture of the result is allocated from the texture pool referenced by + * the result. + * + * Results are reference counted and their textures are released once their reference count reaches + * zero. After constructing a result, the set_initial_reference_count method is called to declare + * the number of operations that needs this result. 
Once each operation that needs the result no + * longer needs it, the release method is called and the reference count is decremented, until it + * reaches zero, where the result's texture is then released. Since results are eventually + * decremented to zero by the end of every evaluation, the reference count is restored before every + * evaluation to its initial reference count by calling the reset method, which is why a separate + * member initial_reference_count_ is stored to keep track of the initial value. + * + * A result not only represents an image, but also the area it occupies in the virtual compositing + * space. This area is called the Domain of the result, see the discussion in COM_domain.hh for + * more information. + * + * A result can be a proxy result that merely wraps another master result, in which case, it shares + * its values and delegates all reference counting to it. While a proxy result shares the value of + * the master result, it can have a different domain. Consequently, transformation operations are + * implemented using proxy results, where their results are proxy results of their inputs but with + * their domains transformed based on their options. Moreover, proxy results can also be used as + * the results of identity operations, that is, operations that do nothing to their inputs in + * certain configurations. In which case, the proxy result is left as is with no extra + * transformation on its domain whatsoever. Proxy results can be created by calling the + * pass_through method, see that method for more details. */ +class Result { + private: + /* The base type of the texture or the type of the single value. */ + ResultType type_; + /* If true, the result is a single value, otherwise, the result is a texture. */ + bool is_single_value_; + /* A GPU texture storing the result data. This will be a 1x1 texture if the result is a single + * value, the value of which will be identical to that of the value member. 
See class description + * for more information. */ + GPUTexture *texture_ = nullptr; + /* The texture pool used to allocate the texture of the result, this should be initialized during + * construction. */ + TexturePool *texture_pool_ = nullptr; + /* The number of operations that currently needs this result. At the time when the result is + * computed, this member will have a value that matches initial_reference_count_. Once each + * operation that needs the result no longer needs it, the release method is called and the + * reference count is decremented, until it reaches zero, where the result's texture is then + * released. If this result have a master result, then this reference count is irrelevant and + * shadowed by the reference count of the master result. */ + int reference_count_; + /* The number of operations that reference and use this result at the time when it was initially + * computed. Since reference_count_ is decremented and always becomes zero at the end of the + * evaluation, this member is used to reset the reference count of the results for later + * evaluations by calling the reset method. This member is also used to determine if this result + * should be computed by calling the should_compute method. */ + int initial_reference_count_; + /* If the result is a single value, this member stores the value of the result, the value of + * which will be identical to that stored in the texture member. The active union member depends + * on the type of the result. This member is uninitialized and should not be used if the result + * is a texture. */ + union { + float float_value_; + float3 vector_value_; + float4 color_value_; + }; + /* The domain of the result. This only matters if the result was a texture. See the discussion in + * COM_domain.hh for more information. */ + Domain domain_ = Domain::identity(); + /* If not nullptr, then this result wraps and shares the value of another master result. 
In this + * case, calls to texture-related methods like increment_reference_count and release should + * operate on the master result as opposed to this result. This member is typically set upon + * calling the pass_through method, which sets this result to be the master of a target result. + * See that method for more information. */ + Result *master_ = nullptr; + + public: + /* Construct a result of the given type with the given texture pool that will be used to allocate + * and release the result's texture. */ + Result(ResultType type, TexturePool &texture_pool); + + /* Declare the result to be a texture result, allocate a texture of an appropriate type with + * the size of the given domain from the result's texture pool, and set the domain of the result + * to the given domain. */ + void allocate_texture(Domain domain); + + /* Declare the result to be a single value result, allocate a texture of an appropriate + * type with size 1x1 from the result's texture pool, and set the domain to be an identity + * domain. See class description for more information. */ + void allocate_single_value(); + + /* Allocate a single value result and set its value to zero. This is called for results whose + * value can't be computed and are considered invalid. */ + void allocate_invalid(); + + /* Bind the texture of the result to the texture image unit with the given name in the currently + * bound given shader. This also inserts a memory barrier for texture fetches to ensure any prior + * writes to the texture are reflected before reading from it. */ + void bind_as_texture(GPUShader *shader, const char *texture_name) const; + + /* Bind the texture of the result to the image unit with the given name in the currently bound + * given shader. */ + void bind_as_image(GPUShader *shader, const char *image_name) const; + + /* Unbind the texture which was previously bound using bind_as_texture. 
*/ + void unbind_as_texture() const; + + /* Unbind the texture which was previously bound using bind_as_image. */ + void unbind_as_image() const; + + /* Pass this result through to a target result, in which case, the target result becomes a proxy + * result with this result as its master result. This is done by making the target result a copy + * of this result, essentially having identical values between the two and consequently sharing + * the underlying texture. An exception is the initial reference count, whose value is retained + * and not copied, because it is a property of the original result and is needed for correctly + * resetting the result before the next evaluation. Additionally, this result is set to be the + * master of the target result, by setting the master member of the target. Finally, the + * reference count of the result is incremented by the reference count of the target result. See + * the discussion above for more information. */ + void pass_through(Result &target); + + /* Transform the result by the given transformation. This effectively pre-multiply the given + * transformation by the current transformation of the domain of the result. */ + void transform(const float3x3 &transformation); + + /* Get a reference to the realization options of this result. See the RealizationOptions struct + * for more information. */ + RealizationOptions &get_realization_options(); + + /* If the result is a single value result of type float, return its float value. Otherwise, an + * uninitialized value is returned. */ + float get_float_value() const; + + /* If the result is a single value result of type vector, return its vector value. Otherwise, an + * uninitialized value is returned. */ + float3 get_vector_value() const; + + /* If the result is a single value result of type color, return its color value. Otherwise, an + * uninitialized value is returned. 
*/ + float4 get_color_value() const; + + /* Same as get_float_value but returns a default value if the result is not a single value. */ + float get_float_value_default(float default_value) const; + + /* Same as get_vector_value but returns a default value if the result is not a single value. */ + float3 get_vector_value_default(const float3 &default_value) const; + + /* Same as get_color_value but returns a default value if the result is not a single value. */ + float4 get_color_value_default(const float4 &default_value) const; + + /* If the result is a single value result of type float, set its float value and upload it to the + * texture. Otherwise, an undefined behavior is invoked. */ + void set_float_value(float value); + + /* If the result is a single value result of type vector, set its vector value and upload it to + * the texture. Otherwise, an undefined behavior is invoked. */ + void set_vector_value(const float3 &value); + + /* If the result is a single value result of type color, set its color value and upload it to the + * texture. Otherwise, an undefined behavior is invoked. */ + void set_color_value(const float4 &value); + + /* Set the value of initial_reference_count_, see that member for more details. This should be + * called after constructing the result to declare the number of operations that needs it. */ + void set_initial_reference_count(int count); + + /* Reset the result to prepare it for a new evaluation. This should be called before evaluating + * the operation that computes this result. First, set the value of reference_count_ to the value + * of initial_reference_count_ since reference_count_ may have already been decremented to zero + * in a previous evaluation. Second, set master_ to nullptr because the result may have been + * turned into a proxy result in a previous evaluation. Other fields don't need to be reset + * because they are runtime and overwritten during evaluation. 
*/ + void reset(); + + /* Increment the reference count of the result by the given count. If this result have a master + * result, the reference count of the master result is incremented instead. */ + void increment_reference_count(int count = 1); + + /* Decrement the reference count of the result and release the its texture back into the texture + * pool if the reference count reaches zero. This should be called when an operation that used + * this result no longer needs it. If this result have a master result, the master result is + * released instead. */ + void release(); + + /* Returns true if this result should be computed and false otherwise. The result should be + * computed if its reference count is not zero, that is, its result is used by at least one + * operation. */ + bool should_compute(); + + /* Returns the type of the result. */ + ResultType type() const; + + /* Returns true if the result is a texture and false of it is a single value. */ + bool is_texture() const; + + /* Returns true if the result is a single value and false of it is a texture. */ + bool is_single_value() const; + + /* Returns the allocated GPU texture of the result. */ + GPUTexture *texture() const; + + /* Returns the reference count of the result. If this result have a master result, then the + * reference count of the master result is returned instead. */ + int reference_count() const; + + /* Returns a reference to the domain of the result. See the Domain class. 
*/ + const Domain &domain() const; +}; + +} // namespace blender::realtime_compositor diff --git a/source/blender/compositor/realtime_compositor/COM_scheduler.hh b/source/blender/compositor/realtime_compositor/COM_scheduler.hh new file mode 100644 index 00000000000..4f778b32145 --- /dev/null +++ b/source/blender/compositor/realtime_compositor/COM_scheduler.hh @@ -0,0 +1,21 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ + +#pragma once + +#include "BLI_vector_set.hh" + +#include "NOD_derived_node_tree.hh" + +namespace blender::realtime_compositor { + +using namespace nodes::derived_node_tree_types; + +/* A type representing the ordered set of nodes defining the schedule of node execution. */ +using Schedule = VectorSet<DNode>; + +/* Computes the execution schedule of the node tree. This is essentially a post-order depth first + * traversal of the node tree from the output node to the leaf input nodes, with informed order of + * traversal of dependencies based on a heuristic estimation of the number of needed buffers. 
*/ +Schedule compute_schedule(DerivedNodeTree &tree); + +} // namespace blender::realtime_compositor diff --git a/source/blender/compositor/realtime_compositor/COM_shader_node.hh b/source/blender/compositor/realtime_compositor/COM_shader_node.hh new file mode 100644 index 00000000000..453226ec452 --- /dev/null +++ b/source/blender/compositor/realtime_compositor/COM_shader_node.hh @@ -0,0 +1,87 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ + +#pragma once + +#include "BLI_string_ref.hh" +#include "BLI_vector.hh" + +#include "DNA_node_types.h" + +#include "GPU_material.h" + +#include "NOD_derived_node_tree.hh" + +namespace blender::realtime_compositor { + +using namespace nodes::derived_node_tree_types; + +/* ------------------------------------------------------------------------------------------------ + * Shader Node + * + * A shader node encapsulates a compositor node tree that is capable of being used together with + * other shader nodes to construct a Shader Operation using the GPU material compiler. A GPU node + * stack for each of the node inputs and outputs is stored and populated during construction in + * order to represent the node as a GPU node inside the GPU material graph, see GPU_material.h for + * more information. Derived classes should implement the compile method to add the node and link + * it to the GPU material given to the method. The compiler is expected to initialize the input + * links of the node before invoking the compile method. See the discussion in + * COM_shader_operation.hh for more information. */ +class ShaderNode { + private: + /* The node that this operation represents. */ + DNode node_; + /* The GPU node stacks of the inputs of the node. Those are populated during construction in the + * populate_inputs method. The links of the inputs are initialized by the GPU material compiler + * prior to calling the compile method. 
There is an extra stack at the end to mark the end of the + * array, as this is what the GPU module functions expect. */ + Vector<GPUNodeStack> inputs_; + /* The GPU node stacks of the outputs of the node. Those are populated during construction in the + * populate_outputs method. There is an extra stack at the end to mark the end of the array, as + * this is what the GPU module functions expect. */ + Vector<GPUNodeStack> outputs_; + + public: + /* Construct the node by populating both its inputs and outputs. */ + ShaderNode(DNode node); + + virtual ~ShaderNode() = default; + + /* Compile the node by adding the appropriate GPU material graph nodes and linking the + * appropriate resources. */ + virtual void compile(GPUMaterial *material) = 0; + + /* Returns a contiguous array containing the GPU node stacks of each input. */ + GPUNodeStack *get_inputs_array(); + + /* Returns a contiguous array containing the GPU node stacks of each output. */ + GPUNodeStack *get_outputs_array(); + + /* Returns the GPU node stack of the input with the given identifier. */ + GPUNodeStack &get_input(StringRef identifier); + + /* Returns the GPU node stack of the output with the given identifier. */ + GPUNodeStack &get_output(StringRef identifier); + + /* Returns the GPU node link of the input with the given identifier, if the input is not linked, + * a uniform link carrying the value of the input will be created a returned. It is expected that + * the caller will use the returned link in a GPU material, otherwise, the link may not be + * properly freed. */ + GPUNodeLink *get_input_link(StringRef identifier); + + protected: + /* Returns a reference to the derived node that this operation represents. */ + const DNode &node() const; + + /* Returns a reference to the node this operations represents. */ + bNode &bnode() const; + + private: + /* Populate the inputs of the node. 
The input link is set to nullptr and is expected to be + * initialized by the GPU material compiler before calling the compile method. */ + void populate_inputs(); + /* Populate the outputs of the node. The output link is set to nullptr and is expected to be + * initialized by the compile method. */ + void populate_outputs(); +}; + +} // namespace blender::realtime_compositor diff --git a/source/blender/compositor/realtime_compositor/COM_shader_operation.hh b/source/blender/compositor/realtime_compositor/COM_shader_operation.hh new file mode 100644 index 00000000000..a33dcbf25be --- /dev/null +++ b/source/blender/compositor/realtime_compositor/COM_shader_operation.hh @@ -0,0 +1,242 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ + +#pragma once + +#include <memory> + +#include "BLI_map.hh" +#include "BLI_string_ref.hh" +#include "BLI_vector_set.hh" + +#include "GPU_material.h" +#include "GPU_shader.h" + +#include "gpu_shader_create_info.hh" + +#include "NOD_derived_node_tree.hh" + +#include "COM_context.hh" +#include "COM_operation.hh" +#include "COM_scheduler.hh" + +namespace blender::realtime_compositor { + +using namespace nodes::derived_node_tree_types; + +/* A type representing a contiguous subset of the node execution schedule that will be compiled + * into a Shader Operation. */ +using ShaderCompileUnit = VectorSet<DNode>; + +/* ------------------------------------------------------------------------------------------------ + * Shader Operation + * + * An operation that evaluates a shader compiled from a contiguous subset of the node execution + * schedule using the GPU material compiler, see GPU_material.h for more information. The subset + * of the node execution schedule is called a shader compile unit, see the discussion in + * COM_compile_state.hh for more information. + * + * Consider the following node graph with a node execution schedule denoted by the number on each + * node. 
The compiler may decide to compile a subset of the execution schedule into a shader + * operation, in this case, the nodes from 3 to 5 were compiled together into a shader operation. + * This subset is called the shader compile unit. See the discussion in COM_evaluator.hh for more + * information on the compilation process. Each of the nodes inside the compile unit implements a + * Shader Node which is instantiated, stored in shader_nodes_, and used during compilation. See the + * discussion in COM_shader_node.hh for more information. Links that are internal to the shader + * operation are established between the input and outputs of the shader nodes, for instance, the + * links between nodes 3 and 4 as well as those between nodes 4 and 5. However, links that cross + * the boundary of the shader operation needs special handling. + * + * Shader Operation + * +------------------------------------------------------+ + * .------------. | .------------. .------------. .------------. | .------------. + * | Node 1 | | | Node 3 | | Node 4 | | Node 5 | | | Node 6 | + * | |----|--| |--| |------| |--|--| | + * | | .-|--| | | | .---| | | | | + * '------------' | | '------------' '------------' | '------------' | '------------' + * | +----------------------------------|-------------------+ + * .------------. | | + * | Node 2 | | | + * | |--'------------------------------------' + * | | + * '------------' + * + * Links from nodes that are not part of the shader operation to nodes that are part of the shader + * operation are considered inputs of the operation itself and are declared as such. For instance, + * the link from node 1 to node 3 is declared as an input to the operation, and the same applies + * for the links from node 2 to nodes 3 and 5. Note, however, that only one input is declared for + * each distinct output socket, so both links from node 2 share the same input of the operation. 
+ * An input to the operation is declared for a distinct output socket as follows: + * + * - A texture is added to the shader, which will be bound to the result of the output socket + * during evaluation. + * - A GPU attribute is added to the GPU material for that output socket and is linked to the GPU + * input stack of the inputs linked to the output socket. + * - Code is emitted to initialize the values of the attributes by sampling the textures + * corresponding to each of the inputs. + * - The newly added attribute is mapped to the output socket in output_to_material_attribute_map_ + * to share that same attributes for all inputs linked to the same output socket. + * + * Links from nodes that are part of the shader operation to nodes that are not part of the shader + * operation are considered outputs of the operation itself and are declared as such. For instance, + * the link from node 5 to node 6 is declared as an output to the operation. An output to the + * operation is declared for an output socket as follows: + * + * - An image is added in the shader where the output value will be written. + * - A storer GPU material node that stores the value of the output is added and linked to the GPU + * output stack of the output. The storer will store the value in the image identified by the + * index of the output given to the storer. + * - The storer functions are generated dynamically to map each index with its appropriate image. + * + * The GPU material code generator source is used to construct a compute shader that is then + * dispatched during operation evaluation after binding the inputs, outputs, and any necessary + * resources. */ +class ShaderOperation : public Operation { + private: + /* The compile unit that will be compiled into this shader operation. */ + ShaderCompileUnit compile_unit_; + /* The GPU material backing the operation. This is created and compiled during construction and + * freed during destruction. 
*/ + GPUMaterial *material_; + /* A map that associates each node in the compile unit with an instance of its shader node. */ + Map<DNode, std::unique_ptr<ShaderNode>> shader_nodes_; + /* A map that associates the identifier of each input of the operation with the output socket it + * is linked to. This is needed to help the compiler establish links between operations. */ + Map<std::string, DOutputSocket> inputs_to_linked_outputs_map_; + /* A map that associates the output socket that provides the result of an output of the operation + * with the identifier of that output. This is needed to help the compiler establish links + * between operations. */ + Map<DOutputSocket, std::string> output_sockets_to_output_identifiers_map_; + /* A map that associates the output socket of a node that is not part of the shader operation to + * the attribute that was created for it. This is used to share the same attribute with all + * inputs that are linked to the same output socket. */ + Map<DOutputSocket, GPUNodeLink *> output_to_material_attribute_map_; + + public: + /* Construct and compile a GPU material from the given shader compile unit by calling + * GPU_material_from_callbacks with the appropriate callbacks. */ + ShaderOperation(Context &context, ShaderCompileUnit &compile_unit); + + /* Free the GPU material. */ + ~ShaderOperation(); + + /* Allocate the output results, bind the shader and all its needed resources, then dispatch the + * shader. */ + void execute() override; + + /* Get the identifier of the operation output corresponding to the given output socket. This is + * called by the compiler to identify the operation output that provides the result for an input + * by providing the output socket that the input is linked to. See + * output_sockets_to_output_identifiers_map_ for more information. */ + StringRef get_output_identifier_from_output_socket(DOutputSocket output_socket); + + /* Get a reference to the inputs to linked outputs map of the operation. 
This is called by the + * compiler to identify the output that each input of the operation is linked to for correct + * input mapping. See inputs_to_linked_outputs_map_ for more information. */ + Map<std::string, DOutputSocket> &get_inputs_to_linked_outputs_map(); + + /* Compute and set the initial reference counts of all the results of the operation. The + * reference counts of the results are the number of operations that use those results, which is + * computed as the number of inputs whose node is part of the schedule and is linked to the + * output corresponding to each of the results of the operation. The node execution schedule is + * given as an input. */ + void compute_results_reference_counts(const Schedule &schedule); + + private: + /* Bind the uniform buffer of the GPU material as well as any color band textures needed by the + * GPU material. The compiled shader of the material is given as an argument and assumed to be + * bound. */ + void bind_material_resources(GPUShader *shader); + + /* Bind the input results of the operation to the appropriate textures in the GPU material. The + * attributes stored in output_to_material_attribute_map_ have names that match the texture + * samplers in the shader as well as the identifiers of the operation inputs that they correspond + * to. The compiled shader of the material is given as an argument and assumed to be bound. */ + void bind_inputs(GPUShader *shader); + + /* Bind the output results of the operation to the appropriate images in the GPU material. The + * name of the images in the shader match the identifier of their corresponding outputs. The + * compiled shader of the material is given as an argument and assumed to be bound. */ + void bind_outputs(GPUShader *shader); + + /* A static callback method of interface ConstructGPUMaterialFn that is passed to + * GPU_material_from_callbacks to construct the GPU material graph. 
The thunk parameter will be a + * pointer to the instance of ShaderOperation that is being compiled. The method goes over the + * compile unit and does the following for each node: + * + * - Instantiate a ShaderNode from the node and add it to shader_nodes_. + * - Link the inputs of the node if needed. The inputs are either linked to other nodes in the + * GPU material graph or are exposed as inputs to the shader operation itself if they are + * linked to nodes that are not part of the shader operation. + * - Call the compile method of the shader node to actually add and link the GPU material graph + * nodes. + * - If any of the outputs of the node are linked to nodes that are not part of the shader + * operation, they are exposed as outputs to the shader operation itself. */ + static void construct_material(void *thunk, GPUMaterial *material); + + /* Link the inputs of the node if needed. Unlinked inputs are ignored as they will be linked by + * the node compile method. If the input is linked to a node that is not part of the shader + * operation, the input will be exposed as an input to the shader operation and linked to it. + * While if the input is linked to a node that is part of the shader operation, then it is linked + * to that node in the GPU material node graph. */ + void link_node_inputs(DNode node, GPUMaterial *material); + + /* Given the input socket of a node that is part of the shader operation which is linked to the + * given output socket of a node that is also part of the shader operation, just link the output + * link of the GPU node stack of the output socket to the input link of the GPU node stack of the + * input socket. This essentially establishes the needed links in the GPU material node graph. 
*/ + void link_node_input_internal(DInputSocket input_socket, DOutputSocket output_socket); + + /* Given the input socket of a node that is part of the shader operation which is linked to the + * given output socket of a node that is not part of the shader operation, declare a new + * operation input and link it to the input link of the GPU node stack of the input socket. An + * operation input is only declared if no input was already declared for that same output socket + * before. */ + void link_node_input_external(DInputSocket input_socket, + DOutputSocket output_socket, + GPUMaterial *material); + + /* Given the input socket of a node that is part of the shader operation which is linked to the + * given output socket of a node that is not part of the shader operation, declare a new input to + * the operation that is represented in the GPU material by a newly created GPU attribute. It is + * assumed that no operation input was declared for this same output socket before. In the + * generate_code_for_inputs method, a texture will be added in the shader for each of the + * declared inputs, having the same name as the attribute. Additionally, code will be emitted to + * initialize the attributes by sampling their corresponding textures. */ + void declare_operation_input(DInputSocket input_socket, + DOutputSocket output_socket, + GPUMaterial *material); + + /* Populate the output results of the shader operation for output sockets of the given node that + * are linked to nodes outside of the shader operation. */ + void populate_results_for_node(DNode node, GPUMaterial *material); + + /* Given the output socket of a node that is part of the shader operation which is linked to an + * input socket of a node that is not part of the shader operation, declare a new output to the + * operation and link it to an output storer passing in the index of the output. 
In the + * generate_code_for_outputs method, an image will be added in the shader for each of the + * declared outputs. Additionally, code will be emitted to define the storer functions that store + * the value in the appropriate image identified by the given index. */ + void populate_operation_result(DOutputSocket output_socket, GPUMaterial *material); + + /* A static callback method of interface GPUCodegenCallbackFn that is passed to + * GPU_material_from_callbacks to create the shader create info of the GPU material. The thunk + * parameter will be a pointer to the instance of ShaderOperation that is being compiled. + * + * This method first generates the necessary code to load the inputs and store the outputs. Then, + * it creates a compute shader from the generated sources. Finally, it adds the necessary GPU + * resources to the shader. */ + static void generate_code(void *thunk, GPUMaterial *material, GPUCodegenOutput *code_generator); + + /* Add an image in the shader for each of the declared outputs. Additionally, emit code to define + * the storer functions that store the given value in the appropriate image identified by the + * given index. */ + void generate_code_for_outputs(gpu::shader::ShaderCreateInfo &shader_create_info); + + /* Add a texture will in the shader for each of the declared inputs/attributes in the operation, + * having the same name as the attribute. Additionally, emit code to initialize the attributes by + * sampling their corresponding textures. 
*/ + void generate_code_for_inputs(GPUMaterial *material, + gpu::shader::ShaderCreateInfo &shader_create_info); +}; + +} // namespace blender::realtime_compositor diff --git a/source/blender/compositor/realtime_compositor/COM_simple_operation.hh b/source/blender/compositor/realtime_compositor/COM_simple_operation.hh new file mode 100644 index 00000000000..2f743da50d6 --- /dev/null +++ b/source/blender/compositor/realtime_compositor/COM_simple_operation.hh @@ -0,0 +1,64 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ + +#pragma once + +#include "BLI_string_ref.hh" + +#include "COM_operation.hh" +#include "COM_result.hh" + +namespace blender::realtime_compositor { + +/* ------------------------------------------------------------------------------------------------ + * Simple Operation + * + * A simple operation is an operation that takes exactly one input and computes exactly one output. + * Moreover, the output is guaranteed to only have a single user, that is, its reference count will + * be one. Such operations can be attached to the inputs of operations to pre-process the inputs to + * prepare them before the operation is executed.*/ +class SimpleOperation : public Operation { + private: + /* The identifier of the output. This is constant for all operations. */ + static const StringRef output_identifier_; + /* The identifier of the input. This is constant for all operations. */ + static const StringRef input_identifier_; + + public: + using Operation::Operation; + + /* Get a reference to the output result of the operation, this essentially calls the super + * get_result method with the output identifier of the operation. */ + Result &get_result(); + + /* Map the input of the operation to the given result, this essentially calls the super + * map_input_to_result method with the input identifier of the operation. 
*/ + void map_input_to_result(Result *result); + + protected: + /* Simple operations don't need input processors, so override with an empty implementation. */ + void add_and_evaluate_input_processors() override; + + /* Get a reference to the input result of the operation, this essentially calls the super + * get_result method with the input identifier of the operation. */ + Result &get_input(); + + /* Switch the result mapped to the input with the given result, this essentially calls the super + * switch_result_mapped_to_input method with the input identifier of the operation. */ + void switch_result_mapped_to_input(Result *result); + + /* Populate the result of the operation, this essentially calls the super populate_result method + * with the output identifier of the operation and sets the initial reference count of the result + * to 1, since the result of an operation operation is guaranteed to have a single user. */ + void populate_result(Result result); + + /* Declare the descriptor of the input of the operation to be the given descriptor, this + * essentially calls the super declare_input_descriptor method with the input identifier of the + * operation. */ + void declare_input_descriptor(InputDescriptor descriptor); + + /* Get a reference to the descriptor of the input, this essentially calls the super + * get_input_descriptor method with the input identifier of the operation. 
*/ + InputDescriptor &get_input_descriptor(); +}; + +} // namespace blender::realtime_compositor diff --git a/source/blender/compositor/realtime_compositor/COM_static_shader_manager.hh b/source/blender/compositor/realtime_compositor/COM_static_shader_manager.hh new file mode 100644 index 00000000000..161a80862a0 --- /dev/null +++ b/source/blender/compositor/realtime_compositor/COM_static_shader_manager.hh @@ -0,0 +1,33 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ + +#pragma once + +#include "BLI_map.hh" +#include "BLI_string_ref.hh" + +#include "GPU_shader.h" + +namespace blender::realtime_compositor { + +/* ------------------------------------------------------------------------------------------------- + * Static Shader Manager + * + * A static shader manager is a map of shaders identified by their info name that can be acquired + * and reused throughout the evaluation of the compositor and are only freed when the shader + * manager is destroyed. Once a shader is acquired for the first time, it will be cached in the + * manager to be potentially acquired later if needed without the shader creation overhead. */ +class StaticShaderManager { + private: + /* The set of shaders identified by their info name that are currently available in the manager + * to be acquired. */ + Map<StringRef, GPUShader *> shaders_; + + public: + ~StaticShaderManager(); + + /* Check if there is an available shader with the given info name in the manager, if such shader + * exists, return it, otherwise, return a newly created shader and add it to the manager. 
*/ + GPUShader *get(const char *info_name); +}; + +} // namespace blender::realtime_compositor diff --git a/source/blender/compositor/realtime_compositor/COM_texture_pool.hh b/source/blender/compositor/realtime_compositor/COM_texture_pool.hh new file mode 100644 index 00000000000..cc6641d288f --- /dev/null +++ b/source/blender/compositor/realtime_compositor/COM_texture_pool.hh @@ -0,0 +1,86 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ + +#pragma once + +#include <cstdint> + +#include "BLI_map.hh" +#include "BLI_math_vec_types.hh" +#include "BLI_vector.hh" + +#include "GPU_texture.h" + +namespace blender::realtime_compositor { + +/* ------------------------------------------------------------------------------------------------ + * Texture Pool Key + * + * A key used to identify a texture specification in a texture pool. Defines a hash and an equality + * operator for use in a hash map. */ +class TexturePoolKey { + public: + int2 size; + eGPUTextureFormat format; + + /* Construct a key from the given texture size and format. */ + TexturePoolKey(int2 size, eGPUTextureFormat format); + + /* Construct a key from the size and format of the given texture. */ + TexturePoolKey(const GPUTexture *texture); + + uint64_t hash() const; +}; + +bool operator==(const TexturePoolKey &a, const TexturePoolKey &b); + +/* ------------------------------------------------------------------------------------------------ + * Texture Pool + * + * A texture pool allows the allocation and reuse of textures throughout the execution of the + * compositor to avoid memory fragmentation and texture allocation overheads. The texture pool + * delegates the actual texture allocation to an allocate_texture method that should be implemented + * by the caller of the compositor evaluator, allowing a more agnostic and flexible execution that + * can be controlled by the caller. 
If the compositor is expected to execute frequently, like on + * every redraw, then the allocation method should use a persistent texture pool to allow + * cross-evaluation texture pooling, for instance, by using the DRWTexturePool. But if the + * evaluator is expected to execute infrequently, the allocated textures can just be freed when the + * evaluator is done, that is, when the pool is destructed. */ +class TexturePool { + private: + /* The set of textures in the pool that are available to acquire for each distinct texture + * specification. */ + Map<TexturePoolKey, Vector<GPUTexture *>> textures_; + + public: + /* Check if there is an available texture with the given specification in the pool, if such + * texture exists, return it, otherwise, return a newly allocated texture. Expect the texture to + * be uncleared and possibly contains garbage data. */ + GPUTexture *acquire(int2 size, eGPUTextureFormat format); + + /* Shorthand for acquire with GPU_RGBA16F format. */ + GPUTexture *acquire_color(int2 size); + + /* Shorthand for acquire with GPU_RGBA16F format. Identical to acquire_color because vectors + * are stored in RGBA textures, due to the limited support for RGB textures. */ + GPUTexture *acquire_vector(int2 size); + + /* Shorthand for acquire with GPU_R16F format. */ + GPUTexture *acquire_float(int2 size); + + /* Put the texture back into the pool, potentially to be acquired later by another user. Expects + * the texture to be one that was acquired using the same texture pool. */ + void release(GPUTexture *texture); + + /* Reset the texture pool by clearing all available textures without freeing the textures. If the + * textures will no longer be needed, they should be freed in the destructor. This should be + * called after the compositor is done evaluating. */ + void reset(); + + private: + /* Returns a newly allocated texture with the given specification. This method should be + * implemented by the caller of the compositor evaluator. 
See the class description for more + * information. */ + virtual GPUTexture *allocate_texture(int2 size, eGPUTextureFormat format) = 0; +}; + +} // namespace blender::realtime_compositor diff --git a/source/blender/compositor/realtime_compositor/COM_utilities.hh b/source/blender/compositor/realtime_compositor/COM_utilities.hh new file mode 100644 index 00000000000..4bd61aab5cb --- /dev/null +++ b/source/blender/compositor/realtime_compositor/COM_utilities.hh @@ -0,0 +1,61 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ + +#pragma once + +#include "BLI_function_ref.hh" +#include "BLI_math_vec_types.hh" + +#include "NOD_derived_node_tree.hh" + +#include "GPU_shader.h" + +#include "COM_input_descriptor.hh" +#include "COM_result.hh" + +namespace blender::realtime_compositor { + +using namespace nodes::derived_node_tree_types; + +/* Get the origin socket of the given node input. If the input is not linked, the socket itself is + * returned. If the input is linked, the socket that is linked to it is returned, which could + * either be an input or an output. An input socket is returned when the given input is connected + * to an unlinked input of a group input node. */ +DSocket get_input_origin_socket(DInputSocket input); + +/* Get the output socket linked to the given node input. If the input is not linked to an output, a + * null output is returned. */ +DOutputSocket get_output_linked_to_input(DInputSocket input); + +/* Get the result type that corresponds to the type of the given socket. */ +ResultType get_node_socket_result_type(const SocketRef *socket); + +/* Returns true if any of the nodes linked to the given output satisfies the given condition, and + * false otherwise. */ +bool is_output_linked_to_node_conditioned(DOutputSocket output, + FunctionRef<bool(DNode)> condition); + +/* Returns the number of inputs linked to the given output that satisfy the given condition. 
*/ +int number_of_inputs_linked_to_output_conditioned(DOutputSocket output, + FunctionRef<bool(DInputSocket)> condition); + +/* A node is a shader node if it defines a method to get a shader node operation. */ +bool is_shader_node(DNode node); + +/* Returns true if the given node is supported, that is, have an implementation. Returns false + * otherwise. */ +bool is_node_supported(DNode node); + +/* Get the input descriptor of the given input socket. */ +InputDescriptor input_descriptor_from_input_socket(const InputSocketRef *socket); + +/* Dispatch the given compute shader in a 2D compute space such that the number of threads in both + * dimensions is as small as possible but at least covers the entirety of threads_range assuming + * the shader has a local group size given by local_size. That means that the number of threads + * might be a bit larger than threads_range, so shaders has to put that into consideration. A + * default local size of 16x16 is assumed, which is the optimal local size for many image + * processing shaders. 
*/ +void compute_dispatch_threads_at_least(GPUShader *shader, + int2 threads_range, + int2 local_size = int2(16)); + +} // namespace blender::realtime_compositor diff --git a/source/blender/compositor/realtime_compositor/intern/compile_state.cc b/source/blender/compositor/realtime_compositor/intern/compile_state.cc new file mode 100644 index 00000000000..5b485224111 --- /dev/null +++ b/source/blender/compositor/realtime_compositor/intern/compile_state.cc @@ -0,0 +1,163 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ + +#include <limits> + +#include "BLI_math_vec_types.hh" + +#include "DNA_node_types.h" + +#include "NOD_derived_node_tree.hh" + +#include "COM_compile_state.hh" +#include "COM_domain.hh" +#include "COM_input_descriptor.hh" +#include "COM_node_operation.hh" +#include "COM_result.hh" +#include "COM_scheduler.hh" +#include "COM_shader_operation.hh" +#include "COM_utilities.hh" + +namespace blender::realtime_compositor { + +using namespace nodes::derived_node_tree_types; + +CompileState::CompileState(const Schedule &schedule) : schedule_(schedule) +{ +} + +const Schedule &CompileState::get_schedule() +{ + return schedule_; +} + +void CompileState::map_node_to_node_operation(DNode node, NodeOperation *operations) +{ + return node_operations_.add_new(node, operations); +} + +void CompileState::map_node_to_shader_operation(DNode node, ShaderOperation *operations) +{ + return shader_operations_.add_new(node, operations); +} + +Result &CompileState::get_result_from_output_socket(DOutputSocket output) +{ + /* The output belongs to a node that was compiled into a standard node operation, so return a + * reference to the result from that operation using the output identifier. 
*/ + if (node_operations_.contains(output.node())) { + NodeOperation *operation = node_operations_.lookup(output.node()); + return operation->get_result(output->identifier()); + } + + /* Otherwise, the output belongs to a node that was compiled into a shader operation, so + * retrieve the internal identifier of that output and return a reference to the result from + * that operation using the retrieved identifier. */ + ShaderOperation *operation = shader_operations_.lookup(output.node()); + return operation->get_result(operation->get_output_identifier_from_output_socket(output)); +} + +void CompileState::add_node_to_shader_compile_unit(DNode node) +{ + shader_compile_unit_.add_new(node); + + /* If the domain of the shader compile unit is not yet determined or was determined to be + * an identity domain, update it to be the computed domain of the node. */ + if (shader_compile_unit_domain_ == Domain::identity()) { + shader_compile_unit_domain_ = compute_shader_node_domain(node); + } +} + +ShaderCompileUnit &CompileState::get_shader_compile_unit() +{ + return shader_compile_unit_; +} + +void CompileState::reset_shader_compile_unit() +{ + return shader_compile_unit_.clear(); +} + +bool CompileState::should_compile_shader_compile_unit(DNode node) +{ + /* If the shader compile unit is empty, then it can't be compiled yet. */ + if (shader_compile_unit_.is_empty()) { + return false; + } + + /* If the node is not a shader node, then it can't be added to the shader compile unit and the + * shader compile unit is considered complete and should be compiled. */ + if (!is_shader_node(node)) { + return true; + } + + /* If the computed domain of the node doesn't matches the domain of the shader compile unit, then + * it can't be added to the shader compile unit and the shader compile unit is considered + * complete and should be compiled. Identity domains are an exception as they are always + * compatible because they represents single values. 
*/ + if (shader_compile_unit_domain_ != Domain::identity() && + shader_compile_unit_domain_ != compute_shader_node_domain(node)) { + return true; + } + + /* Otherwise, the node is compatible and can be added to the compile unit and it shouldn't be + * compiled just yet. */ + return false; +} + +Domain CompileState::compute_shader_node_domain(DNode node) +{ + /* Default to an identity domain in case no domain input was found, most likely because all + * inputs are single values. */ + Domain node_domain = Domain::identity(); + int current_domain_priority = std::numeric_limits<int>::max(); + + /* Go over the inputs and find the domain of the non single value input with the highest domain + * priority. */ + for (const InputSocketRef *input_ref : node->inputs()) { + const DInputSocket input{node.context(), input_ref}; + + /* Get the output linked to the input. If it is null, that means the input is unlinked, so skip + * it. */ + const DOutputSocket output = get_output_linked_to_input(input); + if (!output) { + continue; + } + + const InputDescriptor input_descriptor = input_descriptor_from_input_socket(input_ref); + + /* If the output belongs to a node that is part of the shader compile unit, then the domain of + * the input is the domain of the compile unit itself. */ + if (shader_compile_unit_.contains(output.node())) { + /* Single value inputs can't be domain inputs. */ + if (shader_compile_unit_domain_.size == int2(1)) { + continue; + } + + /* Notice that the lower the domain priority value is, the higher the priority is, hence the + * less than comparison. */ + if (input_descriptor.domain_priority < current_domain_priority) { + node_domain = shader_compile_unit_domain_; + current_domain_priority = input_descriptor.domain_priority; + } + continue; + } + + const Result &result = get_result_from_output_socket(output); + + /* A single value input can't be a domain input. 
*/ + if (result.is_single_value() || input_descriptor.expects_single_value) { + continue; + } + + /* Notice that the lower the domain priority value is, the higher the priority is, hence the + * less than comparison. */ + if (input_descriptor.domain_priority < current_domain_priority) { + node_domain = result.domain(); + current_domain_priority = input_descriptor.domain_priority; + } + } + + return node_domain; +} + +} // namespace blender::realtime_compositor diff --git a/source/blender/compositor/realtime_compositor/intern/context.cc b/source/blender/compositor/realtime_compositor/intern/context.cc new file mode 100644 index 00000000000..64ac29af3d1 --- /dev/null +++ b/source/blender/compositor/realtime_compositor/intern/context.cc @@ -0,0 +1,36 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ + +#include "COM_context.hh" +#include "COM_static_shader_manager.hh" +#include "COM_texture_pool.hh" + +namespace blender::realtime_compositor { + +Context::Context(TexturePool &texture_pool) : texture_pool_(texture_pool) +{ +} + +int Context::get_frame_number() const +{ + return get_scene()->r.cfra; +} + +float Context::get_time() const +{ + const float frame_number = static_cast<float>(get_frame_number()); + const float frame_rate = static_cast<float>(get_scene()->r.frs_sec) / + static_cast<float>(get_scene()->r.frs_sec_base); + return frame_number / frame_rate; +} + +TexturePool &Context::texture_pool() +{ + return texture_pool_; +} + +StaticShaderManager &Context::shader_manager() +{ + return shader_manager_; +} + +} // namespace blender::realtime_compositor diff --git a/source/blender/compositor/realtime_compositor/intern/conversion_operation.cc b/source/blender/compositor/realtime_compositor/intern/conversion_operation.cc new file mode 100644 index 00000000000..d6bf74ffbee --- /dev/null +++ b/source/blender/compositor/realtime_compositor/intern/conversion_operation.cc @@ -0,0 +1,225 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ + +#include 
"BLI_math_vec_types.hh" + +#include "GPU_shader.h" + +#include "COM_context.hh" +#include "COM_conversion_operation.hh" +#include "COM_input_descriptor.hh" +#include "COM_result.hh" +#include "COM_utilities.hh" + +namespace blender::realtime_compositor { + +/* ------------------------------------------------------------------------------------------------- + * Conversion Operation. + */ + +void ConversionOperation::execute() +{ + Result &result = get_result(); + const Result &input = get_input(); + + if (input.is_single_value()) { + result.allocate_single_value(); + execute_single(input, result); + return; + } + + result.allocate_texture(input.domain()); + + GPUShader *shader = get_conversion_shader(); + GPU_shader_bind(shader); + + input.bind_as_texture(shader, "input_tx"); + result.bind_as_image(shader, "output_img"); + + compute_dispatch_threads_at_least(shader, input.domain().size); + + input.unbind_as_texture(); + result.unbind_as_image(); + GPU_shader_unbind(); +} + +SimpleOperation *ConversionOperation::construct_if_needed(Context &context, + const Result &input_result, + const InputDescriptor &input_descriptor) +{ + ResultType result_type = input_result.type(); + ResultType expected_type = input_descriptor.type; + + /* If the result type differs from the expected type, return an instance of an appropriate + * conversion operation. Otherwise, return a null pointer. 
*/ + + if (result_type == ResultType::Float && expected_type == ResultType::Vector) { + return new ConvertFloatToVectorOperation(context); + } + + if (result_type == ResultType::Float && expected_type == ResultType::Color) { + return new ConvertFloatToColorOperation(context); + } + + if (result_type == ResultType::Color && expected_type == ResultType::Float) { + return new ConvertColorToFloatOperation(context); + } + + if (result_type == ResultType::Color && expected_type == ResultType::Vector) { + return new ConvertColorToVectorOperation(context); + } + + if (result_type == ResultType::Vector && expected_type == ResultType::Float) { + return new ConvertVectorToFloatOperation(context); + } + + if (result_type == ResultType::Vector && expected_type == ResultType::Color) { + return new ConvertVectorToColorOperation(context); + } + + return nullptr; +} + +/* ------------------------------------------------------------------------------------------------- + * Convert Float To Vector Operation. + */ + +ConvertFloatToVectorOperation::ConvertFloatToVectorOperation(Context &context) + : ConversionOperation(context) +{ + InputDescriptor input_descriptor; + input_descriptor.type = ResultType::Float; + declare_input_descriptor(input_descriptor); + populate_result(Result(ResultType::Vector, texture_pool())); +} + +void ConvertFloatToVectorOperation::execute_single(const Result &input, Result &output) +{ + output.set_vector_value(float3(input.get_float_value())); +} + +GPUShader *ConvertFloatToVectorOperation::get_conversion_shader() const +{ + return shader_manager().get("compositor_convert_float_to_vector"); +} + +/* ------------------------------------------------------------------------------------------------- + * Convert Float To Color Operation. 
+ */ + +ConvertFloatToColorOperation::ConvertFloatToColorOperation(Context &context) + : ConversionOperation(context) +{ + InputDescriptor input_descriptor; + input_descriptor.type = ResultType::Float; + declare_input_descriptor(input_descriptor); + populate_result(Result(ResultType::Color, texture_pool())); +} + +void ConvertFloatToColorOperation::execute_single(const Result &input, Result &output) +{ + float4 color = float4(input.get_float_value()); + color[3] = 1.0f; + output.set_color_value(color); +} + +GPUShader *ConvertFloatToColorOperation::get_conversion_shader() const +{ + return shader_manager().get("compositor_convert_float_to_color"); +} + +/* ------------------------------------------------------------------------------------------------- + * Convert Color To Float Operation. + */ + +ConvertColorToFloatOperation::ConvertColorToFloatOperation(Context &context) + : ConversionOperation(context) +{ + InputDescriptor input_descriptor; + input_descriptor.type = ResultType::Color; + declare_input_descriptor(input_descriptor); + populate_result(Result(ResultType::Float, texture_pool())); +} + +void ConvertColorToFloatOperation::execute_single(const Result &input, Result &output) +{ + float4 color = input.get_color_value(); + output.set_float_value((color[0] + color[1] + color[2]) / 3.0f); +} + +GPUShader *ConvertColorToFloatOperation::get_conversion_shader() const +{ + return shader_manager().get("compositor_convert_color_to_float"); +} + +/* ------------------------------------------------------------------------------------------------- + * Convert Color To Vector Operation. 
+ */ + +ConvertColorToVectorOperation::ConvertColorToVectorOperation(Context &context) + : ConversionOperation(context) +{ + InputDescriptor input_descriptor; + input_descriptor.type = ResultType::Color; + declare_input_descriptor(input_descriptor); + populate_result(Result(ResultType::Vector, texture_pool())); +} + +void ConvertColorToVectorOperation::execute_single(const Result &input, Result &output) +{ + float4 color = input.get_color_value(); + output.set_vector_value(float3(color)); +} + +GPUShader *ConvertColorToVectorOperation::get_conversion_shader() const +{ + return shader_manager().get("compositor_convert_color_to_vector"); +} + +/* ------------------------------------------------------------------------------------------------- + * Convert Vector To Float Operation. + */ + +ConvertVectorToFloatOperation::ConvertVectorToFloatOperation(Context &context) + : ConversionOperation(context) +{ + InputDescriptor input_descriptor; + input_descriptor.type = ResultType::Vector; + declare_input_descriptor(input_descriptor); + populate_result(Result(ResultType::Float, texture_pool())); +} + +void ConvertVectorToFloatOperation::execute_single(const Result &input, Result &output) +{ + float3 vector = input.get_vector_value(); + output.set_float_value((vector[0] + vector[1] + vector[2]) / 3.0f); +} + +GPUShader *ConvertVectorToFloatOperation::get_conversion_shader() const +{ + return shader_manager().get("compositor_convert_vector_to_float"); +} + +/* ------------------------------------------------------------------------------------------------- + * Convert Vector To Color Operation. 
+ */ + +ConvertVectorToColorOperation::ConvertVectorToColorOperation(Context &context) + : ConversionOperation(context) +{ + InputDescriptor input_descriptor; + input_descriptor.type = ResultType::Vector; + declare_input_descriptor(input_descriptor); + populate_result(Result(ResultType::Color, texture_pool())); +} + +void ConvertVectorToColorOperation::execute_single(const Result &input, Result &output) +{ + output.set_color_value(float4(input.get_vector_value(), 1.0f)); +} + +GPUShader *ConvertVectorToColorOperation::get_conversion_shader() const +{ + return shader_manager().get("compositor_convert_vector_to_color"); +} + +} // namespace blender::realtime_compositor diff --git a/source/blender/compositor/realtime_compositor/intern/domain.cc b/source/blender/compositor/realtime_compositor/intern/domain.cc new file mode 100644 index 00000000000..31b297c212e --- /dev/null +++ b/source/blender/compositor/realtime_compositor/intern/domain.cc @@ -0,0 +1,38 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ + +#include "BLI_float3x3.hh" +#include "BLI_math_vec_types.hh" + +#include "COM_domain.hh" + +namespace blender::realtime_compositor { + +Domain::Domain(int2 size) : size(size), transformation(float3x3::identity()) +{ +} + +Domain::Domain(int2 size, float3x3 transformation) : size(size), transformation(transformation) +{ +} + +void Domain::transform(const float3x3 &input_transformation) +{ + transformation = input_transformation * transformation; +} + +Domain Domain::identity() +{ + return Domain(int2(1), float3x3::identity()); +} + +bool operator==(const Domain &a, const Domain &b) +{ + return a.size == b.size && a.transformation == b.transformation; +} + +bool operator!=(const Domain &a, const Domain &b) +{ + return !(a == b); +} + +} // namespace blender::realtime_compositor diff --git a/source/blender/compositor/realtime_compositor/intern/evaluator.cc b/source/blender/compositor/realtime_compositor/intern/evaluator.cc new file mode 100644 index 
00000000000..d358389f2e9 --- /dev/null +++ b/source/blender/compositor/realtime_compositor/intern/evaluator.cc @@ -0,0 +1,171 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ + +#include <string> + +#include "DNA_node_types.h" + +#include "NOD_derived_node_tree.hh" + +#include "COM_compile_state.hh" +#include "COM_context.hh" +#include "COM_evaluator.hh" +#include "COM_input_single_value_operation.hh" +#include "COM_node_operation.hh" +#include "COM_operation.hh" +#include "COM_result.hh" +#include "COM_scheduler.hh" +#include "COM_shader_operation.hh" +#include "COM_utilities.hh" + +namespace blender::realtime_compositor { + +using namespace nodes::derived_node_tree_types; + +Evaluator::Evaluator(Context &context, bNodeTree &node_tree) + : context_(context), node_tree_(node_tree) +{ +} + +void Evaluator::evaluate() +{ + context_.texture_pool().reset(); + + if (!is_compiled_) { + compile_and_evaluate(); + is_compiled_ = true; + return; + } + + for (const std::unique_ptr<Operation> &operation : operations_stream_) { + operation->evaluate(); + } +} + +void Evaluator::reset() +{ + operations_stream_.clear(); + derived_node_tree_.reset(); + node_tree_reference_map_.clear(); + + is_compiled_ = false; +} + +bool Evaluator::validate_node_tree() +{ + if (derived_node_tree_->has_link_cycles()) { + context_.set_info_message("Compositor node tree has cyclic links!"); + return false; + } + + if (derived_node_tree_->has_undefined_nodes_or_sockets()) { + context_.set_info_message("Compositor node tree has undefined nodes or sockets!"); + return false; + } + + return true; +} + +void Evaluator::compile_and_evaluate() +{ + derived_node_tree_ = std::make_unique<DerivedNodeTree>(node_tree_, node_tree_reference_map_); + + if (!validate_node_tree()) { + return; + } + + const Schedule schedule = compute_schedule(*derived_node_tree_); + + CompileState compile_state(schedule); + + for (const DNode &node : schedule) { + if (compile_state.should_compile_shader_compile_unit(node)) { + 
compile_and_evaluate_shader_compile_unit(compile_state); + } + + if (is_shader_node(node)) { + compile_state.add_node_to_shader_compile_unit(node); + } + else { + compile_and_evaluate_node(node, compile_state); + } + } +} + +void Evaluator::compile_and_evaluate_node(DNode node, CompileState &compile_state) +{ + NodeOperation *operation = node->typeinfo()->get_compositor_operation(context_, node); + + compile_state.map_node_to_node_operation(node, operation); + + map_node_operation_inputs_to_their_results(node, operation, compile_state); + + /* This has to be done after input mapping because the method may add Input Single Value + * Operations to the operations stream, which needs to be evaluated before the operation itself + * is evaluated. */ + operations_stream_.append(std::unique_ptr<Operation>(operation)); + + operation->compute_results_reference_counts(compile_state.get_schedule()); + + operation->evaluate(); +} + +void Evaluator::map_node_operation_inputs_to_their_results(DNode node, + NodeOperation *operation, + CompileState &compile_state) +{ + for (const InputSocketRef *input_ref : node->inputs()) { + const DInputSocket input{node.context(), input_ref}; + + DSocket origin = get_input_origin_socket(input); + + /* The origin socket is an output, which means the input is linked. So map the input to the + * result we get from the output. */ + if (origin->is_output()) { + Result &result = compile_state.get_result_from_output_socket(DOutputSocket(origin)); + operation->map_input_to_result(input->identifier(), &result); + continue; + } + + /* Otherwise, the origin socket is an input, which either means the input is unlinked and the + * origin is the input socket itself or the input is connected to an unlinked input of a group + * input node and the origin is the input of the group input node. So map the input to the + * result of a newly created Input Single Value Operation. 
*/ + auto *input_operation = new InputSingleValueOperation(context_, DInputSocket(origin)); + operation->map_input_to_result(input->identifier(), &input_operation->get_result()); + + operations_stream_.append(std::unique_ptr<InputSingleValueOperation>(input_operation)); + + input_operation->evaluate(); + } +} + +void Evaluator::compile_and_evaluate_shader_compile_unit(CompileState &compile_state) +{ + ShaderCompileUnit &compile_unit = compile_state.get_shader_compile_unit(); + ShaderOperation *operation = new ShaderOperation(context_, compile_unit); + + for (DNode node : compile_unit) { + compile_state.map_node_to_shader_operation(node, operation); + } + + map_shader_operation_inputs_to_their_results(operation, compile_state); + + operations_stream_.append(std::unique_ptr<Operation>(operation)); + + operation->compute_results_reference_counts(compile_state.get_schedule()); + + operation->evaluate(); + + compile_state.reset_shader_compile_unit(); +} + +void Evaluator::map_shader_operation_inputs_to_their_results(ShaderOperation *operation, + CompileState &compile_state) +{ + for (const auto &item : operation->get_inputs_to_linked_outputs_map().items()) { + Result &result = compile_state.get_result_from_output_socket(item.value); + operation->map_input_to_result(item.key, &result); + } +} + +} // namespace blender::realtime_compositor diff --git a/source/blender/compositor/realtime_compositor/intern/input_single_value_operation.cc b/source/blender/compositor/realtime_compositor/intern/input_single_value_operation.cc new file mode 100644 index 00000000000..0bdd40e3636 --- /dev/null +++ b/source/blender/compositor/realtime_compositor/intern/input_single_value_operation.cc @@ -0,0 +1,57 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ + +#include "BLI_math_vec_types.hh" + +#include "COM_input_single_value_operation.hh" +#include "COM_operation.hh" +#include "COM_result.hh" +#include "COM_utilities.hh" + +namespace blender::realtime_compositor { + +const StringRef 
InputSingleValueOperation::output_identifier_ = StringRef("Output"); + +InputSingleValueOperation::InputSingleValueOperation(Context &context, DInputSocket input_socket) + : Operation(context), input_socket_(input_socket) +{ + const ResultType result_type = get_node_socket_result_type(input_socket_.socket_ref()); + Result result = Result(result_type, texture_pool()); + + /* The result of an input single value operation is guaranteed to have a single user. */ + result.set_initial_reference_count(1); + + populate_result(result); +} + +void InputSingleValueOperation::execute() +{ + /* Allocate a single value for the result. */ + Result &result = get_result(); + result.allocate_single_value(); + + /* Set the value of the result to the default value of the input socket. */ + switch (result.type()) { + case ResultType::Float: + result.set_float_value(input_socket_->default_value<bNodeSocketValueFloat>()->value); + break; + case ResultType::Vector: + result.set_vector_value( + float3(input_socket_->default_value<bNodeSocketValueVector>()->value)); + break; + case ResultType::Color: + result.set_color_value(float4(input_socket_->default_value<bNodeSocketValueRGBA>()->value)); + break; + } +} + +Result &InputSingleValueOperation::get_result() +{ + return Operation::get_result(output_identifier_); +} + +void InputSingleValueOperation::populate_result(Result result) +{ + Operation::populate_result(output_identifier_, result); +} + +} // namespace blender::realtime_compositor diff --git a/source/blender/compositor/realtime_compositor/intern/node_operation.cc b/source/blender/compositor/realtime_compositor/intern/node_operation.cc new file mode 100644 index 00000000000..f02d0906447 --- /dev/null +++ b/source/blender/compositor/realtime_compositor/intern/node_operation.cc @@ -0,0 +1,67 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ + +#include <memory> + +#include "BLI_map.hh" +#include "BLI_string_ref.hh" +#include "BLI_vector.hh" + +#include "DNA_node_types.h" + +#include 
"NOD_derived_node_tree.hh" +#include "NOD_node_declaration.hh" + +#include "COM_context.hh" +#include "COM_input_descriptor.hh" +#include "COM_node_operation.hh" +#include "COM_operation.hh" +#include "COM_result.hh" +#include "COM_scheduler.hh" +#include "COM_utilities.hh" + +namespace blender::realtime_compositor { + +using namespace nodes::derived_node_tree_types; + +NodeOperation::NodeOperation(Context &context, DNode node) : Operation(context), node_(node) +{ + for (const OutputSocketRef *output : node->outputs()) { + const ResultType result_type = get_node_socket_result_type(output); + const Result result = Result(result_type, texture_pool()); + populate_result(output->identifier(), result); + } + + for (const InputSocketRef *input : node->inputs()) { + const InputDescriptor input_descriptor = input_descriptor_from_input_socket(input); + declare_input_descriptor(input->identifier(), input_descriptor); + } +} + +void NodeOperation::compute_results_reference_counts(const Schedule &schedule) +{ + for (const OutputSocketRef *output_ref : node()->outputs()) { + const DOutputSocket output{node().context(), output_ref}; + + const int reference_count = number_of_inputs_linked_to_output_conditioned( + output, [&](DInputSocket input) { return schedule.contains(input.node()); }); + + get_result(output->identifier()).set_initial_reference_count(reference_count); + } +} + +const DNode &NodeOperation::node() const +{ + return node_; +} + +const bNode &NodeOperation::bnode() const +{ + return *node_->bnode(); +} + +bool NodeOperation::should_compute_output(StringRef identifier) +{ + return get_result(identifier).should_compute(); +} + +} // namespace blender::realtime_compositor diff --git a/source/blender/compositor/realtime_compositor/intern/operation.cc b/source/blender/compositor/realtime_compositor/intern/operation.cc new file mode 100644 index 00000000000..42dd5aeebe8 --- /dev/null +++ b/source/blender/compositor/realtime_compositor/intern/operation.cc @@ -0,0 +1,201 
@@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ + +#include <limits> +#include <memory> + +#include "BLI_map.hh" +#include "BLI_string_ref.hh" +#include "BLI_vector.hh" + +#include "COM_context.hh" +#include "COM_conversion_operation.hh" +#include "COM_domain.hh" +#include "COM_input_descriptor.hh" +#include "COM_operation.hh" +#include "COM_realize_on_domain_operation.hh" +#include "COM_reduce_to_single_value_operation.hh" +#include "COM_result.hh" +#include "COM_simple_operation.hh" +#include "COM_static_shader_manager.hh" +#include "COM_texture_pool.hh" + +namespace blender::realtime_compositor { + +Operation::Operation(Context &context) : context_(context) +{ +} + +Operation::~Operation() = default; + +void Operation::evaluate() +{ + evaluate_input_processors(); + + reset_results(); + + execute(); + + release_inputs(); +} + +Result &Operation::get_result(StringRef identifier) +{ + return results_.lookup(identifier); +} + +void Operation::map_input_to_result(StringRef identifier, Result *result) +{ + results_mapped_to_inputs_.add_new(identifier, result); +} + +Domain Operation::compute_domain() +{ + /* Default to an identity domain in case no domain input was found, most likely because all + * inputs are single values. */ + Domain operation_domain = Domain::identity(); + int current_domain_priority = std::numeric_limits<int>::max(); + + /* Go over the inputs and find the domain of the non single value input with the highest domain + * priority. */ + for (StringRef identifier : input_descriptors_.keys()) { + const Result &result = get_input(identifier); + const InputDescriptor &descriptor = get_input_descriptor(identifier); + + /* A single value input can't be a domain input. */ + if (result.is_single_value() || descriptor.expects_single_value) { + continue; + } + + /* Notice that the lower the domain priority value is, the higher the priority is, hence the + * less than comparison. 
*/ + if (descriptor.domain_priority < current_domain_priority) { + operation_domain = result.domain(); + current_domain_priority = descriptor.domain_priority; + } + } + + return operation_domain; +} + +void Operation::add_and_evaluate_input_processors() +{ + /* Each input processor type is added to all inputs entirely before the next type. This is done + * because the construction of the input processors may depend on the result of previous input + * processors for all inputs. For instance, the realize on domain input processor considers the + * value of all inputs, so previous input processors for all inputs needs to be added and + * evaluated first. */ + + for (const StringRef &identifier : results_mapped_to_inputs_.keys()) { + SimpleOperation *single_value = ReduceToSingleValueOperation::construct_if_needed( + context(), get_input(identifier)); + add_and_evaluate_input_processor(identifier, single_value); + } + + for (const StringRef &identifier : results_mapped_to_inputs_.keys()) { + SimpleOperation *conversion = ConversionOperation::construct_if_needed( + context(), get_input(identifier), get_input_descriptor(identifier)); + add_and_evaluate_input_processor(identifier, conversion); + } + + for (const StringRef &identifier : results_mapped_to_inputs_.keys()) { + SimpleOperation *realize_on_domain = RealizeOnDomainOperation::construct_if_needed( + context(), get_input(identifier), get_input_descriptor(identifier), compute_domain()); + add_and_evaluate_input_processor(identifier, realize_on_domain); + } +} + +void Operation::add_and_evaluate_input_processor(StringRef identifier, SimpleOperation *processor) +{ + /* Allow null inputs to facilitate construct_if_needed pattern of addition. For instance, see the + * implementation of the add_and_evaluate_input_processors method. 
*/ + if (!processor) { + return; + } + + ProcessorsVector &processors = input_processors_.lookup_or_add_default(identifier); + + /* Get the result that should serve as the input for the processor. This is either the result + * mapped to the input or the result of the last processor depending on whether this is the first + * processor or not. */ + Result &result = processors.is_empty() ? get_input(identifier) : processors.last()->get_result(); + + /* Map the input result of the processor and add it to the processors vector. */ + processor->map_input_to_result(&result); + processors.append(std::unique_ptr<SimpleOperation>(processor)); + + /* Switch the result mapped to the input to be the output result of the processor. */ + switch_result_mapped_to_input(identifier, &processor->get_result()); + + processor->evaluate(); +} + +Result &Operation::get_input(StringRef identifier) const +{ + return *results_mapped_to_inputs_.lookup(identifier); +} + +void Operation::switch_result_mapped_to_input(StringRef identifier, Result *result) +{ + results_mapped_to_inputs_.lookup(identifier) = result; +} + +void Operation::populate_result(StringRef identifier, Result result) +{ + results_.add_new(identifier, result); +} + +void Operation::declare_input_descriptor(StringRef identifier, InputDescriptor descriptor) +{ + input_descriptors_.add_new(identifier, descriptor); +} + +InputDescriptor &Operation::get_input_descriptor(StringRef identifier) +{ + return input_descriptors_.lookup(identifier); +} + +Context &Operation::context() +{ + return context_; +} + +TexturePool &Operation::texture_pool() const +{ + return context_.texture_pool(); +} + +StaticShaderManager &Operation::shader_manager() const +{ + return context_.shader_manager(); +} + +void Operation::evaluate_input_processors() +{ + if (!input_processors_added_) { + add_and_evaluate_input_processors(); + input_processors_added_ = true; + return; + } + + for (const ProcessorsVector &processors : input_processors_.values()) { + 
for (const std::unique_ptr<SimpleOperation> &processor : processors) { + processor->evaluate(); + } + } +} + +void Operation::reset_results() +{ + for (Result &result : results_.values()) { + result.reset(); + } +} + +void Operation::release_inputs() +{ + for (Result *result : results_mapped_to_inputs_.values()) { + result->release(); + } +} + +} // namespace blender::realtime_compositor diff --git a/source/blender/compositor/realtime_compositor/intern/realize_on_domain_operation.cc b/source/blender/compositor/realtime_compositor/intern/realize_on_domain_operation.cc new file mode 100644 index 00000000000..47993060a74 --- /dev/null +++ b/source/blender/compositor/realtime_compositor/intern/realize_on_domain_operation.cc @@ -0,0 +1,130 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ + +#include "BLI_float3x3.hh" +#include "BLI_math_vec_types.hh" +#include "BLI_utildefines.h" + +#include "GPU_shader.h" +#include "GPU_texture.h" + +#include "COM_context.hh" +#include "COM_domain.hh" +#include "COM_input_descriptor.hh" +#include "COM_realize_on_domain_operation.hh" +#include "COM_result.hh" +#include "COM_utilities.hh" + +namespace blender::realtime_compositor { + +RealizeOnDomainOperation::RealizeOnDomainOperation(Context &context, + Domain domain, + ResultType type) + : SimpleOperation(context), domain_(domain) +{ + InputDescriptor input_descriptor; + input_descriptor.type = type; + declare_input_descriptor(input_descriptor); + populate_result(Result(type, texture_pool())); +} + +void RealizeOnDomainOperation::execute() +{ + Result &input = get_input(); + Result &result = get_result(); + + result.allocate_texture(domain_); + + GPUShader *shader = get_realization_shader(); + GPU_shader_bind(shader); + + /* Transform the input space into the domain space. */ + const float3x3 local_transformation = input.domain().transformation * + domain_.transformation.inverted(); + + /* Set the origin of the transformation to be the center of the domain. 
*/ + const float3x3 transformation = float3x3::from_origin_transformation( + local_transformation, float2(domain_.size) / 2.0f); + + /* Invert the transformation because the shader transforms the domain coordinates instead of the + * input image itself and thus expect the inverse. */ + const float3x3 inverse_transformation = transformation.inverted(); + + GPU_shader_uniform_mat3_as_mat4(shader, "inverse_transformation", inverse_transformation.ptr()); + + /* The texture sampler should use bilinear interpolation for both the bilinear and bicubic + * cases, as the logic used by the bicubic realization shader expects textures to use bilinear + * interpolation. */ + const bool use_bilinear = ELEM(input.get_realization_options().interpolation, + Interpolation::Bilinear, + Interpolation::Bicubic); + GPU_texture_filter_mode(input.texture(), use_bilinear); + + /* Make out-of-bound texture access return zero by clamping to border color. And make texture + * wrap appropriately if the input repeats. 
*/ + const bool repeats = input.get_realization_options().repeat_x || + input.get_realization_options().repeat_y; + GPU_texture_wrap_mode(input.texture(), repeats, false); + + input.bind_as_texture(shader, "input_tx"); + result.bind_as_image(shader, "domain_img"); + + compute_dispatch_threads_at_least(shader, domain_.size); + + input.unbind_as_texture(); + result.unbind_as_image(); + GPU_shader_unbind(); +} + +GPUShader *RealizeOnDomainOperation::get_realization_shader() +{ + switch (get_result().type()) { + case ResultType::Color: + return shader_manager().get("compositor_realize_on_domain_color"); + case ResultType::Vector: + return shader_manager().get("compositor_realize_on_domain_vector"); + case ResultType::Float: + return shader_manager().get("compositor_realize_on_domain_float"); + } + + BLI_assert_unreachable(); + return nullptr; +} + +Domain RealizeOnDomainOperation::compute_domain() +{ + return domain_; +} + +SimpleOperation *RealizeOnDomainOperation::construct_if_needed( + Context &context, + const Result &input_result, + const InputDescriptor &input_descriptor, + const Domain &operation_domain) +{ + /* This input wants to skip realization, the operation is not needed. */ + if (input_descriptor.skip_realization) { + return nullptr; + } + + /* The input expects a single value and if no single value is provided, it will be ignored and a + * default value will be used, so no need to realize it and the operation is not needed. */ + if (input_descriptor.expects_single_value) { + return nullptr; + } + + /* Input result is a single value and does not need realization, the operation is not needed. */ + if (input_result.is_single_value()) { + return nullptr; + } + + /* The input have an identical domain to the operation domain, so no need to realize it and the + * operation is not needed. */ + if (input_result.domain() == operation_domain) { + return nullptr; + } + + /* Otherwise, realization is needed. 
*/ + return new RealizeOnDomainOperation(context, operation_domain, input_descriptor.type); +} + +} // namespace blender::realtime_compositor diff --git a/source/blender/compositor/realtime_compositor/intern/reduce_to_single_value_operation.cc b/source/blender/compositor/realtime_compositor/intern/reduce_to_single_value_operation.cc new file mode 100644 index 00000000000..acc9b4ab7d6 --- /dev/null +++ b/source/blender/compositor/realtime_compositor/intern/reduce_to_single_value_operation.cc @@ -0,0 +1,67 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ + +#include "GPU_state.h" +#include "GPU_texture.h" + +#include "MEM_guardedalloc.h" + +#include "COM_context.hh" +#include "COM_input_descriptor.hh" +#include "COM_reduce_to_single_value_operation.hh" +#include "COM_result.hh" + +namespace blender::realtime_compositor { + +ReduceToSingleValueOperation::ReduceToSingleValueOperation(Context &context, ResultType type) + : SimpleOperation(context) +{ + InputDescriptor input_descriptor; + input_descriptor.type = type; + declare_input_descriptor(input_descriptor); + populate_result(Result(type, texture_pool())); +} + +void ReduceToSingleValueOperation::execute() +{ + /* Make sure any prior writes to the texture are reflected before downloading it. */ + GPU_memory_barrier(GPU_BARRIER_TEXTURE_UPDATE); + + const Result &input = get_input(); + float *pixel = static_cast<float *>(GPU_texture_read(input.texture(), GPU_DATA_FLOAT, 0)); + + Result &result = get_result(); + result.allocate_single_value(); + switch (result.type()) { + case ResultType::Color: + result.set_color_value(pixel); + break; + case ResultType::Vector: + result.set_vector_value(pixel); + break; + case ResultType::Float: + result.set_float_value(*pixel); + break; + } + + MEM_freeN(pixel); +} + +SimpleOperation *ReduceToSingleValueOperation::construct_if_needed(Context &context, + const Result &input_result) +{ + /* Input result is already a single value, the operation is not needed. 
*/ + if (input_result.is_single_value()) { + return nullptr; + } + + /* The input is a full sized texture and can't be reduced to a single value, the operation is not + * needed. */ + if (input_result.domain().size != int2(1)) { + return nullptr; + } + + /* The input is a texture of a single pixel and can be reduced to a single value. */ + return new ReduceToSingleValueOperation(context, input_result.type()); +} + +} // namespace blender::realtime_compositor diff --git a/source/blender/compositor/realtime_compositor/intern/result.cc b/source/blender/compositor/realtime_compositor/intern/result.cc new file mode 100644 index 00000000000..8059367d211 --- /dev/null +++ b/source/blender/compositor/realtime_compositor/intern/result.cc @@ -0,0 +1,257 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ + +#include "BLI_float3x3.hh" +#include "BLI_math_vec_types.hh" + +#include "GPU_shader.h" +#include "GPU_state.h" +#include "GPU_texture.h" + +#include "COM_domain.hh" +#include "COM_result.hh" +#include "COM_texture_pool.hh" + +namespace blender::realtime_compositor { + +Result::Result(ResultType type, TexturePool &texture_pool) + : type_(type), texture_pool_(&texture_pool) +{ +} + +void Result::allocate_texture(Domain domain) +{ + is_single_value_ = false; + switch (type_) { + case ResultType::Float: + texture_ = texture_pool_->acquire_float(domain.size); + break; + case ResultType::Vector: + texture_ = texture_pool_->acquire_vector(domain.size); + break; + case ResultType::Color: + texture_ = texture_pool_->acquire_color(domain.size); + break; + } + domain_ = domain; +} + +void Result::allocate_single_value() +{ + is_single_value_ = true; + /* Single values are stored in 1x1 textures as well as the single value members. 
*/ + const int2 texture_size{1, 1}; + switch (type_) { + case ResultType::Float: + texture_ = texture_pool_->acquire_float(texture_size); + break; + case ResultType::Vector: + texture_ = texture_pool_->acquire_vector(texture_size); + break; + case ResultType::Color: + texture_ = texture_pool_->acquire_color(texture_size); + break; + } + domain_ = Domain::identity(); +} + +void Result::allocate_invalid() +{ + allocate_single_value(); + switch (type_) { + case ResultType::Float: + set_float_value(0.0f); + break; + case ResultType::Vector: + set_vector_value(float3(0.0f)); + break; + case ResultType::Color: + set_color_value(float4(0.0f)); + break; + } +} + +void Result::bind_as_texture(GPUShader *shader, const char *texture_name) const +{ + /* Make sure any prior writes to the texture are reflected before reading from it. */ + GPU_memory_barrier(GPU_BARRIER_TEXTURE_FETCH); + + const int texture_image_unit = GPU_shader_get_texture_binding(shader, texture_name); + GPU_texture_bind(texture_, texture_image_unit); +} + +void Result::bind_as_image(GPUShader *shader, const char *image_name) const +{ + const int image_unit = GPU_shader_get_texture_binding(shader, image_name); + GPU_texture_image_bind(texture_, image_unit); +} + +void Result::unbind_as_texture() const +{ + GPU_texture_unbind(texture_); +} + +void Result::unbind_as_image() const +{ + GPU_texture_image_unbind(texture_); +} + +void Result::pass_through(Result &target) +{ + /* Increment the reference count of the master by the original reference count of the target. */ + increment_reference_count(target.reference_count()); + + /* Make the target an exact copy of this result, but keep the initial reference count, as this is + * a property of the original result and is needed for correctly resetting the result before the + * next evaluation. 
*/ + const int initial_reference_count = target.initial_reference_count_; + target = *this; + target.initial_reference_count_ = initial_reference_count; + + target.master_ = this; +} + +void Result::transform(const float3x3 &transformation) +{ + domain_.transform(transformation); +} + +RealizationOptions &Result::get_realization_options() +{ + return domain_.realization_options; +} + +float Result::get_float_value() const +{ + return float_value_; +} + +float3 Result::get_vector_value() const +{ + return vector_value_; +} + +float4 Result::get_color_value() const +{ + return color_value_; +} + +float Result::get_float_value_default(float default_value) const +{ + if (is_single_value()) { + return get_float_value(); + } + return default_value; +} + +float3 Result::get_vector_value_default(const float3 &default_value) const +{ + if (is_single_value()) { + return get_vector_value(); + } + return default_value; +} + +float4 Result::get_color_value_default(const float4 &default_value) const +{ + if (is_single_value()) { + return get_color_value(); + } + return default_value; +} + +void Result::set_float_value(float value) +{ + float_value_ = value; + GPU_texture_update(texture_, GPU_DATA_FLOAT, &float_value_); +} + +void Result::set_vector_value(const float3 &value) +{ + vector_value_ = value; + GPU_texture_update(texture_, GPU_DATA_FLOAT, vector_value_); +} + +void Result::set_color_value(const float4 &value) +{ + color_value_ = value; + GPU_texture_update(texture_, GPU_DATA_FLOAT, color_value_); +} + +void Result::set_initial_reference_count(int count) +{ + initial_reference_count_ = count; +} + +void Result::reset() +{ + master_ = nullptr; + reference_count_ = initial_reference_count_; +} + +void Result::increment_reference_count(int count) +{ + /* If there is a master result, increment its reference count instead. 
*/ + if (master_) { + master_->increment_reference_count(count); + return; + } + + reference_count_ += count; +} + +void Result::release() +{ + /* If there is a master result, release it instead. */ + if (master_) { + master_->release(); + return; + } + + /* Decrement the reference count, and if it reaches zero, release the texture back into the + * texture pool. */ + reference_count_--; + if (reference_count_ == 0) { + texture_pool_->release(texture_); + } +} + +bool Result::should_compute() +{ + return initial_reference_count_ != 0; +} + +ResultType Result::type() const +{ + return type_; +} + +bool Result::is_texture() const +{ + return !is_single_value_; +} + +bool Result::is_single_value() const +{ + return is_single_value_; +} + +GPUTexture *Result::texture() const +{ + return texture_; +} + +int Result::reference_count() const +{ + /* If there is a master result, return its reference count instead. */ + if (master_) { + return master_->reference_count(); + } + return reference_count_; +} + +const Domain &Result::domain() const +{ + return domain_; +} + +} // namespace blender::realtime_compositor diff --git a/source/blender/compositor/realtime_compositor/intern/scheduler.cc b/source/blender/compositor/realtime_compositor/intern/scheduler.cc new file mode 100644 index 00000000000..ce8b9330541 --- /dev/null +++ b/source/blender/compositor/realtime_compositor/intern/scheduler.cc @@ -0,0 +1,311 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ + +#include "BLI_map.hh" +#include "BLI_set.hh" +#include "BLI_stack.hh" +#include "BLI_vector.hh" +#include "BLI_vector_set.hh" + +#include "NOD_derived_node_tree.hh" + +#include "COM_scheduler.hh" +#include "COM_utilities.hh" + +namespace blender::realtime_compositor { + +using namespace nodes::derived_node_tree_types; + +/* Compute the output node whose result should be computed. The output node is the node marked as + * NODE_DO_OUTPUT. 
If multiple types of output nodes are marked, then the preference will be + * CMP_NODE_COMPOSITE > CMP_NODE_VIEWER > CMP_NODE_SPLITVIEWER. If no output node exists, a null + * node will be returned. */ +static DNode compute_output_node(DerivedNodeTree &tree) +{ + const NodeTreeRef &root_tree = tree.root_context().tree(); + + for (const NodeRef *node : root_tree.nodes_by_type("CompositorNodeComposite")) { + if (node->bnode()->flag & NODE_DO_OUTPUT) { + return DNode(&tree.root_context(), node); + } + } + + for (const NodeRef *node : root_tree.nodes_by_type("CompositorNodeViewer")) { + if (node->bnode()->flag & NODE_DO_OUTPUT) { + return DNode(&tree.root_context(), node); + } + } + + for (const NodeRef *node : root_tree.nodes_by_type("CompositorNodeSplitViewer")) { + if (node->bnode()->flag & NODE_DO_OUTPUT) { + return DNode(&tree.root_context(), node); + } + } + + /* No output node found, return a null node. */ + return DNode(); +} + +/* A type representing a mapping that associates each node with a heuristic estimation of the + * number of intermediate buffers needed to compute it and all of its dependencies. See the + * compute_number_of_needed_buffers function for more information. */ +using NeededBuffers = Map<DNode, int>; + +/* Compute a heuristic estimation of the number of intermediate buffers needed to compute each node + * and all of its dependencies for all nodes that the given node depends on. The output is a map + * that maps each node with the number of intermediate buffers needed to compute it and all of its + * dependencies. + * + * Consider a node that takes n number of buffers as an input from a number of node dependencies, + * which we shall call the input nodes. The node also computes and outputs m number of buffers. + * In order for the node to compute its output, a number of intermediate buffers will be needed. + * Since the node takes n buffers and outputs m buffers, then the number of buffers directly + * needed by the node is (n + m). 
But each of the input buffers are computed by a node that, in + * turn, needs a number of buffers to compute its output. So the total number of buffers needed + * to compute the output of the node is max(n + m, d) where d is the number of buffers needed by + * the input node that needs the largest number of buffers. We only consider the input node that + * needs the largest number of buffers, because those buffers can be reused by any input node + * that needs a lesser number of buffers. + * + * Shader nodes, however, are a special case because links between two shader nodes inside the same + * shader operation don't pass a buffer, but a single value in the compiled shader. So for shader + * nodes, only inputs and outputs linked to nodes that are not shader nodes should be considered. + * Note that this might not actually be true, because the compiler may decide to split a shader + * operation into multiples ones that will pass buffers, but this is not something that can be + * known at scheduling-time. See the discussion in COM_compile_state.hh, COM_evaluator.hh, and + * COM_shader_operation.hh for more information. In the node tree shown below, node 4 will have + * exactly the same number of needed buffers by node 3, because its inputs and outputs are all + * internally linked in the shader operation. + * + * Shader Operation + * +------------------------------------------------------+ + * .------------. | .------------. .------------. .------------. | .------------. + * | Node 1 | | | Node 3 | | Node 4 | | Node 5 | | | Node 6 | + * | |----|--| |--| |------| |--|--| | + * | | .-|--| | | | .---| | | | | + * '------------' | | '------------' '------------' | '------------' | '------------' + * | +----------------------------------|-------------------+ + * .------------. 
| | + * | Node 2 | | | + * | |--'------------------------------------' + * | | + * '------------' + * + * Note that the computed output is not guaranteed to be accurate, and will not be in most cases. + * The computation is merely a heuristic estimation that works well in most cases. This is due to a + * number of reasons: + * - The node tree is actually a graph that allows output sharing, which is not something that was + * taken into consideration in this implementation because it is difficult to correctly consider. + * - Each node may allocate any number of internal buffers, which is not taken into account in this + * implementation because it rarely affects the output and is done by very few nodes. + * - The compiler may decide to compiler the schedule differently depending on runtime information + * which we can merely speculate at scheduling-time as described above. */ +static NeededBuffers compute_number_of_needed_buffers(DNode output_node) +{ + NeededBuffers needed_buffers; + + /* A stack of nodes used to traverse the node tree starting from the output node. */ + Stack<DNode> node_stack = {output_node}; + + /* Traverse the node tree in a post order depth first manner and compute the number of needed + * buffers for each node. Post order traversal guarantee that all the node dependencies of each + * node are computed before it. This is done by pushing all the uncomputed node dependencies to + * the node stack first and only popping and computing the node when all its node dependencies + * were computed. */ + while (!node_stack.is_empty()) { + /* Do not pop the node immediately, as it may turn out that we can't compute its number of + * needed buffers just yet because its dependencies weren't computed, it will be popped later + * when needed. */ + DNode &node = node_stack.peek(); + + /* Go over the node dependencies connected to the inputs of the node and push them to the node + * stack if they were not computed already. 
*/ + Set<DNode> pushed_nodes; + for (const InputSocketRef *input_ref : node->inputs()) { + const DInputSocket input{node.context(), input_ref}; + + /* Get the output linked to the input. If it is null, that means the input is unlinked and + * has no dependency node. */ + const DOutputSocket output = get_output_linked_to_input(input); + if (!output) { + continue; + } + + /* The node dependency was already computed or pushed before, so skip it. */ + if (needed_buffers.contains(output.node()) || pushed_nodes.contains(output.node())) { + continue; + } + + /* The output node needs to be computed, push the node dependency to the node stack and + * indicate that it was pushed. */ + node_stack.push(output.node()); + pushed_nodes.add_new(output.node()); + } + + /* If any of the node dependencies were pushed, that means that not all of them were computed + * and consequently we can't compute the number of needed buffers for this node just yet. */ + if (!pushed_nodes.is_empty()) { + continue; + } + + /* We don't need to store the result of the pop because we already peeked at it before. */ + node_stack.pop(); + + /* Compute the number of buffers that the node takes as an input as well as the number of + * buffers needed to compute the most demanding of the node dependencies. */ + int number_of_input_buffers = 0; + int buffers_needed_by_dependencies = 0; + for (const InputSocketRef *input_ref : node->inputs()) { + const DInputSocket input{node.context(), input_ref}; + + /* Get the output linked to the input. If it is null, that means the input is unlinked. + * Unlinked inputs do not take a buffer, so skip those inputs. */ + const DOutputSocket output = get_output_linked_to_input(input); + if (!output) { + continue; + } + + /* Since this input is linked, if the link is not between two shader nodes, it means that the + * node takes a buffer through this input and so we increment the number of input buffers. 
*/ + if (!is_shader_node(node) || !is_shader_node(output.node())) { + number_of_input_buffers++; + } + + /* If the number of buffers needed by the node dependency is more than the total number of + * buffers needed by the dependencies, then update the latter to be the former. This is + * computing the "d" in the aforementioned equation "max(n + m, d)". */ + const int buffers_needed_by_dependency = needed_buffers.lookup(output.node()); + if (buffers_needed_by_dependency > buffers_needed_by_dependencies) { + buffers_needed_by_dependencies = buffers_needed_by_dependency; + } + } + + /* Compute the number of buffers that will be computed/output by this node. */ + int number_of_output_buffers = 0; + for (const OutputSocketRef *output_ref : node->outputs()) { + const DOutputSocket output{node.context(), output_ref}; + + /* The output is not linked, it outputs no buffer. */ + if (output->logically_linked_sockets().is_empty()) { + continue; + } + + /* If any of the links is not between two shader nodes, it means that the node outputs + * a buffer through this output and so we increment the number of output buffers. */ + if (!is_output_linked_to_node_conditioned(output, is_shader_node) || !is_shader_node(node)) { + number_of_output_buffers++; + } + } + + /* Compute the heuristic estimation of the number of needed intermediate buffers to compute + * this node and all of its dependencies. This is computing the aforementioned equation + * "max(n + m, d)". */ + const int total_buffers = MAX2(number_of_input_buffers + number_of_output_buffers, + buffers_needed_by_dependencies); + needed_buffers.add(node, total_buffers); + } + + return needed_buffers; +} + +/* There are multiple different possible orders of evaluating a node graph, each of which needs + * to allocate a number of intermediate buffers to store its intermediate results. It follows + * that we need to find the evaluation order which uses the least amount of intermediate buffers. 
+ * For instance, consider a node that takes two input buffers A and B. Each of those buffers is + * computed through a number of nodes constituting a sub-graph whose root is the node that + * outputs that buffer. Suppose the number of intermediate buffers needed to compute A and B are + * N(A) and N(B) respectively and N(A) > N(B). Then evaluating the sub-graph computing A would be + * a better option than that of B, because had B was computed first, its outputs will need to be + * stored in extra buffers in addition to the buffers needed by A. The number of buffers needed by + * each node is estimated as described in the compute_number_of_needed_buffers function. + * + * This is a heuristic generalization of the Sethi–Ullman algorithm, a generalization that + * doesn't always guarantee an optimal evaluation order, as the optimal evaluation order is very + * difficult to compute, however, this method works well in most cases. Moreover it assumes that + * all buffers will have roughly the same size, which may not always be the case. */ +Schedule compute_schedule(DerivedNodeTree &tree) +{ + Schedule schedule; + + /* Compute the output node whose result should be computed. */ + const DNode output_node = compute_output_node(tree); + + /* No output node, the node tree has no effect, return an empty schedule. */ + if (!output_node) { + return schedule; + } + + /* Compute the number of buffers needed by each node connected to the output. */ + const NeededBuffers needed_buffers = compute_number_of_needed_buffers(output_node); + + /* A stack of nodes used to traverse the node tree starting from the output node. */ + Stack<DNode> node_stack = {output_node}; + + /* Traverse the node tree in a post order depth first manner, scheduling the nodes in an order + * informed by the number of buffers needed by each node. Post order traversal guarantee that all + * the node dependencies of each node are scheduled before it. 
This is done by pushing all the + * unscheduled node dependencies to the node stack first and only popping and scheduling the node + * when all its node dependencies were scheduled. */ + while (!node_stack.is_empty()) { + /* Do not pop the node immediately, as it may turn out that we can't schedule it just yet + * because its dependencies weren't scheduled, it will be popped later when needed. */ + DNode &node = node_stack.peek(); + + /* Compute the nodes directly connected to the node inputs sorted by their needed buffers such + * that the node with the lowest number of needed buffers comes first. Note that we actually + * want the node with the highest number of needed buffers to be schedule first, but since + * those are pushed to the traversal stack, we need to push them in reverse order. */ + Vector<DNode> sorted_dependency_nodes; + for (const InputSocketRef *input_ref : node->inputs()) { + const DInputSocket input{node.context(), input_ref}; + + /* Get the output linked to the input. If it is null, that means the input is unlinked and + * has no dependency node, so skip it. */ + const DOutputSocket output = get_output_linked_to_input(input); + if (!output) { + continue; + } + + /* The dependency node was added before, so skip it. The number of dependency nodes is very + * small, typically less than 3, so a linear search is okay. */ + if (sorted_dependency_nodes.contains(output.node())) { + continue; + } + + /* The dependency node was already schedule, so skip it. */ + if (schedule.contains(output.node())) { + continue; + } + + /* Sort in ascending order on insertion, the number of dependency nodes is very small, + * typically less than 3, so insertion sort is okay. 
*/ + int insertion_position = 0; + for (int i = 0; i < sorted_dependency_nodes.size(); i++) { + if (needed_buffers.lookup(output.node()) > + needed_buffers.lookup(sorted_dependency_nodes[i])) { + insertion_position++; + } + else { + break; + } + } + sorted_dependency_nodes.insert(insertion_position, output.node()); + } + + /* Push the sorted dependency nodes to the node stack in order. */ + for (const DNode &dependency_node : sorted_dependency_nodes) { + node_stack.push(dependency_node); + } + + /* If there are no sorted dependency nodes, that means they were all already scheduled or that + * none exists in the first place, so we can pop and schedule the node now. */ + if (sorted_dependency_nodes.is_empty()) { + /* The node might have already been scheduled, so we don't use add_new here and simply don't + * add it if it was already scheduled. */ + schedule.add(node_stack.pop()); + } + } + + return schedule; +} + +} // namespace blender::realtime_compositor diff --git a/source/blender/compositor/realtime_compositor/intern/shader_node.cc b/source/blender/compositor/realtime_compositor/intern/shader_node.cc new file mode 100644 index 00000000000..f23485cee96 --- /dev/null +++ b/source/blender/compositor/realtime_compositor/intern/shader_node.cc @@ -0,0 +1,155 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ + +#include "BLI_assert.h" +#include "BLI_math_vector.h" +#include "BLI_string_ref.hh" + +#include "DNA_node_types.h" + +#include "NOD_derived_node_tree.hh" + +#include "GPU_material.h" + +#include "COM_shader_node.hh" +#include "COM_utilities.hh" + +namespace blender::realtime_compositor { + +using namespace nodes::derived_node_tree_types; + +ShaderNode::ShaderNode(DNode node) : node_(node) +{ + populate_inputs(); + populate_outputs(); +} + +GPUNodeStack *ShaderNode::get_inputs_array() +{ + return inputs_.data(); +} + +GPUNodeStack *ShaderNode::get_outputs_array() +{ + return outputs_.data(); +} + +GPUNodeStack &ShaderNode::get_input(StringRef identifier) +{ + 
return inputs_[node_.input_by_identifier(identifier)->index()]; +} + +GPUNodeStack &ShaderNode::get_output(StringRef identifier) +{ + return outputs_[node_.output_by_identifier(identifier)->index()]; +} + +GPUNodeLink *ShaderNode::get_input_link(StringRef identifier) +{ + GPUNodeStack &input = get_input(identifier); + if (input.link) { + return input.link; + } + return GPU_uniform(input.vec); +} + +const DNode &ShaderNode::node() const +{ + return node_; +} + +bNode &ShaderNode::bnode() const +{ + return *node_->bnode(); +} + +static eGPUType gpu_type_from_socket_type(eNodeSocketDatatype type) +{ + switch (type) { + case SOCK_FLOAT: + return GPU_FLOAT; + case SOCK_VECTOR: + return GPU_VEC3; + case SOCK_RGBA: + return GPU_VEC4; + default: + BLI_assert_unreachable(); + return GPU_NONE; + } +} + +static void gpu_stack_vector_from_socket(float *vector, const SocketRef *socket) +{ + switch (socket->bsocket()->type) { + case SOCK_FLOAT: + vector[0] = socket->default_value<bNodeSocketValueFloat>()->value; + return; + case SOCK_VECTOR: + copy_v3_v3(vector, socket->default_value<bNodeSocketValueVector>()->value); + return; + case SOCK_RGBA: + copy_v4_v4(vector, socket->default_value<bNodeSocketValueRGBA>()->value); + return; + default: + BLI_assert_unreachable(); + } +} + +static void populate_gpu_node_stack(DSocket socket, GPUNodeStack &stack) +{ + /* Make sure this stack is not marked as the end of the stack array. */ + stack.end = false; + /* This will be initialized later by the GPU material compiler or the compile method. */ + stack.link = nullptr; + + stack.sockettype = socket->bsocket()->type; + stack.type = gpu_type_from_socket_type((eNodeSocketDatatype)socket->bsocket()->type); + + if (socket->is_input()) { + const DInputSocket input(socket); + + DSocket origin = get_input_origin_socket(input); + + /* The input is linked if the origin socket is an output socket. Had it been an input socket, + * then it is an unlinked input of a group input node. 
*/ + stack.hasinput = origin->is_output(); + + /* Get the socket value from the origin if it is an input, because then it would either be an + * unlinked input or an unlinked input of a group input node that the socket is linked to, + * otherwise, get the value from the socket itself. */ + if (origin->is_input()) { + gpu_stack_vector_from_socket(stack.vec, origin.socket_ref()); + } + else { + gpu_stack_vector_from_socket(stack.vec, socket.socket_ref()); + } + } + else { + stack.hasoutput = socket->is_logically_linked(); + } +} + +void ShaderNode::populate_inputs() +{ + /* Reserve a stack for each input in addition to an extra stack at the end to mark the end of the + * array, as this is what the GPU module functions expect. */ + inputs_.resize(node_->inputs().size() + 1); + inputs_.last().end = true; + + for (int i = 0; i < node_->inputs().size(); i++) { + populate_gpu_node_stack(node_.input(i), inputs_[i]); + } +} + +void ShaderNode::populate_outputs() +{ + /* Reserve a stack for each output in addition to an extra stack at the end to mark the end of + * the array, as this is what the GPU module functions expect. 
*/ + outputs_.resize(node_->outputs().size() + 1); + outputs_.last().end = true; + + for (int i = 0; i < node_->outputs().size(); i++) { + populate_gpu_node_stack(node_.output(i), outputs_[i]); + } +} + +} // namespace blender::realtime_compositor diff --git a/source/blender/compositor/realtime_compositor/intern/shader_operation.cc b/source/blender/compositor/realtime_compositor/intern/shader_operation.cc new file mode 100644 index 00000000000..a097c81a4c5 --- /dev/null +++ b/source/blender/compositor/realtime_compositor/intern/shader_operation.cc @@ -0,0 +1,522 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ + +#include <memory> +#include <string> + +#include "BLI_listbase.h" +#include "BLI_map.hh" +#include "BLI_string_ref.hh" +#include "BLI_utildefines.h" + +#include "DNA_customdata_types.h" + +#include "GPU_material.h" +#include "GPU_shader.h" +#include "GPU_texture.h" +#include "GPU_uniform_buffer.h" + +#include "gpu_shader_create_info.hh" + +#include "NOD_derived_node_tree.hh" +#include "NOD_node_declaration.hh" + +#include "COM_context.hh" +#include "COM_operation.hh" +#include "COM_result.hh" +#include "COM_scheduler.hh" +#include "COM_shader_node.hh" +#include "COM_shader_operation.hh" +#include "COM_utilities.hh" + +namespace blender::realtime_compositor { + +using namespace nodes::derived_node_tree_types; + +ShaderOperation::ShaderOperation(Context &context, ShaderCompileUnit &compile_unit) + : Operation(context), compile_unit_(compile_unit) +{ + material_ = GPU_material_from_callbacks(&construct_material, &generate_code, this); + GPU_material_status_set(material_, GPU_MAT_QUEUED); + GPU_material_compile(material_); +} + +ShaderOperation::~ShaderOperation() +{ + GPU_material_free_single(material_); +} + +void ShaderOperation::execute() +{ + const Domain domain = compute_domain(); + for (StringRef identifier : output_sockets_to_output_identifiers_map_.values()) { + Result &result = get_result(identifier); + result.allocate_texture(domain); + } + + 
GPUShader *shader = GPU_material_get_shader(material_); + GPU_shader_bind(shader); + + bind_material_resources(shader); + bind_inputs(shader); + bind_outputs(shader); + + compute_dispatch_threads_at_least(shader, domain.size); + + GPU_texture_unbind_all(); + GPU_texture_image_unbind_all(); + GPU_uniformbuf_unbind_all(); + GPU_shader_unbind(); +} + +StringRef ShaderOperation::get_output_identifier_from_output_socket(DOutputSocket output_socket) +{ + return output_sockets_to_output_identifiers_map_.lookup(output_socket); +} + +Map<std::string, DOutputSocket> &ShaderOperation::get_inputs_to_linked_outputs_map() +{ + return inputs_to_linked_outputs_map_; +} + +void ShaderOperation::compute_results_reference_counts(const Schedule &schedule) +{ + for (const auto &item : output_sockets_to_output_identifiers_map_.items()) { + const int reference_count = number_of_inputs_linked_to_output_conditioned( + item.key, [&](DInputSocket input) { return schedule.contains(input.node()); }); + + get_result(item.value).set_initial_reference_count(reference_count); + } +} + +void ShaderOperation::bind_material_resources(GPUShader *shader) +{ + /* Bind the uniform buffer of the material if it exists. It may not exist if the GPU material has + * no uniforms. */ + GPUUniformBuf *ubo = GPU_material_uniform_buffer_get(material_); + if (ubo) { + GPU_uniformbuf_bind(ubo, GPU_shader_get_uniform_block_binding(shader, GPU_UBO_BLOCK_NAME)); + } + + /* Bind color band textures needed by curve and ramp nodes. 
*/ + ListBase textures = GPU_material_textures(material_); + LISTBASE_FOREACH (GPUMaterialTexture *, texture, &textures) { + if (texture->colorband) { + const int texture_image_unit = GPU_shader_get_texture_binding(shader, texture->sampler_name); + GPU_texture_bind(*texture->colorband, texture_image_unit); + } + } +} + +void ShaderOperation::bind_inputs(GPUShader *shader) +{ + /* Attributes represents the inputs of the operation and their names match those of the inputs of + * the operation as well as the corresponding texture samples in the shader. */ + ListBase attributes = GPU_material_attributes(material_); + LISTBASE_FOREACH (GPUMaterialAttribute *, attribute, &attributes) { + get_input(attribute->name).bind_as_texture(shader, attribute->name); + } +} + +void ShaderOperation::bind_outputs(GPUShader *shader) +{ + for (StringRefNull output_identifier : output_sockets_to_output_identifiers_map_.values()) { + get_result(output_identifier).bind_as_image(shader, output_identifier.c_str()); + } +} + +void ShaderOperation::construct_material(void *thunk, GPUMaterial *material) +{ + ShaderOperation *operation = static_cast<ShaderOperation *>(thunk); + for (DNode node : operation->compile_unit_) { + ShaderNode *shader_node = node->typeinfo()->get_compositor_shader_node(node); + operation->shader_nodes_.add_new(node, std::unique_ptr<ShaderNode>(shader_node)); + + operation->link_node_inputs(node, material); + + shader_node->compile(material); + + operation->populate_results_for_node(node, material); + } +} + +void ShaderOperation::link_node_inputs(DNode node, GPUMaterial *material) +{ + for (const InputSocketRef *input_ref : node->inputs()) { + const DInputSocket input{node.context(), input_ref}; + + /* Get the output linked to the input. If it is null, that means the input is unlinked. + * Unlinked inputs are linked by the node compile method, so skip this here. 
*/ + const DOutputSocket output = get_output_linked_to_input(input); + if (!output) { + continue; + } + + /* If the origin node is part of the shader operation, then the link is internal to the GPU + * material graph and is linked appropriately. */ + if (compile_unit_.contains(output.node())) { + link_node_input_internal(input, output); + continue; + } + + /* Otherwise, the origin node is not part of the shader operation, then the link is external to + * the GPU material graph and an input to the shader operation must be declared and linked to + * the node input. */ + link_node_input_external(input, output, material); + } +} + +void ShaderOperation::link_node_input_internal(DInputSocket input_socket, + DOutputSocket output_socket) +{ + ShaderNode &output_node = *shader_nodes_.lookup(output_socket.node()); + GPUNodeStack &output_stack = output_node.get_output(output_socket->identifier()); + + ShaderNode &input_node = *shader_nodes_.lookup(input_socket.node()); + GPUNodeStack &input_stack = input_node.get_input(input_socket->identifier()); + + input_stack.link = output_stack.link; +} + +void ShaderOperation::link_node_input_external(DInputSocket input_socket, + DOutputSocket output_socket, + GPUMaterial *material) +{ + + ShaderNode &node = *shader_nodes_.lookup(input_socket.node()); + GPUNodeStack &stack = node.get_input(input_socket->identifier()); + + /* An input was already declared for that same output socket, so no need to declare it again. */ + if (!output_to_material_attribute_map_.contains(output_socket)) { + declare_operation_input(input_socket, output_socket, material); + } + + /* Link the attribute representing the shader operation input corresponding to the given output + * socket. 
*/ + stack.link = output_to_material_attribute_map_.lookup(output_socket); +} + +static const char *get_set_function_name(ResultType type) +{ + switch (type) { + case ResultType::Float: + return "set_value"; + case ResultType::Vector: + return "set_rgb"; + case ResultType::Color: + return "set_rgba"; + } + + BLI_assert_unreachable(); + return nullptr; +} + +void ShaderOperation::declare_operation_input(DInputSocket input_socket, + DOutputSocket output_socket, + GPUMaterial *material) +{ + const int input_index = output_to_material_attribute_map_.size(); + std::string input_identifier = "input" + std::to_string(input_index); + + /* Declare the input descriptor for this input and prefer to declare its type to be the same as + * the type of the output socket because doing type conversion in the shader is much cheaper. */ + InputDescriptor input_descriptor = input_descriptor_from_input_socket(input_socket.socket_ref()); + input_descriptor.type = get_node_socket_result_type(output_socket.socket_ref()); + declare_input_descriptor(input_identifier, input_descriptor); + + /* Add a new GPU attribute representing an input to the GPU material. Instead of using the + * attribute directly, we link it to an appropriate set function and use its output link instead. + * This is needed because the `gputype` member of the attribute is only initialized if it is + * linked to a GPU node. */ + GPUNodeLink *attribute_link; + GPU_link(material, + get_set_function_name(input_descriptor.type), + GPU_attribute(material, CD_AUTO_FROM_NAME, input_identifier.c_str()), + &attribute_link); + + /* Map the output socket to the attribute that was created for it. */ + output_to_material_attribute_map_.add(output_socket, attribute_link); + + /* Map the identifier of the operation input to the output socket it is linked to. 
*/ + inputs_to_linked_outputs_map_.add_new(input_identifier, output_socket); +} + +void ShaderOperation::populate_results_for_node(DNode node, GPUMaterial *material) +{ + for (const OutputSocketRef *output_ref : node->outputs()) { + const DOutputSocket output{node.context(), output_ref}; + + /* If any of the nodes linked to the output are not part of the shader operation, then an + * output result needs to be populated for it. */ + const bool need_to_populate_result = is_output_linked_to_node_conditioned( + output, [&](DNode node) { return !compile_unit_.contains(node); }); + + if (need_to_populate_result) { + populate_operation_result(output, material); + } + } +} + +static const char *get_store_function_name(ResultType type) +{ + switch (type) { + case ResultType::Float: + return "node_compositor_store_output_float"; + case ResultType::Vector: + return "node_compositor_store_output_vector"; + case ResultType::Color: + return "node_compositor_store_output_color"; + } + + BLI_assert_unreachable(); + return nullptr; +} + +void ShaderOperation::populate_operation_result(DOutputSocket output_socket, GPUMaterial *material) +{ + const unsigned int output_id = output_sockets_to_output_identifiers_map_.size(); + std::string output_identifier = "output" + std::to_string(output_id); + + const ResultType result_type = get_node_socket_result_type(output_socket.socket_ref()); + const Result result = Result(result_type, texture_pool()); + populate_result(output_identifier, result); + + /* Map the output socket to the identifier of the newly populated result. */ + output_sockets_to_output_identifiers_map_.add_new(output_socket, output_identifier); + + ShaderNode &node = *shader_nodes_.lookup(output_socket.node()); + GPUNodeLink *output_link = node.get_output(output_socket->identifier()).link; + + /* Link the output node stack to an output storer storing in the appropriate result. 
The result + * is identified by its index in the operation and the index is encoded as a float to be passed + * to the GPU function. Additionally, create an output link from the storer node to declare as an + * output to the GPU material. This storer output link is a dummy link in the sense that its + * value is ignored since it is already written in the output, but it is used to track nodes that + * contribute to the output of the compositor node tree. */ + GPUNodeLink *storer_output_link; + GPUNodeLink *id_link = GPU_constant((float *)&output_id); + const char *store_function_name = get_store_function_name(result_type); + GPU_link(material, store_function_name, id_link, output_link, &storer_output_link); + + /* Declare the output link of the storer node as an output of the GPU material to help the GPU + * code generator to track the nodes that contribute to the output of the shader. */ + GPU_material_add_output_link_composite(material, storer_output_link); +} + +using namespace gpu::shader; + +void ShaderOperation::generate_code(void *thunk, + GPUMaterial *material, + GPUCodegenOutput *code_generator_output) +{ + ShaderOperation *operation = static_cast<ShaderOperation *>(thunk); + ShaderCreateInfo &shader_create_info = *reinterpret_cast<ShaderCreateInfo *>( + code_generator_output->create_info); + + shader_create_info.local_group_size(16, 16); + + /* The resources are added without explicit locations, so make sure it is done by the + * shader creator. */ + shader_create_info.auto_resource_location(true); + + /* Add implementation for implicit conversion operations inserted by the code generator. This + * file should include the functions [float|vec3|vec4]_from_[float|vec3|vec4]. */ + shader_create_info.typedef_source("gpu_shader_compositor_type_conversion.glsl"); + + /* The source shader is a compute shader with a main function that calls the dynamically + * generated evaluate function. 
The evaluate function includes the serialized GPU material graph + * preceded by code that initialized the inputs of the operation. Additionally, the storer + * functions that writes the outputs are defined outside the evaluate function. */ + shader_create_info.compute_source("gpu_shader_compositor_main.glsl"); + + /* The main function is emitted in the shader before the evaluate function, so the evaluate + * function needs to be forward declared here. */ + shader_create_info.typedef_source_generated += "void evaluate();\n"; + + operation->generate_code_for_outputs(shader_create_info); + + shader_create_info.compute_source_generated += "void evaluate()\n{\n"; + + operation->generate_code_for_inputs(material, shader_create_info); + + shader_create_info.compute_source_generated += code_generator_output->composite; + + shader_create_info.compute_source_generated += "}\n"; +} + +static eGPUTextureFormat texture_format_from_result_type(ResultType type) +{ + switch (type) { + case ResultType::Float: + return GPU_R16F; + case ResultType::Vector: + return GPU_RGBA16F; + case ResultType::Color: + return GPU_RGBA16F; + } + + BLI_assert_unreachable(); + return GPU_RGBA16F; +} + +/* Texture storers in the shader always take a vec4 as an argument, so encode each type in a vec4 + * appropriately. 
*/ +static const char *glsl_store_expression_from_result_type(ResultType type) +{ + switch (type) { + case ResultType::Float: + return "vec4(value)"; + case ResultType::Vector: + return "vec4(vector, 0.0)"; + case ResultType::Color: + return "color"; + } + + BLI_assert_unreachable(); + return nullptr; +} + +void ShaderOperation::generate_code_for_outputs(ShaderCreateInfo &shader_create_info) +{ + const std::string store_float_function_header = "void store_float(const uint id, float value)"; + const std::string store_vector_function_header = "void store_vector(const uint id, vec3 vector)"; + const std::string store_color_function_header = "void store_color(const uint id, vec4 color)"; + + /* The store functions are used by the node_compositor_store_output_[float|vector|color] + * functions but are only defined later as part of the compute source, so they need to be forward + * declared. */ + shader_create_info.typedef_source_generated += store_float_function_header + ";\n"; + shader_create_info.typedef_source_generated += store_vector_function_header + ";\n"; + shader_create_info.typedef_source_generated += store_color_function_header + ";\n"; + + /* Each of the store functions is essentially a single switch case on the given ID, so start by + * opening the function with a curly bracket followed by opening a switch statement in each of + * the functions. 
*/ + std::stringstream store_float_function; + std::stringstream store_vector_function; + std::stringstream store_color_function; + const std::string store_function_start = "\n{\n switch (id) {\n"; + store_float_function << store_float_function_header << store_function_start; + store_vector_function << store_vector_function_header << store_function_start; + store_color_function << store_color_function_header << store_function_start; + + for (StringRefNull output_identifier : output_sockets_to_output_identifiers_map_.values()) { + const Result &result = get_result(output_identifier); + + /* Add a write-only image for this output where its values will be written. */ + shader_create_info.image(0, + texture_format_from_result_type(result.type()), + Qualifier::WRITE, + ImageType::FLOAT_2D, + output_identifier, + Frequency::BATCH); + + /* Add a case for the index of this output followed by a break statement. */ + std::stringstream case_code; + const std::string store_expression = glsl_store_expression_from_result_type(result.type()); + const std::string texel = ", ivec2(gl_GlobalInvocationID.xy), "; + case_code << " case " << StringRef(output_identifier).drop_known_prefix("output") << ":\n" + << " imageStore(" << output_identifier << texel << store_expression << ");\n" + << " break;\n"; + + /* Only add the case to the function with the matching type. */ + switch (result.type()) { + case ResultType::Float: + store_float_function << case_code.str(); + break; + case ResultType::Vector: + store_vector_function << case_code.str(); + break; + case ResultType::Color: + store_color_function << case_code.str(); + break; + } + } + + /* Close the previously opened switch statement as well as the function itself. 
*/ + const std::string store_function_end = " }\n}\n\n"; + store_float_function << store_function_end; + store_vector_function << store_function_end; + store_color_function << store_function_end; + + shader_create_info.compute_source_generated += store_float_function.str() + + store_vector_function.str() + + store_color_function.str(); +} + +static const char *glsl_type_from_result_type(ResultType type) +{ + switch (type) { + case ResultType::Float: + return "float"; + case ResultType::Vector: + return "vec3"; + case ResultType::Color: + return "vec4"; + } + + BLI_assert_unreachable(); + return nullptr; +} + +/* Texture loaders in the shader always return a vec4, so a swizzle is needed to retrieve the + * actual value for each type. */ +static const char *glsl_swizzle_from_result_type(ResultType type) +{ + switch (type) { + case ResultType::Float: + return "x"; + case ResultType::Vector: + return "xyz"; + case ResultType::Color: + return "rgba"; + } + + BLI_assert_unreachable(); + return nullptr; +} + +void ShaderOperation::generate_code_for_inputs(GPUMaterial *material, + ShaderCreateInfo &shader_create_info) +{ + /* The attributes of the GPU material represents the inputs of the operation. */ + ListBase attributes = GPU_material_attributes(material); + + /* Add a texture sampler for each of the inputs with the same name as the attribute. */ + LISTBASE_FOREACH (GPUMaterialAttribute *, attribute, &attributes) { + shader_create_info.sampler(0, ImageType::FLOAT_2D, attribute->name, Frequency::BATCH); + } + + /* Declare a struct called var_attrs that includes an appropriately typed member for each of the + * inputs. The names of the members should be the letter v followed by the ID of the attribute + * corresponding to the input. Such names are expected by the code generator. 
*/ + std::stringstream declare_attributes; + declare_attributes << "struct {\n"; + LISTBASE_FOREACH (GPUMaterialAttribute *, attribute, &attributes) { + const InputDescriptor &input_descriptor = get_input_descriptor(attribute->name); + const std::string type = glsl_type_from_result_type(input_descriptor.type); + declare_attributes << " " << type << " v" << attribute->id << ";\n"; + } + declare_attributes << "} var_attrs;\n\n"; + + shader_create_info.compute_source_generated += declare_attributes.str(); + + /* The texture loader utilities are needed to sample the input textures and initialize the + * attributes. */ + shader_create_info.typedef_source("gpu_shader_compositor_texture_utilities.glsl"); + + /* Initialize each member of the previously declared struct by loading its corresponding texture + * with an appropriate swizzle for its type. */ + std::stringstream initialize_attributes; + LISTBASE_FOREACH (GPUMaterialAttribute *, attribute, &attributes) { + const InputDescriptor &input_descriptor = get_input_descriptor(attribute->name); + const std::string swizzle = glsl_swizzle_from_result_type(input_descriptor.type); + initialize_attributes << "var_attrs.v" << attribute->id << " = " + << "texture_load(" << attribute->name + << ", ivec2(gl_GlobalInvocationID.xy))." 
<< swizzle << ";\n"; + } + initialize_attributes << "\n"; + + shader_create_info.compute_source_generated += initialize_attributes.str(); +} + +} // namespace blender::realtime_compositor diff --git a/source/blender/compositor/realtime_compositor/intern/simple_operation.cc b/source/blender/compositor/realtime_compositor/intern/simple_operation.cc new file mode 100644 index 00000000000..d55a20e5c54 --- /dev/null +++ b/source/blender/compositor/realtime_compositor/intern/simple_operation.cc @@ -0,0 +1,55 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ + +#include "COM_input_descriptor.hh" +#include "COM_operation.hh" +#include "COM_result.hh" +#include "COM_simple_operation.hh" + +namespace blender::realtime_compositor { + +const StringRef SimpleOperation::input_identifier_ = StringRef("Input"); +const StringRef SimpleOperation::output_identifier_ = StringRef("Output"); + +Result &SimpleOperation::get_result() +{ + return Operation::get_result(output_identifier_); +} + +void SimpleOperation::map_input_to_result(Result *result) +{ + Operation::map_input_to_result(input_identifier_, result); +} + +void SimpleOperation::add_and_evaluate_input_processors() +{ +} + +Result &SimpleOperation::get_input() +{ + return Operation::get_input(input_identifier_); +} + +void SimpleOperation::switch_result_mapped_to_input(Result *result) +{ + Operation::switch_result_mapped_to_input(input_identifier_, result); +} + +void SimpleOperation::populate_result(Result result) +{ + Operation::populate_result(output_identifier_, result); + + /* The result of a simple operation is guaranteed to have a single user. 
*/ + get_result().set_initial_reference_count(1); +} + +void SimpleOperation::declare_input_descriptor(InputDescriptor descriptor) +{ + Operation::declare_input_descriptor(input_identifier_, descriptor); +} + +InputDescriptor &SimpleOperation::get_input_descriptor() +{ + return Operation::get_input_descriptor(input_identifier_); +} + +} // namespace blender::realtime_compositor diff --git a/source/blender/compositor/realtime_compositor/intern/static_shader_manager.cc b/source/blender/compositor/realtime_compositor/intern/static_shader_manager.cc new file mode 100644 index 00000000000..c9c8a056f87 --- /dev/null +++ b/source/blender/compositor/realtime_compositor/intern/static_shader_manager.cc @@ -0,0 +1,24 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ + +#include "GPU_shader.h" + +#include "COM_static_shader_manager.hh" + +namespace blender::realtime_compositor { + +StaticShaderManager::~StaticShaderManager() +{ + for (GPUShader *shader : shaders_.values()) { + GPU_shader_free(shader); + } +} + +GPUShader *StaticShaderManager::get(const char *info_name) +{ + /* If a shader with the same info name already exists in the manager, return it, otherwise, + * create a new shader from the info name and return it. 
*/ + return shaders_.lookup_or_add_cb( + info_name, [info_name]() { return GPU_shader_create_from_info_name(info_name); }); +} + +} // namespace blender::realtime_compositor diff --git a/source/blender/compositor/realtime_compositor/intern/texture_pool.cc b/source/blender/compositor/realtime_compositor/intern/texture_pool.cc new file mode 100644 index 00000000000..1568970a030 --- /dev/null +++ b/source/blender/compositor/realtime_compositor/intern/texture_pool.cc @@ -0,0 +1,84 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ + +#include <cstdint> + +#include "BLI_hash.hh" +#include "BLI_map.hh" +#include "BLI_math_vec_types.hh" +#include "BLI_vector.hh" + +#include "GPU_texture.h" + +#include "COM_texture_pool.hh" + +namespace blender::realtime_compositor { + +/* -------------------------------------------------------------------- + * Texture Pool Key. + */ + +TexturePoolKey::TexturePoolKey(int2 size, eGPUTextureFormat format) : size(size), format(format) +{ +} + +TexturePoolKey::TexturePoolKey(const GPUTexture *texture) +{ + size = int2(GPU_texture_width(texture), GPU_texture_height(texture)); + format = GPU_texture_format(texture); +} + +uint64_t TexturePoolKey::hash() const +{ + return get_default_hash_3(size.x, size.y, format); +} + +bool operator==(const TexturePoolKey &a, const TexturePoolKey &b) +{ + return a.size == b.size && a.format == b.format; +} + +/* -------------------------------------------------------------------- + * Texture Pool. + */ + +GPUTexture *TexturePool::acquire(int2 size, eGPUTextureFormat format) +{ + /* Check if there is an available texture with the required specification, and if one exists, + * return it. */ + const TexturePoolKey key = TexturePoolKey(size, format); + Vector<GPUTexture *> &available_textures = textures_.lookup_or_add_default(key); + if (!available_textures.is_empty()) { + return available_textures.pop_last(); + } + + /* Otherwise, allocate a new texture. 
*/ + return allocate_texture(size, format); +} + +GPUTexture *TexturePool::acquire_color(int2 size) +{ + return acquire(size, GPU_RGBA16F); +} + +GPUTexture *TexturePool::acquire_vector(int2 size) +{ + /* Vectors are stored in RGBA textures because RGB textures have limited support. */ + return acquire(size, GPU_RGBA16F); +} + +GPUTexture *TexturePool::acquire_float(int2 size) +{ + return acquire(size, GPU_R16F); +} + +void TexturePool::release(GPUTexture *texture) +{ + textures_.lookup(TexturePoolKey(texture)).append(texture); +} + +void TexturePool::reset() +{ + textures_.clear(); +} + +} // namespace blender::realtime_compositor diff --git a/source/blender/compositor/realtime_compositor/intern/utilities.cc b/source/blender/compositor/realtime_compositor/intern/utilities.cc new file mode 100644 index 00000000000..169ba70e9eb --- /dev/null +++ b/source/blender/compositor/realtime_compositor/intern/utilities.cc @@ -0,0 +1,134 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ + +#include "BLI_assert.h" +#include "BLI_function_ref.hh" +#include "BLI_math_vec_types.hh" +#include "BLI_math_vector.hh" +#include "BLI_utildefines.h" + +#include "DNA_node_types.h" + +#include "NOD_derived_node_tree.hh" +#include "NOD_node_declaration.hh" + +#include "GPU_compute.h" +#include "GPU_shader.h" + +#include "COM_operation.hh" +#include "COM_result.hh" +#include "COM_utilities.hh" + +namespace blender::realtime_compositor { + +using namespace nodes::derived_node_tree_types; +using TargetSocketPathInfo = DOutputSocket::TargetSocketPathInfo; + +DSocket get_input_origin_socket(DInputSocket input) +{ + /* The input is unlinked. Return the socket itself. */ + if (input->logically_linked_sockets().is_empty()) { + return input; + } + + /* Only a single origin socket is guaranteed to exist. 
*/ + DSocket socket; + input.foreach_origin_socket([&](const DSocket origin) { socket = origin; }); + return socket; +} + +DOutputSocket get_output_linked_to_input(DInputSocket input) +{ + /* Get the origin socket of this input, which will be an output socket if the input is linked + * to an output. */ + const DSocket origin = get_input_origin_socket(input); + + /* If the origin socket is an input, that means the input is unlinked, so return a null output + * socket. */ + if (origin->is_input()) { + return DOutputSocket(); + } + + /* Now that we know the origin is an output, return a derived output from it. */ + return DOutputSocket(origin); +} + +ResultType get_node_socket_result_type(const SocketRef *socket) +{ + switch (socket->bsocket()->type) { + case SOCK_FLOAT: + return ResultType::Float; + case SOCK_VECTOR: + return ResultType::Vector; + case SOCK_RGBA: + return ResultType::Color; + default: + BLI_assert_unreachable(); + return ResultType::Float; + } +} + +bool is_output_linked_to_node_conditioned(DOutputSocket output, FunctionRef<bool(DNode)> condition) +{ + bool condition_satisfied = false; + output.foreach_target_socket( + [&](DInputSocket target, const TargetSocketPathInfo &UNUSED(path_info)) { + if (condition(target.node())) { + condition_satisfied = true; + return; + } + }); + return condition_satisfied; +} + +int number_of_inputs_linked_to_output_conditioned(DOutputSocket output, + FunctionRef<bool(DInputSocket)> condition) +{ + int count = 0; + output.foreach_target_socket( + [&](DInputSocket target, const TargetSocketPathInfo &UNUSED(path_info)) { + if (condition(target)) { + count++; + } + }); + return count; +} + +bool is_shader_node(DNode node) +{ + return node->typeinfo()->get_compositor_shader_node; +} + +bool is_node_supported(DNode node) +{ + return node->typeinfo()->get_compositor_operation || + node->typeinfo()->get_compositor_shader_node; +} + +InputDescriptor input_descriptor_from_input_socket(const InputSocketRef *socket) +{ + using 
namespace nodes; + InputDescriptor input_descriptor; + input_descriptor.type = get_node_socket_result_type(socket); + const NodeDeclaration *node_declaration = socket->node().declaration(); + /* Not every node have a declaration, in which case, we assume the default values for the rest of + * the properties. */ + if (!node_declaration) { + return input_descriptor; + } + const SocketDeclarationPtr &socket_declaration = node_declaration->inputs()[socket->index()]; + input_descriptor.domain_priority = socket_declaration->compositor_domain_priority(); + input_descriptor.expects_single_value = socket_declaration->compositor_expects_single_value(); + return input_descriptor; +} + +void compute_dispatch_threads_at_least(GPUShader *shader, int2 threads_range, int2 local_size) +{ + /* If the threads range is divisible by the local size, dispatch the number of needed groups, + * which is their division. If it is not divisible, then dispatch an extra group to cover the + * remaining invocations, which means the actual threads range of the dispatch will be a bit + * larger than the given one. 
*/ + const int2 groups_to_dispatch = math::divide_ceil(threads_range, local_size); + GPU_compute_dispatch(shader, groups_to_dispatch.x, groups_to_dispatch.y, 1); +} + +} // namespace blender::realtime_compositor diff --git a/source/blender/depsgraph/intern/builder/deg_builder.cc b/source/blender/depsgraph/intern/builder/deg_builder.cc index 5353f71685c..097c377ece4 100644 --- a/source/blender/depsgraph/intern/builder/deg_builder.cc +++ b/source/blender/depsgraph/intern/builder/deg_builder.cc @@ -13,6 +13,7 @@ #include "DNA_anim_types.h" #include "DNA_armature_types.h" #include "DNA_layer_types.h" +#include "DNA_modifier_types.h" #include "DNA_object_types.h" #include "BLI_stack.h" @@ -95,6 +96,24 @@ bool DepsgraphBuilder::is_object_visibility_animated(const Object *object) return cache_->isPropertyAnimated(&object->id, property_id); } +bool DepsgraphBuilder::is_modifier_visibility_animated(const Object *object, + const ModifierData *modifier) +{ + AnimatedPropertyID property_id; + if (graph_->mode == DAG_EVAL_VIEWPORT) { + property_id = AnimatedPropertyID( + &object->id, &RNA_Modifier, (void *)modifier, "show_viewport"); + } + else if (graph_->mode == DAG_EVAL_RENDER) { + property_id = AnimatedPropertyID(&object->id, &RNA_Modifier, (void *)modifier, "show_render"); + } + else { + BLI_assert_msg(0, "Unknown evaluation mode."); + return false; + } + return cache_->isPropertyAnimated(&object->id, property_id); +} + bool DepsgraphBuilder::check_pchan_has_bbone(const Object *object, const bPoseChannel *pchan) { BLI_assert(object->type == OB_ARMATURE); diff --git a/source/blender/depsgraph/intern/builder/deg_builder.h b/source/blender/depsgraph/intern/builder/deg_builder.h index c44e5fd5f4d..5d043f1fd3a 100644 --- a/source/blender/depsgraph/intern/builder/deg_builder.h +++ b/source/blender/depsgraph/intern/builder/deg_builder.h @@ -10,6 +10,7 @@ struct Base; struct ID; struct Main; +struct ModifierData; struct Object; struct bPoseChannel; @@ -25,6 +26,7 @@ class 
DepsgraphBuilder { virtual bool need_pull_base_into_graph(const Base *base); virtual bool is_object_visibility_animated(const Object *object); + virtual bool is_modifier_visibility_animated(const Object *object, const ModifierData *modifier); virtual bool check_pchan_has_bbone(const Object *object, const bPoseChannel *pchan); virtual bool check_pchan_has_bbone_segments(const Object *object, const bPoseChannel *pchan); diff --git a/source/blender/depsgraph/intern/builder/deg_builder_nodes.cc b/source/blender/depsgraph/intern/builder/deg_builder_nodes.cc index be087c0b2d4..dd62a6cdea2 100644 --- a/source/blender/depsgraph/intern/builder/deg_builder_nodes.cc +++ b/source/blender/depsgraph/intern/builder/deg_builder_nodes.cc @@ -769,11 +769,7 @@ void DepsgraphNodeBuilder::build_object(int base_index, build_object(-1, object->parent, DEG_ID_LINKED_INDIRECTLY, is_visible); } /* Modifiers. */ - if (object->modifiers.first != nullptr) { - BuilderWalkUserData data; - data.builder = this; - BKE_modifiers_foreach_ID_link(object, modifier_walk, &data); - } + build_object_modifiers(object); /* Grease Pencil Modifiers. */ if (object->greasepencil_modifiers.first != nullptr) { BuilderWalkUserData data; @@ -877,6 +873,44 @@ void DepsgraphNodeBuilder::build_object_instance_collection(Object *object, bool is_parent_collection_visible_ = is_current_parent_collection_visible; } +void DepsgraphNodeBuilder::build_object_modifiers(Object *object) +{ + if (BLI_listbase_is_empty(&object->modifiers)) { + return; + } + + const ModifierMode modifier_mode = (graph_->mode == DAG_EVAL_VIEWPORT) ? 
eModifierMode_Realtime : + eModifierMode_Render; + + IDNode *id_node = find_id_node(&object->id); + + add_operation_node(&object->id, + NodeType::GEOMETRY, + OperationCode::VISIBILITY, + [id_node](::Depsgraph *depsgraph) { + deg_evaluate_object_modifiers_mode_node_visibility(depsgraph, id_node); + }); + + LISTBASE_FOREACH (ModifierData *, modifier, &object->modifiers) { + OperationNode *modifier_node = add_operation_node( + &object->id, NodeType::GEOMETRY, OperationCode::MODIFIER, nullptr, modifier->name); + + /* Mute modifier mode if the modifier is not enabled for the dependency graph mode. + * This handles static (non-animated) mode of the modifier. */ + if ((modifier->mode & modifier_mode) == 0) { + modifier_node->flag |= DEPSOP_FLAG_MUTE; + } + + if (is_modifier_visibility_animated(object, modifier)) { + graph_->has_animated_visibility = true; + } + } + + BuilderWalkUserData data; + data.builder = this; + BKE_modifiers_foreach_ID_link(object, modifier_walk, &data); +} + void DepsgraphNodeBuilder::build_object_data(Object *object) { if (object->data == nullptr) { diff --git a/source/blender/depsgraph/intern/builder/deg_builder_nodes.h b/source/blender/depsgraph/intern/builder/deg_builder_nodes.h index 18e28311132..d5ac601ebff 100644 --- a/source/blender/depsgraph/intern/builder/deg_builder_nodes.h +++ b/source/blender/depsgraph/intern/builder/deg_builder_nodes.h @@ -174,6 +174,7 @@ class DepsgraphNodeBuilder : public DepsgraphBuilder { virtual void build_object_flags(int base_index, Object *object, eDepsNode_LinkedState_Type linked_state); + virtual void build_object_modifiers(Object *object); virtual void build_object_data(Object *object); virtual void build_object_data_camera(Object *object); virtual void build_object_data_geometry(Object *object); diff --git a/source/blender/depsgraph/intern/builder/deg_builder_relations.cc b/source/blender/depsgraph/intern/builder/deg_builder_relations.cc index fc5e5189e82..d6ee1286fc4 100644 --- 
a/source/blender/depsgraph/intern/builder/deg_builder_relations.cc +++ b/source/blender/depsgraph/intern/builder/deg_builder_relations.cc @@ -298,7 +298,8 @@ void DepsgraphRelationBuilder::add_depends_on_transform_relation(const DepsNodeH { IDNode *id_node = handle->node->owner->owner; ID *id = id_node->id_orig; - ComponentKey geometry_key(id, NodeType::GEOMETRY); + const OperationKey geometry_key( + id, NodeType::GEOMETRY, OperationCode::MODIFIER, handle->node->name.c_str()); /* Wire up the actual relation. */ add_depends_on_transform_relation(id, geometry_key, description); } @@ -718,11 +719,7 @@ void DepsgraphRelationBuilder::build_object(Object *object) } /* Modifiers. */ - if (object->modifiers.first != nullptr) { - BuilderWalkUserData data; - data.builder = this; - BKE_modifiers_foreach_ID_link(object, modifier_walk, &data); - } + build_object_modifiers(object); /* Grease Pencil Modifiers. */ if (object->greasepencil_modifiers.first != nullptr) { @@ -870,6 +867,63 @@ void DepsgraphRelationBuilder::build_object_layer_component_relations(Object *ob add_relation(object_from_layer_exit_key, synchronize_key, "Synchronize to Original"); } +void DepsgraphRelationBuilder::build_object_modifiers(Object *object) +{ + if (BLI_listbase_is_empty(&object->modifiers)) { + return; + } + + const OperationKey eval_init_key( + &object->id, NodeType::GEOMETRY, OperationCode::GEOMETRY_EVAL_INIT); + const OperationKey eval_key(&object->id, NodeType::GEOMETRY, OperationCode::GEOMETRY_EVAL); + + const ComponentKey object_visibility_key(&object->id, NodeType::VISIBILITY); + const OperationKey modifier_visibility_key( + &object->id, NodeType::GEOMETRY, OperationCode::VISIBILITY); + add_relation(modifier_visibility_key, + object_visibility_key, + "modifier -> object visibility", + RELATION_NO_VISIBILITY_CHANGE); + + add_relation(modifier_visibility_key, eval_key, "modifier visibility -> geometry eval"); + + ModifierUpdateDepsgraphContext ctx = {}; + ctx.scene = scene_; + ctx.object = 
object; + + OperationKey previous_key = eval_init_key; + LISTBASE_FOREACH (ModifierData *, modifier, &object->modifiers) { + const OperationKey modifier_key( + &object->id, NodeType::GEOMETRY, OperationCode::MODIFIER, modifier->name); + + /* Relation for the modifier stack chain. */ + add_relation(previous_key, modifier_key, "Modifier"); + + const ModifierTypeInfo *mti = BKE_modifier_get_info((ModifierType)modifier->type); + if (mti->updateDepsgraph) { + const BuilderStack::ScopedEntry stack_entry = stack_.trace(*modifier); + + DepsNodeHandle handle = create_node_handle(modifier_key); + ctx.node = reinterpret_cast<::DepsNodeHandle *>(&handle); + mti->updateDepsgraph(modifier, &ctx); + } + + /* Time dependency. */ + if (BKE_modifier_depends_ontime(scene_, modifier)) { + const TimeSourceKey time_src_key; + add_relation(time_src_key, modifier_key, "Time Source -> Modifier"); + } + + previous_key = modifier_key; + } + add_relation(previous_key, eval_key, "modifier stack order"); + + /* Build IDs referenced by the modifiers. */ + BuilderWalkUserData data; + data.builder = this; + BKE_modifiers_foreach_ID_link(object, modifier_walk, &data); +} + void DepsgraphRelationBuilder::build_object_data(Object *object) { if (object->data == nullptr) { @@ -2193,26 +2247,6 @@ void DepsgraphRelationBuilder::build_object_data_geometry(Object *object) * evaluated prior to Scene's CoW is ready. 
*/ OperationKey scene_key(&scene_->id, NodeType::PARAMETERS, OperationCode::SCENE_EVAL); add_relation(scene_key, obdata_ubereval_key, "CoW Relation", RELATION_FLAG_NO_FLUSH); - /* Modifiers */ - if (object->modifiers.first != nullptr) { - ModifierUpdateDepsgraphContext ctx = {}; - ctx.scene = scene_; - ctx.object = object; - LISTBASE_FOREACH (ModifierData *, md, &object->modifiers) { - const ModifierTypeInfo *mti = BKE_modifier_get_info((ModifierType)md->type); - if (mti->updateDepsgraph) { - const BuilderStack::ScopedEntry stack_entry = stack_.trace(*md); - - DepsNodeHandle handle = create_node_handle(obdata_ubereval_key); - ctx.node = reinterpret_cast<::DepsNodeHandle *>(&handle); - mti->updateDepsgraph(md, &ctx); - } - if (BKE_modifier_depends_ontime(scene_, md)) { - TimeSourceKey time_src_key; - add_relation(time_src_key, obdata_ubereval_key, "Time Source -> Modifier"); - } - } - } /* Grease Pencil Modifiers. */ if (object->greasepencil_modifiers.first != nullptr) { ModifierUpdateDepsgraphContext ctx = {}; @@ -2256,8 +2290,13 @@ void DepsgraphRelationBuilder::build_object_data_geometry(Object *object) if (ELEM(object->type, OB_MESH, OB_CURVES_LEGACY, OB_LATTICE)) { // add geometry collider relations } - /* Make sure uber update is the last in the dependencies. */ - add_relation(geom_init_key, obdata_ubereval_key, "Object Geometry UberEval"); + /* Make sure uber update is the last in the dependencies. + * Only do it here unless there are modifiers. This avoids transitive relations. 
*/ + if (BLI_listbase_is_empty(&object->modifiers)) { + OperationKey obdata_ubereval_key( + &object->id, NodeType::GEOMETRY, OperationCode::GEOMETRY_EVAL); + add_relation(geom_init_key, obdata_ubereval_key, "Object Geometry UberEval"); + } if (object->type == OB_MBALL) { Object *mom = BKE_mball_basis_find(scene_, object); ComponentKey mom_geom_key(&mom->id, NodeType::GEOMETRY); diff --git a/source/blender/depsgraph/intern/builder/deg_builder_relations.h b/source/blender/depsgraph/intern/builder/deg_builder_relations.h index db237303027..7d3a0fd9217 100644 --- a/source/blender/depsgraph/intern/builder/deg_builder_relations.h +++ b/source/blender/depsgraph/intern/builder/deg_builder_relations.h @@ -265,6 +265,7 @@ class DepsgraphRelationBuilder : public DepsgraphBuilder { virtual void build_object(Object *object); virtual void build_object_from_view_layer_base(Object *object); virtual void build_object_layer_component_relations(Object *object); + virtual void build_object_modifiers(Object *object); virtual void build_object_data(Object *object); virtual void build_object_data_camera(Object *object); virtual void build_object_data_geometry(Object *object); diff --git a/source/blender/depsgraph/intern/builder/deg_builder_rna.cc b/source/blender/depsgraph/intern/builder/deg_builder_rna.cc index 5202ada5408..d94746fb7fa 100644 --- a/source/blender/depsgraph/intern/builder/deg_builder_rna.cc +++ b/source/blender/depsgraph/intern/builder/deg_builder_rna.cc @@ -225,6 +225,10 @@ RNANodeIdentifier RNANodeQuery::construct_node_identifier(const PointerRNA *ptr, } return node_identifier; } + + const char *prop_identifier = prop != nullptr ? 
RNA_property_identifier((PropertyRNA *)prop) : + ""; + if (RNA_struct_is_a(ptr->type, &RNA_Constraint)) { const Object *object = reinterpret_cast<const Object *>(ptr->owner_id); const bConstraint *constraint = static_cast<const bConstraint *>(ptr->data); @@ -264,6 +268,13 @@ RNANodeIdentifier RNANodeQuery::construct_node_identifier(const PointerRNA *ptr, return node_identifier; } } + else if (RNA_struct_is_a(ptr->type, &RNA_Modifier) && + (contains(prop_identifier, "show_viewport") || + contains(prop_identifier, "show_render"))) { + node_identifier.type = NodeType::GEOMETRY; + node_identifier.operation_code = OperationCode::VISIBILITY; + return node_identifier; + } else if (RNA_struct_is_a(ptr->type, &RNA_Mesh) || RNA_struct_is_a(ptr->type, &RNA_Modifier) || RNA_struct_is_a(ptr->type, &RNA_GpencilModifier) || RNA_struct_is_a(ptr->type, &RNA_Spline) || RNA_struct_is_a(ptr->type, &RNA_TextBox) || @@ -290,7 +301,6 @@ RNANodeIdentifier RNANodeQuery::construct_node_identifier(const PointerRNA *ptr, else if (ptr->type == &RNA_Object) { /* Transforms props? */ if (prop != nullptr) { - const char *prop_identifier = RNA_property_identifier((PropertyRNA *)prop); /* TODO(sergey): How to optimize this? */ if (contains(prop_identifier, "location") || contains(prop_identifier, "matrix_basis") || contains(prop_identifier, "matrix_channel") || diff --git a/source/blender/depsgraph/intern/depsgraph_relation.h b/source/blender/depsgraph/intern/depsgraph_relation.h index 1bacb9abfa6..3f316fa84e8 100644 --- a/source/blender/depsgraph/intern/depsgraph_relation.h +++ b/source/blender/depsgraph/intern/depsgraph_relation.h @@ -28,6 +28,8 @@ enum RelationFlag { RELATION_FLAG_GODMODE = (1 << 4), /* Relation will check existence before being added. */ RELATION_CHECK_BEFORE_ADD = (1 << 5), + /* The relation does not participate in visibility checks. 
*/ + RELATION_NO_VISIBILITY_CHANGE = (1 << 6), }; /* B depends on A (A -> B) */ diff --git a/source/blender/depsgraph/intern/eval/deg_eval.cc b/source/blender/depsgraph/intern/eval/deg_eval.cc index 9b2ce2bb707..cd0015ff717 100644 --- a/source/blender/depsgraph/intern/eval/deg_eval.cc +++ b/source/blender/depsgraph/intern/eval/deg_eval.cc @@ -437,7 +437,7 @@ void deg_evaluate_on_refresh(Depsgraph *graph) evaluate_graph_threaded_stage(&state, task_pool, EvaluationStage::COPY_ON_WRITE); - if (graph->has_animated_visibility) { + if (graph->has_animated_visibility || graph->need_update_nodes_visibility) { /* Update pending parents including only the ones which are affecting operations which are * affecting visibility. */ state.need_update_pending_parents = true; diff --git a/source/blender/depsgraph/intern/eval/deg_eval_visibility.cc b/source/blender/depsgraph/intern/eval/deg_eval_visibility.cc index 05f7631b0d4..7b6aec0a73c 100644 --- a/source/blender/depsgraph/intern/eval/deg_eval_visibility.cc +++ b/source/blender/depsgraph/intern/eval/deg_eval_visibility.cc @@ -8,6 +8,7 @@ #include "intern/eval/deg_eval_visibility.h" #include "DNA_layer_types.h" +#include "DNA_modifier_types.h" #include "DNA_object_types.h" #include "BLI_assert.h" @@ -47,6 +48,38 @@ void deg_evaluate_object_node_visibility(::Depsgraph *depsgraph, IDNode *id_node } } +void deg_evaluate_object_modifiers_mode_node_visibility(::Depsgraph *depsgraph, IDNode *id_node) +{ + BLI_assert(GS(id_node->id_cow->name) == ID_OB); + + Depsgraph *graph = reinterpret_cast<Depsgraph *>(depsgraph); + const Object *object = reinterpret_cast<const Object *>(id_node->id_cow); + + DEG_debug_print_eval(depsgraph, __func__, object->id.name, &object->id); + + if (BLI_listbase_is_empty(&object->modifiers)) { + return; + } + + const ModifierMode modifier_mode = (graph->mode == DAG_EVAL_VIEWPORT) ? 
eModifierMode_Realtime : + eModifierMode_Render; + + const ComponentNode *geometry_component = id_node->find_component(NodeType::GEOMETRY); + LISTBASE_FOREACH (ModifierData *, modifier, &object->modifiers) { + OperationNode *modifier_node = geometry_component->find_operation(OperationCode::MODIFIER, + modifier->name); + + const bool modifier_enabled = modifier->mode & modifier_mode; + const int mute_flag = modifier_enabled ? 0 : DEPSOP_FLAG_MUTE; + if ((modifier_node->flag & DEPSOP_FLAG_MUTE) != mute_flag) { + modifier_node->flag &= ~DEPSOP_FLAG_MUTE; + modifier_node->flag |= mute_flag; + + graph->need_update_nodes_visibility = true; + } + } +} + void deg_graph_flush_visibility_flags(Depsgraph *graph) { enum { @@ -116,10 +149,30 @@ void deg_graph_flush_visibility_flags(Depsgraph *graph) OperationNode *op_from = reinterpret_cast<OperationNode *>(rel->from); ComponentNode *comp_from = op_from->owner; + op_from->flag |= (op_to->flag & OperationFlag::DEPSOP_FLAG_AFFECTS_VISIBILITY); + + if (rel->flag & RELATION_NO_VISIBILITY_CHANGE) { + continue; + } + const bool target_possibly_affects_visible_id = comp_to->possibly_affects_visible_id; - const bool target_affects_visible_id = comp_to->affects_visible_id; - op_from->flag |= (op_to->flag & OperationFlag::DEPSOP_FLAG_AFFECTS_VISIBILITY); + bool target_affects_visible_id = comp_to->affects_visible_id; + + /* This is a bit arbitrary but the idea here is following: + * + * - When another object is used by a disabled modifier we do not want that object to + * be considered needed for evaluation. + * + * - However, we do not want to take mute flag during visibility propagation within the + * same object. Otherwise drivers and transform dependencies of the geometry component + * entry component might not be properly handled. + * + * This code works fine for muting modifiers, but might need tweaks when mute is used for + * something else. 
*/ + if (comp_from != comp_to && (op_to->flag & DEPSOP_FLAG_MUTE)) { + target_affects_visible_id = false; + } /* Visibility component forces all components of the current ID to be considered as * affecting directly visible. */ diff --git a/source/blender/depsgraph/intern/eval/deg_eval_visibility.h b/source/blender/depsgraph/intern/eval/deg_eval_visibility.h index 9e9db8ab34a..6544654f3cf 100644 --- a/source/blender/depsgraph/intern/eval/deg_eval_visibility.h +++ b/source/blender/depsgraph/intern/eval/deg_eval_visibility.h @@ -18,6 +18,9 @@ struct IDNode; * restriction flags. */ void deg_evaluate_object_node_visibility(::Depsgraph *depsgraph, IDNode *id_node); +/* Update node visibility flags based on actual modifiers mode flags. */ +void deg_evaluate_object_modifiers_mode_node_visibility(::Depsgraph *depsgraph, IDNode *id_node); + /* Flush both static and dynamic visibility flags from leaves up to the roots, making it possible * to know whether a node has affect on something (potentially) visible. */ void deg_graph_flush_visibility_flags(Depsgraph *graph); diff --git a/source/blender/depsgraph/intern/node/deg_node_operation.cc b/source/blender/depsgraph/intern/node/deg_node_operation.cc index de4bc0668cf..016af735fcf 100644 --- a/source/blender/depsgraph/intern/node/deg_node_operation.cc +++ b/source/blender/depsgraph/intern/node/deg_node_operation.cc @@ -84,6 +84,8 @@ const char *operationCodeAsString(OperationCode opcode) /* Geometry. */ case OperationCode::GEOMETRY_EVAL_INIT: return "GEOMETRY_EVAL_INIT"; + case OperationCode::MODIFIER: + return "MODIFIER"; case OperationCode::GEOMETRY_EVAL: return "GEOMETRY_EVAL"; case OperationCode::GEOMETRY_EVAL_DONE: @@ -225,6 +227,16 @@ void OperationNode::tag_update(Depsgraph *graph, eUpdateSource source) * the evaluated clues that evaluation needs to happen again. */ graph->add_entry_tag(this); + /* Enforce dynamic visibility code-path update. 
+ * This ensures visibility flags are consistently propagated throughout the dependency graph when + * there is no animated visibility in the graph. + * + * For example this ensures that graph is updated properly when manually toggling non-animated + * modifier visibility. */ + if (opcode == OperationCode::VISIBILITY) { + graph->need_update_nodes_visibility = true; + } + /* Tag for update, but also note that this was the source of an update. */ flag |= (DEPSOP_FLAG_NEEDS_UPDATE | DEPSOP_FLAG_DIRECTLY_MODIFIED); switch (source) { diff --git a/source/blender/depsgraph/intern/node/deg_node_operation.h b/source/blender/depsgraph/intern/node/deg_node_operation.h index 656b27550f6..cb3beb56556 100644 --- a/source/blender/depsgraph/intern/node/deg_node_operation.h +++ b/source/blender/depsgraph/intern/node/deg_node_operation.h @@ -84,6 +84,8 @@ enum class OperationCode { /* Initialize evaluation of the geometry. Is an entry operation of geometry * component. */ GEOMETRY_EVAL_INIT, + /* Modifier. */ + MODIFIER, /* Evaluate the whole geometry, including modifiers. */ GEOMETRY_EVAL, /* Evaluation of geometry is completely done. */ @@ -217,6 +219,9 @@ enum OperationFlag { /* The operation directly or indirectly affects ID node visibility. */ DEPSOP_FLAG_AFFECTS_VISIBILITY = (1 << 4), + /* Evaluation of the node is temporarily disabled. */ + DEPSOP_FLAG_MUTE = (1 << 5), + /* Set of flags which gets flushed along the relations. 
*/ DEPSOP_FLAG_FLUSH = (DEPSOP_FLAG_USER_MODIFIED), }; diff --git a/source/blender/draw/CMakeLists.txt b/source/blender/draw/CMakeLists.txt index ec57c302b31..391c3bb3512 100644 --- a/source/blender/draw/CMakeLists.txt +++ b/source/blender/draw/CMakeLists.txt @@ -23,6 +23,7 @@ set(INC ../nodes ../render ../render/intern + ../compositor/realtime_compositor ../windowmanager ../../../intern/atomic @@ -103,6 +104,7 @@ set(SRC intern/smaa_textures.c engines/basic/basic_engine.c engines/basic/basic_shader.c + engines/compositor/compositor_engine.cc engines/image/image_engine.cc engines/image/image_shader.cc engines/eevee/eevee_bloom.c @@ -231,6 +233,7 @@ set(SRC intern/smaa_textures.h engines/basic/basic_engine.h engines/basic/basic_private.h + engines/compositor/compositor_engine.h engines/eevee/eevee_engine.h engines/eevee/eevee_lightcache.h engines/eevee/eevee_lut.h @@ -262,6 +265,7 @@ set(SRC set(LIB bf_blenkernel bf_blenlib + bf_realtime_compositor bf_windowmanager ) diff --git a/source/blender/draw/engines/compositor/compositor_engine.cc b/source/blender/draw/engines/compositor/compositor_engine.cc new file mode 100644 index 00000000000..f36a59a4ce6 --- /dev/null +++ b/source/blender/draw/engines/compositor/compositor_engine.cc @@ -0,0 +1,203 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ + +#include "BLI_listbase.h" +#include "BLI_math_vec_types.hh" +#include "BLI_string_ref.hh" +#include "BLI_utildefines.h" + +#include "BLT_translation.h" + +#include "DNA_ID_enums.h" +#include "DNA_scene_types.h" + +#include "DEG_depsgraph_query.h" + +#include "DRW_render.h" + +#include "IMB_colormanagement.h" + +#include "COM_context.hh" +#include "COM_evaluator.hh" +#include "COM_texture_pool.hh" + +#include "GPU_texture.h" + +namespace blender::draw::compositor { + +class TexturePool : public realtime_compositor::TexturePool { + public: + GPUTexture *allocate_texture(int2 size, eGPUTextureFormat format) override + { + DrawEngineType *owner = (DrawEngineType *)this; + 
return DRW_texture_pool_query_2d(size.x, size.y, format, owner); + } +}; + +class Context : public realtime_compositor::Context { + private: + /* A pointer to the info message of the compositor engine. This is a char array of size + * GPU_INFO_SIZE. The message is cleared prior to updating or evaluating the compositor. */ + char *info_message_; + + public: + Context(realtime_compositor::TexturePool &texture_pool, char *info_message) + : realtime_compositor::Context(texture_pool), info_message_(info_message) + { + } + + const Scene *get_scene() const override + { + return DRW_context_state_get()->scene; + } + + int2 get_output_size() override + { + return int2(float2(DRW_viewport_size_get())); + } + + GPUTexture *get_output_texture() override + { + return DRW_viewport_texture_list_get()->color; + } + + GPUTexture *get_input_texture(int UNUSED(view_layer), eScenePassType UNUSED(pass_type)) override + { + return get_output_texture(); + } + + StringRef get_view_name() override + { + const SceneRenderView *view = static_cast<SceneRenderView *>( + BLI_findlink(&get_scene()->r.views, DRW_context_state_get()->v3d->multiview_eye)); + return view->name; + } + + void set_info_message(StringRef message) const override + { + message.copy(info_message_, GPU_INFO_SIZE); + } +}; + +class Engine { + private: + TexturePool texture_pool_; + Context context_; + realtime_compositor::Evaluator evaluator_; + /* Stores the viewport size at the time the last compositor evaluation happened. See the + * update_viewport_size method for more information. */ + int2 last_viewport_size_; + + public: + Engine(char *info_message) + : context_(texture_pool_, info_message), + evaluator_(context_, node_tree()), + last_viewport_size_(context_.get_output_size()) + { + } + + /* Update the viewport size and evaluate the compositor. 
*/ + void draw() + { + update_viewport_size(); + evaluator_.evaluate(); + } + + /* If the size of the viewport changed from the last time the compositor was evaluated, update + * the viewport size and reset the evaluator. That's because the evaluator compiles the node tree + * in a manner that is specifically optimized for the size of the viewport. This should be called + * before evaluating the compositor. */ + void update_viewport_size() + { + if (last_viewport_size_ == context_.get_output_size()) { + return; + } + + last_viewport_size_ = context_.get_output_size(); + + evaluator_.reset(); + } + + /* If the compositor node tree changed, reset the evaluator. */ + void update(const Depsgraph *depsgraph) + { + if (DEG_id_type_updated(depsgraph, ID_NT)) { + evaluator_.reset(); + } + } + + /* Get a reference to the compositor node tree. */ + static bNodeTree &node_tree() + { + return *DRW_context_state_get()->scene->nodetree; + } +}; + +} // namespace blender::draw::compositor + +using namespace blender::draw::compositor; + +struct COMPOSITOR_Data { + DrawEngineType *engine_type; + DRWViewportEmptyList *fbl; + DRWViewportEmptyList *txl; + DRWViewportEmptyList *psl; + DRWViewportEmptyList *stl; + Engine *instance_data; + char info[GPU_INFO_SIZE]; +}; + +static void compositor_engine_init(void *data) +{ + COMPOSITOR_Data *compositor_data = static_cast<COMPOSITOR_Data *>(data); + + if (!compositor_data->instance_data) { + compositor_data->instance_data = new Engine(compositor_data->info); + } +} + +static void compositor_engine_free(void *instance_data) +{ + Engine *engine = static_cast<Engine *>(instance_data); + delete engine; +} + +static void compositor_engine_draw(void *data) +{ + const COMPOSITOR_Data *compositor_data = static_cast<COMPOSITOR_Data *>(data); + compositor_data->instance_data->draw(); +} + +static void compositor_engine_update(void *data) +{ + COMPOSITOR_Data *compositor_data = static_cast<COMPOSITOR_Data *>(data); + + /* Clear any info message that 
was set in a previous update. */ + compositor_data->info[0] = '\0'; + + if (compositor_data->instance_data) { + compositor_data->instance_data->update(DRW_context_state_get()->depsgraph); + } +} + +extern "C" { + +static const DrawEngineDataSize compositor_data_size = DRW_VIEWPORT_DATA_SIZE(COMPOSITOR_Data); + +DrawEngineType draw_engine_compositor_type = { + nullptr, /* next */ + nullptr, /* prev */ + N_("Compositor"), /* idname */ + &compositor_data_size, /* vedata_size */ + &compositor_engine_init, /* engine_init */ + nullptr, /* engine_free */ + &compositor_engine_free, /* instance_free */ + nullptr, /* cache_init */ + nullptr, /* cache_populate */ + nullptr, /* cache_finish */ + &compositor_engine_draw, /* draw_scene */ + &compositor_engine_update, /* view_update */ + nullptr, /* id_update */ + nullptr, /* render_to_image */ + nullptr, /* store_metadata */ +}; +} diff --git a/source/blender/draw/engines/compositor/compositor_engine.h b/source/blender/draw/engines/compositor/compositor_engine.h new file mode 100644 index 00000000000..5de0de8a0b3 --- /dev/null +++ b/source/blender/draw/engines/compositor/compositor_engine.h @@ -0,0 +1,13 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ + +#pragma once + +#ifdef __cplusplus +extern "C" { +#endif + +extern DrawEngineType draw_engine_compositor_type; + +#ifdef __cplusplus +} +#endif diff --git a/source/blender/draw/intern/draw_debug.cc b/source/blender/draw/intern/draw_debug.cc index c610d7a2ffc..aaf18014143 100644 --- a/source/blender/draw/intern/draw_debug.cc +++ b/source/blender/draw/intern/draw_debug.cc @@ -599,7 +599,7 @@ void drw_debug_draw() if (!GPU_shader_storage_buffer_objects_support() || DST.debug == nullptr) { return; } - /* TODO(fclem): Convenience for now. Will have to move to DRWManager. */ + /* TODO(@fclem): Convenience for now. Will have to move to #DRWManager. 
*/ reinterpret_cast<blender::draw::DebugDraw *>(DST.debug)->display_to_view(); #endif } @@ -610,12 +610,12 @@ void drw_debug_draw() void drw_debug_init() { /* Module should not be used in release builds. */ - /* TODO(fclem): Hide the functions declarations without using ifdefs everywhere. */ + /* TODO(@fclem): Hide the functions declarations without using `ifdefs` everywhere. */ #ifdef DEBUG if (!GPU_shader_storage_buffer_objects_support()) { return; } - /* TODO(fclem): Convenience for now. Will have to move to DRWManager. */ + /* TODO(@fclem): Convenience for now. Will have to move to #DRWManager. */ if (DST.debug == nullptr) { DST.debug = reinterpret_cast<DRWDebugModule *>(new blender::draw::DebugDraw()); } diff --git a/source/blender/draw/intern/draw_debug.hh b/source/blender/draw/intern/draw_debug.hh index 4a2a367aef0..730b69ed954 100644 --- a/source/blender/draw/intern/draw_debug.hh +++ b/source/blender/draw/intern/draw_debug.hh @@ -98,7 +98,7 @@ class DebugDraw { void draw_matrix_as_bbox(float4x4 mat, const float4 color = {1, 0, 0, 1}); /** - * Will draw all debug shapes and text cached up until now to the current view / framebuffer. + * Will draw all debug shapes and text cached up until now to the current view / frame-buffer. * Draw buffers will be emptied and ready for new debug data. 
*/ void display_to_view(); diff --git a/source/blender/draw/intern/draw_manager.c b/source/blender/draw/intern/draw_manager.c index 63c7cc667cb..4693e5f8e20 100644 --- a/source/blender/draw/intern/draw_manager.c +++ b/source/blender/draw/intern/draw_manager.c @@ -43,6 +43,7 @@ #include "DNA_camera_types.h" #include "DNA_mesh_types.h" #include "DNA_meshdata_types.h" +#include "DNA_userdef_types.h" #include "DNA_world_types.h" #include "ED_gpencil.h" @@ -84,6 +85,7 @@ #include "draw_cache_impl.h" #include "engines/basic/basic_engine.h" +#include "engines/compositor/compositor_engine.h" #include "engines/eevee/eevee_engine.h" #include "engines/eevee_next/eevee_engine.h" #include "engines/external/external_engine.h" @@ -1214,6 +1216,31 @@ static void drw_engines_enable_editors(void) } } +static bool is_compositor_enabled(void) +{ + if (!U.experimental.use_realtime_compositor) { + return false; + } + + if (!(DST.draw_ctx.v3d->shading.flag & V3D_SHADING_COMPOSITOR)) { + return false; + } + + if (!(DST.draw_ctx.v3d->shading.type >= OB_MATERIAL)) { + return false; + } + + if (!DST.draw_ctx.scene->use_nodes) { + return false; + } + + if (!DST.draw_ctx.scene->nodetree) { + return false; + } + + return true; +} + static void drw_engines_enable(ViewLayer *UNUSED(view_layer), RenderEngineType *engine_type, bool gpencil_engine_needed) @@ -1226,6 +1253,11 @@ static void drw_engines_enable(ViewLayer *UNUSED(view_layer), if (gpencil_engine_needed && ((drawtype >= OB_SOLID) || !use_xray)) { use_drw_engine(&draw_engine_gpencil_type); } + + if (is_compositor_enabled()) { + use_drw_engine(&draw_engine_compositor_type); + } + drw_engines_enable_overlays(); #ifdef WITH_DRAW_DEBUG @@ -1597,7 +1629,6 @@ void DRW_draw_render_loop_ex(struct Depsgraph *depsgraph, GPUViewport *viewport, const bContext *evil_C) { - Scene *scene = DEG_get_evaluated_scene(depsgraph); ViewLayer *view_layer = DEG_get_evaluated_view_layer(depsgraph); RegionView3D *rv3d = region->regiondata; @@ -2948,6 +2979,7 @@ 
void DRW_engines_register(void) DRW_engine_register(&draw_engine_overlay_type); DRW_engine_register(&draw_engine_select_type); DRW_engine_register(&draw_engine_basic_type); + DRW_engine_register(&draw_engine_compositor_type); #ifdef WITH_DRAW_DEBUG DRW_engine_register(&draw_engine_debug_select_type); #endif diff --git a/source/blender/draw/intern/draw_manager_data.c b/source/blender/draw/intern/draw_manager_data.c index 2f6090b22c4..9d4a2c58ab6 100644 --- a/source/blender/draw/intern/draw_manager_data.c +++ b/source/blender/draw/intern/draw_manager_data.c @@ -47,8 +47,8 @@ /** * IMPORTANT: * In order to be able to write to the same print buffer sequentially, we add a barrier to allow - * multiple shader calls writting to the same buffer. - * However, this adds explicit synchronisation events which might change the rest of the + * multiple shader calls writing to the same buffer. + * However, this adds explicit synchronization events which might change the rest of the * application behavior and hide some bugs. If you know you are using shader debug print in only * one shader pass, you can comment this out to remove the aforementioned barrier. */ @@ -662,7 +662,7 @@ static void drw_call_obinfos_init(DRWObjectInfos *ob_infos, Object *ob) drw_call_calc_orco(ob, ob_infos->orcotexfac); /* Random float value. */ uint random = (DST.dupli_source) ? - DST.dupli_source->random_id : + DST.dupli_source->random_id : /* TODO(fclem): this is rather costly to do at runtime. Maybe we can * put it in ob->runtime and make depsgraph ensure it is up to date. */ BLI_hash_int_2d(BLI_hash_string(ob->id.name + 2), 0); @@ -1532,7 +1532,7 @@ static void drw_shgroup_init(DRWShadingGroup *shgroup, GPUShader *shader) drw_shgroup_uniform_create_ex( shgroup, debug_print_location, DRW_UNIFORM_STORAGE_BLOCK, buf, 0, 0, 1); # ifndef DISABLE_DEBUG_SHADER_PRINT_BARRIER - /* Add a barrier to allow multiple shader writting to the same buffer. 
*/ + /* Add a barrier to allow multiple shader writing to the same buffer. */ DRW_shgroup_barrier(shgroup, GPU_BARRIER_SHADER_STORAGE); # endif } diff --git a/source/blender/draw/intern/draw_shader_shared.h b/source/blender/draw/intern/draw_shader_shared.h index 0fe4f3fbbff..90a6475c42b 100644 --- a/source/blender/draw/intern/draw_shader_shared.h +++ b/source/blender/draw/intern/draw_shader_shared.h @@ -154,7 +154,7 @@ BLI_STATIC_ASSERT_ALIGN(DispatchCommand, 16) * But we use plain array in shader code instead because of driver issues. */ struct DRWDebugPrintBuffer { DrawCommand command; - /** Each character is encoded as 3 uchar with char_index, row and column position. */ + /** Each character is encoded as 3 `uchar` with char_index, row and column position. */ uint char_array[DRW_DEBUG_PRINT_MAX]; }; BLI_STATIC_ASSERT_ALIGN(DRWDebugPrintBuffer, 16) diff --git a/source/blender/draw/intern/shaders/draw_debug_info.hh b/source/blender/draw/intern/shaders/draw_debug_info.hh index 7fbbc858d61..893a5e537d9 100644 --- a/source/blender/draw/intern/shaders/draw_debug_info.hh +++ b/source/blender/draw/intern/shaders/draw_debug_info.hh @@ -1,3 +1,4 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ #include "gpu_shader_create_info.hh" diff --git a/source/blender/editors/curve/editcurve.c b/source/blender/editors/curve/editcurve.c index 2ff18599b02..164336c4b22 100644 --- a/source/blender/editors/curve/editcurve.c +++ b/source/blender/editors/curve/editcurve.c @@ -1975,7 +1975,7 @@ static int sel_to_copy_ints(const BPoint *bp, else if (not_full == -1) { not_full = selected_in_curr_leg; } - /* We have partialy selected leg in opposite dimension if condition is met. */ + /* We have partially selected leg in opposite dimension if condition is met. 
*/ else if (not_full != selected_in_curr_leg) { return -1; } diff --git a/source/blender/editors/include/ED_view3d.h b/source/blender/editors/include/ED_view3d.h index 7d31950c869..bb95ea97c1c 100644 --- a/source/blender/editors/include/ED_view3d.h +++ b/source/blender/editors/include/ED_view3d.h @@ -1171,7 +1171,7 @@ void ED_view3d_camera_lock_init(const struct Depsgraph *depsgraph, * * Apply the 3D Viewport transformation back to the camera object. * - * \return true if the camera is moved. + * \return true if the camera (or one of it's parents) was moved. */ bool ED_view3d_camera_lock_sync(const struct Depsgraph *depsgraph, struct View3D *v3d, @@ -1211,8 +1211,8 @@ bool ED_view3d_camera_lock_undo_test(const View3D *v3d, * \return true when the call to push an undo step was made. */ bool ED_view3d_camera_lock_undo_push(const char *str, - View3D *v3d, - struct RegionView3D *rv3d, + const View3D *v3d, + const struct RegionView3D *rv3d, struct bContext *C); /** @@ -1222,8 +1222,8 @@ bool ED_view3d_camera_lock_undo_push(const char *str, * where adding a separate undo step each time isn't desirable. 
*/ bool ED_view3d_camera_lock_undo_grouped_push(const char *str, - View3D *v3d, - struct RegionView3D *rv3d, + const View3D *v3d, + const struct RegionView3D *rv3d, struct bContext *C); #define VIEW3D_MARGIN 1.4f diff --git a/source/blender/editors/io/io_obj.c b/source/blender/editors/io/io_obj.c index 662ff601e29..c151baf13ef 100644 --- a/source/blender/editors/io/io_obj.c +++ b/source/blender/editors/io/io_obj.c @@ -382,11 +382,6 @@ static int wm_obj_import_invoke(bContext *C, wmOperator *op, const wmEvent *UNUS static int wm_obj_import_exec(bContext *C, wmOperator *op) { - if (!RNA_struct_property_is_set(op->ptr, "filepath")) { - BKE_report(op->reports, RPT_ERROR, "No filename given"); - return OPERATOR_CANCELLED; - } - struct OBJImportParams import_params; RNA_string_get(op->ptr, "filepath", import_params.filepath); import_params.clamp_size = RNA_float_get(op->ptr, "clamp_size"); @@ -395,8 +390,35 @@ static int wm_obj_import_exec(bContext *C, wmOperator *op) import_params.import_vertex_groups = RNA_boolean_get(op->ptr, "import_vertex_groups"); import_params.validate_meshes = RNA_boolean_get(op->ptr, "validate_meshes"); import_params.relative_paths = ((U.flag & USER_RELPATHS) != 0); - - OBJ_import(C, &import_params); + import_params.clear_selection = true; + + int files_len = RNA_collection_length(op->ptr, "files"); + if (files_len) { + /* Importing multiple files: loop over them and import one by one. 
*/ + PointerRNA fileptr; + PropertyRNA *prop; + char dir_only[FILE_MAX], file_only[FILE_MAX]; + + RNA_string_get(op->ptr, "directory", dir_only); + prop = RNA_struct_find_property(op->ptr, "files"); + for (int i = 0; i < files_len; i++) { + RNA_property_collection_lookup_int(op->ptr, prop, i, &fileptr); + RNA_string_get(&fileptr, "name", file_only); + BLI_join_dirfile( + import_params.filepath, sizeof(import_params.filepath), dir_only, file_only); + import_params.clear_selection = (i == 0); + OBJ_import(C, &import_params); + } + } + else if (RNA_struct_property_is_set(op->ptr, "filepath")) { + /* Importing one file. */ + RNA_string_get(op->ptr, "filepath", import_params.filepath); + OBJ_import(C, &import_params); + } + else { + BKE_report(op->reports, RPT_ERROR, "No filename given"); + return OPERATOR_CANCELLED; + } Scene *scene = CTX_data_scene(C); WM_event_add_notifier(C, NC_SCENE | ND_OB_SELECT, scene); @@ -454,7 +476,8 @@ void WM_OT_obj_import(struct wmOperatorType *ot) FILE_TYPE_FOLDER, FILE_BLENDER, FILE_OPENFILE, - WM_FILESEL_FILEPATH | WM_FILESEL_SHOW_PROPS, + WM_FILESEL_FILEPATH | WM_FILESEL_SHOW_PROPS | + WM_FILESEL_DIRECTORY | WM_FILESEL_FILES, FILE_DEFAULTDISPLAY, FILE_SORT_ALPHA); RNA_def_float( diff --git a/source/blender/editors/mesh/editmesh_utils.c b/source/blender/editors/mesh/editmesh_utils.c index 208022326d3..e931dd02a9e 100644 --- a/source/blender/editors/mesh/editmesh_utils.c +++ b/source/blender/editors/mesh/editmesh_utils.c @@ -593,6 +593,31 @@ UvMapVert *BM_uv_vert_map_at_index(UvVertMap *vmap, uint v) return vmap->vert[v]; } +static void bm_uv_ensure_head_table(UvElementMap *element_map) +{ + if (element_map->head_table) { + return; + } + + /* For each UvElement, locate the "separate" UvElement that precedes it in the linked list. 
*/ + element_map->head_table = MEM_mallocN(sizeof(*element_map->head_table) * element_map->total_uvs, + "uv_element_map_head_table"); + UvElement **head_table = element_map->head_table; + for (int i = 0; i < element_map->total_uvs; i++) { + UvElement *head = element_map->storage + i; + if (head->separate) { + UvElement *element = head; + while (element) { + head_table[element - element_map->storage] = head; + element = element->next; + if (element && element->separate) { + break; + } + } + } + } +} + #define INVALID_ISLAND ((unsigned int)-1) static void bm_uv_assign_island(UvElementMap *element_map, @@ -620,23 +645,9 @@ static int bm_uv_edge_select_build_islands(UvElementMap *element_map, bool uv_selected, int cd_loop_uv_offset) { - int total_uvs = element_map->total_uvs; + bm_uv_ensure_head_table(element_map); - /* For each UvElement, locate the "separate" UvElement that precedes it in the linked list. */ - UvElement **head_table = MEM_mallocN(sizeof(*head_table) * total_uvs, "uv_island_head_table"); - for (int i = 0; i < total_uvs; i++) { - UvElement *head = element_map->storage + i; - if (head->separate) { - UvElement *element = head; - while (element) { - head_table[element - element_map->storage] = head; - element = element->next; - if (element && element->separate) { - break; - } - } - } - } + int total_uvs = element_map->total_uvs; /* Depth first search the graph, building islands as we go. 
*/ int nislands = 0; @@ -676,7 +687,7 @@ static int bm_uv_edge_select_build_islands(UvElementMap *element_map, if (!uv_selected || uvedit_edge_select_test(scene, element->l, cd_loop_uv_offset)) { UvElement *next = BM_uv_element_get(element_map, element->l->next->f, element->l->next); if (next->island == INVALID_ISLAND) { - UvElement *tail = head_table[next - element_map->storage]; + UvElement *tail = element_map->head_table[next - element_map->storage]; stack_uv[stacksize_uv++] = tail; while (tail) { bm_uv_assign_island(element_map, tail, nislands, map, islandbuf, islandbufsize++); @@ -692,7 +703,7 @@ static int bm_uv_edge_select_build_islands(UvElementMap *element_map, if (!uv_selected || uvedit_edge_select_test(scene, element->l->prev, cd_loop_uv_offset)) { UvElement *prev = BM_uv_element_get(element_map, element->l->prev->f, element->l->prev); if (prev->island == INVALID_ISLAND) { - UvElement *tail = head_table[prev - element_map->storage]; + UvElement *tail = element_map->head_table[prev - element_map->storage]; stack_uv[stacksize_uv++] = tail; while (tail) { bm_uv_assign_island(element_map, tail, nislands, map, islandbuf, islandbufsize++); @@ -716,11 +727,129 @@ static int bm_uv_edge_select_build_islands(UvElementMap *element_map, BLI_assert(islandbufsize == total_uvs); MEM_SAFE_FREE(stack_uv); - MEM_SAFE_FREE(head_table); + MEM_SAFE_FREE(element_map->head_table); return nislands; } +static void bm_uv_build_islands(UvElementMap *element_map, + BMesh *bm, + const Scene *scene, + bool uv_selected) +{ + int totuv = element_map->total_uvs; + int nislands = 0; + int islandbufsize = 0; + + /* map holds the map from current vmap->buf to the new, sorted map */ + uint *map = MEM_mallocN(sizeof(*map) * totuv, "uvelement_remap"); + BMFace **stack = MEM_mallocN(sizeof(*stack) * bm->totface, "uv_island_face_stack"); + UvElement *islandbuf = MEM_callocN(sizeof(*islandbuf) * totuv, "uvelement_island_buffer"); + /* Island number for BMFaces. 
*/ + int *island_number = MEM_callocN(sizeof(*island_number) * bm->totface, "uv_island_number_face"); + copy_vn_i(island_number, bm->totface, INVALID_ISLAND); + + const int cd_loop_uv_offset = CustomData_get_offset(&bm->ldata, CD_MLOOPUV); + + const bool use_uv_edge_connectivity = scene->toolsettings->uv_flag & UV_SYNC_SELECTION ? + scene->toolsettings->selectmode & SCE_SELECT_EDGE : + scene->toolsettings->uv_selectmode & UV_SELECT_EDGE; + if (use_uv_edge_connectivity) { + nislands = bm_uv_edge_select_build_islands( + element_map, scene, islandbuf, map, uv_selected, cd_loop_uv_offset); + islandbufsize = totuv; + } + + for (int i = 0; i < totuv; i++) { + if (element_map->storage[i].island == INVALID_ISLAND) { + int stacksize = 0; + element_map->storage[i].island = nislands; + stack[0] = element_map->storage[i].l->f; + island_number[BM_elem_index_get(stack[0])] = nislands; + stacksize = 1; + + while (stacksize > 0) { + BMFace *efa = stack[--stacksize]; + + BMLoop *l; + BMIter liter; + BM_ITER_ELEM (l, &liter, efa, BM_LOOPS_OF_FACE) { + if (uv_selected && !uvedit_uv_select_test(scene, l, cd_loop_uv_offset)) { + continue; + } + + UvElement *initelement = element_map->vertex[BM_elem_index_get(l->v)]; + + for (UvElement *element = initelement; element; element = element->next) { + if (element->separate) { + initelement = element; + } + + if (element->l->f == efa) { + /* found the uv corresponding to our face and vertex. 
+ * Now fill it to the buffer */ + bm_uv_assign_island(element_map, element, nislands, map, islandbuf, islandbufsize++); + + for (element = initelement; element; element = element->next) { + if (element->separate && element != initelement) { + break; + } + + if (island_number[BM_elem_index_get(element->l->f)] == INVALID_ISLAND) { + stack[stacksize++] = element->l->f; + island_number[BM_elem_index_get(element->l->f)] = nislands; + } + } + break; + } + } + } + } + + nislands++; + } + } + + MEM_SAFE_FREE(island_number); + + /* remap */ + for (int i = 0; i < bm->totvert; i++) { + /* important since we may do selection only. Some of these may be NULL */ + if (element_map->vertex[i]) { + element_map->vertex[i] = &islandbuf[map[element_map->vertex[i] - element_map->storage]]; + } + } + + element_map->island_indices = MEM_callocN(sizeof(*element_map->island_indices) * nislands, + __func__); + element_map->island_total_uvs = MEM_callocN(sizeof(*element_map->island_total_uvs) * nislands, + __func__); + element_map->island_total_unique_uvs = MEM_callocN( + sizeof(*element_map->island_total_unique_uvs) * nislands, __func__); + int j = 0; + for (int i = 0; i < totuv; i++) { + UvElement *next = element_map->storage[i].next; + islandbuf[map[i]].next = next ? 
&islandbuf[map[next - element_map->storage]] : NULL; + + if (islandbuf[i].island != j) { + j++; + element_map->island_indices[j] = i; + } + BLI_assert(islandbuf[i].island == j); + element_map->island_total_uvs[j]++; + if (islandbuf[i].separate) { + element_map->island_total_unique_uvs[j]++; + } + } + + MEM_SAFE_FREE(element_map->storage); + element_map->storage = islandbuf; + islandbuf = NULL; + element_map->total_islands = nislands; + MEM_SAFE_FREE(stack); + MEM_SAFE_FREE(map); +} + UvElementMap *BM_uv_element_map_create(BMesh *bm, const Scene *scene, const bool uv_selected, @@ -824,6 +953,7 @@ UvElementMap *BM_uv_element_map_create(BMesh *bm, winding[j] = cross_poly_v2(tf_uv, efa->len) > 0; } } + BLI_buffer_free(&tf_uv_buf); /* For each BMVert, sort associated linked list into unique uvs. */ int ev_index; @@ -845,21 +975,32 @@ UvElementMap *BM_uv_element_map_create(BMesh *bm, UvElement *lastv = NULL; UvElement *iterv = vlist; - /* Scan through unsorted list, finding UvElements which match `v`. */ + /* Scan through unsorted list, finding UvElements which are connected to `v`. */ while (iterv) { UvElement *next = iterv->next; - luv = BM_ELEM_CD_GET_VOID_P(iterv->l, cd_loop_uv_offset); - const float *uv2 = luv->uv; - const bool uv2_vert_sel = uvedit_uv_select_test(scene, iterv->l, cd_loop_uv_offset); - /* Check if the uv loops share the same selection state (if not, they are not connected as - * they have been ripped or other edit commands have separated them). */ - const bool connected = (uv_vert_sel == uv2_vert_sel) && - compare_v2v2(uv2, uv, STD_UV_CONNECT_LIMIT); + bool connected = true; /* Assume connected unless we can prove otherwise. */ + + if (connected) { + /* Are the two UVs close together? 
*/ + const float *uv2 = luv->uv; + connected = compare_v2v2(uv2, uv, STD_UV_CONNECT_LIMIT); + } + + if (connected) { + /* Check if the uv loops share the same selection state (if not, they are not connected + * as they have been ripped or other edit commands have separated them). */ + const bool uv2_vert_sel = uvedit_uv_select_test(scene, iterv->l, cd_loop_uv_offset); + connected = (uv_vert_sel == uv2_vert_sel); + } + + if (connected && use_winding) { + connected = winding[BM_elem_index_get(iterv->l->f)] == + winding[BM_elem_index_get(v->l->f)]; + } - if (connected && (!use_winding || winding[BM_elem_index_get(iterv->l->f)] == - winding[BM_elem_index_get(v->l->f)])) { + if (connected) { if (lastv) { lastv->next = next; } @@ -886,120 +1027,13 @@ UvElementMap *BM_uv_element_map_create(BMesh *bm, MEM_SAFE_FREE(winding); + /* at this point, every UvElement in vert points to a UvElement sharing the same vertex. + * Now we should sort uv's in islands. */ if (do_islands) { - uint *map; - UvElement *islandbuf; - - int nislands = 0, islandbufsize = 0; - - /* map holds the map from current vmap->buf to the new, sorted map */ - map = MEM_mallocN(sizeof(*map) * totuv, "uvelement_remap"); - BMFace **stack = MEM_mallocN(sizeof(*stack) * bm->totface, "uv_island_face_stack"); - islandbuf = MEM_callocN(sizeof(*islandbuf) * totuv, "uvelement_island_buffer"); - /* Island number for BMFaces. */ - int *island_number = MEM_callocN(sizeof(*island_number) * bm->totface, - "uv_island_number_face"); - copy_vn_i(island_number, bm->totface, INVALID_ISLAND); - - const bool use_uv_edge_connectivity = scene->toolsettings->uv_flag & UV_SYNC_SELECTION ? 
- scene->toolsettings->selectmode & SCE_SELECT_EDGE : - scene->toolsettings->uv_selectmode & UV_SELECT_EDGE; - if (use_uv_edge_connectivity) { - nislands = bm_uv_edge_select_build_islands( - element_map, scene, islandbuf, map, uv_selected, cd_loop_uv_offset); - islandbufsize = totuv; - } - - /* at this point, every UvElement in vert points to a UvElement sharing the same vertex. - * Now we should sort uv's in islands. */ - for (int i = 0; i < totuv; i++) { - if (element_map->storage[i].island == INVALID_ISLAND) { - int stacksize = 0; - element_map->storage[i].island = nislands; - stack[0] = element_map->storage[i].l->f; - island_number[BM_elem_index_get(stack[0])] = nislands; - stacksize = 1; - - while (stacksize > 0) { - efa = stack[--stacksize]; - - BMLoop *l; - BM_ITER_ELEM (l, &liter, efa, BM_LOOPS_OF_FACE) { - if (uv_selected && !uvedit_uv_select_test(scene, l, cd_loop_uv_offset)) { - continue; - } - - UvElement *initelement = element_map->vertex[BM_elem_index_get(l->v)]; - - for (UvElement *element = initelement; element; element = element->next) { - if (element->separate) { - initelement = element; - } - - if (element->l->f == efa) { - /* found the uv corresponding to our face and vertex. - * Now fill it to the buffer */ - bm_uv_assign_island( - element_map, element, nislands, map, islandbuf, islandbufsize++); - - for (element = initelement; element; element = element->next) { - if (element->separate && element != initelement) { - break; - } - - if (island_number[BM_elem_index_get(element->l->f)] == INVALID_ISLAND) { - stack[stacksize++] = element->l->f; - island_number[BM_elem_index_get(element->l->f)] = nislands; - } - } - break; - } - } - } - } - - nislands++; - } - } - - MEM_SAFE_FREE(island_number); - - /* remap */ - for (int i = 0; i < bm->totvert; i++) { - /* important since we may do selection only. 
Some of these may be NULL */ - if (element_map->vertex[i]) { - element_map->vertex[i] = &islandbuf[map[element_map->vertex[i] - element_map->storage]]; - } - } - - element_map->islandIndices = MEM_callocN(sizeof(*element_map->islandIndices) * nislands, - "UvElementMap_island_indices"); - j = 0; - for (int i = 0; i < totuv; i++) { - UvElement *element = element_map->storage[i].next; - if (element == NULL) { - islandbuf[map[i]].next = NULL; - } - else { - islandbuf[map[i]].next = &islandbuf[map[element - element_map->storage]]; - } - - if (islandbuf[i].island != j) { - j++; - element_map->islandIndices[j] = i; - } - } - - MEM_SAFE_FREE(element_map->storage); - element_map->storage = islandbuf; - islandbuf = NULL; - element_map->totalIslands = nislands; - MEM_SAFE_FREE(stack); - MEM_SAFE_FREE(map); + bm_uv_build_islands(element_map, bm, scene, uv_selected); } - BLI_buffer_free(&tf_uv_buf); - + /* TODO: Confirm element_map->total_unique_uvs doesn't require recalculating. */ element_map->total_unique_uvs = 0; for (int i = 0; i < element_map->total_uvs; i++) { if (element_map->storage[i].separate) { @@ -1028,7 +1062,10 @@ void BM_uv_element_map_free(UvElementMap *element_map) if (element_map) { MEM_SAFE_FREE(element_map->storage); MEM_SAFE_FREE(element_map->vertex); - MEM_SAFE_FREE(element_map->islandIndices); + MEM_SAFE_FREE(element_map->head_table); + MEM_SAFE_FREE(element_map->island_indices); + MEM_SAFE_FREE(element_map->island_total_uvs); + MEM_SAFE_FREE(element_map->island_total_unique_uvs); MEM_SAFE_FREE(element_map); } } diff --git a/source/blender/editors/sculpt_paint/sculpt_uv.c b/source/blender/editors/sculpt_paint/sculpt_uv.c index 2e2abd30ea2..14b06f888fe 100644 --- a/source/blender/editors/sculpt_paint/sculpt_uv.c +++ b/source/blender/editors/sculpt_paint/sculpt_uv.c @@ -518,13 +518,7 @@ static UvSculptData *uv_sculpt_stroke_init(bContext *C, wmOperator *op, const wm /* Count 'unique' UV's */ int unique_uvs = data->elementMap->total_unique_uvs; if 
(do_island_optimization) { - unique_uvs = 0; - for (int i = 0; i < data->elementMap->total_uvs; i++) { - if (data->elementMap->storage[i].separate && - (data->elementMap->storage[i].island == island_index)) { - unique_uvs++; - } - } + unique_uvs = data->elementMap->island_total_unique_uvs[island_index]; } /* Allocate the unique uv buffers */ @@ -572,6 +566,7 @@ static UvSculptData *uv_sculpt_stroke_init(bContext *C, wmOperator *op, const wm uniqueUv[element - data->elementMap->storage] = counter; } } + BLI_assert(counter + 1 == unique_uvs); /* Now, on to generate our uv connectivity data */ counter = 0; diff --git a/source/blender/editors/space_outliner/tree/tree_element_overrides.cc b/source/blender/editors/space_outliner/tree/tree_element_overrides.cc index e19459ced61..49cabd5117f 100644 --- a/source/blender/editors/space_outliner/tree/tree_element_overrides.cc +++ b/source/blender/editors/space_outliner/tree/tree_element_overrides.cc @@ -371,8 +371,7 @@ void OverrideRNAPathTreeBuilder::ensure_entire_collection( const char *coll_prop_path, short &index) { - AbstractTreeElement *abstract_parent = tree_element_cast<AbstractTreeElement>(&te_to_expand); - BLI_assert(abstract_parent != nullptr); + BLI_assert(tree_element_cast<AbstractTreeElement>(&te_to_expand) != nullptr); TreeElement *previous_te = nullptr; int item_idx = 0; diff --git a/source/blender/editors/space_view3d/space_view3d.c b/source/blender/editors/space_view3d/space_view3d.c index 4408f254c68..1a2eb20d1a9 100644 --- a/source/blender/editors/space_view3d/space_view3d.c +++ b/source/blender/editors/space_view3d/space_view3d.c @@ -1210,6 +1210,9 @@ static void view3d_main_region_listener(const wmRegionListenerParams *params) break; } break; + case NC_NODE: + ED_region_tag_redraw(region); + break; case NC_WORLD: switch (wmn->data) { case ND_WORLD_DRAW: diff --git a/source/blender/editors/space_view3d/view3d_navigate.h b/source/blender/editors/space_view3d/view3d_navigate.h index 721476ace57..925acd90573 
100644 --- a/source/blender/editors/space_view3d/view3d_navigate.h +++ b/source/blender/editors/space_view3d/view3d_navigate.h @@ -266,12 +266,12 @@ void ED_view3d_smooth_view(struct bContext *C, * or when calling #ED_view3d_smooth_view_ex. * Otherwise pass in #V3D_SmoothParams.undo_str so an undo step is pushed as needed. */ -void ED_view3d_smooth_view_undo_begin(struct bContext *C, struct ScrArea *area); +void ED_view3d_smooth_view_undo_begin(struct bContext *C, const struct ScrArea *area); /** * Run after multiple smooth-view operations have run to push undo as needed. */ void ED_view3d_smooth_view_undo_end(struct bContext *C, - struct ScrArea *area, + const struct ScrArea *area, const char *undo_str, bool undo_grouped); diff --git a/source/blender/editors/space_view3d/view3d_navigate_smoothview.c b/source/blender/editors/space_view3d/view3d_navigate_smoothview.c index 8125e334492..6b150d1e771 100644 --- a/source/blender/editors/space_view3d/view3d_navigate_smoothview.c +++ b/source/blender/editors/space_view3d/view3d_navigate_smoothview.c @@ -23,7 +23,7 @@ #include "view3d_navigate.h" /* own include */ static void view3d_smoothview_apply_with_interp( - bContext *C, View3D *v3d, ARegion *region, const bool use_autokey, const float factor); + bContext *C, View3D *v3d, RegionView3D *rv3d, const bool use_autokey, const float factor); /* -------------------------------------------------------------------- */ /** \name Smooth View Undo Handling @@ -40,7 +40,7 @@ static void view3d_smoothview_apply_with_interp( * operations are executed once smooth-view has started. * \{ */ -void ED_view3d_smooth_view_undo_begin(bContext *C, ScrArea *area) +void ED_view3d_smooth_view_undo_begin(bContext *C, const ScrArea *area) { const View3D *v3d = area->spacedata.first; Object *camera = v3d->camera; @@ -53,11 +53,11 @@ void ED_view3d_smooth_view_undo_begin(bContext *C, ScrArea *area) * NOTE: It doesn't matter if the actual object being manipulated is the camera or not. 
*/ camera->id.tag &= ~LIB_TAG_DOIT; - LISTBASE_FOREACH (ARegion *, region, &area->regionbase) { + LISTBASE_FOREACH (const ARegion *, region, &area->regionbase) { if (region->regiontype != RGN_TYPE_WINDOW) { continue; } - RegionView3D *rv3d = region->regiondata; + const RegionView3D *rv3d = region->regiondata; if (ED_view3d_camera_lock_undo_test(v3d, rv3d, C)) { camera->id.tag |= LIB_TAG_DOIT; break; @@ -66,7 +66,7 @@ void ED_view3d_smooth_view_undo_begin(bContext *C, ScrArea *area) } void ED_view3d_smooth_view_undo_end(bContext *C, - ScrArea *area, + const ScrArea *area, const char *undo_str, const bool undo_grouped) { @@ -89,15 +89,15 @@ void ED_view3d_smooth_view_undo_end(bContext *C, * so even in the case there is a quad-view with multiple camera views set, these will all * reference the same camera. In this case it doesn't matter which region is used. * If in the future multiple cameras are supported, this logic can be extended. */ - ARegion *region_camera = NULL; + const ARegion *region_camera = NULL; /* An undo push should be performed. */ bool is_interactive = false; - LISTBASE_FOREACH (ARegion *, region, &area->regionbase) { + LISTBASE_FOREACH (const ARegion *, region, &area->regionbase) { if (region->regiontype != RGN_TYPE_WINDOW) { continue; } - RegionView3D *rv3d = region->regiondata; + const RegionView3D *rv3d = region->regiondata; if (ED_view3d_camera_lock_undo_test(v3d, rv3d, C)) { region_camera = region; if (rv3d->sms) { @@ -110,12 +110,13 @@ void ED_view3d_smooth_view_undo_end(bContext *C, return; } + RegionView3D *rv3d = region_camera->regiondata; + /* Fast forward, undo push, then rewind. 
*/ if (is_interactive) { - view3d_smoothview_apply_with_interp(C, v3d, region_camera, false, 1.0f); + view3d_smoothview_apply_with_interp(C, v3d, rv3d, false, 1.0f); } - RegionView3D *rv3d = region_camera->regiondata; if (undo_grouped) { ED_view3d_camera_lock_undo_grouped_push(undo_str, v3d, rv3d, C); } @@ -124,7 +125,7 @@ void ED_view3d_smooth_view_undo_end(bContext *C, } if (is_interactive) { - view3d_smoothview_apply_with_interp(C, v3d, region_camera, false, 0.0f); + view3d_smoothview_apply_with_interp(C, v3d, rv3d, false, 0.0f); } } @@ -391,9 +392,8 @@ void ED_view3d_smooth_view(bContext *C, * Apply with interpolation, on completion run #view3d_smoothview_apply_and_finish. */ static void view3d_smoothview_apply_with_interp( - bContext *C, View3D *v3d, ARegion *region, const bool use_autokey, const float factor) + bContext *C, View3D *v3d, RegionView3D *rv3d, const bool use_autokey, const float factor) { - RegionView3D *rv3d = region->regiondata; struct SmoothView3DStore *sms = rv3d->sms; interp_qt_qtqt(rv3d->viewquat, sms->src.quat, sms->dst.quat, factor); @@ -410,21 +410,19 @@ static void view3d_smoothview_apply_with_interp( v3d->lens = interpf(sms->dst.lens, sms->src.lens, factor); const Depsgraph *depsgraph = CTX_data_ensure_evaluated_depsgraph(C); - ED_view3d_camera_lock_sync(depsgraph, v3d, rv3d); - if (use_autokey) { - ED_view3d_camera_lock_autokey(v3d, rv3d, C, true, true); + if (ED_view3d_camera_lock_sync(depsgraph, v3d, rv3d)) { + if (use_autokey) { + ED_view3d_camera_lock_autokey(v3d, rv3d, C, true, true); + } } - - ED_region_tag_redraw(region); } /** * Apply the view-port transformation & free smooth-view related data. 
*/ -static void view3d_smoothview_apply_and_finish(bContext *C, View3D *v3d, ARegion *region) +static void view3d_smoothview_apply_and_finish(bContext *C, View3D *v3d, RegionView3D *rv3d) { wmWindowManager *wm = CTX_wm_manager(C); - RegionView3D *rv3d = region->regiondata; struct SmoothView3DStore *sms = rv3d->sms; wmWindow *win = CTX_wm_window(C); @@ -439,8 +437,9 @@ static void view3d_smoothview_apply_and_finish(bContext *C, View3D *v3d, ARegion view3d_smooth_view_state_restore(&sms->dst, v3d, rv3d); - ED_view3d_camera_lock_sync(depsgraph, v3d, rv3d); - ED_view3d_camera_lock_autokey(v3d, rv3d, C, true, true); + if (ED_view3d_camera_lock_sync(depsgraph, v3d, rv3d)) { + ED_view3d_camera_lock_autokey(v3d, rv3d, C, true, true); + } } if ((RV3D_LOCK_FLAGS(rv3d) & RV3D_LOCK_ROTATION) == 0) { @@ -481,18 +480,20 @@ static void view3d_smoothview_apply_from_timer(bContext *C, View3D *v3d, ARegion factor = 1.0f; } if (factor >= 1.0f) { - view3d_smoothview_apply_and_finish(C, v3d, region); + view3d_smoothview_apply_and_finish(C, v3d, rv3d); } else { /* Ease in/out smoothing. 
*/ factor = (3.0f * factor * factor - 2.0f * factor * factor * factor); const bool use_autokey = ED_screen_animation_playing(wm); - view3d_smoothview_apply_with_interp(C, v3d, region, use_autokey, factor); + view3d_smoothview_apply_with_interp(C, v3d, rv3d, use_autokey, factor); } if (RV3D_LOCK_FLAGS(rv3d) & RV3D_BOXVIEW) { view3d_boxview_copy(CTX_wm_area(C), region); } + + ED_region_tag_redraw(region); } static int view3d_smoothview_invoke(bContext *C, wmOperator *UNUSED(op), const wmEvent *event) @@ -514,11 +515,10 @@ static int view3d_smoothview_invoke(bContext *C, wmOperator *UNUSED(op), const w void ED_view3d_smooth_view_force_finish(bContext *C, View3D *v3d, ARegion *region) { RegionView3D *rv3d = region->regiondata; - if (rv3d && rv3d->sms) { - view3d_smoothview_apply_and_finish(C, v3d, region); + view3d_smoothview_apply_and_finish(C, v3d, rv3d); - /* force update of view matrix so tools that run immediately after + /* Force update of view matrix so tools that run immediately after * can use them without redrawing first */ Depsgraph *depsgraph = CTX_data_ensure_evaluated_depsgraph(C); Scene *scene = CTX_data_scene(C); diff --git a/source/blender/editors/space_view3d/view3d_select.c b/source/blender/editors/space_view3d/view3d_select.c index ecbf20cef68..fbfd9046112 100644 --- a/source/blender/editors/space_view3d/view3d_select.c +++ b/source/blender/editors/space_view3d/view3d_select.c @@ -2897,11 +2897,6 @@ static int view3d_select_exec(bContext *C, wmOperator *op) bool changed = false; int mval[2]; - RNA_int_get_array(op->ptr, "location", mval); - - view3d_operator_needs_opengl(C); - BKE_object_update_select_id(CTX_data_main(C)); - if (object_only) { obedit = NULL; obact = NULL; @@ -2912,6 +2907,19 @@ static int view3d_select_exec(bContext *C, wmOperator *op) center = false; } + if (obedit && enumerate) { + /* Enumerate makes no sense in edit-mode unless also explicitly picking objects or bones. 
+ * Pass the event through so the event may be handled by loop-select for e.g. see: T100204. */ + if (obedit->type != OB_ARMATURE) { + return OPERATOR_PASS_THROUGH | OPERATOR_CANCELLED; + } + } + + RNA_int_get_array(op->ptr, "location", mval); + + view3d_operator_needs_opengl(C); + BKE_object_update_select_id(CTX_data_main(C)); + if (obedit && object_only == false) { if (obedit->type == OB_MESH) { changed = EDBM_select_pick(C, mval, ¶ms); diff --git a/source/blender/editors/space_view3d/view3d_utils.c b/source/blender/editors/space_view3d/view3d_utils.c index 0d88824a784..5f2a4e8c4cc 100644 --- a/source/blender/editors/space_view3d/view3d_utils.c +++ b/source/blender/editors/space_view3d/view3d_utils.c @@ -708,8 +708,11 @@ bool ED_view3d_camera_lock_undo_test(const View3D *v3d, * unnecessary undo steps so undo push for them is not supported for now. Also operators that uses * smooth view for navigation are excluded too, but they can be supported, see: D15345. */ -static bool view3d_camera_lock_undo_ex( - const char *str, View3D *v3d, RegionView3D *rv3d, struct bContext *C, bool undo_group) +static bool view3d_camera_lock_undo_ex(const char *str, + const View3D *v3d, + const RegionView3D *rv3d, + struct bContext *C, + const bool undo_group) { if (ED_view3d_camera_lock_undo_test(v3d, rv3d, C)) { if (undo_group) { @@ -723,14 +726,17 @@ static bool view3d_camera_lock_undo_ex( return false; } -bool ED_view3d_camera_lock_undo_push(const char *str, View3D *v3d, RegionView3D *rv3d, bContext *C) +bool ED_view3d_camera_lock_undo_push(const char *str, + const View3D *v3d, + const RegionView3D *rv3d, + bContext *C) { return view3d_camera_lock_undo_ex(str, v3d, rv3d, C, false); } bool ED_view3d_camera_lock_undo_grouped_push(const char *str, - View3D *v3d, - RegionView3D *rv3d, + const View3D *v3d, + const RegionView3D *rv3d, bContext *C) { return view3d_camera_lock_undo_ex(str, v3d, rv3d, C, true); diff --git a/source/blender/editors/transform/transform_convert_mesh_uv.c 
b/source/blender/editors/transform/transform_convert_mesh_uv.c index d95bc7b976f..f3bef2c283b 100644 --- a/source/blender/editors/transform/transform_convert_mesh_uv.c +++ b/source/blender/editors/transform/transform_convert_mesh_uv.c @@ -270,7 +270,7 @@ static void createTransUVs(bContext *C, TransInfo *t) continue; } - island_center = MEM_callocN(sizeof(*island_center) * elementmap->totalIslands, __func__); + island_center = MEM_callocN(sizeof(*island_center) * elementmap->total_islands, __func__); } BM_ITER_MESH (efa, &iter, em->bm, BM_FACES_OF_MESH) { @@ -315,9 +315,7 @@ static void createTransUVs(bContext *C, TransInfo *t) } if (is_island_center) { - int i; - - for (i = 0; i < elementmap->totalIslands; i++) { + for (int i = 0; i < elementmap->total_islands; i++) { mul_v2_fl(island_center[i].co, 1.0f / island_center[i].co_num); mul_v2_v2(island_center[i].co, t->aspect); } diff --git a/source/blender/editors/uvedit/uvedit_ops.c b/source/blender/editors/uvedit/uvedit_ops.c index 092f0c49d8a..6755630d3ef 100644 --- a/source/blender/editors/uvedit/uvedit_ops.c +++ b/source/blender/editors/uvedit/uvedit_ops.c @@ -541,14 +541,11 @@ static bool uvedit_uv_straighten(Scene *scene, BMesh *bm, eUVWeldAlign tool) } bool changed = false; - - /* Loop backwards to simplify logic. 
*/ - int j1 = element_map->total_uvs; - for (int i = element_map->totalIslands - 1; i >= 0; --i) { - int j0 = element_map->islandIndices[i]; - changed |= uvedit_uv_straighten_elements( - element_map->storage + j0, j1 - j0, cd_loop_uv_offset, tool); - j1 = j0; + for (int i = 0; i < element_map->total_islands; i++) { + changed |= uvedit_uv_straighten_elements(element_map->storage + element_map->island_indices[i], + element_map->island_total_uvs[i], + cd_loop_uv_offset, + tool); } BM_uv_element_map_free(element_map); diff --git a/source/blender/editors/uvedit/uvedit_select.c b/source/blender/editors/uvedit/uvedit_select.c index 749c59ea6a4..d88da21ef98 100644 --- a/source/blender/editors/uvedit/uvedit_select.c +++ b/source/blender/editors/uvedit/uvedit_select.c @@ -5395,7 +5395,7 @@ static void uv_isolate_selected_islands(const Scene *scene, return; } - int num_islands = elementmap->totalIslands; + int num_islands = elementmap->total_islands; /* Boolean array that tells if island with index i is completely selected or not. 
*/ bool *is_island_not_selected = MEM_callocN(sizeof(bool) * (num_islands), __func__); diff --git a/source/blender/editors/uvedit/uvedit_smart_stitch.c b/source/blender/editors/uvedit/uvedit_smart_stitch.c index 26ed98ba236..4a2ea5c3aa6 100644 --- a/source/blender/editors/uvedit/uvedit_smart_stitch.c +++ b/source/blender/editors/uvedit/uvedit_smart_stitch.c @@ -287,14 +287,6 @@ static void stitch_update_header(StitchStateContainer *ssc, bContext *C) } } -static int getNumOfIslandUvs(UvElementMap *elementMap, int island) -{ - if (island == elementMap->totalIslands - 1) { - return elementMap->total_uvs - elementMap->islandIndices[island]; - } - return elementMap->islandIndices[island + 1] - elementMap->islandIndices[island]; -} - static void stitch_uv_rotate(const float mat[2][2], const float medianPoint[2], float uv[2], @@ -419,10 +411,9 @@ static void stitch_calculate_island_snapping(StitchState *state, int final) { BMesh *bm = state->em->bm; - int i; UvElement *element; - for (i = 0; i < state->element_map->totalIslands; i++) { + for (int i = 0; i < state->element_map->total_islands; i++) { if (island_stitch_data[i].addedForPreview) { int numOfIslandUVs = 0, j; int totelem = island_stitch_data[i].num_rot_elements_neg + @@ -464,8 +455,8 @@ static void stitch_calculate_island_snapping(StitchState *state, } angle_to_mat2(rotation_mat, rotation); - numOfIslandUVs = getNumOfIslandUvs(state->element_map, i); - element = &state->element_map->storage[state->element_map->islandIndices[i]]; + numOfIslandUVs = state->element_map->island_total_uvs[i]; + element = &state->element_map->storage[state->element_map->island_indices[i]]; for (j = 0; j < numOfIslandUVs; j++, element++) { /* stitchable uvs have already been processed, don't process */ if (!(element->flag & STITCH_PROCESSED)) { @@ -984,7 +975,7 @@ static int stitch_process_data(StitchStateContainer *ssc, preview_position[i].data_position = STITCH_NO_PREVIEW; } - island_stitch_data = 
MEM_callocN(sizeof(*island_stitch_data) * state->element_map->totalIslands, + island_stitch_data = MEM_callocN(sizeof(*island_stitch_data) * state->element_map->total_islands, "stitch_island_data"); if (!island_stitch_data) { return 0; @@ -1009,7 +1000,7 @@ static int stitch_process_data(StitchStateContainer *ssc, } /* Remember stitchable candidates as places the 'I' button will stop at. */ - for (int island_idx = 0; island_idx < state->element_map->totalIslands; island_idx++) { + for (int island_idx = 0; island_idx < state->element_map->total_islands; island_idx++) { state->island_is_stitchable[island_idx] = island_stitch_data[island_idx].stitchableCandidate ? true : false; @@ -1017,10 +1008,10 @@ static int stitch_process_data(StitchStateContainer *ssc, if (is_active_state) { /* set static island to one that is added for preview */ - ssc->static_island %= state->element_map->totalIslands; + ssc->static_island %= state->element_map->total_islands; while (!(island_stitch_data[ssc->static_island].stitchableCandidate)) { ssc->static_island++; - ssc->static_island %= state->element_map->totalIslands; + ssc->static_island %= state->element_map->total_islands; /* this is entirely possible if for example limit stitching * with no stitchable verts or no selection */ if (ssc->static_island == previous_island) { @@ -1141,13 +1132,11 @@ static int stitch_process_data(StitchStateContainer *ssc, * Setup preview for stitchable islands * ****************************************/ if (ssc->snap_islands) { - for (i = 0; i < state->element_map->totalIslands; i++) { + for (i = 0; i < state->element_map->total_islands; i++) { if (island_stitch_data[i].addedForPreview) { - int numOfIslandUVs = 0, j; - UvElement *element; - numOfIslandUVs = getNumOfIslandUvs(state->element_map, i); - element = &state->element_map->storage[state->element_map->islandIndices[i]]; - for (j = 0; j < numOfIslandUVs; j++, element++) { + int numOfIslandUVs = state->element_map->island_total_uvs[i]; + UvElement 
*element = &state->element_map->storage[state->element_map->island_indices[i]]; + for (int j = 0; j < numOfIslandUVs; j++, element++) { stitch_set_face_preview_buffer_position(element->l->f, preview, preview_position); } } @@ -2120,8 +2109,8 @@ static StitchState *stitch_init(bContext *C, /***** initialize static island preview data *****/ state->tris_per_island = MEM_mallocN( - sizeof(*state->tris_per_island) * state->element_map->totalIslands, "stitch island tris"); - for (i = 0; i < state->element_map->totalIslands; i++) { + sizeof(*state->tris_per_island) * state->element_map->total_islands, "stitch island tris"); + for (i = 0; i < state->element_map->total_islands; i++) { state->tris_per_island[i] = 0; } @@ -2133,7 +2122,7 @@ static StitchState *stitch_init(bContext *C, } } - state->island_is_stitchable = MEM_callocN(sizeof(bool) * state->element_map->totalIslands, + state->island_is_stitchable = MEM_callocN(sizeof(bool) * state->element_map->total_islands, "stitch I stops"); if (!state->island_is_stitchable) { state_delete(state); @@ -2157,7 +2146,7 @@ static bool goto_next_island(StitchStateContainer *ssc) do { ssc->static_island++; - if (ssc->static_island >= active_state->element_map->totalIslands) { + if (ssc->static_island >= active_state->element_map->total_islands) { /* go to next object */ ssc->active_object_index++; ssc->active_object_index %= ssc->objects_len; @@ -2307,7 +2296,7 @@ static int stitch_init_all(bContext *C, wmOperator *op) ssc->static_island = RNA_int_get(op->ptr, "static_island"); StitchState *state = ssc->states[ssc->active_object_index]; - ssc->static_island %= state->element_map->totalIslands; + ssc->static_island %= state->element_map->total_islands; /* If the initial active object doesn't have any stitchable islands * then no active island will be seen in the UI. 
diff --git a/source/blender/geometry/GEO_uv_parametrizer.h b/source/blender/geometry/GEO_uv_parametrizer.h index 5285aefbd4c..ff110f18ffb 100644 --- a/source/blender/geometry/GEO_uv_parametrizer.h +++ b/source/blender/geometry/GEO_uv_parametrizer.h @@ -13,8 +13,8 @@ extern "C" { #endif typedef struct ParamHandle ParamHandle; /* Handle to an array of charts. */ -typedef intptr_t ParamKey; /* Key (hash) for identifying verts and faces. */ -#define PARAM_KEY_MAX INTPTR_MAX +typedef uintptr_t ParamKey; /* Key (hash) for identifying verts and faces. */ +#define PARAM_KEY_MAX UINTPTR_MAX /* -------------------------------------------------------------------- */ /** \name Chart Construction: diff --git a/source/blender/geometry/intern/uv_parametrizer.cc b/source/blender/geometry/intern/uv_parametrizer.cc index ae2794bf9bc..b7526d82ecc 100644 --- a/source/blender/geometry/intern/uv_parametrizer.cc +++ b/source/blender/geometry/intern/uv_parametrizer.cc @@ -30,7 +30,7 @@ /* Special Purpose Hash */ -typedef intptr_t PHashKey; +typedef uintptr_t PHashKey; typedef struct PHashLink { struct PHashLink *next; @@ -45,7 +45,7 @@ typedef struct PHash { /* Simplices */ -typedef struct PVert { +struct PVert { struct PVert *nextlink; union PVertUnion { @@ -58,9 +58,9 @@ typedef struct PVert { float co[3]; float uv[2]; uint flag; -} PVert; +}; -typedef struct PEdge { +struct PEdge { struct PEdge *nextlink; union PEdgeUnion { @@ -76,9 +76,9 @@ typedef struct PEdge { struct PFace *face; float *orig_uv, old_uv[2]; uint flag; -} PEdge; +}; -typedef struct PFace { +struct PFace { struct PFace *nextlink; union PFaceUnion { @@ -89,8 +89,8 @@ typedef struct PFace { } u; struct PEdge *edge; - uchar flag; -} PFace; + uint flag; +}; enum PVertFlag { PVERT_PIN = 1, @@ -123,7 +123,7 @@ enum PFaceFlag { /* Chart */ -typedef struct PChart { +struct PChart { PVert *verts; PEdge *edges; PFace *faces; @@ -151,7 +151,7 @@ typedef struct PChart { } u; bool has_pins; -} PChart; +}; enum PHandleState { 
PHANDLE_STATE_ALLOCATED, @@ -160,7 +160,7 @@ enum PHandleState { PHANDLE_STATE_STRETCH, }; -typedef struct ParamHandle { +struct ParamHandle { enum PHandleState state; MemArena *arena; MemArena *polyfill_arena; @@ -181,7 +181,7 @@ typedef struct ParamHandle { RNG *rng; float blend; -} ParamHandle; +}; /* PHash * - special purpose hash that keeps all its elements in a single linked list. @@ -219,8 +219,8 @@ static void phash_delete(PHash *ph) { if (ph) { MEM_SAFE_FREE(ph->buckets); + MEM_freeN(ph); } - MEM_SAFE_FREE(ph); } static int phash_size(PHash *ph) @@ -234,7 +234,7 @@ static void phash_insert(PHash *ph, PHashLink *link) uintptr_t hash = PHASH_hash(ph, link->key); PHashLink *lookup = ph->buckets[hash]; - if (lookup == NULL) { + if (lookup == nullptr) { /* insert in front of the list */ ph->buckets[hash] = link; link->next = *(ph->list); @@ -249,13 +249,13 @@ static void phash_insert(PHash *ph, PHashLink *link) ph->size++; if (ph->size > (size * 3)) { - PHashLink *next = NULL, *first = *(ph->list); + PHashLink *next = nullptr, *first = *(ph->list); ph->cursize = PHashSizes[++ph->cursize_id]; MEM_freeN(ph->buckets); ph->buckets = (PHashLink **)MEM_callocN(ph->cursize * sizeof(*ph->buckets), "PHashBuckets"); ph->size = 0; - *(ph->list) = NULL; + *(ph->list) = nullptr; for (link = first; link; link = next) { next = link->next; @@ -274,7 +274,7 @@ static PHashLink *phash_lookup(PHash *ph, PHashKey key) return link; } if (PHASH_hash(ph, link->key) != hash) { - return NULL; + return nullptr; } } @@ -290,7 +290,7 @@ static PHashLink *phash_next(PHash *ph, PHashKey key, PHashLink *link) return link; } if (PHASH_hash(ph, link->key) != hash) { - return NULL; + return nullptr; } } @@ -456,7 +456,7 @@ static PEdge *p_wheel_edge_next(PEdge *e) static PEdge *p_wheel_edge_prev(PEdge *e) { - return (e->pair) ? e->pair->next : NULL; + return (e->pair) ? 
e->pair->next : nullptr; } static PEdge *p_boundary_edge_next(PEdge *e) @@ -694,7 +694,7 @@ static PEdge *p_edge_lookup(ParamHandle *handle, const PHashKey *vkeys) e = (PEdge *)phash_next(handle->hash_edges, key, (PHashLink *)e); } - return NULL; + return nullptr; } static int p_face_exists(ParamHandle *handle, const ParamKey *pvkeys, int i1, int i2, int i3) @@ -769,7 +769,7 @@ static bool p_edge_has_pair(ParamHandle *handle, PEdge *e, bool topology_from_uv key = PHASH_edge(key1, key2); pe = (PEdge *)phash_lookup(handle->hash_edges, key); - *r_pair = NULL; + *r_pair = nullptr; while (pe) { if (pe != e) { @@ -782,7 +782,7 @@ static bool p_edge_has_pair(ParamHandle *handle, PEdge *e, bool topology_from_uv /* don't connect seams and t-junctions */ if ((pe->flag & PEDGE_SEAM) || *r_pair || (topology_from_uvs && p_edge_implicit_seam(e, pe))) { - *r_pair = NULL; + *r_pair = nullptr; return false; } @@ -796,12 +796,12 @@ static bool p_edge_has_pair(ParamHandle *handle, PEdge *e, bool topology_from_uv if (*r_pair && (e->vert == (*r_pair)->vert)) { if ((*r_pair)->next->pair || (*r_pair)->next->next->pair) { /* non unfoldable, maybe mobius ring or klein bottle */ - *r_pair = NULL; + *r_pair = nullptr; return false; } } - return (*r_pair != NULL); + return (*r_pair != nullptr); } static bool p_edge_connect_pair(ParamHandle *handle, @@ -809,7 +809,7 @@ static bool p_edge_connect_pair(ParamHandle *handle, bool topology_from_uvs, PEdge ***stack) { - PEdge *pair = NULL; + PEdge *pair = nullptr; if (!e->pair && p_edge_has_pair(handle, e, topology_from_uvs, &pair)) { if (e->vert == pair->vert) { @@ -825,7 +825,7 @@ static bool p_edge_connect_pair(ParamHandle *handle, } } - return (e->pair != NULL); + return (e->pair != nullptr); } static int p_connect_pairs(ParamHandle *handle, bool topology_from_uvs) @@ -873,14 +873,14 @@ static int p_connect_pairs(ParamHandle *handle, bool topology_from_uvs) ncharts++; } - MEM_SAFE_FREE(stackbase); + MEM_freeN(stackbase); return ncharts; } static 
void p_split_vert(ParamHandle *handle, PChart *chart, PEdge *e) { - PEdge *we, *lastwe = NULL; + PEdge *we, *lastwe = nullptr; PVert *v = e->vert; bool copy = true; @@ -993,9 +993,9 @@ static PFace *p_face_add(ParamHandle *handle) e2->next = e3; e3->next = e1; - e1->pair = NULL; - e2->pair = NULL; - e3->pair = NULL; + e1->pair = nullptr; + e2->pair = nullptr; + e3->pair = nullptr; e1->flag = 0; e2->flag = 0; @@ -1073,7 +1073,7 @@ static PFace *p_face_add_fill(ParamHandle *handle, PChart *chart, PVert *v1, PVe e2->vert = v2; e3->vert = v3; - e1->orig_uv = e2->orig_uv = e3->orig_uv = NULL; + e1->orig_uv = e2->orig_uv = e3->orig_uv = nullptr; f->nextlink = chart->faces; chart->faces = f; @@ -1125,7 +1125,7 @@ static void p_chart_boundaries(PChart *chart, PEdge **r_outer) chart->nboundaries = 0; if (r_outer) { - *r_outer = NULL; + *r_outer = nullptr; } for (e = chart->edges; e; e = e->nextlink) { @@ -1216,7 +1216,7 @@ static void p_chart_fill_boundary(ParamHandle *handle, PChart *chart, PEdge *be, BLI_heap_remove(heap, e1->u.heaplink); BLI_heap_remove(heap, e2->u.heaplink); - e->u.heaplink = e1->u.heaplink = e2->u.heaplink = NULL; + e->u.heaplink = e1->u.heaplink = e2->u.heaplink = nullptr; e->flag |= PEDGE_FILLED; e1->flag |= PEDGE_FILLED; @@ -1254,7 +1254,7 @@ static void p_chart_fill_boundary(ParamHandle *handle, PChart *chart, PEdge *be, } } - BLI_heap_free(heap, NULL); + BLI_heap_free(heap, nullptr); } static void p_chart_fill_boundaries(ParamHandle *handle, PChart *chart, PEdge *outer) @@ -1537,7 +1537,7 @@ static void p_vert_harmonic_insert(PVert *v) e = p_wheel_edge_next(e); } while (e && (e != v->edge)); - if (e == NULL) { + if (e == nullptr) { npoints++; } @@ -1551,7 +1551,7 @@ static void p_vert_harmonic_insert(PVert *v) points[i][0] = e->next->vert->uv[0]; points[i][1] = e->next->vert->uv[1]; - if (nexte == NULL) { + if (nexte == nullptr) { i++; points[i][0] = e->next->next->vert->uv[0]; points[i][1] = e->next->next->vert->uv[1]; @@ -1895,8 +1895,8 @@ 
static float p_collapse_cost(PEdge *edge, PEdge *pair) int nshapeold = 0, nshapenew = 0; p_collapsing_verts(edge, pair, &oldv, &keepv); - oldf1 = (edge) ? edge->face : NULL; - oldf2 = (pair) ? pair->face : NULL; + oldf1 = (edge) ? edge->face : nullptr; + oldf2 = (pair) ? pair->face : nullptr; sub_v3_v3v3(edgevec, keepv->co, oldv->co); @@ -1917,7 +1917,7 @@ static float p_collapse_cost(PEdge *edge, PEdge *pair) # if 0 shapecost += dot_v3v3(co1, keepv->co); - if (p_wheel_edge_next(e) == NULL) { + if (p_wheel_edge_next(e) == nullptr) { shapecost += dot_v3v3(co2, keepv->co); } # endif @@ -1970,14 +1970,14 @@ static void p_collapse_cost_vertex(PVert *vert, float *r_mincost, PEdge **r_mine { PEdge *e, *enext, *pair; - *r_mine = NULL; + *r_mine = nullptr; *r_mincost = 0.0f; e = vert->edge; do { if (p_collapse_allowed(e, e->pair)) { float cost = p_collapse_cost(e, e->pair); - if ((*r_mine == NULL) || (cost < *r_mincost)) { + if ((*r_mine == nullptr) || (cost < *r_mincost)) { *r_mincost = cost; *r_mine = e; } @@ -1985,14 +1985,14 @@ static void p_collapse_cost_vertex(PVert *vert, float *r_mincost, PEdge **r_mine enext = p_wheel_edge_next(e); - if (enext == NULL) { + if (enext == nullptr) { /* The other boundary edge, where we only have the pair half-edge. */ pair = e->next->next; - if (p_collapse_allowed(NULL, pair)) { - float cost = p_collapse_cost(NULL, pair); + if (p_collapse_allowed(nullptr, pair)) { + float cost = p_collapse_cost(nullptr, pair); - if ((*r_mine == NULL) || (cost < *r_mincost)) { + if ((*r_mine == nullptr) || (cost < *r_mincost)) { *r_mincost = cost; *r_mine = pair; } @@ -2009,13 +2009,13 @@ static void p_chart_post_collapse_flush(PChart *chart, PEdge *collapsed) { /* Move to `collapsed_*`. 
*/ - PVert *v, *nextv = NULL, *verts = chart->verts; - PEdge *e, *nexte = NULL, *edges = chart->edges, *laste = NULL; - PFace *f, *nextf = NULL, *faces = chart->faces; + PVert *v, *nextv = nullptr, *verts = chart->verts; + PEdge *e, *nexte = nullptr, *edges = chart->edges, *laste = nullptr; + PFace *f, *nextf = nullptr, *faces = chart->faces; - chart->verts = chart->collapsed_verts = NULL; - chart->edges = chart->collapsed_edges = NULL; - chart->faces = chart->collapsed_faces = NULL; + chart->verts = chart->collapsed_verts = nullptr; + chart->edges = chart->collapsed_edges = nullptr; + chart->faces = chart->collapsed_faces = nullptr; chart->nverts = chart->nedges = chart->nfaces = 0; @@ -2079,9 +2079,9 @@ static void p_chart_post_split_flush(PChart *chart) { /* Move from `collapsed_*`. */ - PVert *v, *nextv = NULL; - PEdge *e, *nexte = NULL; - PFace *f, *nextf = NULL; + PVert *v, *nextv = nullptr; + PEdge *e, *nexte = nullptr; + PFace *f, *nextf = nullptr; for (v = chart->collapsed_verts; v; v = nextv) { nextv = v->nextlink; @@ -2104,9 +2104,9 @@ static void p_chart_post_split_flush(PChart *chart) chart->nfaces++; } - chart->collapsed_verts = NULL; - chart->collapsed_edges = NULL; - chart->collapsed_faces = NULL; + chart->collapsed_verts = nullptr; + chart->collapsed_edges = nullptr; + chart->collapsed_faces = nullptr; } static void p_chart_simplify_compute(PChart *chart) @@ -2118,7 +2118,7 @@ static void p_chart_simplify_compute(PChart *chart) Heap *heap = BLI_heap_new(); PVert *v, **wheelverts; - PEdge *collapsededges = NULL, *e; + PEdge *collapsededges = nullptr, *e; int nwheelverts, i, ncollapsed = 0; wheelverts = MEM_mallocN(sizeof(PVert *) * chart->nverts, "PChartWheelVerts"); @@ -2126,7 +2126,7 @@ static void p_chart_simplify_compute(PChart *chart) /* insert all potential collapses into heap */ for (v = chart->verts; v; v = v->nextlink) { float cost; - PEdge *e = NULL; + PEdge *e = nullptr; p_collapse_cost_vertex(v, &cost, &e); @@ -2134,12 +2134,12 @@ static 
void p_chart_simplify_compute(PChart *chart) v->u.heaplink = BLI_heap_insert(heap, cost, e); } else { - v->u.heaplink = NULL; + v->u.heaplink = nullptr; } } for (e = chart->edges; e; e = e->nextlink) { - e->u.nextcollapse = NULL; + e->u.nextcollapse = nullptr; } /* pop edge collapse out of heap one by one */ @@ -2159,12 +2159,12 @@ static void p_chart_simplify_compute(PChart *chart) if (edge->vert->u.heaplink != link) { edge->flag |= (PEDGE_COLLAPSE_EDGE | PEDGE_COLLAPSE_PAIR); - edge->next->vert->u.heaplink = NULL; + edge->next->vert->u.heaplink = nullptr; SWAP(PEdge *, edge, pair); } else { edge->flag |= PEDGE_COLLAPSE_EDGE; - edge->vert->u.heaplink = NULL; + edge->vert->u.heaplink = nullptr; } p_collapsing_verts(edge, pair, &oldv, &keepv); @@ -2177,7 +2177,7 @@ static void p_chart_simplify_compute(PChart *chart) wheelverts[nwheelverts++] = wheele->next->vert; nexte = p_wheel_edge_next(wheele); - if (nexte == NULL) { + if (nexte == nullptr) { wheelverts[nwheelverts++] = wheele->next->next->vert; } @@ -2189,13 +2189,13 @@ static void p_chart_simplify_compute(PChart *chart) for (i = 0; i < nwheelverts; i++) { float cost; - PEdge *collapse = NULL; + PEdge *collapse = nullptr; v = wheelverts[i]; if (v->u.heaplink) { BLI_heap_remove(heap, v->u.heaplink); - v->u.heaplink = NULL; + v->u.heaplink = nullptr; } p_collapse_cost_vertex(v, &cost, &collapse); @@ -2209,7 +2209,7 @@ static void p_chart_simplify_compute(PChart *chart) } MEM_freeN(wheelverts); - BLI_heap_free(heap, NULL); + BLI_heap_free(heap, nullptr); p_chart_post_collapse_flush(chart, collapsededges); } @@ -2260,14 +2260,14 @@ static void p_chart_simplify(PChart *chart) #define ABF_MAX_ITER 20 -typedef struct PAbfSystem { +using PAbfSystem = struct PAbfSystem { int ninterior, nfaces, nangles; float *alpha, *beta, *sine, *cosine, *weight; float *bAlpha, *bTriangle, *bInterior; float *lambdaTriangle, *lambdaPlanar, *lambdaLength; float (*J2dt)[3], *bstar, *dstar; float minangle, maxangle; -} PAbfSystem; +}; 
static void p_abf_setup_system(PAbfSystem *sys) { @@ -2847,8 +2847,8 @@ static void p_chart_pin_positions(PChart *chart, PVert **pin1, PVert **pin2) static bool p_chart_symmetry_pins(PChart *chart, PEdge *outer, PVert **pin1, PVert **pin2) { - PEdge *be, *lastbe = NULL, *maxe1 = NULL, *maxe2 = NULL, *be1, *be2; - PEdge *cure = NULL, *firste1 = NULL, *firste2 = NULL, *nextbe; + PEdge *be, *lastbe = nullptr, *maxe1 = nullptr, *maxe2 = nullptr, *be1, *be2; + PEdge *cure = nullptr, *firste1 = nullptr, *firste2 = nullptr, *nextbe; float maxlen = 0.0f, curlen = 0.0f, totlen = 0.0f, firstlen = 0.0f; float len1, len2; @@ -2887,7 +2887,7 @@ static bool p_chart_symmetry_pins(PChart *chart, PEdge *outer, PVert **pin1, PVe } curlen = 0.0f; - cure = NULL; + cure = nullptr; } lastbe = be; @@ -2962,8 +2962,8 @@ static void p_chart_extrema_verts(PChart *chart, PVert **pin1, PVert **pin2) minv[0] = minv[1] = minv[2] = 1e20; maxv[0] = maxv[1] = maxv[2] = -1e20; - minvert[0] = minvert[1] = minvert[2] = NULL; - maxvert[0] = maxvert[1] = maxvert[2] = NULL; + minvert[0] = minvert[1] = minvert[2] = nullptr; + maxvert[0] = maxvert[1] = maxvert[2] = nullptr; for (v = chart->verts; v; v = v->nextlink) { for (i = 0; i < 3; i++) { @@ -3027,7 +3027,7 @@ static void p_chart_lscm_begin(PChart *chart, bool live, bool abf) } if ((live && (!select || !deselect))) { - chart->u.lscm.context = NULL; + chart->u.lscm.context = nullptr; } else { #if 0 @@ -3245,13 +3245,13 @@ static void p_chart_lscm_transform_single_pin(PChart *chart) static void p_chart_lscm_end(PChart *chart) { EIG_linear_solver_delete(chart->u.lscm.context); - chart->u.lscm.context = NULL; + chart->u.lscm.context = nullptr; MEM_SAFE_FREE(chart->u.lscm.abf_alpha); - chart->u.lscm.pin1 = NULL; - chart->u.lscm.pin2 = NULL; - chart->u.lscm.single_pin = NULL; + chart->u.lscm.pin1 = nullptr; + chart->u.lscm.pin2 = nullptr; + chart->u.lscm.single_pin = nullptr; chart->u.lscm.single_pin_area = 0.0f; } @@ -3264,7 +3264,7 @@ static void 
p_stretch_pin_boundary(PChart *chart) PVert *v; for (v = chart->verts; v; v = v->nextlink) { - if (v->edge->pair == NULL) { + if (v->edge->pair == nullptr) { v->flag |= PVERT_PIN; } else { @@ -3495,8 +3495,8 @@ static bool p_chart_convex_hull(PChart *chart, PVert ***r_verts, int *r_nverts, *r_nverts = npoints; *r_right = ulen - 1; - MEM_SAFE_FREE(U); - MEM_SAFE_FREE(L); + MEM_freeN(U); + MEM_freeN(L); return true; } @@ -3644,8 +3644,8 @@ static float p_chart_minimum_area_angle(PChart *chart) minangle -= (float)M_PI_2; } - MEM_SAFE_FREE(angles); - MEM_SAFE_FREE(points); + MEM_freeN(angles); + MEM_freeN(points); return minangle; } @@ -3684,9 +3684,9 @@ static void p_chart_rotate_fit_aabb(PChart *chart) /* Exported */ -ParamHandle *GEO_uv_parametrizer_construct_begin(void) +ParamHandle *GEO_uv_parametrizer_construct_begin() { - ParamHandle *handle = (ParamHandle *)MEM_callocN(sizeof(*handle), "ParamHandle"); + ParamHandle *handle = new ParamHandle(); handle->construction_chart = (PChart *)MEM_callocN(sizeof(PChart), "PChart"); handle->state = PHANDLE_STATE_ALLOCATED; handle->arena = BLI_memarena_new(MEM_SIZE_OPTIMAL(1 << 16), "param construct arena"); @@ -3710,6 +3710,9 @@ void GEO_uv_parametrizer_aspect_ratio(ParamHandle *phandle, float aspx, float as void GEO_uv_parametrizer_delete(ParamHandle *phandle) { + if (!phandle) { + return; + } param_assert(ELEM(phandle->state, PHANDLE_STATE_ALLOCATED, PHANDLE_STATE_CONSTRUCTED)); for (int i = 0; i < phandle->ncharts; i++) { @@ -3719,8 +3722,8 @@ void GEO_uv_parametrizer_delete(ParamHandle *phandle) MEM_SAFE_FREE(phandle->charts); if (phandle->pin_hash) { - BLI_ghash_free(phandle->pin_hash, NULL, NULL); - phandle->pin_hash = NULL; + BLI_ghash_free(phandle->pin_hash, nullptr, nullptr); + phandle->pin_hash = nullptr; } MEM_SAFE_FREE(phandle->construction_chart); @@ -3731,19 +3734,21 @@ void GEO_uv_parametrizer_delete(ParamHandle *phandle) BLI_memarena_free(phandle->arena); BLI_memarena_free(phandle->polyfill_arena); - 
BLI_heap_free(phandle->polyfill_heap, NULL); + BLI_heap_free(phandle->polyfill_heap, nullptr); - BLI_rng_free(phandle->rng); - phandle->rng = NULL; + if (phandle->rng) { + BLI_rng_free(phandle->rng); + phandle->rng = nullptr; + } - MEM_freeN(phandle); + delete phandle; } -typedef struct GeoUVPinIndex { +using GeoUVPinIndex = struct GeoUVPinIndex { struct GeoUVPinIndex *next; float uv[2]; ParamKey reindex; -} GeoUVPinIndex; +}; /* Find a (mostly) unique ParamKey given a BMVert index and UV co-ordinates. * For each unique pinned UVs, return a unique ParamKey, starting with @@ -3783,7 +3788,7 @@ ParamKey GEO_uv_find_pin_index(ParamHandle *handle, const int bmvertindex, const static GeoUVPinIndex *new_geo_uv_pinindex(ParamHandle *handle, const float uv[2]) { GeoUVPinIndex *pinuv = (GeoUVPinIndex *)BLI_memarena_alloc(handle->arena, sizeof(*pinuv)); - pinuv->next = NULL; + pinuv->next = nullptr; copy_v2_v2(pinuv->uv, uv); pinuv->reindex = PARAM_KEY_MAX - (handle->unique_pin_count++); return pinuv; @@ -3860,7 +3865,7 @@ static void p_add_ngon(ParamHandle *handle, BLI_polyfill_beautify(projverts, nverts, tris, arena, heap); /* Add triangles. 
*/ - for (int j = 0; j < nfilltri; j++) { + for (uint j = 0; j < nfilltri; j++) { uint *tri = tris[j]; uint v0 = tri[0]; uint v1 = tri[1]; @@ -3887,7 +3892,7 @@ void GEO_uv_parametrizer_face_add(ParamHandle *phandle, const bool *pin, const bool *select) { - param_assert(phash_lookup(phandle->hash_faces, key) == NULL); + param_assert(phash_lookup(phandle->hash_faces, key) == nullptr); param_assert(phandle->state == PHANDLE_STATE_ALLOCATED); param_assert(ELEM(nverts, 3, 4)); @@ -3938,12 +3943,13 @@ void GEO_uv_parametrizer_construct_end(ParamHandle *phandle, phandle->ncharts = p_connect_pairs(phandle, topology_from_uvs); phandle->charts = p_split_charts(phandle, chart, phandle->ncharts); - MEM_SAFE_FREE(phandle->construction_chart); + MEM_freeN(phandle->construction_chart); + phandle->construction_chart = nullptr; phash_delete(phandle->hash_verts); phash_delete(phandle->hash_edges); phash_delete(phandle->hash_faces); - phandle->hash_verts = phandle->hash_edges = phandle->hash_faces = NULL; + phandle->hash_verts = phandle->hash_edges = phandle->hash_faces = nullptr; for (i = j = 0; i < phandle->ncharts; i++) { PVert *v; @@ -3952,8 +3958,8 @@ void GEO_uv_parametrizer_construct_end(ParamHandle *phandle, p_chart_boundaries(chart, &outer); if (!topology_from_uvs && chart->nboundaries == 0) { - MEM_SAFE_FREE(chart); - if (count_fail != NULL) { + MEM_freeN(chart); + if (count_fail != nullptr) { *count_fail += 1; } continue; @@ -4018,12 +4024,12 @@ void GEO_uv_parametrizer_lscm_solve(ParamHandle *phandle, int *count_changed, in } if (result) { - if (count_changed != NULL) { + if (count_changed != nullptr) { *count_changed += 1; } } else { - if (count_failed != NULL) { + if (count_failed != nullptr) { *count_failed += 1; } } @@ -4214,7 +4220,7 @@ void GEO_uv_parametrizer_pack(ParamHandle *handle, p_chart_uv_translate(chart, trans); p_chart_uv_scale(chart, scale); } - MEM_SAFE_FREE(boxarray); + MEM_freeN(boxarray); if (handle->aspx != handle->aspy) { 
GEO_uv_parametrizer_scale(handle, handle->aspx, handle->aspy); diff --git a/source/blender/gpencil_modifiers/intern/lineart/lineart_cpu.c b/source/blender/gpencil_modifiers/intern/lineart/lineart_cpu.c index 011c79025c4..bb638f8929d 100644 --- a/source/blender/gpencil_modifiers/intern/lineart/lineart_cpu.c +++ b/source/blender/gpencil_modifiers/intern/lineart/lineart_cpu.c @@ -5227,7 +5227,7 @@ static void lineart_gpencil_generate(LineartCache *cache, } if (shaodow_selection) { if (ec->shadow_mask_bits != LRT_SHADOW_MASK_UNDEFINED) { - /* TODO(Yiming): Give a behaviour option for how to display undefined shadow info. */ + /* TODO(Yiming): Give a behavior option for how to display undefined shadow info. */ if ((shaodow_selection == LRT_SHADOW_FILTER_ILLUMINATED && (!(ec->shadow_mask_bits & LRT_SHADOW_MASK_ILLUMINATED)))) { continue; diff --git a/source/blender/gpu/CMakeLists.txt b/source/blender/gpu/CMakeLists.txt index 3777501eca8..ff12ae5054a 100644 --- a/source/blender/gpu/CMakeLists.txt +++ b/source/blender/gpu/CMakeLists.txt @@ -323,6 +323,45 @@ set(GLSL_SRC shaders/common/gpu_shader_common_math_utils.glsl shaders/common/gpu_shader_common_mix_rgb.glsl + shaders/compositor/compositor_alpha_crop.glsl + shaders/compositor/compositor_box_mask.glsl + shaders/compositor/compositor_convert.glsl + shaders/compositor/compositor_ellipse_mask.glsl + shaders/compositor/compositor_flip.glsl + shaders/compositor/compositor_image_crop.glsl + shaders/compositor/compositor_projector_lens_distortion.glsl + shaders/compositor/compositor_realize_on_domain.glsl + shaders/compositor/compositor_screen_lens_distortion.glsl + shaders/compositor/compositor_set_alpha.glsl + shaders/compositor/compositor_split_viewer.glsl + + shaders/compositor/library/gpu_shader_compositor_alpha_over.glsl + shaders/compositor/library/gpu_shader_compositor_bright_contrast.glsl + shaders/compositor/library/gpu_shader_compositor_channel_matte.glsl + 
shaders/compositor/library/gpu_shader_compositor_chroma_matte.glsl + shaders/compositor/library/gpu_shader_compositor_color_balance.glsl + shaders/compositor/library/gpu_shader_compositor_color_correction.glsl + shaders/compositor/library/gpu_shader_compositor_color_matte.glsl + shaders/compositor/library/gpu_shader_compositor_color_spill.glsl + shaders/compositor/library/gpu_shader_compositor_color_to_luminance.glsl + shaders/compositor/library/gpu_shader_compositor_difference_matte.glsl + shaders/compositor/library/gpu_shader_compositor_distance_matte.glsl + shaders/compositor/library/gpu_shader_compositor_exposure.glsl + shaders/compositor/library/gpu_shader_compositor_gamma.glsl + shaders/compositor/library/gpu_shader_compositor_hue_correct.glsl + shaders/compositor/library/gpu_shader_compositor_hue_saturation_value.glsl + shaders/compositor/library/gpu_shader_compositor_invert.glsl + shaders/compositor/library/gpu_shader_compositor_luminance_matte.glsl + shaders/compositor/library/gpu_shader_compositor_main.glsl + shaders/compositor/library/gpu_shader_compositor_map_value.glsl + shaders/compositor/library/gpu_shader_compositor_normal.glsl + shaders/compositor/library/gpu_shader_compositor_posterize.glsl + shaders/compositor/library/gpu_shader_compositor_separate_combine.glsl + shaders/compositor/library/gpu_shader_compositor_set_alpha.glsl + shaders/compositor/library/gpu_shader_compositor_store_output.glsl + shaders/compositor/library/gpu_shader_compositor_texture_utilities.glsl + shaders/compositor/library/gpu_shader_compositor_type_conversion.glsl + shaders/material/gpu_shader_material_add_shader.glsl shaders/material/gpu_shader_material_ambient_occlusion.glsl shaders/material/gpu_shader_material_anisotropic.glsl @@ -527,6 +566,18 @@ set(SRC_SHADER_CREATE_INFOS shaders/infos/gpu_shader_simple_lighting_info.hh shaders/infos/gpu_shader_text_info.hh shaders/infos/gpu_srgb_to_framebuffer_space_info.hh + + shaders/compositor/infos/compositor_alpha_crop_info.hh + 
shaders/compositor/infos/compositor_box_mask_info.hh + shaders/compositor/infos/compositor_convert_info.hh + shaders/compositor/infos/compositor_ellipse_mask_info.hh + shaders/compositor/infos/compositor_flip_info.hh + shaders/compositor/infos/compositor_image_crop_info.hh + shaders/compositor/infos/compositor_projector_lens_distortion_info.hh + shaders/compositor/infos/compositor_realize_on_domain_info.hh + shaders/compositor/infos/compositor_screen_lens_distortion_info.hh + shaders/compositor/infos/compositor_set_alpha_info.hh + shaders/compositor/infos/compositor_split_viewer_info.hh ) set(SHADER_CREATE_INFOS_CONTENT "") diff --git a/source/blender/gpu/intern/gpu_shader_dependency.cc b/source/blender/gpu/intern/gpu_shader_dependency.cc index 953f656c2b5..961d3fcfe5b 100644 --- a/source/blender/gpu/intern/gpu_shader_dependency.cc +++ b/source/blender/gpu/intern/gpu_shader_dependency.cc @@ -660,7 +660,7 @@ struct GPUSource { std::string func_args = input_args; /* Workaround to support function call inside prints. We replace commas by a non control - * caracter $ in order to use simpler regex later. */ + * character `$` in order to use simpler regex later. */ bool string_scope = false; int func_scope = 0; for (char &c : func_args) { @@ -682,7 +682,7 @@ struct GPUSource { const bool print_as_variable = (input_args[0] != '"') && find_token(input_args, ',') == -1; if (print_as_variable) { - /* Variable or expression debuging. */ + /* Variable or expression debugging. */ std::string arg = input_args; /* Pad align most values. 
*/ while (arg.length() % 4 != 0) { diff --git a/source/blender/gpu/metal/mtl_query.mm b/source/blender/gpu/metal/mtl_query.mm index dfda0a8de7f..f574140531d 100644 --- a/source/blender/gpu/metal/mtl_query.mm +++ b/source/blender/gpu/metal/mtl_query.mm @@ -9,7 +9,7 @@ namespace blender::gpu { static const size_t VISIBILITY_COUNT_PER_BUFFER = 512; -/* defined in the documentation but not queryable programmatically: +/* Defined in the documentation but can't be queried programmatically: * https://developer.apple.com/documentation/metal/mtlvisibilityresultmode/mtlvisibilityresultmodeboolean?language=objc */ static const size_t VISIBILITY_RESULT_SIZE_IN_BYTES = 8; diff --git a/source/blender/gpu/shaders/common/gpu_shader_common_color_utils.glsl b/source/blender/gpu/shaders/common/gpu_shader_common_color_utils.glsl index fe89985ae7f..33108d3a989 100644 --- a/source/blender/gpu/shaders/common/gpu_shader_common_color_utils.glsl +++ b/source/blender/gpu/shaders/common/gpu_shader_common_color_utils.glsl @@ -140,6 +140,84 @@ void hsl_to_rgb(vec4 hsl, out vec4 outcol) outcol = vec4((nr - 0.5) * chroma + l, (ng - 0.5) * chroma + l, (nb - 0.5) * chroma + l, hsl.w); } +/* ** YCCA to RGBA ** */ + +void ycca_to_rgba_itu_601(vec4 ycca, out vec4 color) +{ + ycca.xyz *= 255.0; + ycca.xyz -= vec3(16.0, 128.0, 128.0); + color.rgb = mat3(vec3(1.164), 0.0, -0.392, 2.017, 1.596, -0.813, 0.0) * ycca.xyz; + color.rgb /= 255.0; + color.a = ycca.a; +} + +void ycca_to_rgba_itu_709(vec4 ycca, out vec4 color) +{ + ycca.xyz *= 255.0; + ycca.xyz -= vec3(16.0, 128.0, 128.0); + color.rgb = mat3(vec3(1.164), 0.0, -0.213, 2.115, 1.793, -0.534, 0.0) * ycca.xyz; + color.rgb /= 255.0; + color.a = ycca.a; +} + +void ycca_to_rgba_jpeg(vec4 ycca, out vec4 color) +{ + ycca.xyz *= 255.0; + color.rgb = mat3(vec3(1.0), 0.0, -0.34414, 1.772, 1.402, -0.71414, 0.0) * ycca.xyz; + color.rgb += vec3(-179.456, 135.45984, -226.816); + color.rgb /= 255.0; + color.a = ycca.a; +} + +/* ** RGBA to YCCA ** */ + +void 
rgba_to_ycca_itu_601(vec4 rgba, out vec4 ycca) +{ + rgba.rgb *= 255.0; + ycca.xyz = mat3(0.257, -0.148, 0.439, 0.504, -0.291, -0.368, 0.098, 0.439, -0.071) * rgba.rgb; + ycca.xyz += vec3(16.0, 128.0, 128.0); + ycca.xyz /= 255.0; + ycca.a = rgba.a; +} + +void rgba_to_ycca_itu_709(vec4 rgba, out vec4 ycca) +{ + rgba.rgb *= 255.0; + ycca.xyz = mat3(0.183, -0.101, 0.439, 0.614, -0.338, -0.399, 0.062, 0.439, -0.040) * rgba.rgb; + ycca.xyz += vec3(16.0, 128.0, 128.0); + ycca.xyz /= 255.0; + ycca.a = rgba.a; +} + +void rgba_to_ycca_jpeg(vec4 rgba, out vec4 ycca) +{ + rgba.rgb *= 255.0; + ycca.xyz = mat3(0.299, -0.16874, 0.5, 0.587, -0.33126, -0.41869, 0.114, 0.5, -0.08131) * + rgba.rgb; + ycca.xyz += vec3(0.0, 128.0, 128.0); + ycca.xyz /= 255.0; + ycca.a = rgba.a; +} + +/* ** YUVA to RGBA ** */ + +void yuva_to_rgba_itu_709(vec4 yuva, out vec4 color) +{ + color.rgb = mat3(vec3(1.0), 0.0, -0.21482, 2.12798, 1.28033, -0.38059, 0.0) * yuva.xyz; + color.a = yuva.a; +} + +/* ** RGBA to YUVA ** */ + +void rgba_to_yuva_itu_709(vec4 rgba, out vec4 yuva) +{ + yuva.xyz = mat3(0.2126, -0.09991, 0.615, 0.7152, -0.33609, -0.55861, 0.0722, 0.436, -0.05639) * + rgba.rgb; + yuva.a = rgba.a; +} + +/* ** Alpha Handling ** */ + void color_alpha_clear(vec4 color, out vec4 result) { result = vec4(color.rgb, 1.0); @@ -147,15 +225,50 @@ void color_alpha_clear(vec4 color, out vec4 result) void color_alpha_premultiply(vec4 color, out vec4 result) { - result = vec4(color.rgb * color.a, 1.0); + result = vec4(color.rgb * color.a, color.a); } void color_alpha_unpremultiply(vec4 color, out vec4 result) { if (color.a == 0.0 || color.a == 1.0) { - result = vec4(color.rgb, 1.0); + result = color; } else { - result = vec4(color.rgb / color.a, 1.0); + result = vec4(color.rgb / color.a, color.a); + } +} + +float linear_rgb_to_srgb(float color) +{ + if (color < 0.0031308) { + return (color < 0.0) ? 
0.0 : color * 12.92; + } + + return 1.055 * pow(color, 1.0 / 2.4) - 0.055; +} + +vec3 linear_rgb_to_srgb(vec3 color) +{ + return vec3( + linear_rgb_to_srgb(color.r), linear_rgb_to_srgb(color.g), linear_rgb_to_srgb(color.b)); +} + +float srgb_to_linear_rgb(float color) +{ + if (color < 0.04045) { + return (color < 0.0) ? 0.0 : color * (1.0 / 12.92); } + + return pow((color + 0.055) * (1.0 / 1.055), 2.4); +} + +vec3 srgb_to_linear_rgb(vec3 color) +{ + return vec3( + srgb_to_linear_rgb(color.r), srgb_to_linear_rgb(color.g), srgb_to_linear_rgb(color.b)); +} + +float get_luminance(vec3 color, vec3 luminance_coefficients) +{ + return dot(color, luminance_coefficients); } diff --git a/source/blender/gpu/shaders/common/gpu_shader_common_curves.glsl b/source/blender/gpu/shaders/common/gpu_shader_common_curves.glsl index 8948ed77557..db8e114ec7a 100644 --- a/source/blender/gpu/shaders/common/gpu_shader_common_curves.glsl +++ b/source/blender/gpu/shaders/common/gpu_shader_common_curves.glsl @@ -95,6 +95,81 @@ void curves_combined_only(float factor, result = mix(color, result, factor); } +/* Contrary to standard tone curve implementations, the film-like implementation tries to preserve + * the hue of the colors as much as possible. To understand why this might be a problem, consider + * the violet color (0.5, 0.0, 1.0). If this color was to be evaluated at a power curve x^4, the + * color will be blue (0.0625, 0.0, 1.0). So the color changes and not just its luminosity, which + * is what film-like tone curves tries to avoid. + * + * First, the channels with the lowest and highest values are identified and evaluated at the + * curve. Then, the third channel---the median---is computed while maintaining the original hue of + * the color. To do that, we look at the equation for deriving the hue from RGB values. 
Assuming + * the maximum, minimum, and median channels are known, and ignoring the 1/3 period offset of the + * hue, the equation is: + * + * hue = (median - min) / (max - min) [1] + * + * Since we have the new values for the minimum and maximum after evaluating at the curve, we also + * have: + * + * hue = (new_median - new_min) / (new_max - new_min) [2] + * + * Since we want the hue to be equivalent, by equating [1] and [2] and rearranging: + * + * (new_median - new_min) / (new_max - new_min) = (median - min) / (max - min) + * new_median - new_min = (new_max - new_min) * (median - min) / (max - min) + * new_median = new_min + (new_max - new_min) * (median - min) / (max - min) + * new_median = new_min + (median - min) * ((new_max - new_min) / (max - min)) [QED] + * + * Which gives us the median color that preserves the hue. More intuitively, the median is computed + * such that the change in the distance from the median to the minimum is proportional to the + * change in the distance from the minimum to the maximum. Finally, each of the new minimum, + * maximum, and median values are written to the color channel that they were originally extracted + * from. */ +void curves_film_like(float factor, + vec4 color, + vec4 black_level, + vec4 white_level, + sampler1DArray curve_map, + const float layer, + float range_minimum, + float range_divider, + float start_slope, + float end_slope, + out vec4 result) +{ + vec4 balanced = white_balance(color, black_level, white_level); + + /* Find the maximum, minimum, and median of the color channels. */ + float minimum = min(balanced.r, min(balanced.g, balanced.b)); + float maximum = max(balanced.r, max(balanced.g, balanced.b)); + float median = max(min(balanced.r, balanced.g), min(balanced.b, max(balanced.r, balanced.g))); + + /* Evaluate alpha curve map at the maximum and minimum channels. The alpha curve is the Combined + * curve in the UI. 
*/ + float min_parameter = NORMALIZE_PARAMETER(minimum, range_minimum, range_divider); + float max_parameter = NORMALIZE_PARAMETER(maximum, range_minimum, range_divider); + float new_min = texture(curve_map, vec2(min_parameter, layer)).a; + float new_max = texture(curve_map, vec2(max_parameter, layer)).a; + + /* Then, extrapolate if needed. */ + new_min = extrapolate_if_needed(min_parameter, new_min, start_slope, end_slope); + new_max = extrapolate_if_needed(max_parameter, new_max, start_slope, end_slope); + + /* Compute the new median using the ratio between the new and the original range. */ + float scaling_ratio = (new_max - new_min) / (maximum - minimum); + float new_median = new_min + (median - minimum) * scaling_ratio; + + /* Write each value to its original channel. */ + bvec3 channel_is_min = equal(balanced.rgb, vec3(minimum)); + vec3 median_or_min = mix(vec3(new_median), vec3(new_min), channel_is_min); + bvec3 channel_is_max = equal(balanced.rgb, vec3(maximum)); + result.rgb = mix(median_or_min, vec3(new_max), channel_is_max); + result.a = color.a; + + result = mix(color, result, clamp(factor, 0.0, 1.0)); +} + void curves_vector(vec3 vector, sampler1DArray curve_map, const float layer, diff --git a/source/blender/gpu/shaders/common/gpu_shader_common_math_utils.glsl b/source/blender/gpu/shaders/common/gpu_shader_common_math_utils.glsl index 124654963fd..1ba22b4c5da 100644 --- a/source/blender/gpu/shaders/common/gpu_shader_common_math_utils.glsl +++ b/source/blender/gpu/shaders/common/gpu_shader_common_math_utils.glsl @@ -34,6 +34,17 @@ float compatible_pow(float x, float y) return pow(x, y); } +/* A version of pow that returns a fallback value if the computation is undefined. From the spec: + * The result is undefined if x < 0 or if x = 0 and y is less than or equal 0. 
*/ +float fallback_pow(float x, float y, float fallback) +{ + if (x < 0.0 || (x == 0.0 && y <= 0.0)) { + return fallback; + } + + return pow(x, y); +} + float wrap(float a, float b, float c) { float range = b - c; @@ -114,8 +125,24 @@ void vector_copy(vec3 normal, out vec3 outnormal) outnormal = normal; } +vec3 fallback_pow(vec3 a, float b, vec3 fallback) +{ + return vec3(fallback_pow(a.x, b, fallback.x), + fallback_pow(a.y, b, fallback.y), + fallback_pow(a.z, b, fallback.z)); +} + /* Matirx Math */ +/* Return a 2D rotation matrix with the angle that the input 2D vector makes with the x axis. */ +mat2 vector_to_rotation_matrix(vec2 vector) +{ + vec2 normalized_vector = normalize(vector); + float cos_angle = normalized_vector.x; + float sin_angle = normalized_vector.y; + return mat2(cos_angle, sin_angle, -sin_angle, cos_angle); +} + mat3 euler_to_mat3(vec3 euler) { float cx = cos(euler.x); diff --git a/source/blender/gpu/shaders/common/gpu_shader_common_mix_rgb.glsl b/source/blender/gpu/shaders/common/gpu_shader_common_mix_rgb.glsl index f9652f1150b..39f3c722dd2 100644 --- a/source/blender/gpu/shaders/common/gpu_shader_common_mix_rgb.glsl +++ b/source/blender/gpu/shaders/common/gpu_shader_common_mix_rgb.glsl @@ -2,28 +2,24 @@ void mix_blend(float fac, vec4 col1, vec4 col2, out vec4 outcol) { - fac = clamp(fac, 0.0, 1.0); outcol = mix(col1, col2, fac); outcol.a = col1.a; } void mix_add(float fac, vec4 col1, vec4 col2, out vec4 outcol) { - fac = clamp(fac, 0.0, 1.0); outcol = mix(col1, col1 + col2, fac); outcol.a = col1.a; } void mix_mult(float fac, vec4 col1, vec4 col2, out vec4 outcol) { - fac = clamp(fac, 0.0, 1.0); outcol = mix(col1, col1 * col2, fac); outcol.a = col1.a; } void mix_screen(float fac, vec4 col1, vec4 col2, out vec4 outcol) { - fac = clamp(fac, 0.0, 1.0); float facm = 1.0 - fac; outcol = vec4(1.0) - (vec4(facm) + fac * (vec4(1.0) - col2)) * (vec4(1.0) - col1); @@ -32,7 +28,6 @@ void mix_screen(float fac, vec4 col1, vec4 col2, out vec4 outcol) void 
mix_overlay(float fac, vec4 col1, vec4 col2, out vec4 outcol) { - fac = clamp(fac, 0.0, 1.0); float facm = 1.0 - fac; outcol = col1; @@ -61,14 +56,30 @@ void mix_overlay(float fac, vec4 col1, vec4 col2, out vec4 outcol) void mix_sub(float fac, vec4 col1, vec4 col2, out vec4 outcol) { - fac = clamp(fac, 0.0, 1.0); outcol = mix(col1, col1 - col2, fac); outcol.a = col1.a; } void mix_div(float fac, vec4 col1, vec4 col2, out vec4 outcol) { - fac = clamp(fac, 0.0, 1.0); + float facm = 1.0 - fac; + + outcol = vec4(vec3(0.0), col1.a); + + if (col2.r != 0.0) { + outcol.r = facm * col1.r + fac * col1.r / col2.r; + } + if (col2.g != 0.0) { + outcol.g = facm * col1.g + fac * col1.g / col2.g; + } + if (col2.b != 0.0) { + outcol.b = facm * col1.b + fac * col1.b / col2.b; + } +} + +/* A variant of mix_div that fallback to the first color upon zero division. */ +void mix_div_fallback(float fac, vec4 col1, vec4 col2, out vec4 outcol) +{ float facm = 1.0 - fac; outcol = col1; @@ -86,28 +97,24 @@ void mix_div(float fac, vec4 col1, vec4 col2, out vec4 outcol) void mix_diff(float fac, vec4 col1, vec4 col2, out vec4 outcol) { - fac = clamp(fac, 0.0, 1.0); outcol = mix(col1, abs(col1 - col2), fac); outcol.a = col1.a; } void mix_dark(float fac, vec4 col1, vec4 col2, out vec4 outcol) { - fac = clamp(fac, 0.0, 1.0); outcol.rgb = mix(col1.rgb, min(col1.rgb, col2.rgb), fac); outcol.a = col1.a; } void mix_light(float fac, vec4 col1, vec4 col2, out vec4 outcol) { - fac = clamp(fac, 0.0, 1.0); outcol.rgb = mix(col1.rgb, max(col1.rgb, col2.rgb), fac); outcol.a = col1.a; } void mix_dodge(float fac, vec4 col1, vec4 col2, out vec4 outcol) { - fac = clamp(fac, 0.0, 1.0); outcol = col1; if (outcol.r != 0.0) { @@ -150,7 +157,6 @@ void mix_dodge(float fac, vec4 col1, vec4 col2, out vec4 outcol) void mix_burn(float fac, vec4 col1, vec4 col2, out vec4 outcol) { - fac = clamp(fac, 0.0, 1.0); float tmp, facm = 1.0 - fac; outcol = col1; @@ -200,7 +206,6 @@ void mix_burn(float fac, vec4 col1, vec4 col2, out 
vec4 outcol) void mix_hue(float fac, vec4 col1, vec4 col2, out vec4 outcol) { - fac = clamp(fac, 0.0, 1.0); float facm = 1.0 - fac; outcol = col1; @@ -220,7 +225,6 @@ void mix_hue(float fac, vec4 col1, vec4 col2, out vec4 outcol) void mix_sat(float fac, vec4 col1, vec4 col2, out vec4 outcol) { - fac = clamp(fac, 0.0, 1.0); float facm = 1.0 - fac; outcol = col1; @@ -238,7 +242,6 @@ void mix_sat(float fac, vec4 col1, vec4 col2, out vec4 outcol) void mix_val(float fac, vec4 col1, vec4 col2, out vec4 outcol) { - fac = clamp(fac, 0.0, 1.0); float facm = 1.0 - fac; vec4 hsv, hsv2; @@ -251,7 +254,6 @@ void mix_val(float fac, vec4 col1, vec4 col2, out vec4 outcol) void mix_color(float fac, vec4 col1, vec4 col2, out vec4 outcol) { - fac = clamp(fac, 0.0, 1.0); float facm = 1.0 - fac; outcol = col1; @@ -272,22 +274,26 @@ void mix_color(float fac, vec4 col1, vec4 col2, out vec4 outcol) void mix_soft(float fac, vec4 col1, vec4 col2, out vec4 outcol) { - fac = clamp(fac, 0.0, 1.0); float facm = 1.0 - fac; vec4 one = vec4(1.0); vec4 scr = one - (one - col2) * (one - col1); outcol = facm * col1 + fac * ((one - col1) * col2 * col1 + col1 * scr); + outcol.a = col1.a; } void mix_linear(float fac, vec4 col1, vec4 col2, out vec4 outcol) { - fac = clamp(fac, 0.0, 1.0); - outcol = col1 + fac * (2.0 * (col2 - vec4(0.5))); + outcol.a = col1.a; } -void clamp_color(vec3 vec, vec3 min, vec3 max, out vec3 out_vec) +void clamp_color(vec4 vec, const vec4 min, const vec4 max, out vec4 out_vec) { out_vec = clamp(vec, min, max); } + +void multiply_by_alpha(float factor, vec4 color, out float result) +{ + result = factor * color.a; +} diff --git a/source/blender/gpu/shaders/compositor/compositor_alpha_crop.glsl b/source/blender/gpu/shaders/compositor/compositor_alpha_crop.glsl new file mode 100644 index 00000000000..d55c8efd4c6 --- /dev/null +++ b/source/blender/gpu/shaders/compositor/compositor_alpha_crop.glsl @@ -0,0 +1,11 @@ +#pragma BLENDER_REQUIRE(gpu_shader_compositor_texture_utilities.glsl) 
+ +void main() +{ + ivec2 texel = ivec2(gl_GlobalInvocationID.xy); + /* The lower bound is inclusive and upper bound is exclusive. */ + bool is_inside = all(greaterThanEqual(texel, lower_bound)) && all(lessThan(texel, upper_bound)); + /* Write the pixel color if it is inside the cropping region, otherwise, write zero. */ + vec4 color = is_inside ? texture_load(input_tx, texel) : vec4(0.0); + imageStore(output_img, texel, color); +} diff --git a/source/blender/gpu/shaders/compositor/compositor_box_mask.glsl b/source/blender/gpu/shaders/compositor/compositor_box_mask.glsl new file mode 100644 index 00000000000..fad23f28fde --- /dev/null +++ b/source/blender/gpu/shaders/compositor/compositor_box_mask.glsl @@ -0,0 +1,27 @@ +#pragma BLENDER_REQUIRE(gpu_shader_compositor_texture_utilities.glsl) + +void main() +{ + ivec2 texel = ivec2(gl_GlobalInvocationID.xy); + + vec2 uv = vec2(texel) / vec2(domain_size - ivec2(1)); + uv -= location; + uv.y *= float(domain_size.y) / float(domain_size.x); + uv = mat2(cos_angle, -sin_angle, sin_angle, cos_angle) * uv; + bool is_inside = all(lessThan(abs(uv), size)); + + float base_mask_value = texture_load(base_mask_tx, texel).x; + float value = texture_load(mask_value_tx, texel).x; + +#if defined(CMP_NODE_MASKTYPE_ADD) + float output_mask_value = is_inside ? max(base_mask_value, value) : base_mask_value; +#elif defined(CMP_NODE_MASKTYPE_SUBTRACT) + float output_mask_value = is_inside ? clamp(base_mask_value - value, 0.0, 1.0) : base_mask_value; +#elif defined(CMP_NODE_MASKTYPE_MULTIPLY) + float output_mask_value = is_inside ? base_mask_value * value : 0.0; +#elif defined(CMP_NODE_MASKTYPE_NOT) + float output_mask_value = is_inside ? (base_mask_value > 0.0 ? 
0.0 : value) : base_mask_value; +#endif + + imageStore(output_mask_img, texel, vec4(output_mask_value)); +} diff --git a/source/blender/gpu/shaders/compositor/compositor_convert.glsl b/source/blender/gpu/shaders/compositor/compositor_convert.glsl new file mode 100644 index 00000000000..044fb057ca5 --- /dev/null +++ b/source/blender/gpu/shaders/compositor/compositor_convert.glsl @@ -0,0 +1,8 @@ +#pragma BLENDER_REQUIRE(gpu_shader_compositor_texture_utilities.glsl) + +void main() +{ + ivec2 texel = ivec2(gl_GlobalInvocationID.xy); + vec4 value = texture_load(input_tx, texel); + imageStore(output_img, texel, CONVERT_EXPRESSION(value)); +} diff --git a/source/blender/gpu/shaders/compositor/compositor_ellipse_mask.glsl b/source/blender/gpu/shaders/compositor/compositor_ellipse_mask.glsl new file mode 100644 index 00000000000..28f725067e0 --- /dev/null +++ b/source/blender/gpu/shaders/compositor/compositor_ellipse_mask.glsl @@ -0,0 +1,27 @@ +#pragma BLENDER_REQUIRE(gpu_shader_compositor_texture_utilities.glsl) + +void main() +{ + ivec2 texel = ivec2(gl_GlobalInvocationID.xy); + + vec2 uv = vec2(texel) / vec2(domain_size - ivec2(1)); + uv -= location; + uv.y *= float(domain_size.y) / float(domain_size.x); + uv = mat2(cos_angle, -sin_angle, sin_angle, cos_angle) * uv; + bool is_inside = length(uv / radius) < 1.0; + + float base_mask_value = texture_load(base_mask_tx, texel).x; + float value = texture_load(mask_value_tx, texel).x; + +#if defined(CMP_NODE_MASKTYPE_ADD) + float output_mask_value = is_inside ? max(base_mask_value, value) : base_mask_value; +#elif defined(CMP_NODE_MASKTYPE_SUBTRACT) + float output_mask_value = is_inside ? clamp(base_mask_value - value, 0.0, 1.0) : base_mask_value; +#elif defined(CMP_NODE_MASKTYPE_MULTIPLY) + float output_mask_value = is_inside ? base_mask_value * value : 0.0; +#elif defined(CMP_NODE_MASKTYPE_NOT) + float output_mask_value = is_inside ? (base_mask_value > 0.0 ? 
0.0 : value) : base_mask_value; +#endif + + imageStore(output_mask_img, texel, vec4(output_mask_value)); +} diff --git a/source/blender/gpu/shaders/compositor/compositor_flip.glsl b/source/blender/gpu/shaders/compositor/compositor_flip.glsl new file mode 100644 index 00000000000..919c454ee63 --- /dev/null +++ b/source/blender/gpu/shaders/compositor/compositor_flip.glsl @@ -0,0 +1,15 @@ +#pragma BLENDER_REQUIRE(gpu_shader_compositor_texture_utilities.glsl) + +void main() +{ + ivec2 texel = ivec2(gl_GlobalInvocationID.xy); + ivec2 size = texture_size(input_tx); + ivec2 flipped_texel = texel; + if (flip_x) { + flipped_texel.x = size.x - texel.x - 1; + } + if (flip_y) { + flipped_texel.y = size.y - texel.y - 1; + } + imageStore(output_img, texel, texture_load(input_tx, flipped_texel)); +} diff --git a/source/blender/gpu/shaders/compositor/compositor_image_crop.glsl b/source/blender/gpu/shaders/compositor/compositor_image_crop.glsl new file mode 100644 index 00000000000..f20e033dee4 --- /dev/null +++ b/source/blender/gpu/shaders/compositor/compositor_image_crop.glsl @@ -0,0 +1,7 @@ +#pragma BLENDER_REQUIRE(gpu_shader_compositor_texture_utilities.glsl) + +void main() +{ + ivec2 texel = ivec2(gl_GlobalInvocationID.xy); + imageStore(output_img, texel, texture_load(input_tx, texel + lower_bound)); +} diff --git a/source/blender/gpu/shaders/compositor/compositor_projector_lens_distortion.glsl b/source/blender/gpu/shaders/compositor/compositor_projector_lens_distortion.glsl new file mode 100644 index 00000000000..cf961b20b34 --- /dev/null +++ b/source/blender/gpu/shaders/compositor/compositor_projector_lens_distortion.glsl @@ -0,0 +1,16 @@ +#pragma BLENDER_REQUIRE(gpu_shader_compositor_texture_utilities.glsl) + +void main() +{ + ivec2 texel = ivec2(gl_GlobalInvocationID.xy); + + /* Get the normalized coordinates of the pixel centers. 
*/ + vec2 normalized_texel = (vec2(texel) + vec2(0.5)) / vec2(texture_size(input_tx)); + + /* Sample the red and blue channels shifted by the dispersion amount. */ + const float red = texture(input_tx, normalized_texel + vec2(dispersion, 0.0)).r; + const float green = texture_load(input_tx, texel).g; + const float blue = texture(input_tx, normalized_texel - vec2(dispersion, 0.0)).b; + + imageStore(output_img, texel, vec4(red, green, blue, 1.0)); +} diff --git a/source/blender/gpu/shaders/compositor/compositor_realize_on_domain.glsl b/source/blender/gpu/shaders/compositor/compositor_realize_on_domain.glsl new file mode 100644 index 00000000000..b2961d07219 --- /dev/null +++ b/source/blender/gpu/shaders/compositor/compositor_realize_on_domain.glsl @@ -0,0 +1,25 @@ +#pragma BLENDER_REQUIRE(gpu_shader_compositor_texture_utilities.glsl) + +void main() +{ + ivec2 texel = ivec2(gl_GlobalInvocationID.xy); + + /* First, transform the input image by transforming the domain coordinates with the inverse of + * input image's transformation. The inverse transformation is an affine matrix and thus the + * coordinates should be in homogeneous coordinates. */ + vec2 coordinates = (mat3(inverse_transformation) * vec3(texel, 1.0)).xy; + + /* Since an input image with an identity transformation is supposed to be centered in the domain, + * we subtract the offset between the lower left corners of the input image and the domain, which + * is half the difference between their sizes, because the difference in size is on both sides of + * the centered image. */ + ivec2 domain_size = imageSize(domain_img); + ivec2 input_size = texture_size(input_tx); + vec2 offset = (domain_size - input_size) / 2.0; + + /* Subtract the offset and divide by the input image size to get the relevant coordinates into + * the sampler's expected [0, 1] range. 
*/ + vec2 normalized_coordinates = (coordinates - offset) / input_size; + + imageStore(domain_img, texel, texture(input_tx, normalized_coordinates)); +} diff --git a/source/blender/gpu/shaders/compositor/compositor_screen_lens_distortion.glsl b/source/blender/gpu/shaders/compositor/compositor_screen_lens_distortion.glsl new file mode 100644 index 00000000000..dc572ea5aaf --- /dev/null +++ b/source/blender/gpu/shaders/compositor/compositor_screen_lens_distortion.glsl @@ -0,0 +1,151 @@ +#pragma BLENDER_REQUIRE(gpu_shader_common_hash.glsl) +#pragma BLENDER_REQUIRE(gpu_shader_compositor_texture_utilities.glsl) + +/* A model that approximates lens distortion parameterized by a distortion parameter and dependent + * on the squared distance to the center of the image. The distorted pixel is then computed as the + * scalar multiplication of the pixel coordinates with the value returned by this model. See the + * compute_distorted_uv function for more details. */ +float compute_distortion_scale(float distortion, float distance_squared) +{ + return 1.0 / (1.0 + sqrt(max(0.0, 1.0 - distortion * distance_squared))); +} + +/* A vectorized version of compute_distortion_scale that is applied on the chromatic distortion + * parameters passed to the shader. */ +vec3 compute_chromatic_distortion_scale(float distance_squared) +{ + return 1.0 / (1.0 + sqrt(max(vec3(0.0), 1.0 - chromatic_distortion * distance_squared))); +} + +/* Compute the image coordinates after distortion by the given distortion scale computed by the + * compute_distortion_scale function. Note that the function expects centered normalized UV + * coordinates but outputs non-centered image coordinates. */ +vec2 compute_distorted_uv(vec2 uv, float scale) +{ + return (uv * scale + 0.5) * texture_size(input_tx) - 0.5; +} + +/* Compute the number of integration steps that should be used to approximate the distorted pixel + * using a heuristic, see the compute_number_of_steps function for more details. 
The numbers of + * steps is proportional to the number of pixels spanned by the distortion amount. For jitter + * distortion, the square root of the distortion amount plus 1 is used with a minimum of 2 steps. + * For non-jitter distortion, the distortion amount plus 1 is used as the number of steps */ +int compute_number_of_integration_steps_heuristic(float distortion) +{ +#if defined(JITTER) + return distortion < 4.0 ? 2 : int(sqrt(distortion + 1.0)); +#else + return int(distortion + 1.0); +#endif +} + +/* Compute the number of integration steps that should be used to compute each channel of the + * distorted pixel. Each of the channels are distorted by their respective chromatic distortion + * amount, then the amount of distortion between each two consecutive channels is computed, this + * amount is then used to heuristically infer the number of needed integration steps, see the + * integrate_distortion function for more information. */ +ivec3 compute_number_of_integration_steps(vec2 uv, float distance_squared) +{ + /* Distort each channel by its respective chromatic distortion amount. */ + vec3 distortion_scale = compute_chromatic_distortion_scale(distance_squared); + vec2 distorted_uv_red = compute_distorted_uv(uv, distortion_scale.r); + vec2 distorted_uv_green = compute_distorted_uv(uv, distortion_scale.g); + vec2 distorted_uv_blue = compute_distorted_uv(uv, distortion_scale.b); + + /* Infer the number of needed integration steps to compute the distorted red channel starting + * from the green channel. */ + float distortion_red = distance(distorted_uv_red, distorted_uv_green); + int steps_red = compute_number_of_integration_steps_heuristic(distortion_red); + + /* Infer the number of needed integration steps to compute the distorted blue channel starting + * from the green channel. 
*/ + float distortion_blue = distance(distorted_uv_green, distorted_uv_blue); + int steps_blue = compute_number_of_integration_steps_heuristic(distortion_blue); + + /* The number of integration steps used to compute the green channel is the sum of both the red + * and the blue channel steps because it is computed once with each of them. */ + return ivec3(steps_red, steps_red + steps_blue, steps_blue); +} + +/* Returns a random jitter amount, which is essentially a random value in the [0, 1] range. If + * jitter is not enabled, return a constant 0.5 value instead. */ +float get_jitter(int seed) +{ +#if defined(JITTER) + return hash_uint3_to_float(gl_GlobalInvocationID.x, gl_GlobalInvocationID.y, seed); +#else + return 0.5; +#endif +} + +/* Each color channel may have a different distortion with the guarantee that the red will have the + * lowest distortion while the blue will have the highest one. If each channel is distorted + * independently, the image will look disintegrated, with each channel seemingly merely shifted. + * Consequently, the distorted pixels needs to be computed by integrating along the path of change + * of distortion starting from one channel to another. For instance, to compute the distorted red + * from the distorted green, we accumulate the color of the distorted pixel starting from the + * distortion of the red, taking small steps until we reach the distortion of the green. The pixel + * color is weighted such that it is maximum at the start distortion and zero at the end distortion + * in an arithmetic progression. The integration steps can be augmented with random values to + * simulate lens jitter. Finally, it should be noted that this function integrates both the start + * and end channels in reverse directions for more efficient computation. 
*/ +vec3 integrate_distortion(int start, int end, float distance_squared, vec2 uv, int steps) +{ + vec3 accumulated_color = vec3(0.0); + float distortion_amount = chromatic_distortion[end] - chromatic_distortion[start]; + for (int i = 0; i < steps; i++) { + /* The increment will be in the [0, 1) range across iterations. */ + float increment = (i + get_jitter(i)) / steps; + float distortion = chromatic_distortion[start] + increment * distortion_amount; + float distortion_scale = compute_distortion_scale(distortion, distance_squared); + + /* Sample the color at the distorted coordinates and accumulate it weighted by the increment + * value for both the start and end channels. */ + vec2 distorted_uv = compute_distorted_uv(uv, distortion_scale); + vec4 color = texture(input_tx, distorted_uv / texture_size(input_tx)); + accumulated_color[start] += (1.0 - increment) * color[start]; + accumulated_color[end] += increment * color[end]; + } + return accumulated_color; +} + +void main() +{ + ivec2 texel = ivec2(gl_GlobalInvocationID.xy); + + /* Compute the UV image coordinates in the range [-1, 1] as well as the squared distance to the + * center of the image, which is at (0, 0) in the UV coordinates. */ + vec2 center = texture_size(input_tx) / 2.0; + vec2 uv = scale * (texel + 0.5 - center) / center; + float distance_squared = dot(uv, uv); + + /* If any of the color channels will get distorted outside of the screen beyond what is possible, + * write a zero transparent color and return. */ + if (any(greaterThan(chromatic_distortion * distance_squared, vec3(1.0)))) { + imageStore(output_img, texel, vec4(0.0)); + return; + } + + /* Compute the number of integration steps that should be used to compute each channel of the + * distorted pixel. */ + ivec3 number_of_steps = compute_number_of_integration_steps(uv, distance_squared); + + /* Integrate the distortion of the red and green, then the green and blue channels. 
That means + * the green will be integrated twice, but this is accounted for in the number of steps which the + * color will later be divided by. See the compute_number_of_integration_steps function for more + * details. */ + vec3 color = vec3(0.0); + color += integrate_distortion(0, 1, distance_squared, uv, number_of_steps.r); + color += integrate_distortion(1, 2, distance_squared, uv, number_of_steps.b); + + /* The integration above performed weighted accumulation, and thus the color needs to be divided + * by the sum of the weights. Assuming no jitter, the weights are generated as an arithmetic + * progression starting from (0.5 / n) to ((n - 0.5) / n) for n terms. The sum of an arithmetic + * progression can be computed as (n * (start + end) / 2), which when subsisting the start and + * end reduces to (n / 2). So the color should be multiplied by 2 / n. The jitter sequence + * approximately sums to the same value because it is a uniform random value whose mean value is + * 0.5, so the expression doesn't change regardless of jitter. 
*/ + color *= 2.0 / vec3(number_of_steps); + + imageStore(output_img, texel, vec4(color, 1.0)); +} diff --git a/source/blender/gpu/shaders/compositor/compositor_set_alpha.glsl b/source/blender/gpu/shaders/compositor/compositor_set_alpha.glsl new file mode 100644 index 00000000000..7dd40581790 --- /dev/null +++ b/source/blender/gpu/shaders/compositor/compositor_set_alpha.glsl @@ -0,0 +1,8 @@ +#pragma BLENDER_REQUIRE(gpu_shader_compositor_texture_utilities.glsl) + +void main() +{ + ivec2 texel = ivec2(gl_GlobalInvocationID.xy); + vec4 color = vec4(texture_load(image_tx, texel).rgb, texture_load(alpha_tx, texel).x); + imageStore(output_img, texel, color); +} diff --git a/source/blender/gpu/shaders/compositor/compositor_split_viewer.glsl b/source/blender/gpu/shaders/compositor/compositor_split_viewer.glsl new file mode 100644 index 00000000000..866b9045da2 --- /dev/null +++ b/source/blender/gpu/shaders/compositor/compositor_split_viewer.glsl @@ -0,0 +1,14 @@ +#pragma BLENDER_REQUIRE(gpu_shader_compositor_texture_utilities.glsl) + +void main() +{ + ivec2 texel = ivec2(gl_GlobalInvocationID.xy); +#if defined(SPLIT_HORIZONTAL) + bool condition = (view_size.x * split_ratio) < texel.x; +#elif defined(SPLIT_VERTICAL) + bool condition = (view_size.y * split_ratio) < texel.y; +#endif + vec4 color = condition ? 
texture_load(first_image_tx, texel) : + texture_load(second_image_tx, texel); + imageStore(output_img, texel, color); +} diff --git a/source/blender/gpu/shaders/compositor/infos/compositor_alpha_crop_info.hh b/source/blender/gpu/shaders/compositor/infos/compositor_alpha_crop_info.hh new file mode 100644 index 00000000000..11f2f329cd8 --- /dev/null +++ b/source/blender/gpu/shaders/compositor/infos/compositor_alpha_crop_info.hh @@ -0,0 +1,12 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ + +#include "gpu_shader_create_info.hh" + +GPU_SHADER_CREATE_INFO(compositor_alpha_crop) + .local_group_size(16, 16) + .push_constant(Type::IVEC2, "lower_bound") + .push_constant(Type::IVEC2, "upper_bound") + .sampler(0, ImageType::FLOAT_2D, "input_tx") + .image(0, GPU_RGBA16F, Qualifier::WRITE, ImageType::FLOAT_2D, "output_img") + .compute_source("compositor_alpha_crop.glsl") + .do_static_compilation(true); diff --git a/source/blender/gpu/shaders/compositor/infos/compositor_box_mask_info.hh b/source/blender/gpu/shaders/compositor/infos/compositor_box_mask_info.hh new file mode 100644 index 00000000000..ecb253bbab1 --- /dev/null +++ b/source/blender/gpu/shaders/compositor/infos/compositor_box_mask_info.hh @@ -0,0 +1,35 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ + +#include "gpu_shader_create_info.hh" + +GPU_SHADER_CREATE_INFO(compositor_box_mask_shared) + .local_group_size(16, 16) + .push_constant(Type::IVEC2, "domain_size") + .push_constant(Type::VEC2, "location") + .push_constant(Type::VEC2, "size") + .push_constant(Type::FLOAT, "cos_angle") + .push_constant(Type::FLOAT, "sin_angle") + .sampler(0, ImageType::FLOAT_2D, "base_mask_tx") + .sampler(1, ImageType::FLOAT_2D, "mask_value_tx") + .image(0, GPU_R16F, Qualifier::WRITE, ImageType::FLOAT_2D, "output_mask_img") + .compute_source("compositor_box_mask.glsl"); + +GPU_SHADER_CREATE_INFO(compositor_box_mask_add) + .additional_info("compositor_box_mask_shared") + .define("CMP_NODE_MASKTYPE_ADD") + 
.do_static_compilation(true); + +GPU_SHADER_CREATE_INFO(compositor_box_mask_subtract) + .additional_info("compositor_box_mask_shared") + .define("CMP_NODE_MASKTYPE_SUBTRACT") + .do_static_compilation(true); + +GPU_SHADER_CREATE_INFO(compositor_box_mask_multiply) + .additional_info("compositor_box_mask_shared") + .define("CMP_NODE_MASKTYPE_MULTIPLY") + .do_static_compilation(true); + +GPU_SHADER_CREATE_INFO(compositor_box_mask_not) + .additional_info("compositor_box_mask_shared") + .define("CMP_NODE_MASKTYPE_NOT") + .do_static_compilation(true); diff --git a/source/blender/gpu/shaders/compositor/infos/compositor_convert_info.hh b/source/blender/gpu/shaders/compositor/infos/compositor_convert_info.hh new file mode 100644 index 00000000000..35e60056736 --- /dev/null +++ b/source/blender/gpu/shaders/compositor/infos/compositor_convert_info.hh @@ -0,0 +1,69 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ + +#include "gpu_shader_create_info.hh" + +GPU_SHADER_CREATE_INFO(compositor_convert_shared) + .local_group_size(16, 16) + .sampler(0, ImageType::FLOAT_2D, "input_tx") + .typedef_source("gpu_shader_compositor_type_conversion.glsl") + .compute_source("compositor_convert.glsl"); + +GPU_SHADER_CREATE_INFO(compositor_convert_float_to_vector) + .additional_info("compositor_convert_shared") + .image(0, GPU_RGBA16F, Qualifier::WRITE, ImageType::FLOAT_2D, "output_img") + .define("CONVERT_EXPRESSION(value)", "vec4(vec3_from_float(value.x), 0.0)") + .do_static_compilation(true); + +GPU_SHADER_CREATE_INFO(compositor_convert_float_to_color) + .additional_info("compositor_convert_shared") + .image(0, GPU_RGBA16F, Qualifier::WRITE, ImageType::FLOAT_2D, "output_img") + .define("CONVERT_EXPRESSION(value)", "vec4_from_float(value.x)") + .do_static_compilation(true); + +GPU_SHADER_CREATE_INFO(compositor_convert_color_to_float) + .additional_info("compositor_convert_shared") + .image(0, GPU_R16F, Qualifier::WRITE, ImageType::FLOAT_2D, "output_img") + 
.define("CONVERT_EXPRESSION(value)", "vec4(float_from_vec4(value), vec3(0.0))") + .do_static_compilation(true); + +GPU_SHADER_CREATE_INFO(compositor_convert_color_to_vector) + .additional_info("compositor_convert_shared") + .image(0, GPU_RGBA16F, Qualifier::WRITE, ImageType::FLOAT_2D, "output_img") + .define("CONVERT_EXPRESSION(value)", "vec4(vec3_from_vec4(value), 0.0)") + .do_static_compilation(true); + +GPU_SHADER_CREATE_INFO(compositor_convert_vector_to_float) + .additional_info("compositor_convert_shared") + .image(0, GPU_R16F, Qualifier::WRITE, ImageType::FLOAT_2D, "output_img") + .define("CONVERT_EXPRESSION(value)", "vec4(float_from_vec3(value.xyz), vec3(0.0))") + .do_static_compilation(true); + +GPU_SHADER_CREATE_INFO(compositor_convert_vector_to_color) + .additional_info("compositor_convert_shared") + .image(0, GPU_RGBA16F, Qualifier::WRITE, ImageType::FLOAT_2D, "output_img") + .define("CONVERT_EXPRESSION(value)", "vec4_from_vec3(value.xyz)") + .do_static_compilation(true); + +GPU_SHADER_CREATE_INFO(compositor_extract_alpha_from_color) + .additional_info("compositor_convert_shared") + .image(0, GPU_R16F, Qualifier::WRITE, ImageType::FLOAT_2D, "output_img") + .define("CONVERT_EXPRESSION(value)", "vec4(value.a, vec3(0.0))") + .do_static_compilation(true); + +GPU_SHADER_CREATE_INFO(compositor_convert_color_to_half_color) + .additional_info("compositor_convert_shared") + .image(0, GPU_RGBA16F, Qualifier::WRITE, ImageType::FLOAT_2D, "output_img") + .define("CONVERT_EXPRESSION(value)", "value") + .do_static_compilation(true); + +GPU_SHADER_CREATE_INFO(compositor_convert_float_to_half_float) + .additional_info("compositor_convert_shared") + .image(0, GPU_R16F, Qualifier::WRITE, ImageType::FLOAT_2D, "output_img") + .define("CONVERT_EXPRESSION(value)", "vec4(value.r, vec3(0.0))") + .do_static_compilation(true); + +GPU_SHADER_CREATE_INFO(compositor_convert_color_to_opaque) + .additional_info("compositor_convert_shared") + .image(0, GPU_RGBA16F, Qualifier::WRITE, 
ImageType::FLOAT_2D, "output_img") + .define("CONVERT_EXPRESSION(value)", "vec4(value.rgb, 1.0)") + .do_static_compilation(true); diff --git a/source/blender/gpu/shaders/compositor/infos/compositor_ellipse_mask_info.hh b/source/blender/gpu/shaders/compositor/infos/compositor_ellipse_mask_info.hh new file mode 100644 index 00000000000..52db91c94e5 --- /dev/null +++ b/source/blender/gpu/shaders/compositor/infos/compositor_ellipse_mask_info.hh @@ -0,0 +1,35 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ + +#include "gpu_shader_create_info.hh" + +GPU_SHADER_CREATE_INFO(compositor_ellipse_mask_shared) + .local_group_size(16, 16) + .push_constant(Type::IVEC2, "domain_size") + .push_constant(Type::VEC2, "location") + .push_constant(Type::VEC2, "radius") + .push_constant(Type::FLOAT, "cos_angle") + .push_constant(Type::FLOAT, "sin_angle") + .sampler(0, ImageType::FLOAT_2D, "base_mask_tx") + .sampler(1, ImageType::FLOAT_2D, "mask_value_tx") + .image(0, GPU_R16F, Qualifier::WRITE, ImageType::FLOAT_2D, "output_mask_img") + .compute_source("compositor_ellipse_mask.glsl"); + +GPU_SHADER_CREATE_INFO(compositor_ellipse_mask_add) + .additional_info("compositor_ellipse_mask_shared") + .define("CMP_NODE_MASKTYPE_ADD") + .do_static_compilation(true); + +GPU_SHADER_CREATE_INFO(compositor_ellipse_mask_subtract) + .additional_info("compositor_ellipse_mask_shared") + .define("CMP_NODE_MASKTYPE_SUBTRACT") + .do_static_compilation(true); + +GPU_SHADER_CREATE_INFO(compositor_ellipse_mask_multiply) + .additional_info("compositor_ellipse_mask_shared") + .define("CMP_NODE_MASKTYPE_MULTIPLY") + .do_static_compilation(true); + +GPU_SHADER_CREATE_INFO(compositor_ellipse_mask_not) + .additional_info("compositor_ellipse_mask_shared") + .define("CMP_NODE_MASKTYPE_NOT") + .do_static_compilation(true); diff --git a/source/blender/gpu/shaders/compositor/infos/compositor_flip_info.hh b/source/blender/gpu/shaders/compositor/infos/compositor_flip_info.hh new file mode 100644 index 
00000000000..db831518cb7 --- /dev/null +++ b/source/blender/gpu/shaders/compositor/infos/compositor_flip_info.hh @@ -0,0 +1,12 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ + +#include "gpu_shader_create_info.hh" + +GPU_SHADER_CREATE_INFO(compositor_flip) + .local_group_size(16, 16) + .push_constant(Type::BOOL, "flip_x") + .push_constant(Type::BOOL, "flip_y") + .sampler(0, ImageType::FLOAT_2D, "input_tx") + .image(0, GPU_RGBA16F, Qualifier::WRITE, ImageType::FLOAT_2D, "output_img") + .compute_source("compositor_flip.glsl") + .do_static_compilation(true); diff --git a/source/blender/gpu/shaders/compositor/infos/compositor_image_crop_info.hh b/source/blender/gpu/shaders/compositor/infos/compositor_image_crop_info.hh new file mode 100644 index 00000000000..e7736744c40 --- /dev/null +++ b/source/blender/gpu/shaders/compositor/infos/compositor_image_crop_info.hh @@ -0,0 +1,11 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ + +#include "gpu_shader_create_info.hh" + +GPU_SHADER_CREATE_INFO(compositor_image_crop) + .local_group_size(16, 16) + .push_constant(Type::IVEC2, "lower_bound") + .sampler(0, ImageType::FLOAT_2D, "input_tx") + .image(0, GPU_RGBA16F, Qualifier::WRITE, ImageType::FLOAT_2D, "output_img") + .compute_source("compositor_image_crop.glsl") + .do_static_compilation(true); diff --git a/source/blender/gpu/shaders/compositor/infos/compositor_projector_lens_distortion_info.hh b/source/blender/gpu/shaders/compositor/infos/compositor_projector_lens_distortion_info.hh new file mode 100644 index 00000000000..98fe1731703 --- /dev/null +++ b/source/blender/gpu/shaders/compositor/infos/compositor_projector_lens_distortion_info.hh @@ -0,0 +1,11 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ + +#include "gpu_shader_create_info.hh" + +GPU_SHADER_CREATE_INFO(compositor_projector_lens_distortion) + .local_group_size(16, 16) + .push_constant(Type::FLOAT, "dispersion") + .sampler(0, ImageType::FLOAT_2D, "input_tx") + .image(0, GPU_RGBA16F, Qualifier::WRITE, 
ImageType::FLOAT_2D, "output_img") + .compute_source("compositor_projector_lens_distortion.glsl") + .do_static_compilation(true); diff --git a/source/blender/gpu/shaders/compositor/infos/compositor_realize_on_domain_info.hh b/source/blender/gpu/shaders/compositor/infos/compositor_realize_on_domain_info.hh new file mode 100644 index 00000000000..4528649ae98 --- /dev/null +++ b/source/blender/gpu/shaders/compositor/infos/compositor_realize_on_domain_info.hh @@ -0,0 +1,24 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ + +#include "gpu_shader_create_info.hh" + +GPU_SHADER_CREATE_INFO(compositor_realize_on_domain_shared) + .local_group_size(16, 16) + .push_constant(Type::MAT4, "inverse_transformation") + .sampler(0, ImageType::FLOAT_2D, "input_tx") + .compute_source("compositor_realize_on_domain.glsl"); + +GPU_SHADER_CREATE_INFO(compositor_realize_on_domain_color) + .additional_info("compositor_realize_on_domain_shared") + .image(0, GPU_RGBA16F, Qualifier::WRITE, ImageType::FLOAT_2D, "domain_img") + .do_static_compilation(true); + +GPU_SHADER_CREATE_INFO(compositor_realize_on_domain_vector) + .additional_info("compositor_realize_on_domain_shared") + .image(0, GPU_RGBA16F, Qualifier::WRITE, ImageType::FLOAT_2D, "domain_img") + .do_static_compilation(true); + +GPU_SHADER_CREATE_INFO(compositor_realize_on_domain_float) + .additional_info("compositor_realize_on_domain_shared") + .image(0, GPU_R16F, Qualifier::WRITE, ImageType::FLOAT_2D, "domain_img") + .do_static_compilation(true); diff --git a/source/blender/gpu/shaders/compositor/infos/compositor_screen_lens_distortion_info.hh b/source/blender/gpu/shaders/compositor/infos/compositor_screen_lens_distortion_info.hh new file mode 100644 index 00000000000..c42f2b328d4 --- /dev/null +++ b/source/blender/gpu/shaders/compositor/infos/compositor_screen_lens_distortion_info.hh @@ -0,0 +1,20 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ + +#include "gpu_shader_create_info.hh" + 
+GPU_SHADER_CREATE_INFO(compositor_screen_lens_distortion_shared) + .local_group_size(16, 16) + .push_constant(Type::VEC3, "chromatic_distortion") + .push_constant(Type::FLOAT, "scale") + .sampler(0, ImageType::FLOAT_2D, "input_tx") + .image(0, GPU_RGBA16F, Qualifier::WRITE, ImageType::FLOAT_2D, "output_img") + .compute_source("compositor_screen_lens_distortion.glsl"); + +GPU_SHADER_CREATE_INFO(compositor_screen_lens_distortion) + .additional_info("compositor_screen_lens_distortion_shared") + .do_static_compilation(true); + +GPU_SHADER_CREATE_INFO(compositor_screen_lens_distortion_jitter) + .additional_info("compositor_screen_lens_distortion_shared") + .define("JITTER") + .do_static_compilation(true); diff --git a/source/blender/gpu/shaders/compositor/infos/compositor_set_alpha_info.hh b/source/blender/gpu/shaders/compositor/infos/compositor_set_alpha_info.hh new file mode 100644 index 00000000000..ca28194e921 --- /dev/null +++ b/source/blender/gpu/shaders/compositor/infos/compositor_set_alpha_info.hh @@ -0,0 +1,11 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ + +#include "gpu_shader_create_info.hh" + +GPU_SHADER_CREATE_INFO(compositor_set_alpha) + .local_group_size(16, 16) + .sampler(0, ImageType::FLOAT_2D, "image_tx") + .sampler(1, ImageType::FLOAT_2D, "alpha_tx") + .image(0, GPU_RGBA16F, Qualifier::WRITE, ImageType::FLOAT_2D, "output_img") + .compute_source("compositor_set_alpha.glsl") + .do_static_compilation(true); diff --git a/source/blender/gpu/shaders/compositor/infos/compositor_split_viewer_info.hh b/source/blender/gpu/shaders/compositor/infos/compositor_split_viewer_info.hh new file mode 100644 index 00000000000..d5793b0ce59 --- /dev/null +++ b/source/blender/gpu/shaders/compositor/infos/compositor_split_viewer_info.hh @@ -0,0 +1,22 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ + +#include "gpu_shader_create_info.hh" + +GPU_SHADER_CREATE_INFO(compositor_split_viewer_shared) + .local_group_size(16, 16) + .push_constant(Type::FLOAT, 
"split_ratio") + .push_constant(Type::IVEC2, "view_size") + .sampler(0, ImageType::FLOAT_2D, "first_image_tx") + .sampler(1, ImageType::FLOAT_2D, "second_image_tx") + .image(0, GPU_RGBA16F, Qualifier::WRITE, ImageType::FLOAT_2D, "output_img") + .compute_source("compositor_split_viewer.glsl"); + +GPU_SHADER_CREATE_INFO(compositor_split_viewer_horizontal) + .additional_info("compositor_split_viewer_shared") + .define("SPLIT_HORIZONTAL") + .do_static_compilation(true); + +GPU_SHADER_CREATE_INFO(compositor_split_viewer_vertical) + .additional_info("compositor_split_viewer_shared") + .define("SPLIT_VERTICAL") + .do_static_compilation(true); diff --git a/source/blender/gpu/shaders/compositor/library/gpu_shader_compositor_alpha_over.glsl b/source/blender/gpu/shaders/compositor/library/gpu_shader_compositor_alpha_over.glsl new file mode 100644 index 00000000000..8e3e033147f --- /dev/null +++ b/source/blender/gpu/shaders/compositor/library/gpu_shader_compositor_alpha_over.glsl @@ -0,0 +1,48 @@ +void node_composite_alpha_over_mixed( + float factor, vec4 color, vec4 over_color, float premultiply_factor, out vec4 result) +{ + if (over_color.a <= 0.0) { + result = color; + } + else if (factor == 1.0 && over_color.a >= 1.0) { + result = over_color; + } + else { + float add_factor = 1.0 - premultiply_factor + over_color.a * premultiply_factor; + float premultiplier = factor * add_factor; + float multiplier = 1.0 - factor * over_color.a; + + result = multiplier * color + vec2(premultiplier, factor).xxxy * over_color; + } +} + +void node_composite_alpha_over_key(float factor, vec4 color, vec4 over_color, out vec4 result) +{ + if (over_color.a <= 0.0) { + result = color; + } + else if (factor == 1.0 && over_color.a >= 1.0) { + result = over_color; + } + else { + result = mix(color, vec4(over_color.rgb, 1.0), factor * over_color.a); + } +} + +void node_composite_alpha_over_premultiply(float factor, + vec4 color, + vec4 over_color, + out vec4 result) +{ + if (over_color.a < 0.0) { + 
result = color; + } + else if (factor == 1.0 && over_color.a >= 1.0) { + result = over_color; + } + else { + float multiplier = 1.0 - factor * over_color.a; + + result = multiplier * color + factor * over_color; + } +} diff --git a/source/blender/gpu/shaders/compositor/library/gpu_shader_compositor_bright_contrast.glsl b/source/blender/gpu/shaders/compositor/library/gpu_shader_compositor_bright_contrast.glsl new file mode 100644 index 00000000000..ce71b4fd8a4 --- /dev/null +++ b/source/blender/gpu/shaders/compositor/library/gpu_shader_compositor_bright_contrast.glsl @@ -0,0 +1,38 @@ +#pragma BLENDER_REQUIRE(gpu_shader_common_color_utils.glsl) + +/* The algorithm is by Werner D. Streidt + * (http://visca.com/ffactory/archives/5-99/msg00021.html) + * Extracted of OpenCV demhist.c + */ + +#define FLT_EPSILON 1.192092896e-07F + +void node_composite_bright_contrast( + vec4 color, float brightness, float contrast, const float use_premultiply, out vec4 result) +{ + brightness /= 100.0; + float delta = contrast / 200.0; + + float multiplier, offset; + if (contrast > 0.0) { + multiplier = 1.0 - delta * 2.0; + multiplier = 1.0 / max(multiplier, FLT_EPSILON); + offset = multiplier * (brightness - delta); + } + else { + delta *= -1.0; + multiplier = max(1.0 - delta * 2.0, 0.0); + offset = multiplier * brightness + delta; + } + + if (use_premultiply != 0.0) { + color_alpha_unpremultiply(color, color); + } + + result.rgb = color.rgb * multiplier + offset; + result.a = color.a; + + if (use_premultiply != 0.0) { + color_alpha_premultiply(result, result); + } +} diff --git a/source/blender/gpu/shaders/compositor/library/gpu_shader_compositor_channel_matte.glsl b/source/blender/gpu/shaders/compositor/library/gpu_shader_compositor_channel_matte.glsl new file mode 100644 index 00000000000..f2dcc9543f2 --- /dev/null +++ b/source/blender/gpu/shaders/compositor/library/gpu_shader_compositor_channel_matte.glsl @@ -0,0 +1,52 @@ +#pragma BLENDER_REQUIRE(gpu_shader_common_color_utils.glsl) + 
+#define CMP_NODE_CHANNEL_MATTE_CS_RGB 1.0 +#define CMP_NODE_CHANNEL_MATTE_CS_HSV 2.0 +#define CMP_NODE_CHANNEL_MATTE_CS_YUV 3.0 +#define CMP_NODE_CHANNEL_MATTE_CS_YCC 4.0 + +void node_composite_channel_matte(vec4 color, + const float color_space, + const float matte_channel, + const vec2 limit_channels, + float max_limit, + float min_limit, + out vec4 result, + out float matte) +{ + vec4 channels; + if (color_space == CMP_NODE_CHANNEL_MATTE_CS_HSV) { + rgb_to_hsv(color, channels); + } + else if (color_space == CMP_NODE_CHANNEL_MATTE_CS_YUV) { + rgba_to_yuva_itu_709(color, channels); + } + else if (color_space == CMP_NODE_CHANNEL_MATTE_CS_YCC) { + rgba_to_ycca_itu_709(color, channels); + } + else { + channels = color; + } + + float matte_value = channels[int(matte_channel)]; + float limit_value = max(channels[int(limit_channels.x)], channels[int(limit_channels.y)]); + + float alpha = 1.0 - (matte_value - limit_value); + if (alpha > max_limit) { + alpha = color.a; + } + else if (alpha < min_limit) { + alpha = 0.0; + } + else { + alpha = (alpha - min_limit) / (max_limit - min_limit); + } + + matte = min(alpha, color.a); + result = color * matte; +} + +#undef CMP_NODE_CHANNEL_MATTE_CS_RGB +#undef CMP_NODE_CHANNEL_MATTE_CS_HSV +#undef CMP_NODE_CHANNEL_MATTE_CS_YUV +#undef CMP_NODE_CHANNEL_MATTE_CS_YCC diff --git a/source/blender/gpu/shaders/compositor/library/gpu_shader_compositor_chroma_matte.glsl b/source/blender/gpu/shaders/compositor/library/gpu_shader_compositor_chroma_matte.glsl new file mode 100644 index 00000000000..5d6bea0c9db --- /dev/null +++ b/source/blender/gpu/shaders/compositor/library/gpu_shader_compositor_chroma_matte.glsl @@ -0,0 +1,43 @@ +#pragma BLENDER_REQUIRE(gpu_shader_common_math_utils.glsl) +#pragma BLENDER_REQUIRE(gpu_shader_common_color_utils.glsl) + +/* Algorithm from the book Video Demystified. Chapter 7. Chroma Keying. 
*/ +void node_composite_chroma_matte(vec4 color, + vec4 key, + float acceptance, + float cutoff, + float falloff, + out vec4 result, + out float matte) +{ + vec4 color_ycca; + rgba_to_ycca_itu_709(color, color_ycca); + vec4 key_ycca; + rgba_to_ycca_itu_709(key, key_ycca); + + /* Normalize the CrCb components into the [-1, 1] range. */ + vec2 color_cc = color_ycca.yz * 2.0 - 1.0; + vec2 key_cc = key_ycca.yz * 2.0 - 1.0; + + /* Rotate the color onto the space of the key such that x axis of the color space passes through + * the key color. */ + color_cc = vector_to_rotation_matrix(key_cc * vec2(1.0, -1.0)) * color_cc; + + /* Compute foreground key. If positive, the value is in the [0, 1] range. */ + float foreground_key = color_cc.x - (abs(color_cc.y) / acceptance); + + /* Negative foreground key values retain the original alpha. Positive values are scaled by the + * falloff, while colors that make an angle less than the cutoff angle get a zero alpha. */ + float alpha = color.a; + if (foreground_key > 0.0) { + alpha = 1.0 - (foreground_key / falloff); + + if (abs(atan(color_cc.y, color_cc.x)) < (cutoff / 2.0)) { + alpha = 0.0; + } + } + + /* Compute output. 
*/ + matte = min(alpha, color.a); + result = color * matte; +} diff --git a/source/blender/gpu/shaders/compositor/library/gpu_shader_compositor_color_balance.glsl b/source/blender/gpu/shaders/compositor/library/gpu_shader_compositor_color_balance.glsl new file mode 100644 index 00000000000..bffb94cdedb --- /dev/null +++ b/source/blender/gpu/shaders/compositor/library/gpu_shader_compositor_color_balance.glsl @@ -0,0 +1,34 @@ +#pragma BLENDER_REQUIRE(gpu_shader_common_color_utils.glsl) + +void node_composite_color_balance_lgg( + float factor, vec4 color, vec3 lift, vec3 gamma, vec3 gain, out vec4 result) +{ + lift = 2.0 - lift; + vec3 srgb_color = linear_rgb_to_srgb(color.rgb); + vec3 lift_balanced = ((srgb_color - 1.0) * lift) + 1.0; + + vec3 gain_balanced = lift_balanced * gain; + gain_balanced = max(gain_balanced, vec3(0.0)); + + vec3 linear_color = srgb_to_linear_rgb(gain_balanced); + gamma = mix(gamma, vec3(1e-6), equal(gamma, vec3(0.0))); + vec3 gamma_balanced = pow(linear_color, 1.0 / gamma); + + result.rgb = mix(color.rgb, gamma_balanced, min(factor, 1.0)); + result.a = color.a; +} + +void node_composite_color_balance_asc_cdl(float factor, + vec4 color, + vec3 offset, + vec3 power, + vec3 slope, + float offset_basis, + out vec4 result) +{ + offset += offset_basis; + vec3 balanced = color.rgb * slope + offset; + balanced = pow(max(balanced, vec3(0.0)), power); + result.rgb = mix(color.rgb, balanced, min(factor, 1.0)); + result.a = color.a; +} diff --git a/source/blender/gpu/shaders/compositor/library/gpu_shader_compositor_color_correction.glsl b/source/blender/gpu/shaders/compositor/library/gpu_shader_compositor_color_correction.glsl new file mode 100644 index 00000000000..9b4858f03be --- /dev/null +++ b/source/blender/gpu/shaders/compositor/library/gpu_shader_compositor_color_correction.glsl @@ -0,0 +1,87 @@ +#pragma BLENDER_REQUIRE(gpu_shader_common_math_utils.glsl) +#pragma BLENDER_REQUIRE(gpu_shader_common_color_utils.glsl) + +void 
node_composite_color_correction(vec4 color, + float mask, + const vec3 enabled_channels, + float start_midtones, + float end_midtones, + float master_saturation, + float master_contrast, + float master_gamma, + float master_gain, + float master_lift, + float shadows_saturation, + float shadows_contrast, + float shadows_gamma, + float shadows_gain, + float shadows_lift, + float midtones_saturation, + float midtones_contrast, + float midtones_gamma, + float midtones_gain, + float midtones_lift, + float highlights_saturation, + float highlights_contrast, + float highlights_gamma, + float highlights_gain, + float highlights_lift, + const vec3 luminance_coefficients, + out vec4 result) +{ + const float margin = 0.10; + const float margin_divider = 0.5 / margin; + float level = (color.r + color.g + color.b) / 3.0; + float level_shadows = 0.0; + float level_midtones = 0.0; + float level_highlights = 0.0; + if (level < (start_midtones - margin)) { + level_shadows = 1.0; + } + else if (level < (start_midtones + margin)) { + level_midtones = ((level - start_midtones) * margin_divider) + 0.5; + level_shadows = 1.0 - level_midtones; + } + else if (level < (end_midtones - margin)) { + level_midtones = 1.0; + } + else if (level < (end_midtones + margin)) { + level_highlights = ((level - end_midtones) * margin_divider) + 0.5; + level_midtones = 1.0 - level_highlights; + } + else { + level_highlights = 1.0; + } + + float contrast = level_shadows * shadows_contrast; + contrast += level_midtones * midtones_contrast; + contrast += level_highlights * highlights_contrast; + contrast *= master_contrast; + float saturation = level_shadows * shadows_saturation; + saturation += level_midtones * midtones_saturation; + saturation += level_highlights * highlights_saturation; + saturation *= master_saturation; + float gamma = level_shadows * shadows_gamma; + gamma += level_midtones * midtones_gamma; + gamma += level_highlights * highlights_gamma; + gamma *= master_gamma; + float gain = 
level_shadows * shadows_gain; + gain += level_midtones * midtones_gain; + gain += level_highlights * highlights_gain; + gain *= master_gain; + float lift = level_shadows * shadows_lift; + lift += level_midtones * midtones_lift; + lift += level_highlights * highlights_lift; + lift += master_lift; + + float inverse_gamma = 1.0 / gamma; + float luma = get_luminance(color.rgb, luminance_coefficients); + + vec3 corrected = luma + saturation * (color.rgb - luma); + corrected = 0.5 + (corrected - 0.5) * contrast; + corrected = fallback_pow(corrected * gain + lift, inverse_gamma, corrected); + corrected = mix(color.rgb, corrected, min(mask, 1.0)); + + result.rgb = mix(corrected, color.rgb, equal(enabled_channels, vec3(0.0))); + result.a = color.a; +} diff --git a/source/blender/gpu/shaders/compositor/library/gpu_shader_compositor_color_matte.glsl b/source/blender/gpu/shaders/compositor/library/gpu_shader_compositor_color_matte.glsl new file mode 100644 index 00000000000..038471bc1bc --- /dev/null +++ b/source/blender/gpu/shaders/compositor/library/gpu_shader_compositor_color_matte.glsl @@ -0,0 +1,27 @@ +#pragma BLENDER_REQUIRE(gpu_shader_common_color_utils.glsl) + +void node_composite_color_matte(vec4 color, + vec4 key, + float hue_epsilon, + float saturation_epsilon, + float value_epsilon, + out vec4 result, + out float matte) + +{ + vec4 color_hsva; + rgb_to_hsv(color, color_hsva); + vec4 key_hsva; + rgb_to_hsv(key, key_hsva); + + bool is_within_saturation = distance(color_hsva.y, key_hsva.y) < saturation_epsilon; + bool is_within_value = distance(color_hsva.z, key_hsva.z) < value_epsilon; + bool is_within_hue = distance(color_hsva.x, key_hsva.x) < hue_epsilon; + /* Hue wraps around, so check the distance around the boundary. 
*/ + float min_hue = min(color_hsva.x, key_hsva.x); + float max_hue = max(color_hsva.x, key_hsva.x); + is_within_hue = is_within_hue || ((min_hue + (1.0 - max_hue)) < hue_epsilon); + + matte = (is_within_hue && is_within_saturation && is_within_value) ? 0.0 : color.a; + result = color * matte; +} diff --git a/source/blender/gpu/shaders/compositor/library/gpu_shader_compositor_color_spill.glsl b/source/blender/gpu/shaders/compositor/library/gpu_shader_compositor_color_spill.glsl new file mode 100644 index 00000000000..0adad53ad80 --- /dev/null +++ b/source/blender/gpu/shaders/compositor/library/gpu_shader_compositor_color_spill.glsl @@ -0,0 +1,13 @@ +void node_composite_color_spill(vec4 color, + float factor, + const float spill_channel, + vec3 spill_scale, + const vec2 limit_channels, + float limit_scale, + out vec4 result) +{ + float average_limit = (color[int(limit_channels.x)] + color[int(limit_channels.y)]) / 2.0; + float map = factor * color[int(spill_channel)] - limit_scale * average_limit; + result.rgb = map > 0.0 ? 
color.rgb + spill_scale * map : color.rgb; + result.a = color.a; +} diff --git a/source/blender/gpu/shaders/compositor/library/gpu_shader_compositor_color_to_luminance.glsl b/source/blender/gpu/shaders/compositor/library/gpu_shader_compositor_color_to_luminance.glsl new file mode 100644 index 00000000000..bcdd625bd4f --- /dev/null +++ b/source/blender/gpu/shaders/compositor/library/gpu_shader_compositor_color_to_luminance.glsl @@ -0,0 +1,6 @@ +#pragma BLENDER_REQUIRE(gpu_shader_common_color_utils.glsl) + +void color_to_luminance(vec4 color, const vec3 luminance_coefficients, out float result) +{ + result = get_luminance(color.rgb, luminance_coefficients); +} diff --git a/source/blender/gpu/shaders/compositor/library/gpu_shader_compositor_difference_matte.glsl b/source/blender/gpu/shaders/compositor/library/gpu_shader_compositor_difference_matte.glsl new file mode 100644 index 00000000000..d769cadce3c --- /dev/null +++ b/source/blender/gpu/shaders/compositor/library/gpu_shader_compositor_difference_matte.glsl @@ -0,0 +1,10 @@ +void node_composite_difference_matte( + vec4 color, vec4 key, float tolerance, float falloff, out vec4 result, out float matte) +{ + vec4 difference = abs(color - key); + float average_difference = (difference.r + difference.g + difference.b) / 3.0; + bool is_opaque = average_difference > tolerance + falloff; + float alpha = is_opaque ? 
color.a : (max(0.0, average_difference - tolerance) / falloff); + matte = min(alpha, color.a); + result = color * matte; +} diff --git a/source/blender/gpu/shaders/compositor/library/gpu_shader_compositor_distance_matte.glsl b/source/blender/gpu/shaders/compositor/library/gpu_shader_compositor_distance_matte.glsl new file mode 100644 index 00000000000..9beed66826c --- /dev/null +++ b/source/blender/gpu/shaders/compositor/library/gpu_shader_compositor_distance_matte.glsl @@ -0,0 +1,26 @@ +#pragma BLENDER_REQUIRE(gpu_shader_common_color_utils.glsl) + +void node_composite_distance_matte_rgba( + vec4 color, vec4 key, float tolerance, float falloff, out vec4 result, out float matte) +{ + float difference = distance(color.rgb, key.rgb); + bool is_opaque = difference > tolerance + falloff; + float alpha = is_opaque ? color.a : max(0.0, difference - tolerance) / falloff; + matte = min(alpha, color.a); + result = color * matte; +} + +void node_composite_distance_matte_ycca( + vec4 color, vec4 key, float tolerance, float falloff, out vec4 result, out float matte) +{ + vec4 color_ycca; + rgba_to_ycca_itu_709(color, color_ycca); + vec4 key_ycca; + rgba_to_ycca_itu_709(key, key_ycca); + + float difference = distance(color_ycca.yz, key_ycca.yz); + bool is_opaque = difference > tolerance + falloff; + float alpha = is_opaque ? 
color.a : max(0.0, difference - tolerance) / falloff; + matte = min(alpha, color.a); + result = color * matte; +} diff --git a/source/blender/gpu/shaders/compositor/library/gpu_shader_compositor_exposure.glsl b/source/blender/gpu/shaders/compositor/library/gpu_shader_compositor_exposure.glsl new file mode 100644 index 00000000000..f246635a91e --- /dev/null +++ b/source/blender/gpu/shaders/compositor/library/gpu_shader_compositor_exposure.glsl @@ -0,0 +1,6 @@ +void node_composite_exposure(vec4 color, float exposure, out vec4 result) +{ + float multiplier = exp2(exposure); + result.rgb = color.rgb * multiplier; + result.a = color.a; +} diff --git a/source/blender/gpu/shaders/compositor/library/gpu_shader_compositor_gamma.glsl b/source/blender/gpu/shaders/compositor/library/gpu_shader_compositor_gamma.glsl new file mode 100644 index 00000000000..53070d4b0e2 --- /dev/null +++ b/source/blender/gpu/shaders/compositor/library/gpu_shader_compositor_gamma.glsl @@ -0,0 +1,7 @@ +#pragma BLENDER_REQUIRE(gpu_shader_common_math_utils.glsl) + +void node_composite_gamma(vec4 color, float gamma, out vec4 result) +{ + result.rgb = fallback_pow(color.rgb, gamma, color.rgb); + result.a = color.a; +} diff --git a/source/blender/gpu/shaders/compositor/library/gpu_shader_compositor_hue_correct.glsl b/source/blender/gpu/shaders/compositor/library/gpu_shader_compositor_hue_correct.glsl new file mode 100644 index 00000000000..99eb125cdf2 --- /dev/null +++ b/source/blender/gpu/shaders/compositor/library/gpu_shader_compositor_hue_correct.glsl @@ -0,0 +1,39 @@ +#pragma BLENDER_REQUIRE(gpu_shader_common_color_utils.glsl) + +/* Curve maps are stored in sampler objects that are evaluated in the [0, 1] range, so normalize + * parameters accordingly. 
*/ +#define NORMALIZE_PARAMETER(parameter, minimum, range) ((parameter - minimum) * range) + +void node_composite_hue_correct(float factor, + vec4 color, + sampler1DArray curve_map, + const float layer, + vec3 minimums, + vec3 range_dividers, + out vec4 result) +{ + vec4 hsv; + rgb_to_hsv(color, hsv); + + /* First, adjust the hue channel on its own, since corrections in the saturation and value + * channels depends on the new value of the hue, not its original value. A curve map value of 0.5 + * means no change in hue, so adjust the value to get an identity at 0.5. Since the identity of + * addition is 0, we subtract 0.5 (0.5 - 0.5 = 0). */ + const float hue_parameter = NORMALIZE_PARAMETER(hsv.x, minimums.x, range_dividers.x); + hsv.x += texture(curve_map, vec2(hue_parameter, layer)).x - 0.5; + + /* Second, adjust the saturation and value based on the new value of the hue. A curve map value + * of 0.5 means no change in hue, so adjust the value to get an identity at 0.5. Since the + * identity of duplication is 1, we multiply by 2 (0.5 * 2 = 1). */ + vec2 parameters = NORMALIZE_PARAMETER(hsv.x, minimums.yz, range_dividers.yz); + hsv.y *= texture(curve_map, vec2(parameters.x, layer)).y * 2.0; + hsv.z *= texture(curve_map, vec2(parameters.y, layer)).z * 2.0; + + /* Sanitize the new hue and saturation values. 
*/ + hsv.x = fract(hsv.x); + hsv.y = clamp(hsv.y, 0.0, 1.0); + + hsv_to_rgb(hsv, result); + + result = mix(color, result, factor); +} diff --git a/source/blender/gpu/shaders/compositor/library/gpu_shader_compositor_hue_saturation_value.glsl b/source/blender/gpu/shaders/compositor/library/gpu_shader_compositor_hue_saturation_value.glsl new file mode 100644 index 00000000000..dd5eb33d318 --- /dev/null +++ b/source/blender/gpu/shaders/compositor/library/gpu_shader_compositor_hue_saturation_value.glsl @@ -0,0 +1,16 @@ +#pragma BLENDER_REQUIRE(gpu_shader_common_color_utils.glsl) + +void node_composite_hue_saturation_value( + vec4 color, float hue, float saturation, float value, float factor, out vec4 result) +{ + vec4 hsv; + rgb_to_hsv(color, hsv); + + hsv.x = fract(hsv.x + hue + 0.5); + hsv.y = clamp(hsv.y * saturation, 0.0, 1.0); + hsv.z = hsv.z * value; + + hsv_to_rgb(hsv, result); + + result = mix(color, result, factor); +} diff --git a/source/blender/gpu/shaders/compositor/library/gpu_shader_compositor_invert.glsl b/source/blender/gpu/shaders/compositor/library/gpu_shader_compositor_invert.glsl new file mode 100644 index 00000000000..59be746da7f --- /dev/null +++ b/source/blender/gpu/shaders/compositor/library/gpu_shader_compositor_invert.glsl @@ -0,0 +1,13 @@ +#pragma BLENDER_REQUIRE(gpu_shader_common_color_utils.glsl) + +void node_composite_invert(float fac, vec4 color, float do_rgb, float do_alpha, out vec4 result) +{ + result = color; + if (do_rgb != 0.0) { + result.rgb = 1.0 - result.rgb; + } + if (do_alpha != 0.0) { + result.a = 1.0 - result.a; + } + result = mix(color, result, fac); +} diff --git a/source/blender/gpu/shaders/compositor/library/gpu_shader_compositor_luminance_matte.glsl b/source/blender/gpu/shaders/compositor/library/gpu_shader_compositor_luminance_matte.glsl new file mode 100644 index 00000000000..3647ac583fe --- /dev/null +++ b/source/blender/gpu/shaders/compositor/library/gpu_shader_compositor_luminance_matte.glsl @@ -0,0 +1,14 @@ +#pragma 
BLENDER_REQUIRE(gpu_shader_common_color_utils.glsl) + +void node_composite_luminance_matte(vec4 color, + float high, + float low, + const vec3 luminance_coefficients, + out vec4 result, + out float matte) +{ + float luminance = get_luminance(color.rgb, luminance_coefficients); + float alpha = clamp(0.0, 1.0, (luminance - low) / (high - low)); + matte = min(alpha, color.a); + result = color * matte; +} diff --git a/source/blender/gpu/shaders/compositor/library/gpu_shader_compositor_main.glsl b/source/blender/gpu/shaders/compositor/library/gpu_shader_compositor_main.glsl new file mode 100644 index 00000000000..27624223dbc --- /dev/null +++ b/source/blender/gpu/shaders/compositor/library/gpu_shader_compositor_main.glsl @@ -0,0 +1,7 @@ +/* The compute shader that will be dispatched by the compositor ShaderOperation. It just calls the + * evaluate function that will be dynamically generated and appended to this shader in the + * ShaderOperation::generate_code method. */ +void main() +{ + evaluate(); +} diff --git a/source/blender/gpu/shaders/compositor/library/gpu_shader_compositor_map_value.glsl b/source/blender/gpu/shaders/compositor/library/gpu_shader_compositor_map_value.glsl new file mode 100644 index 00000000000..20874b4ef44 --- /dev/null +++ b/source/blender/gpu/shaders/compositor/library/gpu_shader_compositor_map_value.glsl @@ -0,0 +1,56 @@ +/* An arbitrary value determined by Blender. 
*/ +#define BLENDER_ZMAX 10000.0 + +void node_composite_map_range(float value, + float from_min, + float from_max, + float to_min, + float to_max, + const float should_clamp, + out float result) +{ + if (abs(from_max - from_min) < 1e-6) { + result = 0.0; + } + else { + if (value >= -BLENDER_ZMAX && value <= BLENDER_ZMAX) { + result = (value - from_min) / (from_max - from_min); + result = to_min + result * (to_max - to_min); + } + else if (value > BLENDER_ZMAX) { + result = to_max; + } + else { + result = to_min; + } + + if (should_clamp != 0.0) { + if (to_max > to_min) { + result = clamp(result, to_min, to_max); + } + else { + result = clamp(result, to_max, to_min); + } + } + } +} + +void node_composite_map_value(float value, + float offset, + float size, + const float use_min, + float min, + const float use_max, + float max, + out float result) +{ + result = (value + offset) * size; + + if (use_min != 0.0 && result < min) { + result = min; + } + + if (use_max != 0.0 && result > max) { + result = max; + } +} diff --git a/source/blender/gpu/shaders/compositor/library/gpu_shader_compositor_normal.glsl b/source/blender/gpu/shaders/compositor/library/gpu_shader_compositor_normal.glsl new file mode 100644 index 00000000000..a2e3b6c4aaa --- /dev/null +++ b/source/blender/gpu/shaders/compositor/library/gpu_shader_compositor_normal.glsl @@ -0,0 +1,9 @@ +void node_composite_normal(vec3 input_vector, + vec3 input_normal, + out vec3 result_normal, + out float result_dot) +{ + vec3 normal = normalize(input_normal); + result_normal = normal; + result_dot = -dot(input_vector, normal); +} diff --git a/source/blender/gpu/shaders/compositor/library/gpu_shader_compositor_posterize.glsl b/source/blender/gpu/shaders/compositor/library/gpu_shader_compositor_posterize.glsl new file mode 100644 index 00000000000..ee8ae234abe --- /dev/null +++ b/source/blender/gpu/shaders/compositor/library/gpu_shader_compositor_posterize.glsl @@ -0,0 +1,6 @@ +void node_composite_posterize(vec4 color, 
float steps, out vec4 result) +{ + steps = clamp(steps, 2.0, 1024.0); + result = floor(color * steps) / steps; + result.a = color.a; +} diff --git a/source/blender/gpu/shaders/compositor/library/gpu_shader_compositor_separate_combine.glsl b/source/blender/gpu/shaders/compositor/library/gpu_shader_compositor_separate_combine.glsl new file mode 100644 index 00000000000..d72d2260394 --- /dev/null +++ b/source/blender/gpu/shaders/compositor/library/gpu_shader_compositor_separate_combine.glsl @@ -0,0 +1,132 @@ +#pragma BLENDER_REQUIRE(gpu_shader_common_color_utils.glsl) + +/* ** Combine/Separate XYZ ** */ + +void node_composite_combine_xyz(float x, float y, float z, out vec3 vector) +{ + vector = vec3(x, y, z); +} + +void node_composite_separate_xyz(vec3 vector, out float x, out float y, out float z) +{ + x = vector.x; + y = vector.y; + z = vector.z; +} + +/* ** Combine/Separate RGBA ** */ + +void node_composite_combine_rgba(float r, float g, float b, float a, out vec4 color) +{ + color = vec4(r, g, b, a); +} + +void node_composite_separate_rgba(vec4 color, out float r, out float g, out float b, out float a) +{ + r = color.r; + g = color.g; + b = color.b; + a = color.a; +} + +/* ** Combine/Separate HSVA ** */ + +void node_composite_combine_hsva(float h, float s, float v, float a, out vec4 color) +{ + hsv_to_rgb(vec4(h, s, v, a), color); +} + +void node_composite_separate_hsva(vec4 color, out float h, out float s, out float v, out float a) +{ + vec4 hsva; + rgb_to_hsv(color, hsva); + h = hsva.x; + s = hsva.y; + v = hsva.z; + a = hsva.a; +} + +/* ** Combine/Separate HSLA ** */ + +void node_composite_combine_hsla(float h, float s, float l, float a, out vec4 color) +{ + hsl_to_rgb(vec4(h, s, l, a), color); +} + +void node_composite_separate_hsla(vec4 color, out float h, out float s, out float l, out float a) +{ + vec4 hsla; + rgb_to_hsl(color, hsla); + h = hsla.x; + s = hsla.y; + l = hsla.z; + a = hsla.a; +} + +/* ** Combine/Separate YCCA ** */ + +void 
node_composite_combine_ycca_itu_601(float y, float cb, float cr, float a, out vec4 color) +{ + ycca_to_rgba_itu_601(vec4(y, cb, cr, a), color); +} + +void node_composite_combine_ycca_itu_709(float y, float cb, float cr, float a, out vec4 color) +{ + ycca_to_rgba_itu_709(vec4(y, cb, cr, a), color); +} + +void node_composite_combine_ycca_jpeg(float y, float cb, float cr, float a, out vec4 color) +{ + ycca_to_rgba_jpeg(vec4(y, cb, cr, a), color); +} + +void node_composite_separate_ycca_itu_601( + vec4 color, out float y, out float cb, out float cr, out float a) +{ + vec4 ycca; + rgba_to_ycca_itu_601(color, ycca); + y = ycca.x; + cb = ycca.y; + cr = ycca.z; + a = ycca.a; +} + +void node_composite_separate_ycca_itu_709( + vec4 color, out float y, out float cb, out float cr, out float a) +{ + vec4 ycca; + rgba_to_ycca_itu_709(color, ycca); + y = ycca.x; + cb = ycca.y; + cr = ycca.z; + a = ycca.a; +} + +void node_composite_separate_ycca_jpeg( + vec4 color, out float y, out float cb, out float cr, out float a) +{ + vec4 ycca; + rgba_to_ycca_jpeg(color, ycca); + y = ycca.x; + cb = ycca.y; + cr = ycca.z; + a = ycca.a; +} + +/* ** Combine/Separate YUVA ** */ + +void node_composite_combine_yuva_itu_709(float y, float u, float v, float a, out vec4 color) +{ + yuva_to_rgba_itu_709(vec4(y, u, v, a), color); +} + +void node_composite_separate_yuva_itu_709( + vec4 color, out float y, out float u, out float v, out float a) +{ + vec4 yuva; + rgba_to_yuva_itu_709(color, yuva); + y = yuva.x; + u = yuva.y; + v = yuva.z; + a = yuva.a; +} diff --git a/source/blender/gpu/shaders/compositor/library/gpu_shader_compositor_set_alpha.glsl b/source/blender/gpu/shaders/compositor/library/gpu_shader_compositor_set_alpha.glsl new file mode 100644 index 00000000000..95380d1ed0f --- /dev/null +++ b/source/blender/gpu/shaders/compositor/library/gpu_shader_compositor_set_alpha.glsl @@ -0,0 +1,9 @@ +void node_composite_set_alpha_apply(vec4 color, float alpha, out vec4 result) +{ + result = color * 
alpha; +} + +void node_composite_set_alpha_replace(vec4 color, float alpha, out vec4 result) +{ + result = vec4(color.rgb, alpha); +} diff --git a/source/blender/gpu/shaders/compositor/library/gpu_shader_compositor_store_output.glsl b/source/blender/gpu/shaders/compositor/library/gpu_shader_compositor_store_output.glsl new file mode 100644 index 00000000000..7fba26907b5 --- /dev/null +++ b/source/blender/gpu/shaders/compositor/library/gpu_shader_compositor_store_output.glsl @@ -0,0 +1,26 @@ +/* The following functions are called to store the given value in the output identified by the + * given ID. The ID is an unsigned integer that is encoded in a float, so floatBitsToUint is called + * to get the actual identifier. The functions have an output value as their last argument that is + * used to establish an output link that is then used to track the nodes that contribute to the + * output of the compositor node tree. + * + * The store_[float|vector|color] functions are dynamically generated in + * ShaderOperation::generate_code_for_outputs. */ + +void node_compositor_store_output_float(const float id, float value, out float out_value) +{ + store_float(floatBitsToUint(id), value); + out_value = value; +} + +void node_compositor_store_output_vector(const float id, vec3 vector, out vec3 out_vector) +{ + store_vector(floatBitsToUint(id), vector); + out_vector = vector; +} + +void node_compositor_store_output_color(const float id, vec4 color, out vec4 out_color) +{ + store_color(floatBitsToUint(id), color); + out_color = color; +} diff --git a/source/blender/gpu/shaders/compositor/library/gpu_shader_compositor_texture_utilities.glsl b/source/blender/gpu/shaders/compositor/library/gpu_shader_compositor_texture_utilities.glsl new file mode 100644 index 00000000000..00e9a391097 --- /dev/null +++ b/source/blender/gpu/shaders/compositor/library/gpu_shader_compositor_texture_utilities.glsl @@ -0,0 +1,25 @@ +/* A shorthand for 1D textureSize with a zero LOD. 
*/ +int texture_size(sampler1D sampler) +{ + return textureSize(sampler, 0); +} + +/* A shorthand for 1D texelFetch with zero LOD and bounded access clamped to border. */ +vec4 texture_load(sampler1D sampler, int x) +{ + const int texture_bound = texture_size(sampler) - 1; + return texelFetch(sampler, clamp(x, 0, texture_bound), 0); +} + +/* A shorthand for 2D textureSize with a zero LOD. */ +ivec2 texture_size(sampler2D sampler) +{ + return textureSize(sampler, 0); +} + +/* A shorthand for 2D texelFetch with zero LOD and bounded access clamped to border. */ +vec4 texture_load(sampler2D sampler, ivec2 texel) +{ + const ivec2 texture_bounds = texture_size(sampler) - ivec2(1); + return texelFetch(sampler, clamp(texel, ivec2(0), texture_bounds), 0); +} diff --git a/source/blender/gpu/shaders/compositor/library/gpu_shader_compositor_type_conversion.glsl b/source/blender/gpu/shaders/compositor/library/gpu_shader_compositor_type_conversion.glsl new file mode 100644 index 00000000000..75c76fd7341 --- /dev/null +++ b/source/blender/gpu/shaders/compositor/library/gpu_shader_compositor_type_conversion.glsl @@ -0,0 +1,29 @@ +float float_from_vec4(vec4 vector) +{ + return dot(vector.rgb, vec3(1.0)) / 3.0; +} + +float float_from_vec3(vec3 vector) +{ + return dot(vector, vec3(1.0)) / 3.0; +} + +vec3 vec3_from_vec4(vec4 vector) +{ + return vector.rgb; +} + +vec3 vec3_from_float(float value) +{ + return vec3(value); +} + +vec4 vec4_from_vec3(vec3 vector) +{ + return vec4(vector, 1.0); +} + +vec4 vec4_from_float(float value) +{ + return vec4(vec3(value), 1.0); +} diff --git a/source/blender/imbuf/IMB_colormanagement.h b/source/blender/imbuf/IMB_colormanagement.h index 0818dd653a1..2fb1d814c83 100644 --- a/source/blender/imbuf/IMB_colormanagement.h +++ b/source/blender/imbuf/IMB_colormanagement.h @@ -56,6 +56,8 @@ bool IMB_colormanagement_space_name_is_data(const char *name); bool IMB_colormanagement_space_name_is_scene_linear(const char *name); bool 
IMB_colormanagement_space_name_is_srgb(const char *name); +BLI_INLINE void IMB_colormanagement_get_luminance_coefficients(float r_rgb[3]); + /** * Convert a float RGB triplet to the correct luminance weighted average. * diff --git a/source/blender/imbuf/IMB_imbuf_types.h b/source/blender/imbuf/IMB_imbuf_types.h index 1b32bef0a98..5ad226a26f2 100644 --- a/source/blender/imbuf/IMB_imbuf_types.h +++ b/source/blender/imbuf/IMB_imbuf_types.h @@ -251,11 +251,11 @@ typedef struct ImBuf { int refcounter; /* some parameters to pass along for packing images */ - /** Compressed image only used with png and exr currently */ + /** Compressed image only used with PNG and EXR currently */ unsigned char *encodedbuffer; - /** Size of data written to encodedbuffer */ + /** Size of data written to `encodedbuffer`. */ unsigned int encodedsize; - /** Size of encodedbuffer */ + /** Size of `encodedbuffer` */ unsigned int encodedbuffersize; /* color management */ diff --git a/source/blender/imbuf/intern/colormanagement_inline.c b/source/blender/imbuf/intern/colormanagement_inline.c index 668307ec802..3c6c0f5fd0a 100644 --- a/source/blender/imbuf/intern/colormanagement_inline.c +++ b/source/blender/imbuf/intern/colormanagement_inline.c @@ -11,6 +11,11 @@ #include "BLI_math_vector.h" #include "IMB_colormanagement_intern.h" +void IMB_colormanagement_get_luminance_coefficients(float r_rgb[3]) +{ + copy_v3_v3(r_rgb, imbuf_luma_coefficients); +} + float IMB_colormanagement_get_luminance(const float rgb[3]) { return dot_v3v3(imbuf_luma_coefficients, rgb); diff --git a/source/blender/io/wavefront_obj/IO_wavefront_obj.cc b/source/blender/io/wavefront_obj/IO_wavefront_obj.cc index fb0b4a1aca9..2fd2973ee73 100644 --- a/source/blender/io/wavefront_obj/IO_wavefront_obj.cc +++ b/source/blender/io/wavefront_obj/IO_wavefront_obj.cc @@ -4,6 +4,7 @@ * \ingroup obj */ +#include "BLI_path_util.h" #include "BLI_timeit.hh" #include "IO_wavefront_obj.h" @@ -11,14 +12,26 @@ #include "obj_exporter.hh" #include 
"obj_importer.hh" +using namespace blender::timeit; + +static void report_duration(const char *job, const TimePoint &start_time, const char *path) +{ + Nanoseconds duration = Clock::now() - start_time; + std::cout << "OBJ " << job << " of '" << BLI_path_basename(path) << "' took "; + print_duration(duration); + std::cout << '\n'; +} + void OBJ_export(bContext *C, const OBJExportParams *export_params) { - SCOPED_TIMER("OBJ export"); + TimePoint start_time = Clock::now(); blender::io::obj::exporter_main(C, *export_params); + report_duration("export", start_time, export_params->filepath); } void OBJ_import(bContext *C, const OBJImportParams *import_params) { - SCOPED_TIMER(__func__); + TimePoint start_time = Clock::now(); blender::io::obj::importer_main(C, *import_params); + report_duration("import", start_time, import_params->filepath); } diff --git a/source/blender/io/wavefront_obj/IO_wavefront_obj.h b/source/blender/io/wavefront_obj/IO_wavefront_obj.h index 6ad96083e37..847b02d3fd1 100644 --- a/source/blender/io/wavefront_obj/IO_wavefront_obj.h +++ b/source/blender/io/wavefront_obj/IO_wavefront_obj.h @@ -75,6 +75,7 @@ struct OBJImportParams { bool import_vertex_groups; bool validate_meshes; bool relative_paths; + bool clear_selection; }; /** diff --git a/source/blender/io/wavefront_obj/exporter/obj_export_file_writer.cc b/source/blender/io/wavefront_obj/exporter/obj_export_file_writer.cc index 36a9cf1b9ae..53aa80700cc 100644 --- a/source/blender/io/wavefront_obj/exporter/obj_export_file_writer.cc +++ b/source/blender/io/wavefront_obj/exporter/obj_export_file_writer.cc @@ -547,20 +547,29 @@ StringRefNull MTLWriter::mtl_file_path() const return mtl_filepath_; } -void MTLWriter::write_bsdf_properties(const MTLMaterial &mtl_material) +void MTLWriter::write_bsdf_properties(const MTLMaterial &mtl) { - fmt_handler_.write<eMTLSyntaxElement::Ns>(mtl_material.Ns); - fmt_handler_.write<eMTLSyntaxElement::Ka>( - mtl_material.Ka.x, mtl_material.Ka.y, mtl_material.Ka.z); - 
fmt_handler_.write<eMTLSyntaxElement::Kd>( - mtl_material.Kd.x, mtl_material.Kd.y, mtl_material.Kd.z); - fmt_handler_.write<eMTLSyntaxElement::Ks>( - mtl_material.Ks.x, mtl_material.Ks.y, mtl_material.Ks.z); - fmt_handler_.write<eMTLSyntaxElement::Ke>( - mtl_material.Ke.x, mtl_material.Ke.y, mtl_material.Ke.z); - fmt_handler_.write<eMTLSyntaxElement::Ni>(mtl_material.Ni); - fmt_handler_.write<eMTLSyntaxElement::d>(mtl_material.d); - fmt_handler_.write<eMTLSyntaxElement::illum>(mtl_material.illum); + /* For various material properties, we only capture information + * coming from the texture, or the default value of the socket. + * When the texture is present, do not emit the default value. */ + if (!mtl.tex_map_of_type(eMTLSyntaxElement::map_Ns).is_valid()) { + fmt_handler_.write<eMTLSyntaxElement::Ns>(mtl.Ns); + } + fmt_handler_.write<eMTLSyntaxElement::Ka>(mtl.Ka.x, mtl.Ka.y, mtl.Ka.z); + if (!mtl.tex_map_of_type(eMTLSyntaxElement::map_Kd).is_valid()) { + fmt_handler_.write<eMTLSyntaxElement::Kd>(mtl.Kd.x, mtl.Kd.y, mtl.Kd.z); + } + if (!mtl.tex_map_of_type(eMTLSyntaxElement::map_Ks).is_valid()) { + fmt_handler_.write<eMTLSyntaxElement::Ks>(mtl.Ks.x, mtl.Ks.y, mtl.Ks.z); + } + if (!mtl.tex_map_of_type(eMTLSyntaxElement::map_Ke).is_valid()) { + fmt_handler_.write<eMTLSyntaxElement::Ke>(mtl.Ke.x, mtl.Ke.y, mtl.Ke.z); + } + fmt_handler_.write<eMTLSyntaxElement::Ni>(mtl.Ni); + if (!mtl.tex_map_of_type(eMTLSyntaxElement::map_d).is_valid()) { + fmt_handler_.write<eMTLSyntaxElement::d>(mtl.d); + } + fmt_handler_.write<eMTLSyntaxElement::illum>(mtl.illum); } void MTLWriter::write_texture_map( @@ -583,12 +592,13 @@ void MTLWriter::write_texture_map( options.append(" -bm ").append(std::to_string(mtl_material.map_Bump_strength)); } + std::string path = path_reference( + texture_map.value.image_path.c_str(), blen_filedir, dest_dir, path_mode, ©_set); + /* Always emit forward slashes for cross-platform compatibility. 
*/ + std::replace(path.begin(), path.end(), '\\', '/'); + #define SYNTAX_DISPATCH(eMTLSyntaxElement) \ if (texture_map.key == eMTLSyntaxElement) { \ - std::string path = path_reference( \ - texture_map.value.image_path.c_str(), blen_filedir, dest_dir, path_mode, ©_set); \ - /* Always emit forward slashes for cross-platform compatibility. */ \ - std::replace(path.begin(), path.end(), '\\', '/'); \ fmt_handler_.write<eMTLSyntaxElement>(options, path.c_str()); \ return; \ } @@ -600,6 +610,7 @@ void MTLWriter::write_texture_map( SYNTAX_DISPATCH(eMTLSyntaxElement::map_refl); SYNTAX_DISPATCH(eMTLSyntaxElement::map_Ke); SYNTAX_DISPATCH(eMTLSyntaxElement::map_Bump); +#undef SYNTAX_DISPATCH BLI_assert(!"This map type was not written to the file."); } @@ -626,7 +637,7 @@ void MTLWriter::write_materials(const char *blen_filepath, fmt_handler_.write<eMTLSyntaxElement::newmtl>(mtlmat.name); write_bsdf_properties(mtlmat); for (const auto &tex : mtlmat.texture_maps.items()) { - if (tex.value.image_path.empty()) { + if (!tex.value.is_valid()) { continue; } write_texture_map(mtlmat, tex, blen_filedir, dest_dir, path_mode, copy_set); diff --git a/source/blender/io/wavefront_obj/exporter/obj_export_mtl.hh b/source/blender/io/wavefront_obj/exporter/obj_export_mtl.hh index 80e3127f69f..f83b3b49bf5 100644 --- a/source/blender/io/wavefront_obj/exporter/obj_export_mtl.hh +++ b/source/blender/io/wavefront_obj/exporter/obj_export_mtl.hh @@ -8,8 +8,6 @@ #include "BLI_map.hh" #include "BLI_math_vec_types.hh" -#include "BLI_string_ref.hh" -#include "BLI_vector.hh" #include "DNA_node_types.h" #include "obj_export_io.hh" @@ -25,19 +23,22 @@ template<> struct DefaultHash<io::obj::eMTLSyntaxElement> { } // namespace blender namespace blender::io::obj { -class OBJMesh; /** * Generic container for texture node properties. 
*/ struct tex_map_XX { tex_map_XX(StringRef to_socket_id) : dest_socket_id(to_socket_id){}; + bool is_valid() const + { + return !image_path.empty(); + } - /** Target socket which this texture node connects to. */ + /* Target socket which this texture node connects to. */ const std::string dest_socket_id; float3 translation{0.0f}; float3 scale{1.0f}; - /* Only Flat and Smooth projections are supported. */ + /* Only Flat and Sphere projections are supported. */ int projection_type = SHD_PROJ_FLAT; std::string image_path; std::string mtl_dir_path; @@ -58,16 +59,15 @@ struct MTLMaterial { texture_maps.add(eMTLSyntaxElement::map_Bump, tex_map_XX("Normal")); } - /** - * Caller must ensure that the given lookup key exists in the Map. - * \return Texture map corresponding to the given ID. - */ + const tex_map_XX &tex_map_of_type(const eMTLSyntaxElement key) const + { + BLI_assert(texture_maps.contains(key)); + return texture_maps.lookup(key); + } tex_map_XX &tex_map_of_type(const eMTLSyntaxElement key) { - { - BLI_assert(texture_maps.contains_as(key)); - return texture_maps.lookup_as(key); - } + BLI_assert(texture_maps.contains(key)); + return texture_maps.lookup(key); } std::string name; diff --git a/source/blender/io/wavefront_obj/importer/obj_import_file_reader.cc b/source/blender/io/wavefront_obj/importer/obj_import_file_reader.cc index 8594603867f..7069e1185e0 100644 --- a/source/blender/io/wavefront_obj/importer/obj_import_file_reader.cc +++ b/source/blender/io/wavefront_obj/importer/obj_import_file_reader.cc @@ -406,8 +406,7 @@ static void use_all_vertices_if_no_faces(Geometry *geom, all_geometries.begin(), all_geometries.end(), [](const std::unique_ptr<Geometry> &g) { return g->get_vertex_count() == 0; })) { - geom->track_vertex_index(0); - geom->track_vertex_index(global_vertices.vertices.size() - 1); + geom->track_all_vertices(global_vertices.vertices.size()); } } } diff --git a/source/blender/io/wavefront_obj/importer/obj_import_mesh.cc 
b/source/blender/io/wavefront_obj/importer/obj_import_mesh.cc index d7b2bc2e67c..e62470588ec 100644 --- a/source/blender/io/wavefront_obj/importer/obj_import_mesh.cc +++ b/source/blender/io/wavefront_obj/importer/obj_import_mesh.cc @@ -157,17 +157,17 @@ void MeshFromGeometry::fixup_invalid_faces() void MeshFromGeometry::create_vertices(Mesh *mesh) { - const int tot_verts_object{mesh_geometry_.get_vertex_count()}; - for (int i = 0; i < tot_verts_object; ++i) { - int vi = mesh_geometry_.vertex_index_min_ + i; + int mi = 0; + for (int vi : mesh_geometry_.vertices_) { if (vi < global_vertices_.vertices.size()) { - copy_v3_v3(mesh->mvert[i].co, global_vertices_.vertices[vi]); + copy_v3_v3(mesh->mvert[mi].co, global_vertices_.vertices[vi]); } else { std::cerr << "Vertex index:" << vi << " larger than total vertices:" << global_vertices_.vertices.size() << " ." << std::endl; } + ++mi; } } @@ -208,7 +208,7 @@ void MeshFromGeometry::create_polys_loops(Mesh *mesh, bool use_vertex_groups) const PolyCorner &curr_corner = mesh_geometry_.face_corners_[curr_face.start_index_ + idx]; MLoop &mloop = mesh->mloop[tot_loop_idx]; tot_loop_idx++; - mloop.v = curr_corner.vert_index - mesh_geometry_.vertex_index_min_; + mloop.v = mesh_geometry_.global_to_local_vertices_.lookup_default(curr_corner.vert_index, 0); /* Setup vertex group data, if needed. 
*/ if (!mesh->dvert) { @@ -240,8 +240,8 @@ void MeshFromGeometry::create_edges(Mesh *mesh) for (int i = 0; i < tot_edges; ++i) { const MEdge &src_edge = mesh_geometry_.edges_[i]; MEdge &dst_edge = mesh->medge[i]; - dst_edge.v1 = src_edge.v1 - mesh_geometry_.vertex_index_min_; - dst_edge.v2 = src_edge.v2 - mesh_geometry_.vertex_index_min_; + dst_edge.v1 = mesh_geometry_.global_to_local_vertices_.lookup_default(src_edge.v1, 0); + dst_edge.v2 = mesh_geometry_.global_to_local_vertices_.lookup_default(src_edge.v2, 0); BLI_assert(dst_edge.v1 < total_verts && dst_edge.v2 < total_verts); dst_edge.flag = ME_LOOSEEDGE; } diff --git a/source/blender/io/wavefront_obj/importer/obj_import_mtl.cc b/source/blender/io/wavefront_obj/importer/obj_import_mtl.cc index 0023d1159c5..02e09a77a5d 100644 --- a/source/blender/io/wavefront_obj/importer/obj_import_mtl.cc +++ b/source/blender/io/wavefront_obj/importer/obj_import_mtl.cc @@ -134,6 +134,14 @@ static Image *load_texture_image(Main *bmain, return image; } } + /* Try taking just the basename from input path. 
*/ + std::string base_path{tex_map.mtl_dir_path + BLI_path_basename(tex_map.image_path.c_str())}; + if (base_path != tex_path) { + image = load_image_at_path(bmain, base_path, relative_paths); + if (image != nullptr) { + return image; + } + } image = create_placeholder_image(bmain, tex_path); return image; @@ -334,7 +342,7 @@ void ShaderNodetreeWrap::set_bsdf_socket_values(Material *mat) if (emission_color.x >= 0 && emission_color.y >= 0 && emission_color.z >= 0) { set_property_of_socket(SOCK_RGBA, "Emission", {emission_color, 3}, bsdf_); } - if (mtl_mat_.texture_maps.contains_as(eMTLSyntaxElement::map_Ke)) { + if (mtl_mat_.tex_map_of_type(eMTLSyntaxElement::map_Ke).is_valid()) { set_property_of_socket(SOCK_FLOAT, "Emission Strength", {1.0f}, bsdf_); } set_property_of_socket(SOCK_FLOAT, "Specular", {specular}, bsdf_); @@ -357,7 +365,7 @@ void ShaderNodetreeWrap::add_image_textures(Main *bmain, Material *mat, bool rel { for (const Map<const eMTLSyntaxElement, tex_map_XX>::Item texture_map : mtl_mat_.texture_maps.items()) { - if (texture_map.value.image_path.empty()) { + if (!texture_map.value.is_valid()) { /* No Image texture node of this map type can be added to this material. */ continue; } diff --git a/source/blender/io/wavefront_obj/importer/obj_import_objects.hh b/source/blender/io/wavefront_obj/importer/obj_import_objects.hh index 9f0079d7c53..f48b6dd55e8 100644 --- a/source/blender/io/wavefront_obj/importer/obj_import_objects.hh +++ b/source/blender/io/wavefront_obj/importer/obj_import_objects.hh @@ -9,6 +9,7 @@ #include "BKE_lib_id.h" #include "BLI_map.hh" +#include "BLI_math_base.hh" #include "BLI_math_vec_types.hh" #include "BLI_vector.hh" #include "BLI_vector_set.hh" @@ -92,6 +93,8 @@ struct Geometry { int vertex_index_min_ = INT_MAX; int vertex_index_max_ = -1; + VectorSet<int> vertices_; + Map<int, int> global_to_local_vertices_; /** Edges written in the file in addition to (or even without polygon) elements. 
*/ Vector<MEdge> edges_; @@ -105,14 +108,26 @@ struct Geometry { int get_vertex_count() const { - if (vertex_index_max_ < vertex_index_min_) - return 0; - return vertex_index_max_ - vertex_index_min_ + 1; + return (int)vertices_.size(); } void track_vertex_index(int index) { - vertex_index_min_ = std::min(vertex_index_min_, index); - vertex_index_max_ = std::max(vertex_index_max_, index); + if (vertices_.add(index)) { + global_to_local_vertices_.add_new(index, (int)vertices_.size() - 1); + } + math::min_inplace(vertex_index_min_, index); + math::max_inplace(vertex_index_max_, index); + } + void track_all_vertices(int count) + { + vertices_.reserve(count); + global_to_local_vertices_.reserve(count); + for (int i = 0; i < count; ++i) { + vertices_.add(i); + global_to_local_vertices_.add(i, i); + } + vertex_index_min_ = 0; + vertex_index_max_ = count - 1; } }; diff --git a/source/blender/io/wavefront_obj/importer/obj_importer.cc b/source/blender/io/wavefront_obj/importer/obj_importer.cc index bb32776d2be..5d3f75e7f38 100644 --- a/source/blender/io/wavefront_obj/importer/obj_importer.cc +++ b/source/blender/io/wavefront_obj/importer/obj_importer.cc @@ -39,7 +39,6 @@ static void geometry_to_blender_objects(Main *bmain, Map<std::string, std::unique_ptr<MTLMaterial>> &materials, Map<std::string, Material *> &created_materials) { - BKE_view_layer_base_deselect_all(view_layer); LayerCollection *lc = BKE_layer_collection_get_active(view_layer); /* Don't do collection syncs for each object, will do once after the loop. 
*/ @@ -122,6 +121,9 @@ void importer_main(Main *bmain, mtl_parser.parse_and_store(materials); } + if (import_params.clear_selection) { + BKE_view_layer_base_deselect_all(view_layer); + } geometry_to_blender_objects(bmain, scene, view_layer, diff --git a/source/blender/io/wavefront_obj/tests/obj_importer_tests.cc b/source/blender/io/wavefront_obj/tests/obj_importer_tests.cc index 339f672c3e0..132bb03357f 100644 --- a/source/blender/io/wavefront_obj/tests/obj_importer_tests.cc +++ b/source/blender/io/wavefront_obj/tests/obj_importer_tests.cc @@ -62,6 +62,7 @@ class obj_importer_test : public BlendfileLoadingBaseTest { params.validate_meshes = true; params.import_vertex_groups = false; params.relative_paths = true; + params.clear_selection = true; std::string obj_path = blender::tests::flags_test_asset_dir() + "/io_tests/obj/" + path; strncpy(params.filepath, obj_path.c_str(), FILE_MAX - 1); @@ -153,7 +154,7 @@ TEST_F(obj_importer_test, import_cube) 12, 6, 24, - float3(-1, -1, 1), + float3(1, -1, 1), float3(1, -1, -1), float3(-0.57735f, 0.57735f, -0.57735f)}, }; @@ -171,19 +172,19 @@ TEST_F(obj_importer_test, import_cube_o_after_verts) 12, 6, 24, - float3(-1, -1, 1), + float3(1, -1, 1), float3(1, -1, -1), float3(0, 0, 1), }, { "OBSparseTri", OB_MESH, - 6, + 3, 3, 1, 3, - float3(1, -1, 1), float3(-2, -2, 2), + float3(-1, -1, -1), float3(-0.2357f, 0.9428f, 0.2357f), }, }; @@ -200,8 +201,8 @@ TEST_F(obj_importer_test, import_suzanne_all_data) 1005, 500, 1968, - float3(-0.4375f, 0.164062f, 0.765625f), - float3(0.4375f, 0.164062f, 0.765625f), + float3(-0.5f, 0.09375f, 0.6875f), + float3(0.546875f, 0.054688f, 0.578125f), float3(-0.6040f, -0.5102f, 0.6122f), float2(0.692094f, 0.40191f)}, }; @@ -306,7 +307,7 @@ TEST_F(obj_importer_test, import_materials) { Expectation expect[] = { {"OBCube", OB_MESH, 8, 12, 6, 24, float3(1, 1, -1), float3(-1, 1, 1)}, - {"OBmaterials", OB_MESH, 8, 12, 6, 24, float3(-1, -1, 1), float3(1, -1, -1)}, + {"OBmaterials", OB_MESH, 8, 12, 6, 24, 
float3(1, -1, 1), float3(1, -1, -1)}, }; import_and_check("materials.obj", expect, std::size(expect), 4, 8); } @@ -322,7 +323,17 @@ TEST_F(obj_importer_test, import_cubes_with_textures_rel) 6, 24, float3(1, 1, -1), - float3(-1, -1, 1), + float3(1, -1, -1), + float3(0, 1, 0), + float2(0.9935f, 0.0020f)}, + {"OBCubeTexMul", + OB_MESH, + 8, + 12, + 6, + 24, + float3(4, -2, -1), + float3(4, -4, -1), float3(0, 1, 0), float2(0.9935f, 0.0020f)}, {"OBCubeTiledTex", @@ -332,7 +343,7 @@ TEST_F(obj_importer_test, import_cubes_with_textures_rel) 6, 24, float3(4, 1, -1), - float3(2, -1, 1), + float3(4, -1, -1), float3(0, 1, 0), float2(0.9935f, 0.0020f)}, {"OBCubeTiledTexFromAnotherFolder", @@ -342,11 +353,11 @@ TEST_F(obj_importer_test, import_cubes_with_textures_rel) 6, 24, float3(7, 1, -1), - float3(5, -1, 1), + float3(7, -1, -1), float3(0, 1, 0), float2(0.9935f, 0.0020f)}, }; - import_and_check("cubes_with_textures_rel.obj", expect, std::size(expect), 3, 4); + import_and_check("cubes_with_textures_rel.obj", expect, std::size(expect), 4, 4); } TEST_F(obj_importer_test, import_faces_invalid_or_with_holes) @@ -433,15 +444,15 @@ TEST_F(obj_importer_test, import_all_objects) {"OBCube", OB_MESH, 8, 12, 6, 24, float3(1, 1, -1), float3(-1, 1, 1)}, /* .obj file has empty EmptyText and EmptyMesh objects; these are ignored and skipped */ {"OBBezierCurve", OB_MESH, 13, 12, 0, 0, float3(-1, -2, 0), float3(1, -2, 0)}, - {"OBBlankCube", OB_MESH, 8, 13, 7, 26, float3(1, 1, -1), float3(-1, 1, 1), float3(0, 0, 1)}, + {"OBBlankCube", OB_MESH, 8, 13, 7, 26, float3(1, 1, 1), float3(-1, 1, -1), float3(0, 0, 1)}, {"OBMaterialCube", OB_MESH, 8, 13, 7, 26, - float3(28, 1, -1), - float3(26, 1, 1), + float3(26, -1, -1), + float3(28, -1, -1), float3(-1, 0, 0)}, {"OBNurbsCircle", OB_MESH, @@ -451,15 +462,15 @@ TEST_F(obj_importer_test, import_all_objects) 0, float3(3.292893f, -2.707107f, 0), float3(3.369084f, -2.77607f, 0)}, - {"OBNurbsCircle.001", OB_MESH, 4, 4, 0, 0, float3(2, -3, 0), float3(3, -2, 
0)}, + {"OBNurbsCircle.001", OB_MESH, 4, 4, 0, 0, float3(3, -2, 0), float3(2, -1, 0)}, {"OBParticleCube", OB_MESH, 8, 13, 7, 26, - float3(22, 1, -1), - float3(20, 1, 1), + float3(22, 1, 1), + float3(20, 1, -1), float3(0, 0, 1)}, {"OBShapeKeyCube", OB_MESH, @@ -467,8 +478,8 @@ TEST_F(obj_importer_test, import_all_objects) 13, 7, 26, - float3(19, 1, -1), - float3(17, 1, 1), + float3(19, 1, 2), + float3(17, 1, -1), float3(-0.4082f, -0.4082f, 0.8165f)}, {"OBSmoothCube", OB_MESH, @@ -476,8 +487,8 @@ TEST_F(obj_importer_test, import_all_objects) 13, 7, 26, - float3(4, 1, -1), - float3(2, 1, 1), + float3(4, 1, 1), + float3(2, 1, -1), float3(0.5774f, 0.5773f, 0.5774f)}, {"OBSurface", OB_MESH, @@ -485,7 +496,7 @@ TEST_F(obj_importer_test, import_all_objects) 480, 224, 896, - float3(7.292893f, -2.707107f, -1), + float3(7.292893f, -2.707107f, -0.714285f), float3(7.525872f, -2.883338f, 1), float3(-0.7071f, -0.7071f, 0), float2(0, 0.142857f)}, @@ -495,7 +506,7 @@ TEST_F(obj_importer_test, import_all_objects) 480, 225, 900, - float3(12.5f, -2.5f, 0.694444f), + float3(12.56667f, -2.5f, 0.72037f), float3(13.5f, -1.5f, 0.694444f), float3(-0.3246f, -0.3531f, 0.8775f), float2(0, 0.066667f)}, @@ -516,7 +527,7 @@ TEST_F(obj_importer_test, import_all_objects) 1024, 4096, float3(5.34467f, -2.65533f, -0.176777f), - float3(5.232792f, -2.411795f, -0.220835f), + float3(5.158205f, -2.234695f, -0.220835f), float3(-0.5042f, -0.5042f, -0.7011f), float2(0, 1)}, {"OBTaperCube", @@ -525,8 +536,8 @@ TEST_F(obj_importer_test, import_all_objects) 208, 104, 416, - float3(24.444445f, 0.502543f, -0.753814f), - float3(23.790743f, 0.460522f, -0.766546f), + float3(24.316156f, 0.345556f, 0.796778f), + float3(23.551804f, 0.389113f, -0.639607f), float3(-0.0546f, 0.1716f, 0.9837f)}, {"OBText", OB_MESH, @@ -534,8 +545,8 @@ TEST_F(obj_importer_test, import_all_objects) 345, 171, 513, - float3(1.75f, -9.458f, 0), - float3(0.587f, -9.406f, 0), + float3(1.583f, -9.621f, 0), + float3(0.351f, -10.0f, 0), float3(0, 0, 
1), float2(0.017544f, 0)}, {"OBUVCube", @@ -544,8 +555,8 @@ TEST_F(obj_importer_test, import_all_objects) 13, 7, 26, - float3(7, 1, -1), - float3(5, 1, 1), + float3(7, 1, 1), + float3(5, 1, -1), float3(0, 0, 1), float2(0.654526f, 0.579873f)}, {"OBUVImageCube", @@ -554,8 +565,8 @@ TEST_F(obj_importer_test, import_all_objects) 13, 7, 26, - float3(10, 1, -1), - float3(8, 1, 1), + float3(10, 1, 1), + float3(8, 1, -1), float3(0, 0, 1), float2(0.654526f, 0.579873f)}, {"OBVColCube", @@ -564,8 +575,8 @@ TEST_F(obj_importer_test, import_all_objects) 13, 7, 26, - float3(13, 1, -1), - float3(11, 1, 1), + float3(13, 1, 1), + float3(11, 1, -1), float3(0, 0, 1), float2(0, 0), float4(0.0f, 0.002125f, 1.0f, 1.0f)}, @@ -575,8 +586,8 @@ TEST_F(obj_importer_test, import_all_objects) 13, 7, 26, - float3(16, 1, -1), - float3(14, 1, 1), + float3(16, 1, 1), + float3(14, 1, -1), float3(0, 0, 1)}, }; import_and_check("all_objects.obj", expect, std::size(expect), 7); @@ -593,7 +604,7 @@ TEST_F(obj_importer_test, import_cubes_vertex_colors) 6, 24, float3(1.0f, 1.0f, -3.812445f), - float3(-1.0f, -1.0f, -1.812445f), + float3(1.0f, -1.0f, -3.812445f), float3(0, 0, 0), float2(0, 0), float4(0.89627f, 0.036889f, 0.47932f, 1.0f)}, @@ -604,7 +615,7 @@ TEST_F(obj_importer_test, import_cubes_vertex_colors) 6, 24, float3(3.481967f, 1.0f, -3.812445f), - float3(1.481967f, -1.0f, -1.812445f), + float3(3.481967f, -1.0f, -3.812445f), float3(0, 0, 0), float2(0, 0), float4(1.564582f, 0.039217f, 0.664309f, 1.0f)}, @@ -615,7 +626,7 @@ TEST_F(obj_importer_test, import_cubes_vertex_colors) 6, 24, float3(-4.725068f, -1.0f, 1.0f), - float3(-2.725068f, 1.0f, -1.0f), + float3(-2.725068f, -1.0f, 1.0f), float3(0, 0, 0), float2(0, 0), float4(0.270498f, 0.47932f, 0.262251f, 1.0f)}, @@ -626,7 +637,7 @@ TEST_F(obj_importer_test, import_cubes_vertex_colors) 6, 24, float3(-4.550208f, -1.0f, -1.918042f), - float3(-2.550208f, 1.0f, -3.918042f)}, + float3(-2.550208f, -1.0f, -1.918042f)}, {"OBCubeVertexByte", OB_MESH, 8, @@ 
-634,7 +645,7 @@ TEST_F(obj_importer_test, import_cubes_vertex_colors) 6, 24, float3(1.0f, 1.0f, -1.0f), - float3(-1.0f, -1.0f, 1.0f), + float3(1.0f, -1.0f, -1.0f), float3(0, 0, 0), float2(0, 0), float4(0.846873f, 0.027321f, 0.982123f, 1.0f)}, @@ -645,7 +656,7 @@ TEST_F(obj_importer_test, import_cubes_vertex_colors) 6, 24, float3(3.392028f, 1.0f, -1.0f), - float3(1.392028f, -1.0f, 1.0f), + float3(3.392028f, -1.0f, -1.0f), float3(0, 0, 0), float2(0, 0), float4(49.99467f, 0.027321f, 0.982123f, 1.0f)}, @@ -664,7 +675,7 @@ TEST_F(obj_importer_test, import_cubes_vertex_colors_mrgb) 6, 24, float3(4, 1, -1), - float3(2, -1, 1), + float3(4, -1, -1), float3(0, 0, 0), float2(0, 0), float4(0.8714f, 0.6308f, 0.5271f, 1.0f)}, @@ -675,7 +686,7 @@ TEST_F(obj_importer_test, import_cubes_vertex_colors_mrgb) 6, 24, float3(1, 1, -1), - float3(-1, -1, 1), + float3(1, -1, -1), float3(0, 0, 0), float2(0, 0), float4(0.6038f, 0.3185f, 0.1329f, 1.0f)}, diff --git a/source/blender/makesdna/DNA_node_types.h b/source/blender/makesdna/DNA_node_types.h index 76d8207eead..b9161e918c0 100644 --- a/source/blender/makesdna/DNA_node_types.h +++ b/source/blender/makesdna/DNA_node_types.h @@ -632,12 +632,12 @@ typedef struct bNodeSocketValueMaterial { /* Data structs, for `node->storage`. */ -enum { +typedef enum CMPNodeMaskType { CMP_NODE_MASKTYPE_ADD = 0, CMP_NODE_MASKTYPE_SUBTRACT = 1, CMP_NODE_MASKTYPE_MULTIPLY = 2, CMP_NODE_MASKTYPE_NOT = 3, -}; +} CMPNodeMaskType; enum { CMP_NODE_DILATEERODE_STEP = 0, @@ -1838,6 +1838,49 @@ enum { /* viewer and composite output. */ #define CMP_NODE_OUTPUT_IGNORE_ALPHA 1 +/** Split Viewer Node. Stored in `custom2`. */ +typedef enum CMPNodeSplitViewerAxis { + CMP_NODE_SPLIT_VIEWER_HORIZONTAL = 0, + CMP_NODE_SPLIT_VIEWER_VERTICAL = 1, +} CMPNodeSplitViewerAxis; + +/** Color Balance Node. Stored in `custom1`. 
*/ +typedef enum CMPNodeColorBalanceMethod { + CMP_NODE_COLOR_BALANCE_LGG = 0, + CMP_NODE_COLOR_BALANCE_ASC_CDL = 1, +} CMPNodeColorBalanceMethod; + +/** Alpha Convert Node. Stored in `custom1`. */ +typedef enum CMPNodeAlphaConvertMode { + CMP_NODE_ALPHA_CONVERT_PREMULTIPLY = 0, + CMP_NODE_ALPHA_CONVERT_UNPREMULTIPLY = 1, +} CMPNodeAlphaConvertMode; + +/** Distance Matte Node. Stored in #NodeChroma.channel. */ +typedef enum CMPNodeDistanceMatteColorSpace { + CMP_NODE_DISTANCE_MATTE_COLOR_SPACE_YCCA = 0, + CMP_NODE_DISTANCE_MATTE_COLOR_SPACE_RGBA = 1, +} CMPNodeDistanceMatteColorSpace; + +/** Color Spill Node. Stored in `custom2`. */ +typedef enum CMPNodeColorSpillLimitAlgorithm { + CMP_NODE_COLOR_SPILL_LIMIT_ALGORITHM_SINGLE = 0, + CMP_NODE_COLOR_SPILL_LIMIT_ALGORITHM_AVERAGE = 1, +} CMPNodeColorSpillLimitAlgorithm; + +/** Channel Matte Node. Stored in #NodeChroma.algorithm. */ +typedef enum CMPNodeChannelMatteLimitAlgorithm { + CMP_NODE_CHANNEL_MATTE_LIMIT_ALGORITHM_SINGLE = 0, + CMP_NODE_CHANNEL_MATTE_LIMIT_ALGORITHM_MAX = 1, +} CMPNodeChannelMatteLimitAlgorithm; + +/* Flip Node. Stored in custom1. */ +typedef enum CMPNodeFlipMode { + CMP_NODE_FLIP_X = 0, + CMP_NODE_FLIP_Y = 1, + CMP_NODE_FLIP_X_Y = 2, +} CMPNodeFlipMode; + /* Plane track deform node. */ enum { diff --git a/source/blender/makesdna/DNA_userdef_types.h b/source/blender/makesdna/DNA_userdef_types.h index 74fb1c3ac96..ac553c2655f 100644 --- a/source/blender/makesdna/DNA_userdef_types.h +++ b/source/blender/makesdna/DNA_userdef_types.h @@ -653,6 +653,8 @@ typedef struct UserDef_Experimental { char enable_eevee_next; char use_sculpt_texture_paint; char use_draw_manager_acquire_lock; + char use_realtime_compositor; + char _pad[7]; /** `makesdna` does not allow empty structs. 
*/ } UserDef_Experimental; diff --git a/source/blender/makesdna/DNA_view3d_types.h b/source/blender/makesdna/DNA_view3d_types.h index 8554d070dc3..0d281032b7e 100644 --- a/source/blender/makesdna/DNA_view3d_types.h +++ b/source/blender/makesdna/DNA_view3d_types.h @@ -486,6 +486,7 @@ enum { V3D_SHADING_SCENE_LIGHTS_RENDER = (1 << 12), V3D_SHADING_SCENE_WORLD_RENDER = (1 << 13), V3D_SHADING_STUDIOLIGHT_VIEW_ROTATION = (1 << 14), + V3D_SHADING_COMPOSITOR = (1 << 15), }; #define V3D_USES_SCENE_LIGHTS(v3d) \ diff --git a/source/blender/makesrna/intern/rna_action.c b/source/blender/makesrna/intern/rna_action.c index bcfb646ca19..14f4a82c62b 100644 --- a/source/blender/makesrna/intern/rna_action.c +++ b/source/blender/makesrna/intern/rna_action.c @@ -177,7 +177,7 @@ static void rna_Action_fcurve_clear(bAction *act) static TimeMarker *rna_Action_pose_markers_new(bAction *act, const char name[]) { TimeMarker *marker = MEM_callocN(sizeof(TimeMarker), "TimeMarker"); - marker->flag = 1; + marker->flag = SELECT; marker->frame = 1; BLI_strncpy_utf8(marker->name, name, sizeof(marker->name)); BLI_addtail(&act->markers, marker); diff --git a/source/blender/makesrna/intern/rna_camera.c b/source/blender/makesrna/intern/rna_camera.c index 99f8c263da6..988e65b4ba8 100644 --- a/source/blender/makesrna/intern/rna_camera.c +++ b/source/blender/makesrna/intern/rna_camera.c @@ -695,14 +695,12 @@ void RNA_def_camera(BlenderRNA *brna) prop = RNA_def_property(srna, "shift_x", PROP_FLOAT, PROP_NONE); RNA_def_property_float_sdna(prop, NULL, "shiftx"); - RNA_def_property_range(prop, -10.0f, 10.0f); RNA_def_property_ui_range(prop, -2.0, 2.0, 1, 3); RNA_def_property_ui_text(prop, "Shift X", "Camera horizontal shift"); RNA_def_property_update(prop, NC_OBJECT | ND_DRAW, "rna_Camera_update"); prop = RNA_def_property(srna, "shift_y", PROP_FLOAT, PROP_NONE); RNA_def_property_float_sdna(prop, NULL, "shifty"); - RNA_def_property_range(prop, -10.0f, 10.0f); RNA_def_property_ui_range(prop, -2.0, 2.0, 1, 3); 
RNA_def_property_ui_text(prop, "Shift Y", "Camera vertical shift"); RNA_def_property_update(prop, NC_OBJECT | ND_DRAW, "rna_Camera_update"); diff --git a/source/blender/makesrna/intern/rna_space.c b/source/blender/makesrna/intern/rna_space.c index 5cee2ca00a3..502b0404764 100644 --- a/source/blender/makesrna/intern/rna_space.c +++ b/source/blender/makesrna/intern/rna_space.c @@ -4199,6 +4199,14 @@ static void rna_def_space_view3d_shading(BlenderRNA *brna) RNA_def_property_ui_text(prop, "Shader AOV Name", "Name of the active Shader AOV"); RNA_def_property_flag(prop, PROP_HIDDEN); RNA_def_property_update(prop, NC_SPACE | ND_SPACE_VIEW3D, NULL); + + prop = RNA_def_property(srna, "use_compositor", PROP_BOOLEAN, PROP_NONE); + RNA_def_property_boolean_sdna(prop, NULL, "flag", V3D_SHADING_COMPOSITOR); + RNA_def_property_clear_flag(prop, PROP_ANIMATABLE); + RNA_def_property_boolean_default(prop, false); + RNA_def_property_ui_text( + prop, "Compositor", "Preview the compositor output inside the viewport"); + RNA_def_property_update(prop, NC_SPACE | ND_SPACE_VIEW3D | NS_VIEW3D_SHADING, NULL); } static void rna_def_space_view3d_overlay(BlenderRNA *brna) diff --git a/source/blender/makesrna/intern/rna_userdef.c b/source/blender/makesrna/intern/rna_userdef.c index cfc72791123..438bac9b458 100644 --- a/source/blender/makesrna/intern/rna_userdef.c +++ b/source/blender/makesrna/intern/rna_userdef.c @@ -6338,6 +6338,10 @@ static void rna_def_userdef_experimental(BlenderRNA *brna) RNA_def_property_ui_text( prop, "Sculpt Mode Tilt Support", "Support for pen tablet tilt events in Sculpt Mode"); + prop = RNA_def_property(srna, "use_realtime_compositor", PROP_BOOLEAN, PROP_NONE); + RNA_def_property_boolean_sdna(prop, NULL, "use_realtime_compositor", 1); + RNA_def_property_ui_text(prop, "Realtime Compositor", "Enable the new realtime compositor"); + prop = RNA_def_property(srna, "use_sculpt_texture_paint", PROP_BOOLEAN, PROP_NONE); RNA_def_property_boolean_sdna(prop, NULL, 
"use_sculpt_texture_paint", 1); RNA_def_property_ui_text(prop, "Sculpt Texture Paint", "Use texture painting in Sculpt Mode"); diff --git a/source/blender/modifiers/intern/MOD_surfacedeform.c b/source/blender/modifiers/intern/MOD_surfacedeform.c index ad19ecf5720..942598eb04d 100644 --- a/source/blender/modifiers/intern/MOD_surfacedeform.c +++ b/source/blender/modifiers/intern/MOD_surfacedeform.c @@ -215,8 +215,7 @@ static void freeData(ModifierData *md) MEM_SAFE_FREE(smd->verts[i].binds[j].vert_inds); MEM_SAFE_FREE(smd->verts[i].binds[j].vert_weights); } - - MEM_SAFE_FREE(smd->verts[i].binds); + MEM_freeN(smd->verts[i].binds); } } diff --git a/source/blender/nodes/NOD_node_declaration.hh b/source/blender/nodes/NOD_node_declaration.hh index 4e78f6c1142..d8b8c354230 100644 --- a/source/blender/nodes/NOD_node_declaration.hh +++ b/source/blender/nodes/NOD_node_declaration.hh @@ -88,6 +88,14 @@ class SocketDeclaration { InputSocketFieldType input_field_type_ = InputSocketFieldType::None; OutputFieldDependency output_field_dependency_; + /** The priority of the input for determining the domain of the node. See + * realtime_compositor::InputDescriptor for more information. */ + int compositor_domain_priority_ = 0; + + /** This input expects a single value and can't operate on non-single values. See + * realtime_compositor::InputDescriptor for more information. */ + bool compositor_expects_single_value_ = false; + /** Utility method to make the socket available if there is a straightforward way to do so. 
*/ std::function<void(bNode &)> make_available_fn_; @@ -124,6 +132,9 @@ class SocketDeclaration { InputSocketFieldType input_field_type() const; const OutputFieldDependency &output_field_dependency() const; + int compositor_domain_priority() const; + bool compositor_expects_single_value() const; + protected: void set_common_flags(bNodeSocket &socket) const; bool matches_common_data(const bNodeSocket &socket) const; @@ -238,6 +249,22 @@ class SocketDeclarationBuilder : public BaseSocketDeclarationBuilder { return *(Self *)this; } + /** The priority of the input for determining the domain of the node. See + * realtime_compositor::InputDescriptor for more information. */ + Self &compositor_domain_priority(int priority) + { + decl_->compositor_domain_priority_ = priority; + return *(Self *)this; + } + + /** This input expects a single value and can't operate on non-single values. See + * realtime_compositor::InputDescriptor for more information. */ + Self &compositor_expects_single_value(bool value = true) + { + decl_->compositor_expects_single_value_ = value; + return *(Self *)this; + } + /** * Pass a function that sets properties on the node required to make the corresponding socket * available, if it is not available on the default state of the node. 
The function is allowed to @@ -428,6 +455,16 @@ inline const OutputFieldDependency &SocketDeclaration::output_field_dependency() return output_field_dependency_; } +inline int SocketDeclaration::compositor_domain_priority() const +{ + return compositor_domain_priority_; +} + +inline bool SocketDeclaration::compositor_expects_single_value() const +{ + return compositor_expects_single_value_; +} + inline void SocketDeclaration::make_available(bNode &node) const { if (make_available_fn_) { diff --git a/source/blender/nodes/composite/CMakeLists.txt b/source/blender/nodes/composite/CMakeLists.txt index 6b1522b7634..2537e8e93cc 100644 --- a/source/blender/nodes/composite/CMakeLists.txt +++ b/source/blender/nodes/composite/CMakeLists.txt @@ -10,11 +10,14 @@ set(INC ../../blenlib ../../blentranslation ../../depsgraph + ../../functions + ../../gpu ../../imbuf ../../makesdna ../../makesrna ../../render ../../windowmanager + ../../compositor/realtime_compositor ../../../../intern/guardedalloc # dna_type_offsets.h @@ -120,6 +123,10 @@ set(SRC node_composite_util.hh ) +set(LIB + bf_realtime_compositor +) + if(WITH_IMAGE_OPENEXR) add_definitions(-DWITH_OPENEXR) endif() diff --git a/source/blender/nodes/composite/nodes/node_composite_alpha_over.cc b/source/blender/nodes/composite/nodes/node_composite_alpha_over.cc index d392b810bc1..64c59eb24e3 100644 --- a/source/blender/nodes/composite/nodes/node_composite_alpha_over.cc +++ b/source/blender/nodes/composite/nodes/node_composite_alpha_over.cc @@ -8,6 +8,10 @@ #include "UI_interface.h" #include "UI_resources.h" +#include "GPU_material.h" + +#include "COM_shader_node.hh" + #include "node_composite_util.hh" /* **************** ALPHAOVER ******************** */ @@ -16,9 +20,18 @@ namespace blender::nodes::node_composite_alpha_over_cc { static void cmp_node_alphaover_declare(NodeDeclarationBuilder &b) { - b.add_input<decl::Float>(N_("Fac")).default_value(1.0f).min(0.0f).max(1.0f).subtype(PROP_FACTOR); - 
b.add_input<decl::Color>(N_("Image")).default_value({1.0f, 1.0f, 1.0f, 1.0f}); - b.add_input<decl::Color>(N_("Image"), "Image_001").default_value({1.0f, 1.0f, 1.0f, 1.0f}); + b.add_input<decl::Float>(N_("Fac")) + .default_value(1.0f) + .min(0.0f) + .max(1.0f) + .subtype(PROP_FACTOR) + .compositor_domain_priority(2); + b.add_input<decl::Color>(N_("Image")) + .default_value({1.0f, 1.0f, 1.0f, 1.0f}) + .compositor_domain_priority(0); + b.add_input<decl::Color>(N_("Image"), "Image_001") + .default_value({1.0f, 1.0f, 1.0f, 1.0f}) + .compositor_domain_priority(1); b.add_output<decl::Color>(N_("Image")); } @@ -36,6 +49,52 @@ static void node_composit_buts_alphaover(uiLayout *layout, bContext *UNUSED(C), uiItemR(col, ptr, "premul", UI_ITEM_R_SPLIT_EMPTY_NAME, nullptr, ICON_NONE); } +using namespace blender::realtime_compositor; + +class AlphaOverShaderNode : public ShaderNode { + public: + using ShaderNode::ShaderNode; + + void compile(GPUMaterial *material) override + { + GPUNodeStack *inputs = get_inputs_array(); + GPUNodeStack *outputs = get_outputs_array(); + + const float premultiply_factor = get_premultiply_factor(); + if (premultiply_factor != 0.0f) { + GPU_stack_link(material, + &bnode(), + "node_composite_alpha_over_mixed", + inputs, + outputs, + GPU_uniform(&premultiply_factor)); + return; + } + + if (get_use_premultiply()) { + GPU_stack_link(material, &bnode(), "node_composite_alpha_over_key", inputs, outputs); + return; + } + + GPU_stack_link(material, &bnode(), "node_composite_alpha_over_premultiply", inputs, outputs); + } + + bool get_use_premultiply() + { + return bnode().custom1; + } + + float get_premultiply_factor() + { + return ((NodeTwoFloats *)bnode().storage)->x; + } +}; + +static ShaderNode *get_compositor_shader_node(DNode node) +{ + return new AlphaOverShaderNode(node); +} + } // namespace blender::nodes::node_composite_alpha_over_cc void register_node_type_cmp_alphaover() @@ -50,6 +109,7 @@ void register_node_type_cmp_alphaover() 
node_type_init(&ntype, file_ns::node_alphaover_init); node_type_storage( &ntype, "NodeTwoFloats", node_free_standard_storage, node_copy_standard_storage); + ntype.get_compositor_shader_node = file_ns::get_compositor_shader_node; nodeRegisterType(&ntype); } diff --git a/source/blender/nodes/composite/nodes/node_composite_antialiasing.cc b/source/blender/nodes/composite/nodes/node_composite_antialiasing.cc index f45b678fc50..55fe3366526 100644 --- a/source/blender/nodes/composite/nodes/node_composite_antialiasing.cc +++ b/source/blender/nodes/composite/nodes/node_composite_antialiasing.cc @@ -8,6 +8,8 @@ #include "UI_interface.h" #include "UI_resources.h" +#include "COM_node_operation.hh" + #include "node_composite_util.hh" /* **************** Anti-Aliasing (SMAA 1x) ******************** */ @@ -42,6 +44,23 @@ static void node_composit_buts_antialiasing(uiLayout *layout, bContext *UNUSED(C uiItemR(col, ptr, "corner_rounding", 0, nullptr, ICON_NONE); } +using namespace blender::realtime_compositor; + +class AntiAliasingOperation : public NodeOperation { + public: + using NodeOperation::NodeOperation; + + void execute() override + { + get_input("Image").pass_through(get_result("Image")); + } +}; + +static NodeOperation *get_compositor_operation(Context &context, DNode node) +{ + return new AntiAliasingOperation(context, node); +} + } // namespace blender::nodes::node_composite_antialiasing_cc void register_node_type_cmp_antialiasing() @@ -58,6 +77,7 @@ void register_node_type_cmp_antialiasing() node_type_init(&ntype, file_ns::node_composit_init_antialiasing); node_type_storage( &ntype, "NodeAntiAliasingData", node_free_standard_storage, node_copy_standard_storage); + ntype.get_compositor_operation = file_ns::get_compositor_operation; nodeRegisterType(&ntype); } diff --git a/source/blender/nodes/composite/nodes/node_composite_bilateralblur.cc b/source/blender/nodes/composite/nodes/node_composite_bilateralblur.cc index ad4a1f701d6..66a321eb088 100644 --- 
a/source/blender/nodes/composite/nodes/node_composite_bilateralblur.cc +++ b/source/blender/nodes/composite/nodes/node_composite_bilateralblur.cc @@ -8,6 +8,8 @@ #include "UI_interface.h" #include "UI_resources.h" +#include "COM_node_operation.hh" + #include "node_composite_util.hh" /* **************** BILATERALBLUR ******************** */ @@ -42,6 +44,23 @@ static void node_composit_buts_bilateralblur(uiLayout *layout, uiItemR(col, ptr, "sigma_space", UI_ITEM_R_SPLIT_EMPTY_NAME, nullptr, ICON_NONE); } +using namespace blender::realtime_compositor; + +class BilateralBlurOperation : public NodeOperation { + public: + using NodeOperation::NodeOperation; + + void execute() override + { + get_input("Image").pass_through(get_result("Image")); + } +}; + +static NodeOperation *get_compositor_operation(Context &context, DNode node) +{ + return new BilateralBlurOperation(context, node); +} + } // namespace blender::nodes::node_composite_bilateralblur_cc void register_node_type_cmp_bilateralblur() @@ -56,6 +75,7 @@ void register_node_type_cmp_bilateralblur() node_type_init(&ntype, file_ns::node_composit_init_bilateralblur); node_type_storage( &ntype, "NodeBilateralBlurData", node_free_standard_storage, node_copy_standard_storage); + ntype.get_compositor_operation = file_ns::get_compositor_operation; nodeRegisterType(&ntype); } diff --git a/source/blender/nodes/composite/nodes/node_composite_blur.cc b/source/blender/nodes/composite/nodes/node_composite_blur.cc index 7beffe15c8e..cb1d93fe10b 100644 --- a/source/blender/nodes/composite/nodes/node_composite_blur.cc +++ b/source/blender/nodes/composite/nodes/node_composite_blur.cc @@ -10,6 +10,8 @@ #include "UI_interface.h" #include "UI_resources.h" +#include "COM_node_operation.hh" + #include "node_composite_util.hh" /* **************** BLUR ******************** */ @@ -71,6 +73,23 @@ static void node_composit_buts_blur(uiLayout *layout, bContext *UNUSED(C), Point uiItemR(col, ptr, "use_extended_bounds", 
UI_ITEM_R_SPLIT_EMPTY_NAME, nullptr, ICON_NONE); } +using namespace blender::realtime_compositor; + +class BlurOperation : public NodeOperation { + public: + using NodeOperation::NodeOperation; + + void execute() override + { + get_input("Image").pass_through(get_result("Image")); + } +}; + +static NodeOperation *get_compositor_operation(Context &context, DNode node) +{ + return new BlurOperation(context, node); +} + } // namespace blender::nodes::node_composite_blur_cc void register_node_type_cmp_blur() @@ -86,6 +105,7 @@ void register_node_type_cmp_blur() node_type_init(&ntype, file_ns::node_composit_init_blur); node_type_storage( &ntype, "NodeBlurData", node_free_standard_storage, node_copy_standard_storage); + ntype.get_compositor_operation = file_ns::get_compositor_operation; nodeRegisterType(&ntype); } diff --git a/source/blender/nodes/composite/nodes/node_composite_bokehblur.cc b/source/blender/nodes/composite/nodes/node_composite_bokehblur.cc index a936bafe671..538f00af12d 100644 --- a/source/blender/nodes/composite/nodes/node_composite_bokehblur.cc +++ b/source/blender/nodes/composite/nodes/node_composite_bokehblur.cc @@ -8,6 +8,8 @@ #include "UI_interface.h" #include "UI_resources.h" +#include "COM_node_operation.hh" + #include "node_composite_util.hh" /* **************** BLUR ******************** */ @@ -37,6 +39,23 @@ static void node_composit_buts_bokehblur(uiLayout *layout, bContext *UNUSED(C), uiItemR(layout, ptr, "use_extended_bounds", UI_ITEM_R_SPLIT_EMPTY_NAME, nullptr, ICON_NONE); } +using namespace blender::realtime_compositor; + +class BokehBlurOperation : public NodeOperation { + public: + using NodeOperation::NodeOperation; + + void execute() override + { + get_input("Image").pass_through(get_result("Image")); + } +}; + +static NodeOperation *get_compositor_operation(Context &context, DNode node) +{ + return new BokehBlurOperation(context, node); +} + } // namespace blender::nodes::node_composite_bokehblur_cc void 
register_node_type_cmp_bokehblur() @@ -49,6 +68,7 @@ void register_node_type_cmp_bokehblur() ntype.declare = file_ns::cmp_node_bokehblur_declare; ntype.draw_buttons = file_ns::node_composit_buts_bokehblur; node_type_init(&ntype, file_ns::node_composit_init_bokehblur); + ntype.get_compositor_operation = file_ns::get_compositor_operation; nodeRegisterType(&ntype); } diff --git a/source/blender/nodes/composite/nodes/node_composite_bokehimage.cc b/source/blender/nodes/composite/nodes/node_composite_bokehimage.cc index 8330c56736a..a11cba37191 100644 --- a/source/blender/nodes/composite/nodes/node_composite_bokehimage.cc +++ b/source/blender/nodes/composite/nodes/node_composite_bokehimage.cc @@ -8,6 +8,8 @@ #include "UI_interface.h" #include "UI_resources.h" +#include "COM_node_operation.hh" + #include "node_composite_util.hh" /* **************** Bokeh image Tools ******************** */ @@ -45,6 +47,23 @@ static void node_composit_buts_bokehimage(uiLayout *layout, bContext *UNUSED(C), uiItemR(layout, ptr, "shift", UI_ITEM_R_SPLIT_EMPTY_NAME | UI_ITEM_R_SLIDER, nullptr, ICON_NONE); } +using namespace blender::realtime_compositor; + +class BokehImageOperation : public NodeOperation { + public: + using NodeOperation::NodeOperation; + + void execute() override + { + get_result("Image").allocate_invalid(); + } +}; + +static NodeOperation *get_compositor_operation(Context &context, DNode node) +{ + return new BokehImageOperation(context, node); +} + } // namespace blender::nodes::node_composite_bokehimage_cc void register_node_type_cmp_bokehimage() @@ -60,6 +79,7 @@ void register_node_type_cmp_bokehimage() node_type_init(&ntype, file_ns::node_composit_init_bokehimage); node_type_storage( &ntype, "NodeBokehImage", node_free_standard_storage, node_copy_standard_storage); + ntype.get_compositor_operation = file_ns::get_compositor_operation; nodeRegisterType(&ntype); } diff --git a/source/blender/nodes/composite/nodes/node_composite_boxmask.cc 
b/source/blender/nodes/composite/nodes/node_composite_boxmask.cc index f39b69c63f2..9c7bb6432cb 100644 --- a/source/blender/nodes/composite/nodes/node_composite_boxmask.cc +++ b/source/blender/nodes/composite/nodes/node_composite_boxmask.cc @@ -5,9 +5,18 @@ * \ingroup cmpnodes */ +#include <cmath> + +#include "BLI_math_vec_types.hh" + #include "UI_interface.h" #include "UI_resources.h" +#include "GPU_shader.h" + +#include "COM_node_operation.hh" +#include "COM_utilities.hh" + #include "node_composite_util.hh" /* **************** SCALAR MATH ******************** */ @@ -48,6 +57,98 @@ static void node_composit_buts_boxmask(uiLayout *layout, bContext *UNUSED(C), Po uiItemR(layout, ptr, "mask_type", UI_ITEM_R_SPLIT_EMPTY_NAME, nullptr, ICON_NONE); } +using namespace blender::realtime_compositor; + +class BoxMaskOperation : public NodeOperation { + public: + using NodeOperation::NodeOperation; + + void execute() override + { + GPUShader *shader = shader_manager().get(get_shader_name()); + GPU_shader_bind(shader); + + const Domain domain = compute_domain(); + + GPU_shader_uniform_2iv(shader, "domain_size", domain.size); + + GPU_shader_uniform_2fv(shader, "location", get_location()); + GPU_shader_uniform_2fv(shader, "size", get_size() / 2.0f); + GPU_shader_uniform_1f(shader, "cos_angle", std::cos(get_angle())); + GPU_shader_uniform_1f(shader, "sin_angle", std::sin(get_angle())); + + const Result &input_mask = get_input("Mask"); + input_mask.bind_as_texture(shader, "base_mask_tx"); + + const Result &value = get_input("Value"); + value.bind_as_texture(shader, "mask_value_tx"); + + Result &output_mask = get_result("Mask"); + output_mask.allocate_texture(domain); + output_mask.bind_as_image(shader, "output_mask_img"); + + compute_dispatch_threads_at_least(shader, domain.size); + + input_mask.unbind_as_texture(); + value.unbind_as_texture(); + output_mask.unbind_as_image(); + GPU_shader_unbind(); + } + + Domain compute_domain() override + { + if 
(get_input("Mask").is_single_value()) { + return Domain(context().get_output_size()); + } + return get_input("Mask").domain(); + } + + CMPNodeMaskType get_mask_type() + { + return (CMPNodeMaskType)bnode().custom1; + } + + const char *get_shader_name() + { + switch (get_mask_type()) { + default: + case CMP_NODE_MASKTYPE_ADD: + return "compositor_box_mask_add"; + case CMP_NODE_MASKTYPE_SUBTRACT: + return "compositor_box_mask_subtract"; + case CMP_NODE_MASKTYPE_MULTIPLY: + return "compositor_box_mask_multiply"; + case CMP_NODE_MASKTYPE_NOT: + return "compositor_box_mask_not"; + } + } + + NodeBoxMask &get_node_box_mask() + { + return *static_cast<NodeBoxMask *>(bnode().storage); + } + + float2 get_location() + { + return float2(get_node_box_mask().x, get_node_box_mask().y); + } + + float2 get_size() + { + return float2(get_node_box_mask().width, get_node_box_mask().height); + } + + float get_angle() + { + return get_node_box_mask().rotation; + } +}; + +static NodeOperation *get_compositor_operation(Context &context, DNode node) +{ + return new BoxMaskOperation(context, node); +} + } // namespace blender::nodes::node_composite_boxmask_cc void register_node_type_cmp_boxmask() @@ -61,6 +162,7 @@ void register_node_type_cmp_boxmask() ntype.draw_buttons = file_ns::node_composit_buts_boxmask; node_type_init(&ntype, file_ns::node_composit_init_boxmask); node_type_storage(&ntype, "NodeBoxMask", node_free_standard_storage, node_copy_standard_storage); + ntype.get_compositor_operation = file_ns::get_compositor_operation; nodeRegisterType(&ntype); } diff --git a/source/blender/nodes/composite/nodes/node_composite_brightness.cc b/source/blender/nodes/composite/nodes/node_composite_brightness.cc index 65ed2885d9b..fa22f551de6 100644 --- a/source/blender/nodes/composite/nodes/node_composite_brightness.cc +++ b/source/blender/nodes/composite/nodes/node_composite_brightness.cc @@ -8,6 +8,10 @@ #include "UI_interface.h" #include "UI_resources.h" +#include "GPU_material.h" + +#include 
"COM_shader_node.hh" + #include "node_composite_util.hh" /* **************** Bright and Contrast ******************** */ @@ -16,9 +20,11 @@ namespace blender::nodes::node_composite_brightness_cc { static void cmp_node_brightcontrast_declare(NodeDeclarationBuilder &b) { - b.add_input<decl::Color>(N_("Image")).default_value({1.0f, 1.0f, 1.0f, 1.0f}); - b.add_input<decl::Float>(N_("Bright")).min(-100.0f).max(100.0f); - b.add_input<decl::Float>(N_("Contrast")).min(-100.0f).max(100.0f); + b.add_input<decl::Color>(N_("Image")) + .default_value({1.0f, 1.0f, 1.0f, 1.0f}) + .compositor_domain_priority(0); + b.add_input<decl::Float>(N_("Bright")).min(-100.0f).max(100.0f).compositor_domain_priority(1); + b.add_input<decl::Float>(N_("Contrast")).min(-100.0f).max(100.0f).compositor_domain_priority(2); b.add_output<decl::Color>(N_("Image")); } @@ -34,6 +40,38 @@ static void node_composit_buts_brightcontrast(uiLayout *layout, uiItemR(layout, ptr, "use_premultiply", UI_ITEM_R_SPLIT_EMPTY_NAME, nullptr, ICON_NONE); } +using namespace blender::realtime_compositor; + +class BrightContrastShaderNode : public ShaderNode { + public: + using ShaderNode::ShaderNode; + + void compile(GPUMaterial *material) override + { + GPUNodeStack *inputs = get_inputs_array(); + GPUNodeStack *outputs = get_outputs_array(); + + const float use_premultiply = get_use_premultiply(); + + GPU_stack_link(material, + &bnode(), + "node_composite_bright_contrast", + inputs, + outputs, + GPU_constant(&use_premultiply)); + } + + bool get_use_premultiply() + { + return bnode().custom1; + } +}; + +static ShaderNode *get_compositor_shader_node(DNode node) +{ + return new BrightContrastShaderNode(node); +} + } // namespace blender::nodes::node_composite_brightness_cc void register_node_type_cmp_brightcontrast() @@ -46,6 +84,7 @@ void register_node_type_cmp_brightcontrast() ntype.declare = file_ns::cmp_node_brightcontrast_declare; ntype.draw_buttons = file_ns::node_composit_buts_brightcontrast; node_type_init(&ntype, 
file_ns::node_composit_init_brightcontrast); + ntype.get_compositor_shader_node = file_ns::get_compositor_shader_node; nodeRegisterType(&ntype); } diff --git a/source/blender/nodes/composite/nodes/node_composite_channel_matte.cc b/source/blender/nodes/composite/nodes/node_composite_channel_matte.cc index 627f07fdfce..e899f7eb100 100644 --- a/source/blender/nodes/composite/nodes/node_composite_channel_matte.cc +++ b/source/blender/nodes/composite/nodes/node_composite_channel_matte.cc @@ -10,6 +10,10 @@ #include "UI_interface.h" #include "UI_resources.h" +#include "GPU_material.h" + +#include "COM_shader_node.hh" + #include "node_composite_util.hh" /* ******************* Channel Matte Node ********************************* */ @@ -18,7 +22,9 @@ namespace blender::nodes::node_composite_channel_matte_cc { static void cmp_node_channel_matte_declare(NodeDeclarationBuilder &b) { - b.add_input<decl::Color>(N_("Image")).default_value({1.0f, 1.0f, 1.0f, 1.0f}); + b.add_input<decl::Color>(N_("Image")) + .default_value({1.0f, 1.0f, 1.0f, 1.0f}) + .compositor_domain_priority(0); b.add_output<decl::Color>(N_("Image")); b.add_output<decl::Float>(N_("Matte")); } @@ -79,6 +85,96 @@ static void node_composit_buts_channel_matte(uiLayout *layout, col, ptr, "limit_min", UI_ITEM_R_SPLIT_EMPTY_NAME | UI_ITEM_R_SLIDER, nullptr, ICON_NONE); } +using namespace blender::realtime_compositor; + +class ChannelMatteShaderNode : public ShaderNode { + public: + using ShaderNode::ShaderNode; + + void compile(GPUMaterial *material) override + { + GPUNodeStack *inputs = get_inputs_array(); + GPUNodeStack *outputs = get_outputs_array(); + + const float color_space = get_color_space(); + const float matte_channel = get_matte_channel(); + float limit_channels[2]; + get_limit_channels(limit_channels); + const float max_limit = get_max_limit(); + const float min_limit = get_min_limit(); + + GPU_stack_link(material, + &bnode(), + "node_composite_channel_matte", + inputs, + outputs, + 
GPU_constant(&color_space), + GPU_constant(&matte_channel), + GPU_constant(limit_channels), + GPU_uniform(&max_limit), + GPU_uniform(&min_limit)); + } + + /* 1 -> CMP_NODE_CHANNEL_MATTE_CS_RGB + * 2 -> CMP_NODE_CHANNEL_MATTE_CS_HSV + * 3 -> CMP_NODE_CHANNEL_MATTE_CS_YUV + * 4 -> CMP_NODE_CHANNEL_MATTE_CS_YCC */ + int get_color_space() + { + return bnode().custom1; + } + + /* Get the index of the channel used to generate the matte. */ + int get_matte_channel() + { + return bnode().custom2 - 1; + } + + NodeChroma *get_node_chroma() + { + return static_cast<NodeChroma *>(bnode().storage); + } + + /* Get the index of the channel used to compute the limit value. */ + int get_limit_channel() + { + return get_node_chroma()->channel - 1; + } + + /* Get the indices of the channels used to compute the limit value. We always assume the limit + * algorithm is Max, if it is a single limit channel, store it in both limit channels, because + * the maximum of two identical values is is the same value. */ + void get_limit_channels(float limit_channels[2]) + { + if (get_node_chroma()->algorithm == CMP_NODE_CHANNEL_MATTE_LIMIT_ALGORITHM_MAX) { + /* If the algorithm is Max, store the indices of the other two channels other than the matte + * channel. */ + limit_channels[0] = (get_matte_channel() + 1) % 3; + limit_channels[1] = (get_matte_channel() + 2) % 3; + } + else { + /* If the algorithm is Single, store the index of the limit channel in both channels. 
*/ + limit_channels[0] = get_limit_channel(); + limit_channels[1] = get_limit_channel(); + } + } + + float get_max_limit() + { + return get_node_chroma()->t1; + } + + float get_min_limit() + { + return get_node_chroma()->t2; + } +}; + +static ShaderNode *get_compositor_shader_node(DNode node) +{ + return new ChannelMatteShaderNode(node); +} + } // namespace blender::nodes::node_composite_channel_matte_cc void register_node_type_cmp_channel_matte() @@ -93,6 +189,7 @@ void register_node_type_cmp_channel_matte() ntype.flag |= NODE_PREVIEW; node_type_init(&ntype, file_ns::node_composit_init_channel_matte); node_type_storage(&ntype, "NodeChroma", node_free_standard_storage, node_copy_standard_storage); + ntype.get_compositor_shader_node = file_ns::get_compositor_shader_node; nodeRegisterType(&ntype); } diff --git a/source/blender/nodes/composite/nodes/node_composite_chroma_matte.cc b/source/blender/nodes/composite/nodes/node_composite_chroma_matte.cc index 69319c6825d..cb3648c5680 100644 --- a/source/blender/nodes/composite/nodes/node_composite_chroma_matte.cc +++ b/source/blender/nodes/composite/nodes/node_composite_chroma_matte.cc @@ -5,11 +5,17 @@ * \ingroup cmpnodes */ +#include <cmath> + #include "BLI_math_rotation.h" #include "UI_interface.h" #include "UI_resources.h" +#include "GPU_material.h" + +#include "COM_shader_node.hh" + #include "node_composite_util.hh" /* ******************* Chroma Key ********************************************************** */ @@ -18,8 +24,12 @@ namespace blender::nodes::node_composite_chroma_matte_cc { static void cmp_node_chroma_matte_declare(NodeDeclarationBuilder &b) { - b.add_input<decl::Color>(N_("Image")).default_value({1.0f, 1.0f, 1.0f, 1.0f}); - b.add_input<decl::Color>(N_("Key Color")).default_value({1.0f, 1.0f, 1.0f, 1.0f}); + b.add_input<decl::Color>(N_("Image")) + .default_value({1.0f, 1.0f, 1.0f, 1.0f}) + .compositor_domain_priority(0); + b.add_input<decl::Color>(N_("Key Color")) + .default_value({1.0f, 1.0f, 1.0f, 
1.0f}) + .compositor_domain_priority(1); b.add_output<decl::Color>(N_("Image")); b.add_output<decl::Float>(N_("Matte")); } @@ -51,6 +61,57 @@ static void node_composit_buts_chroma_matte(uiLayout *layout, bContext *UNUSED(C // uiItemR(col, ptr, "shadow_adjust", UI_ITEM_R_SLIDER, nullptr, ICON_NONE); } +using namespace blender::realtime_compositor; + +class ChromaMatteShaderNode : public ShaderNode { + public: + using ShaderNode::ShaderNode; + + void compile(GPUMaterial *material) override + { + GPUNodeStack *inputs = get_inputs_array(); + GPUNodeStack *outputs = get_outputs_array(); + + const float acceptance = get_acceptance(); + const float cutoff = get_cutoff(); + const float falloff = get_falloff(); + + GPU_stack_link(material, + &bnode(), + "node_composite_chroma_matte", + inputs, + outputs, + GPU_uniform(&acceptance), + GPU_uniform(&cutoff), + GPU_uniform(&falloff)); + } + + NodeChroma *get_node_chroma() + { + return static_cast<NodeChroma *>(bnode().storage); + } + + float get_acceptance() + { + return std::tan(get_node_chroma()->t1) / 2.0f; + } + + float get_cutoff() + { + return get_node_chroma()->t2; + } + + float get_falloff() + { + return get_node_chroma()->fstrength; + } +}; + +static ShaderNode *get_compositor_shader_node(DNode node) +{ + return new ChromaMatteShaderNode(node); +} + } // namespace blender::nodes::node_composite_chroma_matte_cc void register_node_type_cmp_chroma_matte() @@ -65,6 +126,7 @@ void register_node_type_cmp_chroma_matte() ntype.flag |= NODE_PREVIEW; node_type_init(&ntype, file_ns::node_composit_init_chroma_matte); node_type_storage(&ntype, "NodeChroma", node_free_standard_storage, node_copy_standard_storage); + ntype.get_compositor_shader_node = file_ns::get_compositor_shader_node; nodeRegisterType(&ntype); } diff --git a/source/blender/nodes/composite/nodes/node_composite_color_matte.cc b/source/blender/nodes/composite/nodes/node_composite_color_matte.cc index 474fb1b72f2..5e3aaf512e6 100644 --- 
a/source/blender/nodes/composite/nodes/node_composite_color_matte.cc +++ b/source/blender/nodes/composite/nodes/node_composite_color_matte.cc @@ -8,6 +8,10 @@ #include "UI_interface.h" #include "UI_resources.h" +#include "GPU_material.h" + +#include "COM_shader_node.hh" + #include "node_composite_util.hh" /* ******************* Color Matte ********************************************************** */ @@ -16,8 +20,12 @@ namespace blender::nodes::node_composite_color_matte_cc { static void cmp_node_color_matte_declare(NodeDeclarationBuilder &b) { - b.add_input<decl::Color>(N_("Image")).default_value({1.0f, 1.0f, 1.0f, 1.0f}); - b.add_input<decl::Color>(N_("Key Color")).default_value({1.0f, 1.0f, 1.0f, 1.0f}); + b.add_input<decl::Color>(N_("Image")) + .default_value({1.0f, 1.0f, 1.0f, 1.0f}) + .compositor_domain_priority(0); + b.add_input<decl::Color>(N_("Key Color")) + .default_value({1.0f, 1.0f, 1.0f, 1.0f}) + .compositor_domain_priority(1); b.add_output<decl::Color>(N_("Image")); b.add_output<decl::Float>(N_("Matte")); } @@ -50,6 +58,58 @@ static void node_composit_buts_color_matte(uiLayout *layout, bContext *UNUSED(C) col, ptr, "color_value", UI_ITEM_R_SPLIT_EMPTY_NAME | UI_ITEM_R_SLIDER, nullptr, ICON_NONE); } +using namespace blender::realtime_compositor; + +class ColorMatteShaderNode : public ShaderNode { + public: + using ShaderNode::ShaderNode; + + void compile(GPUMaterial *material) override + { + GPUNodeStack *inputs = get_inputs_array(); + GPUNodeStack *outputs = get_outputs_array(); + + const float hue_epsilon = get_hue_epsilon(); + const float saturation_epsilon = get_saturation_epsilon(); + const float value_epsilon = get_value_epsilon(); + + GPU_stack_link(material, + &bnode(), + "node_composite_color_matte", + inputs, + outputs, + GPU_uniform(&hue_epsilon), + GPU_uniform(&saturation_epsilon), + GPU_uniform(&value_epsilon)); + } + + NodeChroma *get_node_chroma() + { + return static_cast<NodeChroma *>(bnode().storage); + } + + float get_hue_epsilon() + 
{ + /* Divide by 2 because the hue wraps around. */ + return get_node_chroma()->t1 / 2.0f; + } + + float get_saturation_epsilon() + { + return get_node_chroma()->t2; + } + + float get_value_epsilon() + { + return get_node_chroma()->t3; + } +}; + +static ShaderNode *get_compositor_shader_node(DNode node) +{ + return new ColorMatteShaderNode(node); +} + } // namespace blender::nodes::node_composite_color_matte_cc void register_node_type_cmp_color_matte() @@ -64,6 +124,7 @@ void register_node_type_cmp_color_matte() ntype.flag |= NODE_PREVIEW; node_type_init(&ntype, file_ns::node_composit_init_color_matte); node_type_storage(&ntype, "NodeChroma", node_free_standard_storage, node_copy_standard_storage); + ntype.get_compositor_shader_node = file_ns::get_compositor_shader_node; nodeRegisterType(&ntype); } diff --git a/source/blender/nodes/composite/nodes/node_composite_color_spill.cc b/source/blender/nodes/composite/nodes/node_composite_color_spill.cc index 9ad5dfbaeb2..9744c01a256 100644 --- a/source/blender/nodes/composite/nodes/node_composite_color_spill.cc +++ b/source/blender/nodes/composite/nodes/node_composite_color_spill.cc @@ -10,6 +10,10 @@ #include "UI_interface.h" #include "UI_resources.h" +#include "GPU_material.h" + +#include "COM_shader_node.hh" + #include "node_composite_util.hh" /* ******************* Color Spill Suppression ********************************* */ @@ -18,8 +22,15 @@ namespace blender::nodes::node_composite_color_spill_cc { static void cmp_node_color_spill_declare(NodeDeclarationBuilder &b) { - b.add_input<decl::Color>(N_("Image")).default_value({1.0f, 1.0f, 1.0f, 1.0f}); - b.add_input<decl::Float>(N_("Fac")).default_value(1.0f).min(0.0f).max(1.0f).subtype(PROP_FACTOR); + b.add_input<decl::Color>(N_("Image")) + .default_value({1.0f, 1.0f, 1.0f, 1.0f}) + .compositor_domain_priority(0); + b.add_input<decl::Float>(N_("Fac")) + .default_value(1.0f) + .min(0.0f) + .max(1.0f) + .subtype(PROP_FACTOR) + .compositor_domain_priority(1); 
b.add_output<decl::Color>(N_("Image")); } @@ -27,8 +38,8 @@ static void node_composit_init_color_spill(bNodeTree *UNUSED(ntree), bNode *node { NodeColorspill *ncs = MEM_cnew<NodeColorspill>(__func__); node->storage = ncs; + node->custom2 = CMP_NODE_COLOR_SPILL_LIMIT_ALGORITHM_SINGLE; node->custom1 = 2; /* green channel */ - node->custom2 = 0; /* simple limit algorithm */ ncs->limchan = 0; /* limit by red */ ncs->limscale = 1.0f; /* limit scaling factor */ ncs->unspill = 0; /* do not use unspill */ @@ -80,6 +91,103 @@ static void node_composit_buts_color_spill(uiLayout *layout, bContext *UNUSED(C) } } +using namespace blender::realtime_compositor; + +class ColorSpillShaderNode : public ShaderNode { + public: + using ShaderNode::ShaderNode; + + void compile(GPUMaterial *material) override + { + GPUNodeStack *inputs = get_inputs_array(); + GPUNodeStack *outputs = get_outputs_array(); + + const float spill_channel = get_spill_channel(); + float spill_scale[3]; + get_spill_scale(spill_scale); + float limit_channels[2]; + get_limit_channels(limit_channels); + const float limit_scale = get_limit_scale(); + + GPU_stack_link(material, + &bnode(), + "node_composite_color_spill", + inputs, + outputs, + GPU_constant(&spill_channel), + GPU_uniform(spill_scale), + GPU_constant(limit_channels), + GPU_uniform(&limit_scale)); + } + + /* Get the index of the channel used for spilling. 
*/ + int get_spill_channel() + { + return bnode().custom1 - 1; + } + + CMPNodeColorSpillLimitAlgorithm get_limit_algorithm() + { + return (CMPNodeColorSpillLimitAlgorithm)bnode().custom2; + } + + NodeColorspill *get_node_color_spill() + { + return static_cast<NodeColorspill *>(bnode().storage); + } + + void get_spill_scale(float spill_scale[3]) + { + const NodeColorspill *node_color_spill = get_node_color_spill(); + if (node_color_spill->unspill) { + spill_scale[0] = node_color_spill->uspillr; + spill_scale[1] = node_color_spill->uspillg; + spill_scale[2] = node_color_spill->uspillb; + spill_scale[get_spill_channel()] *= -1.0f; + } + else { + spill_scale[0] = 0.0f; + spill_scale[1] = 0.0f; + spill_scale[2] = 0.0f; + spill_scale[get_spill_channel()] = -1.0f; + } + } + + /* Get the index of the channel used for limiting. */ + int get_limit_channel() + { + return get_node_color_spill()->limchan; + } + + /* Get the indices of the channels used to compute the limit value. We always assume the limit + * algorithm is Average, if it is a single limit channel, store it in both limit channels, + * because the average of two identical values is the same value. */ + void get_limit_channels(float limit_channels[2]) + { + if (get_limit_algorithm() == CMP_NODE_COLOR_SPILL_LIMIT_ALGORITHM_AVERAGE) { + /* If the algorithm is Average, store the indices of the other two channels other than the + * spill channel. */ + limit_channels[0] = (get_spill_channel() + 1) % 3; + limit_channels[1] = (get_spill_channel() + 2) % 3; + } + else { + /* If the algorithm is Single, store the index of the limit channel in both channels. 
*/ + limit_channels[0] = get_limit_channel(); + limit_channels[1] = get_limit_channel(); + } + } + + float get_limit_scale() + { + return get_node_color_spill()->limscale; + } +}; + +static ShaderNode *get_compositor_shader_node(DNode node) +{ + return new ColorSpillShaderNode(node); +} + } // namespace blender::nodes::node_composite_color_spill_cc void register_node_type_cmp_color_spill() @@ -94,6 +202,7 @@ void register_node_type_cmp_color_spill() node_type_init(&ntype, file_ns::node_composit_init_color_spill); node_type_storage( &ntype, "NodeColorspill", node_free_standard_storage, node_copy_standard_storage); + ntype.get_compositor_shader_node = file_ns::get_compositor_shader_node; nodeRegisterType(&ntype); } diff --git a/source/blender/nodes/composite/nodes/node_composite_colorbalance.cc b/source/blender/nodes/composite/nodes/node_composite_colorbalance.cc index dd081c8fc12..95675169c76 100644 --- a/source/blender/nodes/composite/nodes/node_composite_colorbalance.cc +++ b/source/blender/nodes/composite/nodes/node_composite_colorbalance.cc @@ -10,6 +10,10 @@ #include "UI_interface.h" #include "UI_resources.h" +#include "GPU_material.h" + +#include "COM_shader_node.hh" + #include "node_composite_util.hh" /* ******************* Color Balance ********************************* */ @@ -46,8 +50,15 @@ namespace blender::nodes::node_composite_colorbalance_cc { static void cmp_node_colorbalance_declare(NodeDeclarationBuilder &b) { - b.add_input<decl::Float>(N_("Fac")).default_value(1.0f).min(0.0f).max(1.0f).subtype(PROP_FACTOR); - b.add_input<decl::Color>(N_("Image")).default_value({1.0f, 1.0f, 1.0f, 1.0f}); + b.add_input<decl::Float>(N_("Fac")) + .default_value(1.0f) + .min(0.0f) + .max(1.0f) + .subtype(PROP_FACTOR) + .compositor_domain_priority(1); + b.add_input<decl::Color>(N_("Image")) + .default_value({1.0f, 1.0f, 1.0f, 1.0f}) + .compositor_domain_priority(0); b.add_output<decl::Color>(N_("Image")); } @@ -71,7 +82,7 @@ static void 
node_composit_buts_colorbalance(uiLayout *layout, bContext *UNUSED(C uiItemR(layout, ptr, "correction_method", UI_ITEM_R_SPLIT_EMPTY_NAME, nullptr, ICON_NONE); - if (RNA_enum_get(ptr, "correction_method") == 0) { + if (RNA_enum_get(ptr, "correction_method") == CMP_NODE_COLOR_BALANCE_LGG) { split = uiLayoutSplit(layout, 0.0f, false); col = uiLayoutColumn(split, false); @@ -116,7 +127,7 @@ static void node_composit_buts_colorbalance_ex(uiLayout *layout, { uiItemR(layout, ptr, "correction_method", UI_ITEM_R_SPLIT_EMPTY_NAME, nullptr, ICON_NONE); - if (RNA_enum_get(ptr, "correction_method") == 0) { + if (RNA_enum_get(ptr, "correction_method") == CMP_NODE_COLOR_BALANCE_LGG) { uiTemplateColorPicker(layout, ptr, "lift", true, true, false, true); uiItemR(layout, ptr, "lift", UI_ITEM_R_SPLIT_EMPTY_NAME, nullptr, ICON_NONE); @@ -139,6 +150,58 @@ static void node_composit_buts_colorbalance_ex(uiLayout *layout, } } +using namespace blender::realtime_compositor; + +class ColorBalanceShaderNode : public ShaderNode { + public: + using ShaderNode::ShaderNode; + + void compile(GPUMaterial *material) override + { + GPUNodeStack *inputs = get_inputs_array(); + GPUNodeStack *outputs = get_outputs_array(); + + const NodeColorBalance *node_color_balance = get_node_color_balance(); + + if (get_color_balance_method() == CMP_NODE_COLOR_BALANCE_LGG) { + GPU_stack_link(material, + &bnode(), + "node_composite_color_balance_lgg", + inputs, + outputs, + GPU_uniform(node_color_balance->lift), + GPU_uniform(node_color_balance->gamma), + GPU_uniform(node_color_balance->gain)); + return; + } + + GPU_stack_link(material, + &bnode(), + "node_composite_color_balance_asc_cdl", + inputs, + outputs, + GPU_uniform(node_color_balance->offset), + GPU_uniform(node_color_balance->power), + GPU_uniform(node_color_balance->slope), + GPU_uniform(&node_color_balance->offset_basis)); + } + + CMPNodeColorBalanceMethod get_color_balance_method() + { + return (CMPNodeColorBalanceMethod)bnode().custom1; + } + + 
NodeColorBalance *get_node_color_balance() + { + return static_cast<NodeColorBalance *>(bnode().storage); + } +}; + +static ShaderNode *get_compositor_shader_node(DNode node) +{ + return new ColorBalanceShaderNode(node); +} + } // namespace blender::nodes::node_composite_colorbalance_cc void register_node_type_cmp_colorbalance() @@ -155,6 +218,7 @@ void register_node_type_cmp_colorbalance() node_type_init(&ntype, file_ns::node_composit_init_colorbalance); node_type_storage( &ntype, "NodeColorBalance", node_free_standard_storage, node_copy_standard_storage); + ntype.get_compositor_shader_node = file_ns::get_compositor_shader_node; nodeRegisterType(&ntype); } diff --git a/source/blender/nodes/composite/nodes/node_composite_colorcorrection.cc b/source/blender/nodes/composite/nodes/node_composite_colorcorrection.cc index 39ecd277cec..36e6672ce1c 100644 --- a/source/blender/nodes/composite/nodes/node_composite_colorcorrection.cc +++ b/source/blender/nodes/composite/nodes/node_composite_colorcorrection.cc @@ -5,9 +5,15 @@ * \ingroup cmpnodes */ +#include "IMB_colormanagement.h" + #include "UI_interface.h" #include "UI_resources.h" +#include "GPU_material.h" + +#include "COM_shader_node.hh" + #include "node_composite_util.hh" /* ******************* Color Correction ********************************* */ @@ -16,8 +22,14 @@ namespace blender::nodes::node_composite_colorcorrection_cc { static void cmp_node_colorcorrection_declare(NodeDeclarationBuilder &b) { - b.add_input<decl::Color>(N_("Image")).default_value({1.0f, 1.0f, 1.0f, 1.0f}); - b.add_input<decl::Float>(N_("Mask")).default_value(1.0f).min(0.0f).max(1.0f); + b.add_input<decl::Color>(N_("Image")) + .default_value({1.0f, 1.0f, 1.0f, 1.0f}) + .compositor_domain_priority(0); + b.add_input<decl::Float>(N_("Mask")) + .default_value(1.0f) + .min(0.0f) + .max(1.0f) + .compositor_domain_priority(1); b.add_output<decl::Color>(N_("Image")); } @@ -266,6 +278,73 @@ static void node_composit_buts_colorcorrection_ex(uiLayout 
*layout, uiItemR(row, ptr, "midtones_end", UI_ITEM_R_SPLIT_EMPTY_NAME, nullptr, ICON_NONE); } +using namespace blender::realtime_compositor; + +class ColorCorrectionShaderNode : public ShaderNode { + public: + using ShaderNode::ShaderNode; + + void compile(GPUMaterial *material) override + { + GPUNodeStack *inputs = get_inputs_array(); + GPUNodeStack *outputs = get_outputs_array(); + + float enabled_channels[3]; + get_enabled_channels(enabled_channels); + float luminance_coefficients[3]; + IMB_colormanagement_get_luminance_coefficients(luminance_coefficients); + + const NodeColorCorrection *node_color_correction = get_node_color_correction(); + + GPU_stack_link(material, + &bnode(), + "node_composite_color_correction", + inputs, + outputs, + GPU_constant(enabled_channels), + GPU_uniform(&node_color_correction->startmidtones), + GPU_uniform(&node_color_correction->endmidtones), + GPU_uniform(&node_color_correction->master.saturation), + GPU_uniform(&node_color_correction->master.contrast), + GPU_uniform(&node_color_correction->master.gamma), + GPU_uniform(&node_color_correction->master.gain), + GPU_uniform(&node_color_correction->master.lift), + GPU_uniform(&node_color_correction->shadows.saturation), + GPU_uniform(&node_color_correction->shadows.contrast), + GPU_uniform(&node_color_correction->shadows.gamma), + GPU_uniform(&node_color_correction->shadows.gain), + GPU_uniform(&node_color_correction->shadows.lift), + GPU_uniform(&node_color_correction->midtones.saturation), + GPU_uniform(&node_color_correction->midtones.contrast), + GPU_uniform(&node_color_correction->midtones.gamma), + GPU_uniform(&node_color_correction->midtones.gain), + GPU_uniform(&node_color_correction->midtones.lift), + GPU_uniform(&node_color_correction->highlights.saturation), + GPU_uniform(&node_color_correction->highlights.contrast), + GPU_uniform(&node_color_correction->highlights.gamma), + GPU_uniform(&node_color_correction->highlights.gain), + 
GPU_uniform(&node_color_correction->highlights.lift), + GPU_constant(luminance_coefficients)); + } + + void get_enabled_channels(float enabled_channels[3]) + { + for (int i = 0; i < 3; i++) { + enabled_channels[i] = (bnode().custom1 & (1 << i)) ? 1.0f : 0.0f; + } + } + + NodeColorCorrection *get_node_color_correction() + { + return static_cast<NodeColorCorrection *>(bnode().storage); + } +}; + +static ShaderNode *get_compositor_shader_node(DNode node) +{ + return new ColorCorrectionShaderNode(node); +} + } // namespace blender::nodes::node_composite_colorcorrection_cc void register_node_type_cmp_colorcorrection() @@ -282,6 +361,7 @@ void register_node_type_cmp_colorcorrection() node_type_init(&ntype, file_ns::node_composit_init_colorcorrection); node_type_storage( &ntype, "NodeColorCorrection", node_free_standard_storage, node_copy_standard_storage); + ntype.get_compositor_shader_node = file_ns::get_compositor_shader_node; nodeRegisterType(&ntype); } diff --git a/source/blender/nodes/composite/nodes/node_composite_composite.cc b/source/blender/nodes/composite/nodes/node_composite_composite.cc index d35ce7dc11a..68061bb434d 100644 --- a/source/blender/nodes/composite/nodes/node_composite_composite.cc +++ b/source/blender/nodes/composite/nodes/node_composite_composite.cc @@ -5,9 +5,18 @@ * \ingroup cmpnodes */ +#include "BLI_math_vec_types.hh" + #include "UI_interface.h" #include "UI_resources.h" +#include "GPU_shader.h" +#include "GPU_state.h" +#include "GPU_texture.h" + +#include "COM_node_operation.hh" +#include "COM_utilities.hh" + #include "node_composite_util.hh" /* **************** COMPOSITE ******************** */ @@ -26,6 +35,125 @@ static void node_composit_buts_composite(uiLayout *layout, bContext *UNUSED(C), uiItemR(layout, ptr, "use_alpha", UI_ITEM_R_SPLIT_EMPTY_NAME, nullptr, ICON_NONE); } +using namespace blender::realtime_compositor; + +class CompositeOperation : public NodeOperation { + public: + using NodeOperation::NodeOperation; + + void execute() 
override + { + const Result &image = get_input("Image"); + const Result &alpha = get_input("Alpha"); + + if (image.is_single_value() && alpha.is_single_value()) { + execute_clear(); + } + else if (ignore_alpha()) { + execute_ignore_alpha(); + } + else if (!node().input_by_identifier("Alpha")->is_logically_linked()) { + execute_copy(); + } + else { + execute_set_alpha(); + } + } + + /* Executes when all inputs are single values, in which case, the output texture can just be + * cleared to the appropriate color. */ + void execute_clear() + { + const Result &image = get_input("Image"); + const Result &alpha = get_input("Alpha"); + + float4 color = image.get_color_value(); + if (ignore_alpha()) { + color.w = 1.0f; + } + else if (node().input_by_identifier("Alpha")->is_logically_linked()) { + color.w = alpha.get_float_value(); + } + + GPU_texture_clear(context().get_output_texture(), GPU_DATA_FLOAT, color); + } + + /* Executes when the alpha channel of the image is ignored. */ + void execute_ignore_alpha() + { + GPUShader *shader = shader_manager().get("compositor_convert_color_to_opaque"); + GPU_shader_bind(shader); + + const Result &image = get_input("Image"); + image.bind_as_texture(shader, "input_tx"); + + GPUTexture *output_texture = context().get_output_texture(); + const int image_unit = GPU_shader_get_texture_binding(shader, "output_img"); + GPU_texture_image_bind(output_texture, image_unit); + + compute_dispatch_threads_at_least(shader, compute_domain().size); + + image.unbind_as_texture(); + GPU_texture_image_unbind(output_texture); + GPU_shader_unbind(); + } + + /* Executes when the image texture is written with no adjustments and can thus be copied directly + * to the output texture. */ + void execute_copy() + { + const Result &image = get_input("Image"); + + /* Make sure any prior writes to the texture are reflected before copying it. 
*/ + GPU_memory_barrier(GPU_BARRIER_TEXTURE_UPDATE); + + GPU_texture_copy(context().get_output_texture(), image.texture()); + } + + /* Executes when the alpha channel of the image is set as the value of the input alpha. */ + void execute_set_alpha() + { + GPUShader *shader = shader_manager().get("compositor_set_alpha"); + GPU_shader_bind(shader); + + const Result &image = get_input("Image"); + image.bind_as_texture(shader, "image_tx"); + + const Result &alpha = get_input("Alpha"); + alpha.bind_as_texture(shader, "alpha_tx"); + + GPUTexture *output_texture = context().get_output_texture(); + const int image_unit = GPU_shader_get_texture_binding(shader, "output_img"); + GPU_texture_image_bind(output_texture, image_unit); + + compute_dispatch_threads_at_least(shader, compute_domain().size); + + image.unbind_as_texture(); + alpha.unbind_as_texture(); + GPU_texture_image_unbind(output_texture); + GPU_shader_unbind(); + } + + /* If true, the alpha channel of the image is set to 1, that is, it becomes opaque. If false, the + * alpha channel of the image is retained, but only if the alpha input is not linked. If the + * alpha input is linked, it the value of that input will be used as the alpha of the image. */ + bool ignore_alpha() + { + return bnode().custom2 & CMP_NODE_OUTPUT_IGNORE_ALPHA; + } + + /* The operation domain have the same dimensions of the output without any transformations. 
*/ + Domain compute_domain() override + { + return Domain(context().get_output_size()); + } +}; + +static NodeOperation *get_compositor_operation(Context &context, DNode node) +{ + return new CompositeOperation(context, node); +} + } // namespace blender::nodes::node_composite_composite_cc void register_node_type_cmp_composite() @@ -37,6 +165,7 @@ void register_node_type_cmp_composite() cmp_node_type_base(&ntype, CMP_NODE_COMPOSITE, "Composite", NODE_CLASS_OUTPUT); ntype.declare = file_ns::cmp_node_composite_declare; ntype.draw_buttons = file_ns::node_composit_buts_composite; + ntype.get_compositor_operation = file_ns::get_compositor_operation; ntype.flag |= NODE_PREVIEW; ntype.no_muting = true; diff --git a/source/blender/nodes/composite/nodes/node_composite_convert_color_space.cc b/source/blender/nodes/composite/nodes/node_composite_convert_color_space.cc index 303248c3852..e36da39cca1 100644 --- a/source/blender/nodes/composite/nodes/node_composite_convert_color_space.cc +++ b/source/blender/nodes/composite/nodes/node_composite_convert_color_space.cc @@ -14,6 +14,8 @@ #include "IMB_colormanagement.h" +#include "COM_node_operation.hh" + namespace blender::nodes::node_composite_convert_color_space_cc { static void CMP_NODE_CONVERT_COLOR_SPACE_declare(NodeDeclarationBuilder &b) @@ -47,6 +49,23 @@ static void node_composit_buts_convert_colorspace(uiLayout *layout, uiItemR(layout, ptr, "to_color_space", UI_ITEM_R_SPLIT_EMPTY_NAME, nullptr, ICON_NONE); } +using namespace blender::realtime_compositor; + +class ConvertColorSpaceOperation : public NodeOperation { + public: + using NodeOperation::NodeOperation; + + void execute() override + { + get_input("Image").pass_through(get_result("Image")); + } +}; + +static NodeOperation *get_compositor_operation(Context &context, DNode node) +{ + return new ConvertColorSpaceOperation(context, node); +} + } // namespace blender::nodes::node_composite_convert_color_space_cc void register_node_type_cmp_convert_color_space(void) @@ 
-62,6 +81,7 @@ void register_node_type_cmp_convert_color_space(void) node_type_init(&ntype, file_ns::node_composit_init_convert_colorspace); node_type_storage( &ntype, "NodeConvertColorSpace", node_free_standard_storage, node_copy_standard_storage); + ntype.get_compositor_operation = file_ns::get_compositor_operation; nodeRegisterType(&ntype); } diff --git a/source/blender/nodes/composite/nodes/node_composite_cornerpin.cc b/source/blender/nodes/composite/nodes/node_composite_cornerpin.cc index 07da0da0be1..9679701a7cf 100644 --- a/source/blender/nodes/composite/nodes/node_composite_cornerpin.cc +++ b/source/blender/nodes/composite/nodes/node_composite_cornerpin.cc @@ -5,6 +5,8 @@ * \ingroup cmpnodes */ +#include "COM_node_operation.hh" + #include "node_composite_util.hh" namespace blender::nodes::node_composite_cornerpin_cc { @@ -32,6 +34,24 @@ static void cmp_node_cornerpin_declare(NodeDeclarationBuilder &b) b.add_output<decl::Float>(N_("Plane")); } +using namespace blender::realtime_compositor; + +class CornerPinOperation : public NodeOperation { + public: + using NodeOperation::NodeOperation; + + void execute() override + { + get_input("Image").pass_through(get_result("Image")); + get_result("Plane").allocate_invalid(); + } +}; + +static NodeOperation *get_compositor_operation(Context &context, DNode node) +{ + return new CornerPinOperation(context, node); +} + } // namespace blender::nodes::node_composite_cornerpin_cc void register_node_type_cmp_cornerpin() @@ -42,6 +62,7 @@ void register_node_type_cmp_cornerpin() cmp_node_type_base(&ntype, CMP_NODE_CORNERPIN, "Corner Pin", NODE_CLASS_DISTORT); ntype.declare = file_ns::cmp_node_cornerpin_declare; + ntype.get_compositor_operation = file_ns::get_compositor_operation; nodeRegisterType(&ntype); } diff --git a/source/blender/nodes/composite/nodes/node_composite_crop.cc b/source/blender/nodes/composite/nodes/node_composite_crop.cc index 823e1052dd0..d7331732fc7 100644 --- 
a/source/blender/nodes/composite/nodes/node_composite_crop.cc +++ b/source/blender/nodes/composite/nodes/node_composite_crop.cc @@ -5,11 +5,22 @@ * \ingroup cmpnodes */ +#include "BLI_math_base.h" +#include "BLI_math_vec_types.hh" + +#include "DNA_node_types.h" + #include "RNA_access.h" #include "UI_interface.h" #include "UI_resources.h" +#include "GPU_shader.h" +#include "GPU_texture.h" + +#include "COM_node_operation.hh" +#include "COM_utilities.hh" + #include "node_composite_util.hh" /* **************** Crop ******************** */ @@ -18,7 +29,9 @@ namespace blender::nodes::node_composite_crop_cc { static void cmp_node_crop_declare(NodeDeclarationBuilder &b) { - b.add_input<decl::Color>(N_("Image")).default_value({1.0f, 1.0f, 1.0f, 1.0f}); + b.add_input<decl::Color>(N_("Image")) + .default_value({1.0f, 1.0f, 1.0f, 1.0f}) + .compositor_domain_priority(0); b.add_output<decl::Color>(N_("Image")); } @@ -54,6 +67,161 @@ static void node_composit_buts_crop(uiLayout *layout, bContext *UNUSED(C), Point } } +using namespace blender::realtime_compositor; + +class CropOperation : public NodeOperation { + public: + using NodeOperation::NodeOperation; + + void execute() override + { + /* The operation does nothing, so just pass the input through. */ + if (is_identity()) { + get_input("Image").pass_through(get_result("Image")); + return; + } + + if (get_is_image_crop()) { + execute_image_crop(); + } + else { + execute_alpha_crop(); + } + } + + /* Crop by replacing areas outside of the cropping bounds with zero alpha. The output have the + * same domain as the input image. 
*/ + void execute_alpha_crop() + { + GPUShader *shader = shader_manager().get("compositor_alpha_crop"); + GPU_shader_bind(shader); + + int2 lower_bound, upper_bound; + compute_cropping_bounds(lower_bound, upper_bound); + GPU_shader_uniform_2iv(shader, "lower_bound", lower_bound); + GPU_shader_uniform_2iv(shader, "upper_bound", upper_bound); + + const Result &input_image = get_input("Image"); + input_image.bind_as_texture(shader, "input_tx"); + + const Domain domain = compute_domain(); + + Result &output_image = get_result("Image"); + output_image.allocate_texture(domain); + output_image.bind_as_image(shader, "output_img"); + + compute_dispatch_threads_at_least(shader, domain.size); + + input_image.unbind_as_texture(); + output_image.unbind_as_image(); + GPU_shader_unbind(); + } + + /* Crop the image into a new size that matches the cropping bounds. */ + void execute_image_crop() + { + int2 lower_bound, upper_bound; + compute_cropping_bounds(lower_bound, upper_bound); + + /* The image is cropped into nothing, so just return a single zero value. */ + if (lower_bound.x == upper_bound.x || lower_bound.y == upper_bound.y) { + Result &result = get_result("Image"); + result.allocate_invalid(); + return; + } + + GPUShader *shader = shader_manager().get("compositor_image_crop"); + GPU_shader_bind(shader); + + GPU_shader_uniform_2iv(shader, "lower_bound", lower_bound); + + const Result &input_image = get_input("Image"); + input_image.bind_as_texture(shader, "input_tx"); + + const int2 size = upper_bound - lower_bound; + + Result &output_image = get_result("Image"); + output_image.allocate_texture(Domain(size, compute_domain().transformation)); + output_image.bind_as_image(shader, "output_img"); + + compute_dispatch_threads_at_least(shader, size); + + input_image.unbind_as_texture(); + output_image.unbind_as_image(); + GPU_shader_unbind(); + } + + /* If true, the image should actually be cropped into a new size. 
Otherwise, if false, the region + * outside of the cropping bounds will be set to a zero alpha value. */ + bool get_is_image_crop() + { + return bnode().custom1; + } + + bool get_is_relative() + { + return bnode().custom2; + } + + NodeTwoXYs &get_node_two_xys() + { + return *static_cast<NodeTwoXYs *>(bnode().storage); + } + + /* Returns true if the operation does nothing and the input can be passed through. */ + bool is_identity() + { + const Result &input = get_input("Image"); + /* Single value inputs can't be cropped and are returned as is. */ + if (input.is_single_value()) { + return true; + } + + int2 lower_bound, upper_bound; + compute_cropping_bounds(lower_bound, upper_bound); + const int2 input_size = input.domain().size; + /* The cropping bounds cover the whole image, so no cropping happens. */ + if (lower_bound == int2(0) && upper_bound == input_size) { + return true; + } + + return false; + } + + void compute_cropping_bounds(int2 &lower_bound, int2 &upper_bound) + { + const NodeTwoXYs &node_two_xys = get_node_two_xys(); + const int2 input_size = get_input("Image").domain().size; + + if (get_is_relative()) { + /* The cropping bounds are relative to the image size. The factors are in the [0, 1] range, + * so it is guaranteed that they won't go over the input image size. */ + lower_bound.x = input_size.x * node_two_xys.fac_x1; + lower_bound.y = input_size.y * node_two_xys.fac_y2; + upper_bound.x = input_size.x * node_two_xys.fac_x2; + upper_bound.y = input_size.y * node_two_xys.fac_y1; + } + else { + /* Make sure the bounds don't go over the input image size. */ + lower_bound.x = min_ii(node_two_xys.x1, input_size.x); + lower_bound.y = min_ii(node_two_xys.y2, input_size.y); + upper_bound.x = min_ii(node_two_xys.x2, input_size.x); + upper_bound.y = min_ii(node_two_xys.y1, input_size.y); + } + + /* Make sure upper bound is actually higher than the lower bound. 
*/ + lower_bound.x = min_ii(lower_bound.x, upper_bound.x); + lower_bound.y = min_ii(lower_bound.y, upper_bound.y); + upper_bound.x = max_ii(lower_bound.x, upper_bound.x); + upper_bound.y = max_ii(lower_bound.y, upper_bound.y); + } +}; + +static NodeOperation *get_compositor_operation(Context &context, DNode node) +{ + return new CropOperation(context, node); +} + } // namespace blender::nodes::node_composite_crop_cc void register_node_type_cmp_crop() @@ -67,6 +235,7 @@ void register_node_type_cmp_crop() ntype.draw_buttons = file_ns::node_composit_buts_crop; node_type_init(&ntype, file_ns::node_composit_init_crop); node_type_storage(&ntype, "NodeTwoXYs", node_free_standard_storage, node_copy_standard_storage); + ntype.get_compositor_operation = file_ns::get_compositor_operation; nodeRegisterType(&ntype); } diff --git a/source/blender/nodes/composite/nodes/node_composite_cryptomatte.cc b/source/blender/nodes/composite/nodes/node_composite_cryptomatte.cc index 2d362a39814..7e5544381a4 100644 --- a/source/blender/nodes/composite/nodes/node_composite_cryptomatte.cc +++ b/source/blender/nodes/composite/nodes/node_composite_cryptomatte.cc @@ -26,6 +26,8 @@ #include "RE_pipeline.h" +#include "COM_node_operation.hh" + #include <optional> /* -------------------------------------------------------------------- */ @@ -105,7 +107,6 @@ static blender::bke::cryptomatte::CryptomatteSessionPtr cryptomatte_init_from_no return session; } -extern "C" { static CryptomatteEntry *cryptomatte_find(const NodeCryptomatte &n, float encoded_hash) { LISTBASE_FOREACH (CryptomatteEntry *, entry, &n.entries) { @@ -299,6 +300,25 @@ static bool node_poll_cryptomatte(bNodeType *UNUSED(ntype), return false; } +using namespace blender::realtime_compositor; + +class CryptoMatteOperation : public NodeOperation { + public: + using NodeOperation::NodeOperation; + + void execute() override + { + get_input("Image").pass_through(get_result("Image")); + get_result("Matte").allocate_invalid(); + 
get_result("Pick").allocate_invalid(); + } +}; + +static NodeOperation *get_compositor_operation(Context &context, DNode node) +{ + return new CryptoMatteOperation(context, node); +} + } // namespace blender::nodes::node_composite_cryptomatte_cc void register_node_type_cmp_cryptomatte() @@ -316,6 +336,8 @@ void register_node_type_cmp_cryptomatte() ntype.poll = file_ns::node_poll_cryptomatte; node_type_storage( &ntype, "NodeCryptomatte", file_ns::node_free_cryptomatte, file_ns::node_copy_cryptomatte); + ntype.get_compositor_operation = file_ns::get_compositor_operation; + nodeRegisterType(&ntype); } @@ -350,7 +372,7 @@ int ntreeCompositCryptomatteRemoveSocket(bNodeTree *ntree, bNode *node) return 1; } -namespace blender::nodes::node_composite_cryptomatte_cc { +namespace blender::nodes::node_composite_legacy_cryptomatte_cc { static void node_init_cryptomatte_legacy(bNodeTree *ntree, bNode *node) { @@ -365,24 +387,43 @@ static void node_init_cryptomatte_legacy(bNodeTree *ntree, bNode *node) ntreeCompositCryptomatteAddSocket(ntree, node); } -} // namespace blender::nodes::node_composite_cryptomatte_cc +using namespace blender::realtime_compositor; + +class CryptoMatteOperation : public NodeOperation { + public: + using NodeOperation::NodeOperation; + + void execute() override + { + get_input("image").pass_through(get_result("Image")); + get_result("Matte").allocate_invalid(); + get_result("Pick").allocate_invalid(); + } +}; + +static NodeOperation *get_compositor_operation(Context &context, DNode node) +{ + return new CryptoMatteOperation(context, node); +} + +} // namespace blender::nodes::node_composite_legacy_cryptomatte_cc void register_node_type_cmp_cryptomatte_legacy() { - namespace legacy_file_ns = blender::nodes::node_composite_cryptomatte_cc; + namespace legacy_file_ns = blender::nodes::node_composite_legacy_cryptomatte_cc; namespace file_ns = blender::nodes::node_composite_cryptomatte_cc; static bNodeType ntype; cmp_node_type_base(&ntype, 
CMP_NODE_CRYPTOMATTE_LEGACY, "Cryptomatte", NODE_CLASS_MATTE); node_type_socket_templates(&ntype, nullptr, file_ns::cmp_node_cryptomatte_out); - node_type_init(&ntype, file_ns::node_init_cryptomatte_legacy); + node_type_init(&ntype, legacy_file_ns::node_init_cryptomatte_legacy); node_type_storage( &ntype, "NodeCryptomatte", file_ns::node_free_cryptomatte, file_ns::node_copy_cryptomatte); ntype.gather_link_search_ops = nullptr; + ntype.get_compositor_operation = legacy_file_ns::get_compositor_operation; nodeRegisterType(&ntype); } /** \} */ -} diff --git a/source/blender/nodes/composite/nodes/node_composite_curves.cc b/source/blender/nodes/composite/nodes/node_composite_curves.cc index 802664d7934..c5d303c576a 100644 --- a/source/blender/nodes/composite/nodes/node_composite_curves.cc +++ b/source/blender/nodes/composite/nodes/node_composite_curves.cc @@ -5,16 +5,23 @@ * \ingroup cmpnodes */ +#include "BLI_math_base.h" + #include "BKE_colortools.h" #include "UI_interface.h" #include "UI_resources.h" +#include "GPU_material.h" + +#include "COM_node_operation.hh" +#include "COM_shader_node.hh" + #include "node_composite_util.hh" /* **************** CURVE Time ******************** */ -namespace blender::nodes::node_composite_curves_cc { +namespace blender::nodes::node_composite_time_curves_cc { static void cmp_node_time_declare(NodeDeclarationBuilder &b) { @@ -29,11 +36,65 @@ static void node_composit_init_curves_time(bNodeTree *UNUSED(ntree), bNode *node node->storage = BKE_curvemapping_add(1, 0.0f, 0.0f, 1.0f, 1.0f); } -} // namespace blender::nodes::node_composite_curves_cc +using namespace blender::realtime_compositor; + +class TimeCurveOperation : public NodeOperation { + public: + using NodeOperation::NodeOperation; + + void execute() override + { + Result &result = get_result("Fac"); + result.allocate_single_value(); + + CurveMapping *curve_mapping = get_curve_mapping(); + BKE_curvemapping_init(curve_mapping); + const float time = 
BKE_curvemapping_evaluateF(curve_mapping, 0, compute_normalized_time()); + result.set_float_value(clamp_f(time, 0.0f, 1.0f)); + } + + CurveMapping *get_curve_mapping() + { + return static_cast<CurveMapping *>(bnode().storage); + } + + int get_start_time() + { + return bnode().custom1; + } + + int get_end_time() + { + return bnode().custom2; + } + + float compute_normalized_time() + { + const int frame_number = context().get_frame_number(); + if (frame_number < get_start_time()) { + return 0.0f; + } + if (frame_number > get_end_time()) { + return 1.0f; + } + if (get_start_time() == get_end_time()) { + return 0.0f; + } + return static_cast<float>(frame_number - get_start_time()) / + static_cast<float>(get_end_time() - get_start_time()); + } +}; + +static NodeOperation *get_compositor_operation(Context &context, DNode node) +{ + return new TimeCurveOperation(context, node); +} + +} // namespace blender::nodes::node_composite_time_curves_cc void register_node_type_cmp_curve_time() { - namespace file_ns = blender::nodes::node_composite_curves_cc; + namespace file_ns = blender::nodes::node_composite_time_curves_cc; static bNodeType ntype; @@ -42,17 +103,22 @@ void register_node_type_cmp_curve_time() node_type_size(&ntype, 200, 140, 320); node_type_init(&ntype, file_ns::node_composit_init_curves_time); node_type_storage(&ntype, "CurveMapping", node_free_curves, node_copy_curves); + ntype.get_compositor_operation = file_ns::get_compositor_operation; nodeRegisterType(&ntype); } /* **************** CURVE VEC ******************** */ -namespace blender::nodes::node_composite_curves_cc { +namespace blender::nodes::node_composite_vector_curves_cc { static void cmp_node_curve_vec_declare(NodeDeclarationBuilder &b) { - b.add_input<decl::Vector>(N_("Vector")).default_value({0.0f, 0.0f, 0.0f}).min(-1.0f).max(1.0f); + b.add_input<decl::Vector>(N_("Vector")) + .default_value({0.0f, 0.0f, 0.0f}) + .min(-1.0f) + .max(1.0f) + .compositor_domain_priority(0); 
b.add_output<decl::Vector>(N_("Vector")); } @@ -66,11 +132,63 @@ static void node_buts_curvevec(uiLayout *layout, bContext *UNUSED(C), PointerRNA uiTemplateCurveMapping(layout, ptr, "mapping", 'v', false, false, false, false); } -} // namespace blender::nodes::node_composite_curves_cc +using namespace blender::realtime_compositor; + +class VectorCurvesShaderNode : public ShaderNode { + public: + using ShaderNode::ShaderNode; + + void compile(GPUMaterial *material) override + { + GPUNodeStack *inputs = get_inputs_array(); + GPUNodeStack *outputs = get_outputs_array(); + + CurveMapping *curve_mapping = get_curve_mapping(); + + BKE_curvemapping_init(curve_mapping); + float *band_values; + int band_size; + BKE_curvemapping_table_RGBA(curve_mapping, &band_values, &band_size); + float band_layer; + GPUNodeLink *band_texture = GPU_color_band(material, band_size, band_values, &band_layer); + + float start_slopes[CM_TOT]; + float end_slopes[CM_TOT]; + BKE_curvemapping_compute_slopes(curve_mapping, start_slopes, end_slopes); + float range_minimums[CM_TOT]; + BKE_curvemapping_get_range_minimums(curve_mapping, range_minimums); + float range_dividers[CM_TOT]; + BKE_curvemapping_compute_range_dividers(curve_mapping, range_dividers); + + GPU_stack_link(material, + &bnode(), + "curves_vector", + inputs, + outputs, + band_texture, + GPU_constant(&band_layer), + GPU_uniform(range_minimums), + GPU_uniform(range_dividers), + GPU_uniform(start_slopes), + GPU_uniform(end_slopes)); + } + + CurveMapping *get_curve_mapping() + { + return static_cast<CurveMapping *>(bnode().storage); + } +}; + +static ShaderNode *get_compositor_shader_node(DNode node) +{ + return new VectorCurvesShaderNode(node); +} + +} // namespace blender::nodes::node_composite_vector_curves_cc void register_node_type_cmp_curve_vec() { - namespace file_ns = blender::nodes::node_composite_curves_cc; + namespace file_ns = blender::nodes::node_composite_vector_curves_cc; static bNodeType ntype; @@ -80,19 +198,26 @@ void 
register_node_type_cmp_curve_vec() node_type_size(&ntype, 200, 140, 320); node_type_init(&ntype, file_ns::node_composit_init_curve_vec); node_type_storage(&ntype, "CurveMapping", node_free_curves, node_copy_curves); + ntype.get_compositor_shader_node = file_ns::get_compositor_shader_node; nodeRegisterType(&ntype); } /* **************** CURVE RGB ******************** */ -namespace blender::nodes::node_composite_curves_cc { +namespace blender::nodes::node_composite_rgb_curves_cc { static void cmp_node_rgbcurves_declare(NodeDeclarationBuilder &b) { - b.add_input<decl::Float>(N_("Fac")).default_value(1.0f).min(-1.0f).max(1.0f).subtype( - PROP_FACTOR); - b.add_input<decl::Color>(N_("Image")).default_value({1.0f, 1.0f, 1.0f, 1.0f}); + b.add_input<decl::Float>(N_("Fac")) + .default_value(1.0f) + .min(-1.0f) + .max(1.0f) + .subtype(PROP_FACTOR) + .compositor_domain_priority(1); + b.add_input<decl::Color>(N_("Image")) + .default_value({1.0f, 1.0f, 1.0f, 1.0f}) + .compositor_domain_priority(0); b.add_input<decl::Color>(N_("Black Level")).default_value({0.0f, 0.0f, 0.0f, 1.0f}); b.add_input<decl::Color>(N_("White Level")).default_value({1.0f, 1.0f, 1.0f, 1.0f}); b.add_output<decl::Color>(N_("Image")); @@ -103,11 +228,105 @@ static void node_composit_init_curve_rgb(bNodeTree *UNUSED(ntree), bNode *node) node->storage = BKE_curvemapping_add(4, 0.0f, 0.0f, 1.0f, 1.0f); } -} // namespace blender::nodes::node_composite_curves_cc +using namespace blender::realtime_compositor; + +class RGBCurvesShaderNode : public ShaderNode { + public: + using ShaderNode::ShaderNode; + + void compile(GPUMaterial *material) override + { + GPUNodeStack *inputs = get_inputs_array(); + GPUNodeStack *outputs = get_outputs_array(); + + CurveMapping *curve_mapping = get_curve_mapping(); + + BKE_curvemapping_init(curve_mapping); + float *band_values; + int band_size; + BKE_curvemapping_table_RGBA(curve_mapping, &band_values, &band_size); + float band_layer; + GPUNodeLink *band_texture = 
GPU_color_band(material, band_size, band_values, &band_layer); + + float start_slopes[CM_TOT]; + float end_slopes[CM_TOT]; + BKE_curvemapping_compute_slopes(curve_mapping, start_slopes, end_slopes); + float range_minimums[CM_TOT]; + BKE_curvemapping_get_range_minimums(curve_mapping, range_minimums); + float range_dividers[CM_TOT]; + BKE_curvemapping_compute_range_dividers(curve_mapping, range_dividers); + + if (curve_mapping->tone == CURVE_TONE_FILMLIKE) { + GPU_stack_link(material, + &bnode(), + "curves_film_like", + inputs, + outputs, + band_texture, + GPU_constant(&band_layer), + GPU_uniform(&range_minimums[3]), + GPU_uniform(&range_dividers[3]), + GPU_uniform(&start_slopes[3]), + GPU_uniform(&end_slopes[3])); + return; + } + + const float min = 0.0f; + const float max = 1.0f; + GPU_link(material, + "clamp_value", + get_input_link("Fac"), + GPU_constant(&min), + GPU_constant(&max), + &get_input("Fac").link); + + /* If the RGB curves do nothing, use a function that skips RGB computations. 
*/ + if (BKE_curvemapping_is_map_identity(curve_mapping, 0) && + BKE_curvemapping_is_map_identity(curve_mapping, 1) && + BKE_curvemapping_is_map_identity(curve_mapping, 2)) { + GPU_stack_link(material, + &bnode(), + "curves_combined_only", + inputs, + outputs, + band_texture, + GPU_constant(&band_layer), + GPU_uniform(&range_minimums[3]), + GPU_uniform(&range_dividers[3]), + GPU_uniform(&start_slopes[3]), + GPU_uniform(&end_slopes[3])); + return; + } + + GPU_stack_link(material, + &bnode(), + "curves_combined_rgb", + inputs, + outputs, + band_texture, + GPU_constant(&band_layer), + GPU_uniform(range_minimums), + GPU_uniform(range_dividers), + GPU_uniform(start_slopes), + GPU_uniform(end_slopes)); + } + + CurveMapping *get_curve_mapping() + { + return static_cast<CurveMapping *>(bnode().storage); + } +}; + +static ShaderNode *get_compositor_shader_node(DNode node) +{ + return new RGBCurvesShaderNode(node); +} + +} // namespace blender::nodes::node_composite_rgb_curves_cc void register_node_type_cmp_curve_rgb() { - namespace file_ns = blender::nodes::node_composite_curves_cc; + namespace file_ns = blender::nodes::node_composite_rgb_curves_cc; static bNodeType ntype; @@ -116,6 +335,7 @@ void register_node_type_cmp_curve_rgb() node_type_size(&ntype, 200, 140, 320); node_type_init(&ntype, file_ns::node_composit_init_curve_rgb); node_type_storage(&ntype, "CurveMapping", node_free_curves, node_copy_curves); + ntype.get_compositor_shader_node = file_ns::get_compositor_shader_node; nodeRegisterType(&ntype); } diff --git a/source/blender/nodes/composite/nodes/node_composite_defocus.cc b/source/blender/nodes/composite/nodes/node_composite_defocus.cc index 83dd397ff1f..94b4908a1bd 100644 --- a/source/blender/nodes/composite/nodes/node_composite_defocus.cc +++ b/source/blender/nodes/composite/nodes/node_composite_defocus.cc @@ -12,6 +12,8 @@ #include "UI_interface.h" #include "UI_resources.h" +#include "COM_node_operation.hh" + #include "node_composite_util.hh" /* ************ 
Defocus Node ****************** */ @@ -81,6 +83,23 @@ static void node_composit_buts_defocus(uiLayout *layout, bContext *C, PointerRNA uiItemR(sub, ptr, "z_scale", UI_ITEM_R_SPLIT_EMPTY_NAME, nullptr, ICON_NONE); } +using namespace blender::realtime_compositor; + +class DefocusOperation : public NodeOperation { + public: + using NodeOperation::NodeOperation; + + void execute() override + { + get_input("Image").pass_through(get_result("Image")); + } +}; + +static NodeOperation *get_compositor_operation(Context &context, DNode node) +{ + return new DefocusOperation(context, node); +} + } // namespace blender::nodes::node_composite_defocus_cc void register_node_type_cmp_defocus() @@ -94,6 +113,7 @@ void register_node_type_cmp_defocus() ntype.draw_buttons = file_ns::node_composit_buts_defocus; node_type_init(&ntype, file_ns::node_composit_init_defocus); node_type_storage(&ntype, "NodeDefocus", node_free_standard_storage, node_copy_standard_storage); + ntype.get_compositor_operation = file_ns::get_compositor_operation; nodeRegisterType(&ntype); } diff --git a/source/blender/nodes/composite/nodes/node_composite_denoise.cc b/source/blender/nodes/composite/nodes/node_composite_denoise.cc index 051a2580ef9..0452e7cd943 100644 --- a/source/blender/nodes/composite/nodes/node_composite_denoise.cc +++ b/source/blender/nodes/composite/nodes/node_composite_denoise.cc @@ -10,6 +10,8 @@ #include "UI_interface.h" #include "UI_resources.h" +#include "COM_node_operation.hh" + #include "node_composite_util.hh" namespace blender::nodes::node_composite_denoise_cc { @@ -52,6 +54,23 @@ static void node_composit_buts_denoise(uiLayout *layout, bContext *UNUSED(C), Po uiItemR(layout, ptr, "use_hdr", UI_ITEM_R_SPLIT_EMPTY_NAME, nullptr, ICON_NONE); } +using namespace blender::realtime_compositor; + +class DenoiseOperation : public NodeOperation { + public: + using NodeOperation::NodeOperation; + + void execute() override + { + get_input("Image").pass_through(get_result("Image")); + } +}; + 
+static NodeOperation *get_compositor_operation(Context &context, DNode node) +{ + return new DenoiseOperation(context, node); +} + } // namespace blender::nodes::node_composite_denoise_cc void register_node_type_cmp_denoise() @@ -65,6 +84,7 @@ void register_node_type_cmp_denoise() ntype.draw_buttons = file_ns::node_composit_buts_denoise; node_type_init(&ntype, file_ns::node_composit_init_denonise); node_type_storage(&ntype, "NodeDenoise", node_free_standard_storage, node_copy_standard_storage); + ntype.get_compositor_operation = file_ns::get_compositor_operation; nodeRegisterType(&ntype); } diff --git a/source/blender/nodes/composite/nodes/node_composite_despeckle.cc b/source/blender/nodes/composite/nodes/node_composite_despeckle.cc index 66a18cfa369..0b9f9c8f76d 100644 --- a/source/blender/nodes/composite/nodes/node_composite_despeckle.cc +++ b/source/blender/nodes/composite/nodes/node_composite_despeckle.cc @@ -8,6 +8,8 @@ #include "UI_interface.h" #include "UI_resources.h" +#include "COM_node_operation.hh" + #include "node_composite_util.hh" /* **************** FILTER ******************** */ @@ -36,6 +38,23 @@ static void node_composit_buts_despeckle(uiLayout *layout, bContext *UNUSED(C), uiItemR(col, ptr, "threshold_neighbor", UI_ITEM_R_SPLIT_EMPTY_NAME, nullptr, ICON_NONE); } +using namespace blender::realtime_compositor; + +class DespeckleOperation : public NodeOperation { + public: + using NodeOperation::NodeOperation; + + void execute() override + { + get_input("Image").pass_through(get_result("Image")); + } +}; + +static NodeOperation *get_compositor_operation(Context &context, DNode node) +{ + return new DespeckleOperation(context, node); +} + } // namespace blender::nodes::node_composite_despeckle_cc void register_node_type_cmp_despeckle() @@ -49,6 +68,7 @@ void register_node_type_cmp_despeckle() ntype.draw_buttons = file_ns::node_composit_buts_despeckle; ntype.flag |= NODE_PREVIEW; node_type_init(&ntype, file_ns::node_composit_init_despeckle); + 
ntype.get_compositor_operation = file_ns::get_compositor_operation; nodeRegisterType(&ntype); } diff --git a/source/blender/nodes/composite/nodes/node_composite_diff_matte.cc b/source/blender/nodes/composite/nodes/node_composite_diff_matte.cc index b87bbe439db..e129dcaa6ef 100644 --- a/source/blender/nodes/composite/nodes/node_composite_diff_matte.cc +++ b/source/blender/nodes/composite/nodes/node_composite_diff_matte.cc @@ -8,6 +8,10 @@ #include "UI_interface.h" #include "UI_resources.h" +#include "GPU_material.h" + +#include "COM_shader_node.hh" + #include "node_composite_util.hh" /* ******************* channel Difference Matte ********************************* */ @@ -16,8 +20,12 @@ namespace blender::nodes::node_composite_diff_matte_cc { static void cmp_node_diff_matte_declare(NodeDeclarationBuilder &b) { - b.add_input<decl::Color>(N_("Image 1")).default_value({1.0f, 1.0f, 1.0f, 1.0f}); - b.add_input<decl::Color>(N_("Image 2")).default_value({1.0f, 1.0f, 1.0f, 1.0f}); + b.add_input<decl::Color>(N_("Image 1")) + .default_value({1.0f, 1.0f, 1.0f, 1.0f}) + .compositor_domain_priority(0); + b.add_input<decl::Color>(N_("Image 2")) + .default_value({1.0f, 1.0f, 1.0f, 1.0f}) + .compositor_domain_priority(1); b.add_output<decl::Color>(N_("Image")); b.add_output<decl::Float>(N_("Matte")); } @@ -40,6 +48,50 @@ static void node_composit_buts_diff_matte(uiLayout *layout, bContext *UNUSED(C), uiItemR(col, ptr, "falloff", UI_ITEM_R_SPLIT_EMPTY_NAME | UI_ITEM_R_SLIDER, nullptr, ICON_NONE); } +using namespace blender::realtime_compositor; + +class DifferenceMatteShaderNode : public ShaderNode { + public: + using ShaderNode::ShaderNode; + + void compile(GPUMaterial *material) override + { + GPUNodeStack *inputs = get_inputs_array(); + GPUNodeStack *outputs = get_outputs_array(); + + const float tolerance = get_tolerance(); + const float falloff = get_falloff(); + + GPU_stack_link(material, + &bnode(), + "node_composite_difference_matte", + inputs, + outputs, + 
GPU_uniform(&tolerance), + GPU_uniform(&falloff)); + } + + NodeChroma *get_node_chroma() + { + return static_cast<NodeChroma *>(bnode().storage); + } + + float get_tolerance() + { + return get_node_chroma()->t1; + } + + float get_falloff() + { + return get_node_chroma()->t2; + } +}; + +static ShaderNode *get_compositor_shader_node(DNode node) +{ + return new DifferenceMatteShaderNode(node); +} + } // namespace blender::nodes::node_composite_diff_matte_cc void register_node_type_cmp_diff_matte() @@ -54,6 +106,7 @@ void register_node_type_cmp_diff_matte() ntype.flag |= NODE_PREVIEW; node_type_init(&ntype, file_ns::node_composit_init_diff_matte); node_type_storage(&ntype, "NodeChroma", node_free_standard_storage, node_copy_standard_storage); + ntype.get_compositor_shader_node = file_ns::get_compositor_shader_node; nodeRegisterType(&ntype); } diff --git a/source/blender/nodes/composite/nodes/node_composite_dilate.cc b/source/blender/nodes/composite/nodes/node_composite_dilate.cc index 9bdb9ae0837..46199d3ff04 100644 --- a/source/blender/nodes/composite/nodes/node_composite_dilate.cc +++ b/source/blender/nodes/composite/nodes/node_composite_dilate.cc @@ -10,6 +10,8 @@ #include "UI_interface.h" #include "UI_resources.h" +#include "COM_node_operation.hh" + #include "node_composite_util.hh" /* **************** Dilate/Erode ******************** */ @@ -43,6 +45,23 @@ static void node_composit_buts_dilateerode(uiLayout *layout, bContext *UNUSED(C) } } +using namespace blender::realtime_compositor; + +class DilateErodeOperation : public NodeOperation { + public: + using NodeOperation::NodeOperation; + + void execute() override + { + get_input("Mask").pass_through(get_result("Mask")); + } +}; + +static NodeOperation *get_compositor_operation(Context &context, DNode node) +{ + return new DilateErodeOperation(context, node); +} + } // namespace blender::nodes::node_composite_dilate_cc void register_node_type_cmp_dilateerode() @@ -57,6 +76,7 @@ void 
register_node_type_cmp_dilateerode() node_type_init(&ntype, file_ns::node_composit_init_dilateerode); node_type_storage( &ntype, "NodeDilateErode", node_free_standard_storage, node_copy_standard_storage); + ntype.get_compositor_operation = file_ns::get_compositor_operation; nodeRegisterType(&ntype); } diff --git a/source/blender/nodes/composite/nodes/node_composite_directionalblur.cc b/source/blender/nodes/composite/nodes/node_composite_directionalblur.cc index 3d82ab04fc9..eacba5ad12d 100644 --- a/source/blender/nodes/composite/nodes/node_composite_directionalblur.cc +++ b/source/blender/nodes/composite/nodes/node_composite_directionalblur.cc @@ -8,6 +8,8 @@ #include "UI_interface.h" #include "UI_resources.h" +#include "COM_node_operation.hh" + #include "node_composite_util.hh" namespace blender::nodes::node_composite_directionalblur_cc { @@ -51,6 +53,23 @@ static void node_composit_buts_dblur(uiLayout *layout, bContext *UNUSED(C), Poin uiItemR(layout, ptr, "zoom", UI_ITEM_R_SPLIT_EMPTY_NAME, nullptr, ICON_NONE); } +using namespace blender::realtime_compositor; + +class DirectionalBlurOperation : public NodeOperation { + public: + using NodeOperation::NodeOperation; + + void execute() override + { + get_input("Image").pass_through(get_result("Image")); + } +}; + +static NodeOperation *get_compositor_operation(Context &context, DNode node) +{ + return new DirectionalBlurOperation(context, node); +} + } // namespace blender::nodes::node_composite_directionalblur_cc void register_node_type_cmp_dblur() @@ -65,6 +84,7 @@ void register_node_type_cmp_dblur() node_type_init(&ntype, file_ns::node_composit_init_dblur); node_type_storage( &ntype, "NodeDBlurData", node_free_standard_storage, node_copy_standard_storage); + ntype.get_compositor_operation = file_ns::get_compositor_operation; nodeRegisterType(&ntype); } diff --git a/source/blender/nodes/composite/nodes/node_composite_displace.cc b/source/blender/nodes/composite/nodes/node_composite_displace.cc index 
0b0d42cbb08..1049f2fa4a9 100644 --- a/source/blender/nodes/composite/nodes/node_composite_displace.cc +++ b/source/blender/nodes/composite/nodes/node_composite_displace.cc @@ -5,6 +5,8 @@ * \ingroup cmpnodes */ +#include "COM_node_operation.hh" + #include "node_composite_util.hh" /* **************** Displace ******************** */ @@ -24,6 +26,23 @@ static void cmp_node_displace_declare(NodeDeclarationBuilder &b) b.add_output<decl::Color>(N_("Image")); } +using namespace blender::realtime_compositor; + +class DisplaceOperation : public NodeOperation { + public: + using NodeOperation::NodeOperation; + + void execute() override + { + get_input("Image").pass_through(get_result("Image")); + } +}; + +static NodeOperation *get_compositor_operation(Context &context, DNode node) +{ + return new DisplaceOperation(context, node); +} + } // namespace blender::nodes::node_composite_displace_cc void register_node_type_cmp_displace() @@ -34,6 +53,7 @@ void register_node_type_cmp_displace() cmp_node_type_base(&ntype, CMP_NODE_DISPLACE, "Displace", NODE_CLASS_DISTORT); ntype.declare = file_ns::cmp_node_displace_declare; + ntype.get_compositor_operation = file_ns::get_compositor_operation; nodeRegisterType(&ntype); } diff --git a/source/blender/nodes/composite/nodes/node_composite_distance_matte.cc b/source/blender/nodes/composite/nodes/node_composite_distance_matte.cc index a8646d8498e..9d910b3f409 100644 --- a/source/blender/nodes/composite/nodes/node_composite_distance_matte.cc +++ b/source/blender/nodes/composite/nodes/node_composite_distance_matte.cc @@ -8,6 +8,10 @@ #include "UI_interface.h" #include "UI_resources.h" +#include "GPU_material.h" + +#include "COM_shader_node.hh" + #include "node_composite_util.hh" /* ******************* channel Distance Matte ********************************* */ @@ -16,8 +20,12 @@ namespace blender::nodes::node_composite_distance_matte_cc { static void cmp_node_distance_matte_declare(NodeDeclarationBuilder &b) { - 
b.add_input<decl::Color>(N_("Image")).default_value({1.0f, 1.0f, 1.0f, 1.0f}); - b.add_input<decl::Color>(N_("Key Color")).default_value({1.0f, 1.0f, 1.0f, 1.0f}); + b.add_input<decl::Color>(N_("Image")) + .default_value({1.0f, 1.0f, 1.0f, 1.0f}) + .compositor_domain_priority(0); + b.add_input<decl::Color>(N_("Key Color")) + .default_value({1.0f, 1.0f, 1.0f, 1.0f}) + .compositor_domain_priority(1); b.add_output<decl::Color>(N_("Image")); b.add_output<decl::Float>(N_("Matte")); } @@ -26,7 +34,7 @@ static void node_composit_init_distance_matte(bNodeTree *UNUSED(ntree), bNode *n { NodeChroma *c = MEM_cnew<NodeChroma>(__func__); node->storage = c; - c->channel = 1; + c->channel = CMP_NODE_DISTANCE_MATTE_COLOR_SPACE_RGBA; c->t1 = 0.1f; c->t2 = 0.1f; } @@ -48,6 +56,66 @@ static void node_composit_buts_distance_matte(uiLayout *layout, uiItemR(col, ptr, "falloff", UI_ITEM_R_SPLIT_EMPTY_NAME | UI_ITEM_R_SLIDER, nullptr, ICON_NONE); } +using namespace blender::realtime_compositor; + +class DistanceMatteShaderNode : public ShaderNode { + public: + using ShaderNode::ShaderNode; + + void compile(GPUMaterial *material) override + { + GPUNodeStack *inputs = get_inputs_array(); + GPUNodeStack *outputs = get_outputs_array(); + + const float tolerance = get_tolerance(); + const float falloff = get_falloff(); + + if (get_color_space() == CMP_NODE_DISTANCE_MATTE_COLOR_SPACE_RGBA) { + GPU_stack_link(material, + &bnode(), + "node_composite_distance_matte_rgba", + inputs, + outputs, + GPU_uniform(&tolerance), + GPU_uniform(&falloff)); + return; + } + + GPU_stack_link(material, + &bnode(), + "node_composite_distance_matte_ycca", + inputs, + outputs, + GPU_uniform(&tolerance), + GPU_uniform(&falloff)); + } + + NodeChroma *get_node_chroma() + { + return static_cast<NodeChroma *>(bnode().storage); + } + + CMPNodeDistanceMatteColorSpace get_color_space() + { + return (CMPNodeDistanceMatteColorSpace)get_node_chroma()->channel; + } + + float get_tolerance() + { + return get_node_chroma()->t1; + 
} + + float get_falloff() + { + return get_node_chroma()->t2; + } +}; + +static ShaderNode *get_compositor_shader_node(DNode node) +{ + return new DistanceMatteShaderNode(node); +} + } // namespace blender::nodes::node_composite_distance_matte_cc void register_node_type_cmp_distance_matte() @@ -62,6 +130,7 @@ void register_node_type_cmp_distance_matte() ntype.flag |= NODE_PREVIEW; node_type_init(&ntype, file_ns::node_composit_init_distance_matte); node_type_storage(&ntype, "NodeChroma", node_free_standard_storage, node_copy_standard_storage); + ntype.get_compositor_shader_node = file_ns::get_compositor_shader_node; nodeRegisterType(&ntype); } diff --git a/source/blender/nodes/composite/nodes/node_composite_double_edge_mask.cc b/source/blender/nodes/composite/nodes/node_composite_double_edge_mask.cc index 9dc2b223618..fec7879ed78 100644 --- a/source/blender/nodes/composite/nodes/node_composite_double_edge_mask.cc +++ b/source/blender/nodes/composite/nodes/node_composite_double_edge_mask.cc @@ -8,6 +8,8 @@ #include "UI_interface.h" #include "UI_resources.h" +#include "COM_node_operation.hh" + #include "node_composite_util.hh" /* **************** Double Edge Mask ******************** */ @@ -35,6 +37,23 @@ static void node_composit_buts_double_edge_mask(uiLayout *layout, uiItemR(col, ptr, "edge_mode", UI_ITEM_R_SPLIT_EMPTY_NAME, "", ICON_NONE); } +using namespace blender::realtime_compositor; + +class DoubleEdgeMaskOperation : public NodeOperation { + public: + using NodeOperation::NodeOperation; + + void execute() override + { + get_input("Inner Mask").pass_through(get_result("Mask")); + } +}; + +static NodeOperation *get_compositor_operation(Context &context, DNode node) +{ + return new DoubleEdgeMaskOperation(context, node); +} + } // namespace blender::nodes::node_composite_double_edge_mask_cc void register_node_type_cmp_doubleedgemask() @@ -46,6 +65,7 @@ void register_node_type_cmp_doubleedgemask() cmp_node_type_base(&ntype, CMP_NODE_DOUBLEEDGEMASK, "Double Edge 
Mask", NODE_CLASS_MATTE); ntype.declare = file_ns::cmp_node_double_edge_mask_declare; ntype.draw_buttons = file_ns::node_composit_buts_double_edge_mask; + ntype.get_compositor_operation = file_ns::get_compositor_operation; nodeRegisterType(&ntype); } diff --git a/source/blender/nodes/composite/nodes/node_composite_ellipsemask.cc b/source/blender/nodes/composite/nodes/node_composite_ellipsemask.cc index 4da6a0a442e..54dfa00eadd 100644 --- a/source/blender/nodes/composite/nodes/node_composite_ellipsemask.cc +++ b/source/blender/nodes/composite/nodes/node_composite_ellipsemask.cc @@ -5,9 +5,18 @@ * \ingroup cmpnodes */ +#include <cmath> + +#include "BLI_math_vec_types.hh" + #include "UI_interface.h" #include "UI_resources.h" +#include "GPU_shader.h" + +#include "COM_node_operation.hh" +#include "COM_utilities.hh" + #include "node_composite_util.hh" /* **************** SCALAR MATH ******************** */ @@ -46,6 +55,98 @@ static void node_composit_buts_ellipsemask(uiLayout *layout, bContext *UNUSED(C) uiItemR(layout, ptr, "mask_type", UI_ITEM_R_SPLIT_EMPTY_NAME, nullptr, ICON_NONE); } +using namespace blender::realtime_compositor; + +class EllipseMaskOperation : public NodeOperation { + public: + using NodeOperation::NodeOperation; + + void execute() override + { + GPUShader *shader = shader_manager().get(get_shader_name()); + GPU_shader_bind(shader); + + const Domain domain = compute_domain(); + + GPU_shader_uniform_2iv(shader, "domain_size", domain.size); + + GPU_shader_uniform_2fv(shader, "location", get_location()); + GPU_shader_uniform_2fv(shader, "radius", get_size() / 2.0f); + GPU_shader_uniform_1f(shader, "cos_angle", std::cos(get_angle())); + GPU_shader_uniform_1f(shader, "sin_angle", std::sin(get_angle())); + + const Result &input_mask = get_input("Mask"); + input_mask.bind_as_texture(shader, "base_mask_tx"); + + const Result &value = get_input("Value"); + value.bind_as_texture(shader, "mask_value_tx"); + + Result &output_mask = get_result("Mask"); + 
output_mask.allocate_texture(domain); + output_mask.bind_as_image(shader, "output_mask_img"); + + compute_dispatch_threads_at_least(shader, domain.size); + + input_mask.unbind_as_texture(); + value.unbind_as_texture(); + output_mask.unbind_as_image(); + GPU_shader_unbind(); + } + + Domain compute_domain() override + { + if (get_input("Mask").is_single_value()) { + return Domain(context().get_output_size()); + } + return get_input("Mask").domain(); + } + + CMPNodeMaskType get_mask_type() + { + return (CMPNodeMaskType)bnode().custom1; + } + + const char *get_shader_name() + { + switch (get_mask_type()) { + default: + case CMP_NODE_MASKTYPE_ADD: + return "compositor_ellipse_mask_add"; + case CMP_NODE_MASKTYPE_SUBTRACT: + return "compositor_ellipse_mask_subtract"; + case CMP_NODE_MASKTYPE_MULTIPLY: + return "compositor_ellipse_mask_multiply"; + case CMP_NODE_MASKTYPE_NOT: + return "compositor_ellipse_mask_not"; + } + } + + NodeEllipseMask &get_node_ellipse_mask() + { + return *static_cast<NodeEllipseMask *>(bnode().storage); + } + + float2 get_location() + { + return float2(get_node_ellipse_mask().x, get_node_ellipse_mask().y); + } + + float2 get_size() + { + return float2(get_node_ellipse_mask().width, get_node_ellipse_mask().height); + } + + float get_angle() + { + return get_node_ellipse_mask().rotation; + } +}; + +static NodeOperation *get_compositor_operation(Context &context, DNode node) +{ + return new EllipseMaskOperation(context, node); +} + } // namespace blender::nodes::node_composite_ellipsemask_cc void register_node_type_cmp_ellipsemask() @@ -61,6 +162,7 @@ void register_node_type_cmp_ellipsemask() node_type_init(&ntype, file_ns::node_composit_init_ellipsemask); node_type_storage( &ntype, "NodeEllipseMask", node_free_standard_storage, node_copy_standard_storage); + ntype.get_compositor_operation = file_ns::get_compositor_operation; nodeRegisterType(&ntype); } diff --git a/source/blender/nodes/composite/nodes/node_composite_exposure.cc 
b/source/blender/nodes/composite/nodes/node_composite_exposure.cc index 881cfc11058..19b93680ff6 100644 --- a/source/blender/nodes/composite/nodes/node_composite_exposure.cc +++ b/source/blender/nodes/composite/nodes/node_composite_exposure.cc @@ -5,6 +5,10 @@ * \ingroup cmpnodes */ +#include "GPU_material.h" + +#include "COM_shader_node.hh" + #include "node_composite_util.hh" /* **************** Exposure ******************** */ @@ -13,11 +17,33 @@ namespace blender::nodes::node_composite_exposure_cc { static void cmp_node_exposure_declare(NodeDeclarationBuilder &b) { - b.add_input<decl::Color>(N_("Image")).default_value({1.0f, 1.0f, 1.0f, 1.0f}); - b.add_input<decl::Float>(N_("Exposure")).min(-10.0f).max(10.0f); + b.add_input<decl::Color>(N_("Image")) + .default_value({1.0f, 1.0f, 1.0f, 1.0f}) + .compositor_domain_priority(0); + b.add_input<decl::Float>(N_("Exposure")).min(-10.0f).max(10.0f).compositor_domain_priority(1); b.add_output<decl::Color>(N_("Image")); } +using namespace blender::realtime_compositor; + +class ExposureShaderNode : public ShaderNode { + public: + using ShaderNode::ShaderNode; + + void compile(GPUMaterial *material) override + { + GPUNodeStack *inputs = get_inputs_array(); + GPUNodeStack *outputs = get_outputs_array(); + + GPU_stack_link(material, &bnode(), "node_composite_exposure", inputs, outputs); + } +}; + +static ShaderNode *get_compositor_shader_node(DNode node) +{ + return new ExposureShaderNode(node); +} + } // namespace blender::nodes::node_composite_exposure_cc void register_node_type_cmp_exposure() @@ -28,6 +54,7 @@ void register_node_type_cmp_exposure() cmp_node_type_base(&ntype, CMP_NODE_EXPOSURE, "Exposure", NODE_CLASS_OP_COLOR); ntype.declare = file_ns::cmp_node_exposure_declare; + ntype.get_compositor_shader_node = file_ns::get_compositor_shader_node; nodeRegisterType(&ntype); } diff --git a/source/blender/nodes/composite/nodes/node_composite_filter.cc b/source/blender/nodes/composite/nodes/node_composite_filter.cc index 
c343c21feb2..854cf684806 100644 --- a/source/blender/nodes/composite/nodes/node_composite_filter.cc +++ b/source/blender/nodes/composite/nodes/node_composite_filter.cc @@ -8,6 +8,8 @@ #include "UI_interface.h" #include "UI_resources.h" +#include "COM_node_operation.hh" + #include "node_composite_util.hh" /* **************** FILTER ******************** */ @@ -26,6 +28,23 @@ static void node_composit_buts_filter(uiLayout *layout, bContext *UNUSED(C), Poi uiItemR(layout, ptr, "filter_type", UI_ITEM_R_SPLIT_EMPTY_NAME, "", ICON_NONE); } +using namespace blender::realtime_compositor; + +class FilterOperation : public NodeOperation { + public: + using NodeOperation::NodeOperation; + + void execute() override + { + get_input("Image").pass_through(get_result("Image")); + } +}; + +static NodeOperation *get_compositor_operation(Context &context, DNode node) +{ + return new FilterOperation(context, node); +} + } // namespace blender::nodes::node_composite_filter_cc void register_node_type_cmp_filter() @@ -39,6 +58,7 @@ void register_node_type_cmp_filter() ntype.draw_buttons = file_ns::node_composit_buts_filter; ntype.labelfunc = node_filter_label; ntype.flag |= NODE_PREVIEW; + ntype.get_compositor_operation = file_ns::get_compositor_operation; nodeRegisterType(&ntype); } diff --git a/source/blender/nodes/composite/nodes/node_composite_flip.cc b/source/blender/nodes/composite/nodes/node_composite_flip.cc index 37b9a2d020d..aaa2b565ed2 100644 --- a/source/blender/nodes/composite/nodes/node_composite_flip.cc +++ b/source/blender/nodes/composite/nodes/node_composite_flip.cc @@ -5,9 +5,18 @@ * \ingroup cmpnodes */ +#include "BLI_assert.h" +#include "BLI_utildefines.h" + #include "UI_interface.h" #include "UI_resources.h" +#include "GPU_shader.h" +#include "GPU_texture.h" + +#include "COM_node_operation.hh" +#include "COM_utilities.hh" + #include "node_composite_util.hh" /* **************** Flip ******************** */ @@ -16,7 +25,9 @@ namespace 
blender::nodes::node_composite_flip_cc { static void cmp_node_flip_declare(NodeDeclarationBuilder &b) { - b.add_input<decl::Color>(N_("Image")).default_value({1.0f, 1.0f, 1.0f, 1.0f}); + b.add_input<decl::Color>(N_("Image")) + .default_value({1.0f, 1.0f, 1.0f, 1.0f}) + .compositor_domain_priority(0); b.add_output<decl::Color>(N_("Image")); } @@ -25,6 +36,56 @@ static void node_composit_buts_flip(uiLayout *layout, bContext *UNUSED(C), Point uiItemR(layout, ptr, "axis", UI_ITEM_R_SPLIT_EMPTY_NAME, "", ICON_NONE); } +using namespace blender::realtime_compositor; + +class FlipOperation : public NodeOperation { + public: + using NodeOperation::NodeOperation; + + void execute() override + { + Result &input = get_input("Image"); + Result &result = get_result("Image"); + + /* Can't flip a single value, pass it through to the output. */ + if (input.is_single_value()) { + input.pass_through(result); + return; + } + + GPUShader *shader = shader_manager().get("compositor_flip"); + GPU_shader_bind(shader); + + GPU_shader_uniform_1b( + shader, "flip_x", ELEM(get_flip_mode(), CMP_NODE_FLIP_X, CMP_NODE_FLIP_X_Y)); + GPU_shader_uniform_1b( + shader, "flip_y", ELEM(get_flip_mode(), CMP_NODE_FLIP_Y, CMP_NODE_FLIP_X_Y)); + + input.bind_as_texture(shader, "input_tx"); + + const Domain domain = compute_domain(); + + result.allocate_texture(domain); + result.bind_as_image(shader, "output_img"); + + compute_dispatch_threads_at_least(shader, domain.size); + + input.unbind_as_texture(); + result.unbind_as_image(); + GPU_shader_unbind(); + } + + CMPNodeFlipMode get_flip_mode() + { + return (CMPNodeFlipMode)bnode().custom1; + } +}; + +static NodeOperation *get_compositor_operation(Context &context, DNode node) +{ + return new FlipOperation(context, node); +} + } // namespace blender::nodes::node_composite_flip_cc void register_node_type_cmp_flip() @@ -36,6 +97,7 @@ void register_node_type_cmp_flip() cmp_node_type_base(&ntype, CMP_NODE_FLIP, "Flip", NODE_CLASS_DISTORT); ntype.declare = 
file_ns::cmp_node_flip_declare; ntype.draw_buttons = file_ns::node_composit_buts_flip; + ntype.get_compositor_operation = file_ns::get_compositor_operation; nodeRegisterType(&ntype); } diff --git a/source/blender/nodes/composite/nodes/node_composite_gamma.cc b/source/blender/nodes/composite/nodes/node_composite_gamma.cc index b4b8502e915..660d8068231 100644 --- a/source/blender/nodes/composite/nodes/node_composite_gamma.cc +++ b/source/blender/nodes/composite/nodes/node_composite_gamma.cc @@ -5,6 +5,10 @@ * \ingroup cmpnodes */ +#include "GPU_material.h" + +#include "COM_shader_node.hh" + #include "node_composite_util.hh" /* **************** Gamma Tools ******************** */ @@ -13,15 +17,38 @@ namespace blender::nodes::node_composite_gamma_cc { static void cmp_node_gamma_declare(NodeDeclarationBuilder &b) { - b.add_input<decl::Color>(N_("Image")).default_value({1.0f, 1.0f, 1.0f, 1.0f}); + b.add_input<decl::Color>(N_("Image")) + .default_value({1.0f, 1.0f, 1.0f, 1.0f}) + .compositor_domain_priority(0); b.add_input<decl::Float>(N_("Gamma")) .default_value(1.0f) .min(0.001f) .max(10.0f) - .subtype(PROP_UNSIGNED); + .subtype(PROP_UNSIGNED) + .compositor_domain_priority(1); b.add_output<decl::Color>(N_("Image")); } +using namespace blender::realtime_compositor; + +class GammaShaderNode : public ShaderNode { + public: + using ShaderNode::ShaderNode; + + void compile(GPUMaterial *material) override + { + GPUNodeStack *inputs = get_inputs_array(); + GPUNodeStack *outputs = get_outputs_array(); + + GPU_stack_link(material, &bnode(), "node_composite_gamma", inputs, outputs); + } +}; + +static ShaderNode *get_compositor_shader_node(DNode node) +{ + return new GammaShaderNode(node); +} + } // namespace blender::nodes::node_composite_gamma_cc void register_node_type_cmp_gamma() @@ -32,6 +59,7 @@ void register_node_type_cmp_gamma() cmp_node_type_base(&ntype, CMP_NODE_GAMMA, "Gamma", NODE_CLASS_OP_COLOR); ntype.declare = file_ns::cmp_node_gamma_declare; + 
ntype.get_compositor_shader_node = file_ns::get_compositor_shader_node; nodeRegisterType(&ntype); } diff --git a/source/blender/nodes/composite/nodes/node_composite_glare.cc b/source/blender/nodes/composite/nodes/node_composite_glare.cc index 7f21d30cfa6..33577d5caf8 100644 --- a/source/blender/nodes/composite/nodes/node_composite_glare.cc +++ b/source/blender/nodes/composite/nodes/node_composite_glare.cc @@ -10,6 +10,8 @@ #include "UI_interface.h" #include "UI_resources.h" +#include "COM_node_operation.hh" + #include "node_composite_util.hh" namespace blender::nodes::node_composite_glare_cc { @@ -75,6 +77,23 @@ static void node_composit_buts_glare(uiLayout *layout, bContext *UNUSED(C), Poin } } +using namespace blender::realtime_compositor; + +class GlareOperation : public NodeOperation { + public: + using NodeOperation::NodeOperation; + + void execute() override + { + get_input("Image").pass_through(get_result("Image")); + } +}; + +static NodeOperation *get_compositor_operation(Context &context, DNode node) +{ + return new GlareOperation(context, node); +} + } // namespace blender::nodes::node_composite_glare_cc void register_node_type_cmp_glare() @@ -88,6 +107,7 @@ void register_node_type_cmp_glare() ntype.draw_buttons = file_ns::node_composit_buts_glare; node_type_init(&ntype, file_ns::node_composit_init_glare); node_type_storage(&ntype, "NodeGlare", node_free_standard_storage, node_copy_standard_storage); + ntype.get_compositor_operation = file_ns::get_compositor_operation; nodeRegisterType(&ntype); } diff --git a/source/blender/nodes/composite/nodes/node_composite_hue_sat_val.cc b/source/blender/nodes/composite/nodes/node_composite_hue_sat_val.cc index 08a048829df..091864a06f7 100644 --- a/source/blender/nodes/composite/nodes/node_composite_hue_sat_val.cc +++ b/source/blender/nodes/composite/nodes/node_composite_hue_sat_val.cc @@ -5,6 +5,10 @@ * \ingroup cmpnodes */ +#include "GPU_material.h" + +#include "COM_shader_node.hh" + #include 
"node_composite_util.hh" /* **************** Hue Saturation ******************** */ @@ -13,22 +17,56 @@ namespace blender::nodes::node_composite_hue_sat_val_cc { static void cmp_node_huesatval_declare(NodeDeclarationBuilder &b) { - b.add_input<decl::Color>(N_("Image")).default_value({1.0f, 1.0f, 1.0f, 1.0f}); - b.add_input<decl::Float>(N_("Hue")).default_value(0.5f).min(0.0f).max(1.0f).subtype(PROP_FACTOR); + b.add_input<decl::Color>(N_("Image")) + .default_value({1.0f, 1.0f, 1.0f, 1.0f}) + .compositor_domain_priority(0); + b.add_input<decl::Float>(N_("Hue")) + .default_value(0.5f) + .min(0.0f) + .max(1.0f) + .subtype(PROP_FACTOR) + .compositor_domain_priority(1); b.add_input<decl::Float>(N_("Saturation")) .default_value(1.0f) .min(0.0f) .max(2.0f) - .subtype(PROP_FACTOR); + .subtype(PROP_FACTOR) + .compositor_domain_priority(2); b.add_input<decl::Float>(N_("Value")) .default_value(1.0f) .min(0.0f) .max(2.0f) - .subtype(PROP_FACTOR); - b.add_input<decl::Float>(N_("Fac")).default_value(1.0f).min(0.0f).max(1.0f).subtype(PROP_FACTOR); + .subtype(PROP_FACTOR) + .compositor_domain_priority(3); + b.add_input<decl::Float>(N_("Fac")) + .default_value(1.0f) + .min(0.0f) + .max(1.0f) + .subtype(PROP_FACTOR) + .compositor_domain_priority(4); b.add_output<decl::Color>(N_("Image")); } +using namespace blender::realtime_compositor; + +class HueSaturationValueShaderNode : public ShaderNode { + public: + using ShaderNode::ShaderNode; + + void compile(GPUMaterial *material) override + { + GPUNodeStack *inputs = get_inputs_array(); + GPUNodeStack *outputs = get_outputs_array(); + + GPU_stack_link(material, &bnode(), "node_composite_hue_saturation_value", inputs, outputs); + } +}; + +static ShaderNode *get_compositor_shader_node(DNode node) +{ + return new HueSaturationValueShaderNode(node); +} + } // namespace blender::nodes::node_composite_hue_sat_val_cc void register_node_type_cmp_hue_sat() @@ -39,6 +77,7 @@ void register_node_type_cmp_hue_sat() cmp_node_type_base(&ntype, 
CMP_NODE_HUE_SAT, "Hue Saturation Value", NODE_CLASS_OP_COLOR); ntype.declare = file_ns::cmp_node_huesatval_declare; + ntype.get_compositor_shader_node = file_ns::get_compositor_shader_node; nodeRegisterType(&ntype); } diff --git a/source/blender/nodes/composite/nodes/node_composite_huecorrect.cc b/source/blender/nodes/composite/nodes/node_composite_huecorrect.cc index d252d96f8c3..a84420231aa 100644 --- a/source/blender/nodes/composite/nodes/node_composite_huecorrect.cc +++ b/source/blender/nodes/composite/nodes/node_composite_huecorrect.cc @@ -5,6 +5,12 @@ * \ingroup cmpnodes */ +#include "BKE_colortools.h" + +#include "GPU_material.h" + +#include "COM_shader_node.hh" + #include "node_composite_util.hh" #include "BKE_colortools.h" @@ -13,8 +19,15 @@ namespace blender::nodes::node_composite_huecorrect_cc { static void cmp_node_huecorrect_declare(NodeDeclarationBuilder &b) { - b.add_input<decl::Float>(N_("Fac")).default_value(1.0f).min(0.0f).max(1.0f).subtype(PROP_FACTOR); - b.add_input<decl::Color>(N_("Image")).default_value({1.0f, 1.0f, 1.0f, 1.0f}); + b.add_input<decl::Float>(N_("Fac")) + .default_value(1.0f) + .min(0.0f) + .max(1.0f) + .subtype(PROP_FACTOR) + .compositor_domain_priority(1); + b.add_input<decl::Color>(N_("Image")) + .default_value({1.0f, 1.0f, 1.0f, 1.0f}) + .compositor_domain_priority(0); b.add_output<decl::Color>(N_("Image")); } @@ -35,6 +48,53 @@ static void node_composit_init_huecorrect(bNodeTree *UNUSED(ntree), bNode *node) cumapping->cur = 1; } +using namespace blender::realtime_compositor; + +class HueCorrectShaderNode : public ShaderNode { + public: + using ShaderNode::ShaderNode; + + void compile(GPUMaterial *material) override + { + GPUNodeStack *inputs = get_inputs_array(); + GPUNodeStack *outputs = get_outputs_array(); + + CurveMapping *curve_mapping = get_curve_mapping(); + + BKE_curvemapping_init(curve_mapping); + float *band_values; + int band_size; + BKE_curvemapping_table_RGBA(curve_mapping, &band_values, &band_size); + float 
band_layer; + GPUNodeLink *band_texture = GPU_color_band(material, band_size, band_values, &band_layer); + + float range_minimums[CM_TOT]; + BKE_curvemapping_get_range_minimums(curve_mapping, range_minimums); + float range_dividers[CM_TOT]; + BKE_curvemapping_compute_range_dividers(curve_mapping, range_dividers); + + GPU_stack_link(material, + &bnode(), + "node_composite_hue_correct", + inputs, + outputs, + band_texture, + GPU_constant(&band_layer), + GPU_uniform(range_minimums), + GPU_uniform(range_dividers)); + } + + CurveMapping *get_curve_mapping() + { + return static_cast<CurveMapping *>(bnode().storage); + } +}; + +static ShaderNode *get_compositor_shader_node(DNode node) +{ + return new HueCorrectShaderNode(node); +} + } // namespace blender::nodes::node_composite_huecorrect_cc void register_node_type_cmp_huecorrect() @@ -48,6 +108,7 @@ void register_node_type_cmp_huecorrect() node_type_size(&ntype, 320, 140, 500); node_type_init(&ntype, file_ns::node_composit_init_huecorrect); node_type_storage(&ntype, "CurveMapping", node_free_curves, node_copy_curves); + ntype.get_compositor_shader_node = file_ns::get_compositor_shader_node; nodeRegisterType(&ntype); } diff --git a/source/blender/nodes/composite/nodes/node_composite_id_mask.cc b/source/blender/nodes/composite/nodes/node_composite_id_mask.cc index 25ab9aa63fc..ac8456cb931 100644 --- a/source/blender/nodes/composite/nodes/node_composite_id_mask.cc +++ b/source/blender/nodes/composite/nodes/node_composite_id_mask.cc @@ -8,6 +8,8 @@ #include "UI_interface.h" #include "UI_resources.h" +#include "COM_node_operation.hh" + #include "node_composite_util.hh" /* **************** ID Mask ******************** */ @@ -26,6 +28,23 @@ static void node_composit_buts_id_mask(uiLayout *layout, bContext *UNUSED(C), Po uiItemR(layout, ptr, "use_antialiasing", UI_ITEM_R_SPLIT_EMPTY_NAME, nullptr, ICON_NONE); } +using namespace blender::realtime_compositor; + +class IDMaskOperation : public NodeOperation { + public: + using 
NodeOperation::NodeOperation; + + void execute() override + { + get_input("ID value").pass_through(get_result("Alpha")); + } +}; + +static NodeOperation *get_compositor_operation(Context &context, DNode node) +{ + return new IDMaskOperation(context, node); +} + } // namespace blender::nodes::node_composite_id_mask_cc void register_node_type_cmp_idmask() @@ -37,6 +56,7 @@ void register_node_type_cmp_idmask() cmp_node_type_base(&ntype, CMP_NODE_ID_MASK, "ID Mask", NODE_CLASS_CONVERTER); ntype.declare = file_ns::cmp_node_idmask_declare; ntype.draw_buttons = file_ns::node_composit_buts_id_mask; + ntype.get_compositor_operation = file_ns::get_compositor_operation; nodeRegisterType(&ntype); } diff --git a/source/blender/nodes/composite/nodes/node_composite_image.cc b/source/blender/nodes/composite/nodes/node_composite_image.cc index d75aa575395..d8852e9333f 100644 --- a/source/blender/nodes/composite/nodes/node_composite_image.cc +++ b/source/blender/nodes/composite/nodes/node_composite_image.cc @@ -8,6 +8,7 @@ #include "node_composite_util.hh" #include "BLI_linklist.h" +#include "BLI_math_vec_types.hh" #include "BLI_utildefines.h" #include "BKE_context.h" @@ -17,6 +18,8 @@ #include "BKE_main.h" #include "BKE_scene.h" +#include "DEG_depsgraph_query.h" + #include "DNA_scene_types.h" #include "RE_engine.h" @@ -27,6 +30,12 @@ #include "UI_interface.h" #include "UI_resources.h" +#include "GPU_shader.h" +#include "GPU_texture.h" + +#include "COM_node_operation.hh" +#include "COM_utilities.hh" + /* **************** IMAGE (and RenderResult, multilayer image) ******************** */ static bNodeSocketTemplate cmp_node_rlayers_out[] = { @@ -433,6 +442,215 @@ static void node_composit_copy_image(bNodeTree *UNUSED(dest_ntree), } } +using namespace blender::realtime_compositor; + +class ImageOperation : public NodeOperation { + public: + using NodeOperation::NodeOperation; + + void execute() override + { + if (!is_valid()) { + allocate_invalid(); + return; + } + + 
update_image_frame_number(); + + for (const OutputSocketRef *output : node()->outputs()) { + compute_output(output->identifier()); + } + } + + /* Returns true if the node results can be computed, otherwise, returns false. */ + bool is_valid() + { + Image *image = get_image(); + ImageUser *image_user = get_image_user(); + if (!image || !image_user) { + return false; + } + + if (BKE_image_is_multilayer(image)) { + if (!image->rr) { + return false; + } + + RenderLayer *render_layer = get_render_layer(); + if (!render_layer) { + return false; + } + } + + return true; + } + + /* Allocate all needed outputs as invalid. This should be called when is_valid returns false. */ + void allocate_invalid() + { + for (const OutputSocketRef *output : node()->outputs()) { + if (!should_compute_output(output->identifier())) { + continue; + } + + Result &result = get_result(output->identifier()); + result.allocate_invalid(); + } + } + + /* Compute the effective frame number of the image if it was animated and invalidate the cached + * GPU texture if the computed frame number is different. 
*/ + void update_image_frame_number() + { + BKE_image_user_frame_calc(get_image(), get_image_user(), context().get_frame_number()); + } + + void compute_output(StringRef identifier) + { + if (!should_compute_output(identifier)) { + return; + } + + ImageUser image_user = compute_image_user_for_output(identifier); + GPUTexture *image_texture = BKE_image_get_gpu_texture(get_image(), &image_user, nullptr); + + const int2 size = int2(GPU_texture_width(image_texture), GPU_texture_height(image_texture)); + Result &result = get_result(identifier); + result.allocate_texture(Domain(size)); + + GPUShader *shader = shader_manager().get(get_shader_name(identifier)); + GPU_shader_bind(shader); + + const int input_unit = GPU_shader_get_texture_binding(shader, "input_tx"); + GPU_texture_bind(image_texture, input_unit); + + result.bind_as_image(shader, "output_img"); + + compute_dispatch_threads_at_least(shader, size); + + GPU_shader_unbind(); + GPU_texture_unbind(image_texture); + result.unbind_as_image(); + } + + /* Get a copy of the image user that is appropriate to retrieve the image buffer for the output + * with the given identifier. This essentially sets the appropriate pass and view indices that + * corresponds to the output. */ + ImageUser compute_image_user_for_output(StringRef identifier) + { + ImageUser image_user = *get_image_user(); + + /* Set the needed view. */ + image_user.view = get_view_index(); + + /* Set the needed pass. */ + if (BKE_image_is_multilayer(get_image())) { + image_user.pass = get_pass_index(get_pass_name(identifier)); + BKE_image_multilayer_index(get_image()->rr, &image_user); + } + else { + BKE_image_multiview_index(get_image(), &image_user); + } + + return image_user; + } + + /* Get the shader that should be used to compute the output with the given identifier. The + * shaders just copy the retrieved image textures into the results except for the alpha output, + * which extracts the alpha and writes it to the result instead. 
Note that a call to a host + * texture copy doesn't work because results are stored in a different half float formats. */ + const char *get_shader_name(StringRef identifier) + { + if (identifier == "Alpha") { + return "compositor_extract_alpha_from_color"; + } + else if (get_result(identifier).type() == ResultType::Color) { + return "compositor_convert_color_to_half_color"; + } + else { + return "compositor_convert_float_to_half_float"; + } + } + + Image *get_image() + { + return (Image *)bnode().id; + } + + ImageUser *get_image_user() + { + return static_cast<ImageUser *>(bnode().storage); + } + + /* Get the render layer selected in the node assuming the image is a multilayer image. */ + RenderLayer *get_render_layer() + { + const ListBase *layers = &get_image()->rr->layers; + return static_cast<RenderLayer *>(BLI_findlink(layers, get_image_user()->layer)); + } + + /* Get the name of the pass corresponding to the output with the given identifier assuming the + * image is a multilayer image. */ + const char *get_pass_name(StringRef identifier) + { + DOutputSocket output = node().output_by_identifier(identifier); + return static_cast<NodeImageLayer *>(output->bsocket()->storage)->pass_name; + } + + /* Get the index of the pass with the given name in the selected render layer's passes list + * assuming the image is a multilayer image. */ + int get_pass_index(const char *name) + { + return BLI_findstringindex(&get_render_layer()->passes, name, offsetof(RenderPass, name)); + } + + /* Get the index of the view selected in the node. If the image is not a multi-view image or only + * has a single view, then zero is returned. Otherwise, if the image is a multi-view image, the + * index of the selected view is returned. However, note that the value of the view member of the + * image user is not the actual index of the view. 
More specifically, the index 0 is reserved to + * denote the special mode of operation "All", which dynamically selects the view whose name + * matches the view currently being rendered. It follows that the views are then indexed starting + * from 1. So for non zero view values, the actual index of the view is the value of the view + * member of the image user minus 1. */ + int get_view_index() + { + /* The image is not a multi-view image, so just return zero. */ + if (!BKE_image_is_multiview(get_image())) { + return 0; + } + + const ListBase *views = &get_image()->rr->views; + /* There is only one view and its index is 0. */ + if (BLI_listbase_count_at_most(views, 2) < 2) { + return 0; + } + + const int view = get_image_user()->view; + /* The view is not zero, which means it is manually specified and the actual index is then the + * view value minus 1. */ + if (view != 0) { + return view - 1; + } + + /* Otherwise, the view value is zero, denoting the special mode of operation "All", which finds + * the index of the view whose name matches the view currently being rendered. */ + const char *view_name = context().get_view_name().data(); + const int matched_view = BLI_findstringindex(views, view_name, offsetof(RenderView, name)); + + /* No view matches the view currently being rendered, so fallback to the first view. 
*/ + if (matched_view == -1) { + return 0; + } + + return matched_view; + } +}; + +static NodeOperation *get_compositor_operation(Context &context, DNode node) +{ + return new ImageOperation(context, node); +} + } // namespace blender::nodes::node_composite_image_cc void register_node_type_cmp_image() @@ -446,6 +664,7 @@ void register_node_type_cmp_image() node_type_storage( &ntype, "ImageUser", file_ns::node_composit_free_image, file_ns::node_composit_copy_image); node_type_update(&ntype, file_ns::cmp_node_image_update); + ntype.get_compositor_operation = file_ns::get_compositor_operation; ntype.labelfunc = node_image_label; ntype.flag |= NODE_PREVIEW; @@ -469,7 +688,7 @@ const char *node_cmp_rlayers_sock_to_pass(int sock_index) return (STREQ(name, "Alpha")) ? RE_PASSNAME_COMBINED : name; } -namespace blender::nodes::node_composite_image_cc { +namespace blender::nodes::node_composite_render_layer_cc { static void node_composit_init_rlayers(const bContext *C, PointerRNA *ptr) { @@ -595,11 +814,60 @@ static void node_composit_buts_viewlayers(uiLayout *layout, bContext *C, Pointer RNA_string_set(&op_ptr, "scene", scene_name); } -} // namespace blender::nodes::node_composite_image_cc +using namespace blender::realtime_compositor; + +class RenderLayerOperation : public NodeOperation { + public: + using NodeOperation::NodeOperation; + + void execute() override + { + const int view_layer = bnode().custom1; + GPUTexture *pass_texture = context().get_input_texture(view_layer, SCE_PASS_COMBINED); + const int2 size = int2(GPU_texture_width(pass_texture), GPU_texture_height(pass_texture)); + + /* Compute image output. */ + Result &image_result = get_result("Image"); + image_result.allocate_texture(Domain(size)); + GPU_texture_copy(image_result.texture(), pass_texture); + + /* Compute alpha output. 
*/ + Result &alpha_result = get_result("Alpha"); + alpha_result.allocate_texture(Domain(size)); + + GPUShader *shader = shader_manager().get("compositor_extract_alpha_from_color"); + GPU_shader_bind(shader); + + const int input_unit = GPU_shader_get_texture_binding(shader, "input_tx"); + GPU_texture_bind(pass_texture, input_unit); + + alpha_result.bind_as_image(shader, "output_img"); + + compute_dispatch_threads_at_least(shader, size); + + GPU_shader_unbind(); + GPU_texture_unbind(pass_texture); + alpha_result.unbind_as_image(); + + /* Other output passes are not supported for now, so allocate them as invalid. */ + for (const OutputSocketRef *output : node()->outputs()) { + if (output->identifier() != "Image" && output->identifier() != "Alpha") { + get_result(output->identifier()).allocate_invalid(); + } + } + } +}; + +static NodeOperation *get_compositor_operation(Context &context, DNode node) +{ + return new RenderLayerOperation(context, node); +} + +} // namespace blender::nodes::node_composite_render_layer_cc void register_node_type_cmp_rlayers() { - namespace file_ns = blender::nodes::node_composite_image_cc; + namespace file_ns = blender::nodes::node_composite_render_layer_cc; static bNodeType ntype; @@ -608,6 +876,7 @@ void register_node_type_cmp_rlayers() ntype.draw_buttons = file_ns::node_composit_buts_viewlayers; ntype.initfunc_api = file_ns::node_composit_init_rlayers; ntype.poll = file_ns::node_composit_poll_rlayers; + ntype.get_compositor_operation = file_ns::get_compositor_operation; ntype.flag |= NODE_PREVIEW; node_type_storage( &ntype, nullptr, file_ns::node_composit_free_rlayers, file_ns::node_composit_copy_rlayers); diff --git a/source/blender/nodes/composite/nodes/node_composite_inpaint.cc b/source/blender/nodes/composite/nodes/node_composite_inpaint.cc index 2958d1b2869..f6e46bef299 100644 --- a/source/blender/nodes/composite/nodes/node_composite_inpaint.cc +++ b/source/blender/nodes/composite/nodes/node_composite_inpaint.cc @@ -8,6 +8,8 @@ 
#include "UI_interface.h" #include "UI_resources.h" +#include "COM_node_operation.hh" + #include "node_composite_util.hh" /* **************** Inpaint/ ******************** */ @@ -25,6 +27,23 @@ static void node_composit_buts_inpaint(uiLayout *layout, bContext *UNUSED(C), Po uiItemR(layout, ptr, "distance", UI_ITEM_R_SPLIT_EMPTY_NAME, nullptr, ICON_NONE); } +using namespace blender::realtime_compositor; + +class InpaintOperation : public NodeOperation { + public: + using NodeOperation::NodeOperation; + + void execute() override + { + get_input("Image").pass_through(get_result("Image")); + } +}; + +static NodeOperation *get_compositor_operation(Context &context, DNode node) +{ + return new InpaintOperation(context, node); +} + } // namespace blender::nodes::node_composite_inpaint_cc void register_node_type_cmp_inpaint() @@ -36,6 +55,7 @@ void register_node_type_cmp_inpaint() cmp_node_type_base(&ntype, CMP_NODE_INPAINT, "Inpaint", NODE_CLASS_OP_FILTER); ntype.declare = file_ns::cmp_node_inpaint_declare; ntype.draw_buttons = file_ns::node_composit_buts_inpaint; + ntype.get_compositor_operation = file_ns::get_compositor_operation; nodeRegisterType(&ntype); } diff --git a/source/blender/nodes/composite/nodes/node_composite_invert.cc b/source/blender/nodes/composite/nodes/node_composite_invert.cc index 6dff043537a..4bfcc7b6b9c 100644 --- a/source/blender/nodes/composite/nodes/node_composite_invert.cc +++ b/source/blender/nodes/composite/nodes/node_composite_invert.cc @@ -8,6 +8,10 @@ #include "UI_interface.h" #include "UI_resources.h" +#include "GPU_material.h" + +#include "COM_shader_node.hh" + #include "node_composite_util.hh" /* **************** INVERT ******************** */ @@ -16,8 +20,15 @@ namespace blender::nodes::node_composite_invert_cc { static void cmp_node_invert_declare(NodeDeclarationBuilder &b) { - b.add_input<decl::Float>(N_("Fac")).default_value(1.0f).min(0.0f).max(1.0f).subtype(PROP_FACTOR); - b.add_input<decl::Color>(N_("Color")).default_value({1.0f, 
1.0f, 1.0f, 1.0f}); + b.add_input<decl::Float>(N_("Fac")) + .default_value(1.0f) + .min(0.0f) + .max(1.0f) + .subtype(PROP_FACTOR) + .compositor_domain_priority(1); + b.add_input<decl::Color>(N_("Color")) + .default_value({1.0f, 1.0f, 1.0f, 1.0f}) + .compositor_domain_priority(0); b.add_output<decl::Color>(N_("Color")); } @@ -35,6 +46,45 @@ static void node_composit_buts_invert(uiLayout *layout, bContext *UNUSED(C), Poi uiItemR(col, ptr, "invert_alpha", UI_ITEM_R_SPLIT_EMPTY_NAME, nullptr, ICON_NONE); } +using namespace blender::realtime_compositor; + +class InvertShaderNode : public ShaderNode { + public: + using ShaderNode::ShaderNode; + + void compile(GPUMaterial *material) override + { + GPUNodeStack *inputs = get_inputs_array(); + GPUNodeStack *outputs = get_outputs_array(); + + const float do_rgb = get_do_rgb(); + const float do_alpha = get_do_alpha(); + + GPU_stack_link(material, + &bnode(), + "node_composite_invert", + inputs, + outputs, + GPU_constant(&do_rgb), + GPU_constant(&do_alpha)); + } + + bool get_do_rgb() + { + return bnode().custom1 & CMP_CHAN_RGB; + } + + bool get_do_alpha() + { + return bnode().custom1 & CMP_CHAN_A; + } +}; + +static ShaderNode *get_compositor_shader_node(DNode node) +{ + return new InvertShaderNode(node); +} + } // namespace blender::nodes::node_composite_invert_cc void register_node_type_cmp_invert() @@ -47,6 +97,7 @@ void register_node_type_cmp_invert() ntype.declare = file_ns::cmp_node_invert_declare; ntype.draw_buttons = file_ns::node_composit_buts_invert; node_type_init(&ntype, file_ns::node_composit_init_invert); + ntype.get_compositor_shader_node = file_ns::get_compositor_shader_node; nodeRegisterType(&ntype); } diff --git a/source/blender/nodes/composite/nodes/node_composite_keying.cc b/source/blender/nodes/composite/nodes/node_composite_keying.cc index fbfdf2ad3c6..8b584e216cd 100644 --- a/source/blender/nodes/composite/nodes/node_composite_keying.cc +++ b/source/blender/nodes/composite/nodes/node_composite_keying.cc 
@@ -14,6 +14,8 @@ #include "UI_interface.h" #include "UI_resources.h" +#include "COM_node_operation.hh" + #include "node_composite_util.hh" /* **************** Keying ******************** */ @@ -63,6 +65,25 @@ static void node_composit_buts_keying(uiLayout *layout, bContext *UNUSED(C), Poi uiItemR(layout, ptr, "blur_post", UI_ITEM_R_SPLIT_EMPTY_NAME, nullptr, ICON_NONE); } +using namespace blender::realtime_compositor; + +class KeyingOperation : public NodeOperation { + public: + using NodeOperation::NodeOperation; + + void execute() override + { + get_input("Image").pass_through(get_result("Image")); + get_result("Matte").allocate_invalid(); + get_result("Edges").allocate_invalid(); + } +}; + +static NodeOperation *get_compositor_operation(Context &context, DNode node) +{ + return new KeyingOperation(context, node); +} + } // namespace blender::nodes::node_composite_keying_cc void register_node_type_cmp_keying() @@ -77,6 +98,7 @@ void register_node_type_cmp_keying() node_type_init(&ntype, file_ns::node_composit_init_keying); node_type_storage( &ntype, "NodeKeyingData", node_free_standard_storage, node_copy_standard_storage); + ntype.get_compositor_operation = file_ns::get_compositor_operation; nodeRegisterType(&ntype); } diff --git a/source/blender/nodes/composite/nodes/node_composite_keyingscreen.cc b/source/blender/nodes/composite/nodes/node_composite_keyingscreen.cc index e835ee9e721..9eec705b6ca 100644 --- a/source/blender/nodes/composite/nodes/node_composite_keyingscreen.cc +++ b/source/blender/nodes/composite/nodes/node_composite_keyingscreen.cc @@ -21,6 +21,8 @@ #include "UI_interface.h" #include "UI_resources.h" +#include "COM_node_operation.hh" + #include "node_composite_util.hh" /* **************** Keying Screen ******************** */ @@ -78,6 +80,23 @@ static void node_composit_buts_keyingscreen(uiLayout *layout, bContext *C, Point } } +using namespace blender::realtime_compositor; + +class KeyingScreenOperation : public NodeOperation { + public: + 
using NodeOperation::NodeOperation; + + void execute() override + { + get_result("Screen").allocate_invalid(); + } +}; + +static NodeOperation *get_compositor_operation(Context &context, DNode node) +{ + return new KeyingScreenOperation(context, node); +} + } // namespace blender::nodes::node_composite_keyingscreen_cc void register_node_type_cmp_keyingscreen() @@ -92,6 +111,7 @@ void register_node_type_cmp_keyingscreen() ntype.initfunc_api = file_ns::node_composit_init_keyingscreen; node_type_storage( &ntype, "NodeKeyingScreenData", node_free_standard_storage, node_copy_standard_storage); + ntype.get_compositor_operation = file_ns::get_compositor_operation; nodeRegisterType(&ntype); } diff --git a/source/blender/nodes/composite/nodes/node_composite_lensdist.cc b/source/blender/nodes/composite/nodes/node_composite_lensdist.cc index 593b7cc9b71..26477b02496 100644 --- a/source/blender/nodes/composite/nodes/node_composite_lensdist.cc +++ b/source/blender/nodes/composite/nodes/node_composite_lensdist.cc @@ -5,20 +5,48 @@ * \ingroup cmpnodes */ +#include "BLI_math_base.h" +#include "BLI_math_vec_types.hh" + #include "RNA_access.h" #include "UI_interface.h" #include "UI_resources.h" +#include "GPU_shader.h" +#include "GPU_texture.h" + +#include "COM_node_operation.hh" +#include "COM_utilities.hh" + #include "node_composite_util.hh" +/* Distortion can't be exactly -1.0 as it will cause infinite pincushion distortion. */ +#define MINIMUM_DISTORTION -0.999f +/* Arbitrary scaling factor for the dispersion input in projector distortion mode. */ +#define PROJECTOR_DISPERSION_SCALE 5.0f +/* Arbitrary scaling factor for the dispersion input in screen distortion mode. */ +#define SCREEN_DISPERSION_SCALE 4.0f +/* Arbitrary scaling factor for the distortion input. 
*/ +#define DISTORTION_SCALE 4.0f + namespace blender::nodes::node_composite_lensdist_cc { static void cmp_node_lensdist_declare(NodeDeclarationBuilder &b) { - b.add_input<decl::Color>(N_("Image")).default_value({1.0f, 1.0f, 1.0f, 1.0f}); - b.add_input<decl::Float>(N_("Distort")).default_value(0.0f).min(-0.999f).max(1.0f); - b.add_input<decl::Float>(N_("Dispersion")).default_value(0.0f).min(0.0f).max(1.0f); + b.add_input<decl::Color>(N_("Image")) + .default_value({1.0f, 1.0f, 1.0f, 1.0f}) + .compositor_domain_priority(0); + b.add_input<decl::Float>(N_("Distort")) + .default_value(0.0f) + .min(MINIMUM_DISTORTION) + .max(1.0f) + .compositor_expects_single_value(); + b.add_input<decl::Float>(N_("Dispersion")) + .default_value(0.0f) + .min(0.0f) + .max(1.0f) + .compositor_expects_single_value(); b.add_output<decl::Color>(N_("Image")); } @@ -42,6 +70,178 @@ static void node_composit_buts_lensdist(uiLayout *layout, bContext *UNUSED(C), P uiItemR(col, ptr, "use_fit", UI_ITEM_R_SPLIT_EMPTY_NAME, nullptr, ICON_NONE); } +using namespace blender::realtime_compositor; + +class LensDistortionOperation : public NodeOperation { + public: + using NodeOperation::NodeOperation; + + void execute() override + { + if (is_identity()) { + get_input("Image").pass_through(get_result("Image")); + return; + } + + if (get_is_projector()) { + execute_projector_distortion(); + } + else { + execute_screen_distortion(); + } + } + + void execute_projector_distortion() + { + GPUShader *shader = shader_manager().get("compositor_projector_lens_distortion"); + GPU_shader_bind(shader); + + const Result &input_image = get_input("Image"); + input_image.bind_as_texture(shader, "input_tx"); + + GPU_texture_filter_mode(input_image.texture(), true); + GPU_texture_wrap_mode(input_image.texture(), false, false); + + const Domain domain = compute_domain(); + + const float dispersion = (get_dispersion() * PROJECTOR_DISPERSION_SCALE) / domain.size.x; + GPU_shader_uniform_1f(shader, "dispersion", dispersion); + + 
Result &output_image = get_result("Image"); + output_image.allocate_texture(domain); + output_image.bind_as_image(shader, "output_img"); + + compute_dispatch_threads_at_least(shader, domain.size); + + input_image.unbind_as_texture(); + output_image.unbind_as_image(); + GPU_shader_unbind(); + } + + void execute_screen_distortion() + { + GPUShader *shader = shader_manager().get(get_screen_distortion_shader()); + GPU_shader_bind(shader); + + const Result &input_image = get_input("Image"); + input_image.bind_as_texture(shader, "input_tx"); + + GPU_texture_filter_mode(input_image.texture(), true); + GPU_texture_wrap_mode(input_image.texture(), false, false); + + const Domain domain = compute_domain(); + + const float3 chromatic_distortion = compute_chromatic_distortion(); + GPU_shader_uniform_3fv(shader, "chromatic_distortion", chromatic_distortion); + + GPU_shader_uniform_1f(shader, "scale", compute_scale()); + + Result &output_image = get_result("Image"); + output_image.allocate_texture(domain); + output_image.bind_as_image(shader, "output_img"); + + compute_dispatch_threads_at_least(shader, domain.size); + + input_image.unbind_as_texture(); + output_image.unbind_as_image(); + GPU_shader_unbind(); + } + + const char *get_screen_distortion_shader() + { + if (get_is_jitter()) { + return "compositor_screen_lens_distortion_jitter"; + } + return "compositor_screen_lens_distortion"; + } + + float get_distortion() + { + const Result &input = get_input("Distort"); + return clamp_f(input.get_float_value_default(0.0f), MINIMUM_DISTORTION, 1.0f); + } + + float get_dispersion() + { + const Result &input = get_input("Dispersion"); + return clamp_f(input.get_float_value_default(0.0f), 0.0f, 1.0f); + } + + /* Get the distortion amount for each channel. The green channel has a distortion amount that + * matches that specified in the node inputs, while the red and blue channels have higher and + * lower distortion amounts respectively based on the dispersion value. 
*/ + float3 compute_chromatic_distortion() + { + const float green_distortion = get_distortion(); + const float dispersion = get_dispersion() / SCREEN_DISPERSION_SCALE; + const float red_distortion = clamp_f(green_distortion + dispersion, MINIMUM_DISTORTION, 1.0f); + const float blue_distortion = clamp_f(green_distortion - dispersion, MINIMUM_DISTORTION, 1.0f); + return float3(red_distortion, green_distortion, blue_distortion) * DISTORTION_SCALE; + } + + /* The distortion model model will distort the image in such a way that the result will no longer + * fit the domain of the original image, so we scale the image to account for that. If get_is_fit + * is false, then the scaling factor will be such that the furthest pixels horizontally and + * vertically are at the boundary of the image. Otherwise, if get_is_fit is true, the scaling + * factor will be such that the furthest pixels diagonally are at the corner of the image. */ + float compute_scale() + { + const float3 distortion = compute_chromatic_distortion() / DISTORTION_SCALE; + const float maximum_distortion = max_fff(distortion[0], distortion[1], distortion[2]); + + if (get_is_fit() && (maximum_distortion > 0.0f)) { + return 1.0f / (1.0f + 2.0f * maximum_distortion); + } + return 1.0f / (1.0f + maximum_distortion); + } + + bool get_is_projector() + { + return get_node_lens_distortion().proj; + } + + bool get_is_jitter() + { + return get_node_lens_distortion().jit; + } + + bool get_is_fit() + { + return get_node_lens_distortion().fit; + } + + NodeLensDist &get_node_lens_distortion() + { + return *static_cast<NodeLensDist *>(bnode().storage); + } + + /* Returns true if the operation does nothing and the input can be passed through. */ + bool is_identity() + { + /* The input is a single value and the operation does nothing. */ + if (get_input("Image").is_single_value()) { + return true; + } + + /* Projector have zero dispersion and does nothing. 
*/ + if (get_is_projector() && get_dispersion() == 0.0f) { + return true; + } + + /* Both distortion and dispersion are zero and the operation does nothing. */ + if (get_distortion() == 0.0f && get_dispersion() == 0.0f) { + return true; + } + + return false; + } +}; + +static NodeOperation *get_compositor_operation(Context &context, DNode node) +{ + return new LensDistortionOperation(context, node); +} + } // namespace blender::nodes::node_composite_lensdist_cc void register_node_type_cmp_lensdist() @@ -56,6 +256,7 @@ void register_node_type_cmp_lensdist() node_type_init(&ntype, file_ns::node_composit_init_lensdist); node_type_storage( &ntype, "NodeLensDist", node_free_standard_storage, node_copy_standard_storage); + ntype.get_compositor_operation = file_ns::get_compositor_operation; nodeRegisterType(&ntype); } diff --git a/source/blender/nodes/composite/nodes/node_composite_levels.cc b/source/blender/nodes/composite/nodes/node_composite_levels.cc index a30567672f0..2f1ebeb79b5 100644 --- a/source/blender/nodes/composite/nodes/node_composite_levels.cc +++ b/source/blender/nodes/composite/nodes/node_composite_levels.cc @@ -8,6 +8,8 @@ #include "UI_interface.h" #include "UI_resources.h" +#include "COM_node_operation.hh" + #include "node_composite_util.hh" /* **************** LEVELS ******************** */ @@ -31,6 +33,24 @@ static void node_composit_buts_view_levels(uiLayout *layout, bContext *UNUSED(C) uiItemR(layout, ptr, "channel", UI_ITEM_R_SPLIT_EMPTY_NAME, "", ICON_NONE); } +using namespace blender::realtime_compositor; + +class LevelsOperation : public NodeOperation { + public: + using NodeOperation::NodeOperation; + + void execute() override + { + get_result("Mean").allocate_invalid(); + get_result("Std Dev").allocate_invalid(); + } +}; + +static NodeOperation *get_compositor_operation(Context &context, DNode node) +{ + return new LevelsOperation(context, node); +} + } // namespace blender::nodes::node_composite_levels_cc void 
register_node_type_cmp_view_levels() @@ -44,6 +64,7 @@ void register_node_type_cmp_view_levels() ntype.draw_buttons = file_ns::node_composit_buts_view_levels; ntype.flag |= NODE_PREVIEW; node_type_init(&ntype, file_ns::node_composit_init_view_levels); + ntype.get_compositor_operation = file_ns::get_compositor_operation; nodeRegisterType(&ntype); } diff --git a/source/blender/nodes/composite/nodes/node_composite_luma_matte.cc b/source/blender/nodes/composite/nodes/node_composite_luma_matte.cc index 94697a2aafd..092a12a7ea4 100644 --- a/source/blender/nodes/composite/nodes/node_composite_luma_matte.cc +++ b/source/blender/nodes/composite/nodes/node_composite_luma_matte.cc @@ -5,9 +5,15 @@ * \ingroup cmpnodes */ +#include "IMB_colormanagement.h" + #include "UI_interface.h" #include "UI_resources.h" +#include "GPU_material.h" + +#include "COM_shader_node.hh" + #include "node_composite_util.hh" /* ******************* Luma Matte Node ********************************* */ @@ -16,7 +22,9 @@ namespace blender::nodes::node_composite_luma_matte_cc { static void cmp_node_luma_matte_declare(NodeDeclarationBuilder &b) { - b.add_input<decl::Color>(N_("Image")).default_value({1.0f, 1.0f, 1.0f, 1.0f}); + b.add_input<decl::Color>(N_("Image")) + .default_value({1.0f, 1.0f, 1.0f, 1.0f}) + .compositor_domain_priority(0); b.add_output<decl::Color>(N_("Image")); b.add_output<decl::Float>(N_("Matte")); } @@ -40,6 +48,53 @@ static void node_composit_buts_luma_matte(uiLayout *layout, bContext *UNUSED(C), col, ptr, "limit_min", UI_ITEM_R_SPLIT_EMPTY_NAME | UI_ITEM_R_SLIDER, nullptr, ICON_NONE); } +using namespace blender::realtime_compositor; + +class LuminanceMatteShaderNode : public ShaderNode { + public: + using ShaderNode::ShaderNode; + + void compile(GPUMaterial *material) override + { + GPUNodeStack *inputs = get_inputs_array(); + GPUNodeStack *outputs = get_outputs_array(); + + const float high = get_high(); + const float low = get_low(); + float luminance_coefficients[3]; + 
IMB_colormanagement_get_luminance_coefficients(luminance_coefficients); + + GPU_stack_link(material, + &bnode(), + "node_composite_luminance_matte", + inputs, + outputs, + GPU_uniform(&high), + GPU_uniform(&low), + GPU_constant(luminance_coefficients)); + } + + NodeChroma *get_node_chroma() + { + return static_cast<NodeChroma *>(bnode().storage); + } + + float get_high() + { + return get_node_chroma()->t1; + } + + float get_low() + { + return get_node_chroma()->t2; + } +}; + +static ShaderNode *get_compositor_shader_node(DNode node) +{ + return new LuminanceMatteShaderNode(node); +} + } // namespace blender::nodes::node_composite_luma_matte_cc void register_node_type_cmp_luma_matte() @@ -54,6 +109,7 @@ void register_node_type_cmp_luma_matte() ntype.flag |= NODE_PREVIEW; node_type_init(&ntype, file_ns::node_composit_init_luma_matte); node_type_storage(&ntype, "NodeChroma", node_free_standard_storage, node_copy_standard_storage); + ntype.get_compositor_shader_node = file_ns::get_compositor_shader_node; nodeRegisterType(&ntype); } diff --git a/source/blender/nodes/composite/nodes/node_composite_map_range.cc b/source/blender/nodes/composite/nodes/node_composite_map_range.cc index e52c6d096b9..e72869efa93 100644 --- a/source/blender/nodes/composite/nodes/node_composite_map_range.cc +++ b/source/blender/nodes/composite/nodes/node_composite_map_range.cc @@ -8,6 +8,10 @@ #include "UI_interface.h" #include "UI_resources.h" +#include "GPU_material.h" + +#include "COM_shader_node.hh" + #include "node_composite_util.hh" /* **************** Map Range ******************** */ @@ -16,11 +20,31 @@ namespace blender::nodes::node_composite_map_range_cc { static void cmp_node_map_range_declare(NodeDeclarationBuilder &b) { - b.add_input<decl::Float>(N_("Value")).default_value(1.0f).min(0.0f).max(1.0f); - b.add_input<decl::Float>(N_("From Min")).default_value(0.0f).min(-10000.0f).max(10000.0f); - b.add_input<decl::Float>(N_("From Max")).default_value(1.0f).min(-10000.0f).max(10000.0f); 
- b.add_input<decl::Float>(N_("To Min")).default_value(0.0f).min(-10000.0f).max(10000.0f); - b.add_input<decl::Float>(N_("To Max")).default_value(1.0f).min(-10000.0f).max(10000.0f); + b.add_input<decl::Float>(N_("Value")) + .default_value(1.0f) + .min(0.0f) + .max(1.0f) + .compositor_domain_priority(0); + b.add_input<decl::Float>(N_("From Min")) + .default_value(0.0f) + .min(-10000.0f) + .max(10000.0f) + .compositor_domain_priority(1); + b.add_input<decl::Float>(N_("From Max")) + .default_value(1.0f) + .min(-10000.0f) + .max(10000.0f) + .compositor_domain_priority(2); + b.add_input<decl::Float>(N_("To Min")) + .default_value(0.0f) + .min(-10000.0f) + .max(10000.0f) + .compositor_domain_priority(3); + b.add_input<decl::Float>(N_("To Max")) + .default_value(1.0f) + .min(-10000.0f) + .max(10000.0f) + .compositor_domain_priority(4); b.add_output<decl::Float>(N_("Value")); } @@ -32,6 +56,38 @@ static void node_composit_buts_map_range(uiLayout *layout, bContext *UNUSED(C), uiItemR(col, ptr, "use_clamp", UI_ITEM_R_SPLIT_EMPTY_NAME, nullptr, ICON_NONE); } +using namespace blender::realtime_compositor; + +class MapRangeShaderNode : public ShaderNode { + public: + using ShaderNode::ShaderNode; + + void compile(GPUMaterial *material) override + { + GPUNodeStack *inputs = get_inputs_array(); + GPUNodeStack *outputs = get_outputs_array(); + + const float should_clamp = get_should_clamp(); + + GPU_stack_link(material, + &bnode(), + "node_composite_map_range", + inputs, + outputs, + GPU_constant(&should_clamp)); + } + + bool get_should_clamp() + { + return bnode().custom1; + } +}; + +static ShaderNode *get_compositor_shader_node(DNode node) +{ + return new MapRangeShaderNode(node); +} + } // namespace blender::nodes::node_composite_map_range_cc void register_node_type_cmp_map_range() @@ -43,6 +99,7 @@ void register_node_type_cmp_map_range() cmp_node_type_base(&ntype, CMP_NODE_MAP_RANGE, "Map Range", NODE_CLASS_OP_VECTOR); ntype.declare = file_ns::cmp_node_map_range_declare; 
ntype.draw_buttons = file_ns::node_composit_buts_map_range; + ntype.get_compositor_shader_node = file_ns::get_compositor_shader_node; nodeRegisterType(&ntype); } diff --git a/source/blender/nodes/composite/nodes/node_composite_map_uv.cc b/source/blender/nodes/composite/nodes/node_composite_map_uv.cc index 31961f07ea4..4f660d62c3b 100644 --- a/source/blender/nodes/composite/nodes/node_composite_map_uv.cc +++ b/source/blender/nodes/composite/nodes/node_composite_map_uv.cc @@ -8,6 +8,8 @@ #include "UI_interface.h" #include "UI_resources.h" +#include "COM_node_operation.hh" + #include "node_composite_util.hh" /* **************** Map UV ******************** */ @@ -26,6 +28,23 @@ static void node_composit_buts_map_uv(uiLayout *layout, bContext *UNUSED(C), Poi uiItemR(layout, ptr, "alpha", UI_ITEM_R_SPLIT_EMPTY_NAME, nullptr, ICON_NONE); } +using namespace blender::realtime_compositor; + +class MapUVOperation : public NodeOperation { + public: + using NodeOperation::NodeOperation; + + void execute() override + { + get_input("Image").pass_through(get_result("Image")); + } +}; + +static NodeOperation *get_compositor_operation(Context &context, DNode node) +{ + return new MapUVOperation(context, node); +} + } // namespace blender::nodes::node_composite_map_uv_cc void register_node_type_cmp_mapuv() @@ -37,6 +56,7 @@ void register_node_type_cmp_mapuv() cmp_node_type_base(&ntype, CMP_NODE_MAP_UV, "Map UV", NODE_CLASS_DISTORT); ntype.declare = file_ns::cmp_node_map_uv_declare; ntype.draw_buttons = file_ns::node_composit_buts_map_uv; + ntype.get_compositor_operation = file_ns::get_compositor_operation; nodeRegisterType(&ntype); } diff --git a/source/blender/nodes/composite/nodes/node_composite_map_value.cc b/source/blender/nodes/composite/nodes/node_composite_map_value.cc index bb42628ed3d..ec9b2d56636 100644 --- a/source/blender/nodes/composite/nodes/node_composite_map_value.cc +++ b/source/blender/nodes/composite/nodes/node_composite_map_value.cc @@ -12,6 +12,10 @@ #include 
"UI_interface.h" #include "UI_resources.h" +#include "GPU_material.h" + +#include "COM_shader_node.hh" + #include "node_composite_util.hh" /* **************** MAP VALUE ******************** */ @@ -20,7 +24,11 @@ namespace blender::nodes::node_composite_map_value_cc { static void cmp_node_map_value_declare(NodeDeclarationBuilder &b) { - b.add_input<decl::Float>(N_("Value")).default_value(1.0f).min(0.0f).max(1.0f); + b.add_input<decl::Float>(N_("Value")) + .default_value(1.0f) + .min(0.0f) + .max(1.0f) + .compositor_domain_priority(0); b.add_output<decl::Float>(N_("Value")); } @@ -50,6 +58,56 @@ static void node_composit_buts_map_value(uiLayout *layout, bContext *UNUSED(C), uiItemR(sub, ptr, "max", UI_ITEM_R_SPLIT_EMPTY_NAME, "", ICON_NONE); } +using namespace blender::realtime_compositor; + +class MapValueShaderNode : public ShaderNode { + public: + using ShaderNode::ShaderNode; + + void compile(GPUMaterial *material) override + { + GPUNodeStack *inputs = get_inputs_array(); + GPUNodeStack *outputs = get_outputs_array(); + + const TexMapping *texture_mapping = get_texture_mapping(); + + const float use_min = get_use_min(); + const float use_max = get_use_max(); + + GPU_stack_link(material, + &bnode(), + "node_composite_map_value", + inputs, + outputs, + GPU_uniform(texture_mapping->loc), + GPU_uniform(texture_mapping->size), + GPU_constant(&use_min), + GPU_uniform(texture_mapping->min), + GPU_constant(&use_max), + GPU_uniform(texture_mapping->max)); + } + + TexMapping *get_texture_mapping() + { + return static_cast<TexMapping *>(bnode().storage); + } + + bool get_use_min() + { + return get_texture_mapping()->flag & TEXMAP_CLIP_MIN; + } + + bool get_use_max() + { + return get_texture_mapping()->flag & TEXMAP_CLIP_MAX; + } +}; + +static ShaderNode *get_compositor_shader_node(DNode node) +{ + return new MapValueShaderNode(node); +} + } // namespace blender::nodes::node_composite_map_value_cc void register_node_type_cmp_map_value() @@ -63,6 +121,7 @@ void 
register_node_type_cmp_map_value() ntype.draw_buttons = file_ns::node_composit_buts_map_value; node_type_init(&ntype, file_ns::node_composit_init_map_value); node_type_storage(&ntype, "TexMapping", node_free_standard_storage, node_copy_standard_storage); + ntype.get_compositor_shader_node = file_ns::get_compositor_shader_node; nodeRegisterType(&ntype); } diff --git a/source/blender/nodes/composite/nodes/node_composite_mask.cc b/source/blender/nodes/composite/nodes/node_composite_mask.cc index 5b8fac5d1c0..2372dbae3f2 100644 --- a/source/blender/nodes/composite/nodes/node_composite_mask.cc +++ b/source/blender/nodes/composite/nodes/node_composite_mask.cc @@ -10,6 +10,8 @@ #include "UI_interface.h" #include "UI_resources.h" +#include "COM_node_operation.hh" + #include "node_composite_util.hh" /* **************** Mask ******************** */ @@ -74,6 +76,23 @@ static void node_composit_buts_mask(uiLayout *layout, bContext *C, PointerRNA *p } } +using namespace blender::realtime_compositor; + +class MaskOperation : public NodeOperation { + public: + using NodeOperation::NodeOperation; + + void execute() override + { + get_result("Mask").allocate_invalid(); + } +}; + +static NodeOperation *get_compositor_operation(Context &context, DNode node) +{ + return new MaskOperation(context, node); +} + } // namespace blender::nodes::node_composite_mask_cc void register_node_type_cmp_mask() @@ -87,6 +106,7 @@ void register_node_type_cmp_mask() ntype.draw_buttons = file_ns::node_composit_buts_mask; node_type_init(&ntype, file_ns::node_composit_init_mask); ntype.labelfunc = file_ns::node_mask_label; + ntype.get_compositor_operation = file_ns::get_compositor_operation; node_type_storage(&ntype, "NodeMask", node_free_standard_storage, node_copy_standard_storage); diff --git a/source/blender/nodes/composite/nodes/node_composite_math.cc b/source/blender/nodes/composite/nodes/node_composite_math.cc index 7b2eadef2cb..4baf057913e 100644 --- 
a/source/blender/nodes/composite/nodes/node_composite_math.cc +++ b/source/blender/nodes/composite/nodes/node_composite_math.cc @@ -5,6 +5,12 @@ * \ingroup cmpnodes */ +#include "GPU_material.h" + +#include "COM_shader_node.hh" + +#include "NOD_math_functions.hh" + #include "node_composite_util.hh" /* **************** SCALAR MATH ******************** */ @@ -13,18 +19,72 @@ namespace blender::nodes::node_composite_math_cc { static void cmp_node_math_declare(NodeDeclarationBuilder &b) { - b.add_input<decl::Float>(N_("Value")).default_value(0.5f).min(-10000.0f).max(10000.0f); + b.add_input<decl::Float>(N_("Value")) + .default_value(0.5f) + .min(-10000.0f) + .max(10000.0f) + .compositor_domain_priority(0); b.add_input<decl::Float>(N_("Value"), "Value_001") .default_value(0.5f) .min(-10000.0f) - .max(10000.0f); + .max(10000.0f) + .compositor_domain_priority(1); b.add_input<decl::Float>(N_("Value"), "Value_002") .default_value(0.5f) .min(-10000.0f) - .max(10000.0f); + .max(10000.0f) + .compositor_domain_priority(2); b.add_output<decl::Float>(N_("Value")); } +using namespace blender::realtime_compositor; + +class MathShaderNode : public ShaderNode { + public: + using ShaderNode::ShaderNode; + + void compile(GPUMaterial *material) override + { + GPUNodeStack *inputs = get_inputs_array(); + GPUNodeStack *outputs = get_outputs_array(); + + GPU_stack_link(material, &bnode(), get_shader_function_name(), inputs, outputs); + + if (!get_should_clamp()) { + return; + } + + const float min = 0.0f; + const float max = 1.0f; + GPU_link(material, + "clamp_value", + get_output("Value").link, + GPU_constant(&min), + GPU_constant(&max), + &get_output("Value").link); + } + + NodeMathOperation get_operation() + { + return (NodeMathOperation)bnode().custom1; + } + + const char *get_shader_function_name() + { + return get_float_math_operation_info(get_operation())->shader_name.c_str(); + } + + bool get_should_clamp() + { + return bnode().custom2 & SHD_MATH_CLAMP; + } +}; + +static ShaderNode 
*get_compositor_shader_node(DNode node) +{ + return new MathShaderNode(node); +} + } // namespace blender::nodes::node_composite_math_cc void register_node_type_cmp_math() @@ -37,6 +97,7 @@ void register_node_type_cmp_math() ntype.declare = file_ns::cmp_node_math_declare; ntype.labelfunc = node_math_label; node_type_update(&ntype, node_math_update); + ntype.get_compositor_shader_node = file_ns::get_compositor_shader_node; nodeRegisterType(&ntype); } diff --git a/source/blender/nodes/composite/nodes/node_composite_mixrgb.cc b/source/blender/nodes/composite/nodes/node_composite_mixrgb.cc index fc11aa188b0..a1fbbfe7d40 100644 --- a/source/blender/nodes/composite/nodes/node_composite_mixrgb.cc +++ b/source/blender/nodes/composite/nodes/node_composite_mixrgb.cc @@ -5,6 +5,14 @@ * \ingroup cmpnodes */ +#include "BLI_assert.h" + +#include "DNA_material_types.h" + +#include "GPU_material.h" + +#include "COM_shader_node.hh" + #include "node_composite_util.hh" /* **************** MIX RGB ******************** */ @@ -13,12 +21,122 @@ namespace blender::nodes::node_composite_mixrgb_cc { static void cmp_node_mixrgb_declare(NodeDeclarationBuilder &b) { - b.add_input<decl::Float>(N_("Fac")).default_value(1.0f).min(0.0f).max(1.0f).subtype(PROP_FACTOR); - b.add_input<decl::Color>(N_("Image")).default_value({1.0f, 1.0f, 1.0f, 1.0f}); - b.add_input<decl::Color>(N_("Image"), "Image_001").default_value({1.0f, 1.0f, 1.0f, 1.0f}); + b.add_input<decl::Float>(N_("Fac")) + .default_value(1.0f) + .min(0.0f) + .max(1.0f) + .subtype(PROP_FACTOR) + .compositor_domain_priority(2); + b.add_input<decl::Color>(N_("Image")) + .default_value({1.0f, 1.0f, 1.0f, 1.0f}) + .compositor_domain_priority(0); + b.add_input<decl::Color>(N_("Image"), "Image_001") + .default_value({1.0f, 1.0f, 1.0f, 1.0f}) + .compositor_domain_priority(1); b.add_output<decl::Color>(N_("Image")); } +using namespace blender::realtime_compositor; + +class MixRGBShaderNode : public ShaderNode { + public: + using 
ShaderNode::ShaderNode; + + void compile(GPUMaterial *material) override + { + GPUNodeStack *inputs = get_inputs_array(); + GPUNodeStack *outputs = get_outputs_array(); + + if (get_use_alpha()) { + GPU_link(material, + "multiply_by_alpha", + get_input_link("Fac"), + get_input_link("Image_001"), + &get_input("Fac").link); + } + + GPU_stack_link(material, &bnode(), get_shader_function_name(), inputs, outputs); + + if (!get_should_clamp()) { + return; + } + + const float min[4] = {0.0f, 0.0f, 0.0f, 0.0f}; + const float max[4] = {1.0f, 1.0f, 1.0f, 1.0f}; + GPU_link(material, + "clamp_color", + get_output("Image").link, + GPU_constant(min), + GPU_constant(max), + &get_output("Image").link); + } + + int get_mode() + { + return bnode().custom1; + } + + const char *get_shader_function_name() + { + switch (get_mode()) { + case MA_RAMP_BLEND: + return "mix_blend"; + case MA_RAMP_ADD: + return "mix_add"; + case MA_RAMP_MULT: + return "mix_mult"; + case MA_RAMP_SUB: + return "mix_sub"; + case MA_RAMP_SCREEN: + return "mix_screen"; + case MA_RAMP_DIV: + return "mix_div"; + case MA_RAMP_DIFF: + return "mix_diff"; + case MA_RAMP_DARK: + return "mix_dark"; + case MA_RAMP_LIGHT: + return "mix_light"; + case MA_RAMP_OVERLAY: + return "mix_overlay"; + case MA_RAMP_DODGE: + return "mix_dodge"; + case MA_RAMP_BURN: + return "mix_burn"; + case MA_RAMP_HUE: + return "mix_hue"; + case MA_RAMP_SAT: + return "mix_sat"; + case MA_RAMP_VAL: + return "mix_val"; + case MA_RAMP_COLOR: + return "mix_color"; + case MA_RAMP_SOFT: + return "mix_soft"; + case MA_RAMP_LINEAR: + return "mix_linear"; + } + + BLI_assert_unreachable(); + return nullptr; + } + + bool get_use_alpha() + { + return bnode().custom2 & SHD_MIXRGB_USE_ALPHA; + } + + bool get_should_clamp() + { + return bnode().custom2 & SHD_MIXRGB_CLAMP; + } +}; + +static ShaderNode *get_compositor_shader_node(DNode node) +{ + return new MixRGBShaderNode(node); +} + } // namespace blender::nodes::node_composite_mixrgb_cc void 
register_node_type_cmp_mix_rgb() @@ -31,6 +149,7 @@ void register_node_type_cmp_mix_rgb() ntype.flag |= NODE_PREVIEW; ntype.declare = file_ns::cmp_node_mixrgb_declare; ntype.labelfunc = node_blend_label; + ntype.get_compositor_shader_node = file_ns::get_compositor_shader_node; nodeRegisterType(&ntype); } diff --git a/source/blender/nodes/composite/nodes/node_composite_movieclip.cc b/source/blender/nodes/composite/nodes/node_composite_movieclip.cc index a4d5f294fe0..ec95de3da18 100644 --- a/source/blender/nodes/composite/nodes/node_composite_movieclip.cc +++ b/source/blender/nodes/composite/nodes/node_composite_movieclip.cc @@ -5,8 +5,13 @@ * \ingroup cmpnodes */ +#include "BLI_math_vec_types.hh" + #include "BKE_context.h" #include "BKE_lib_id.h" +#include "BKE_movieclip.h" +#include "BKE_tracking.h" + #include "DNA_defaults.h" #include "RNA_access.h" @@ -14,6 +19,12 @@ #include "UI_interface.h" #include "UI_resources.h" +#include "GPU_shader.h" +#include "GPU_texture.h" + +#include "COM_node_operation.hh" +#include "COM_utilities.hh" + #include "node_composite_util.hh" namespace blender::nodes::node_composite_movieclip_cc { @@ -79,6 +90,177 @@ static void node_composit_buts_movieclip_ex(uiLayout *layout, bContext *C, Point uiTemplateColorspaceSettings(layout, &clipptr, "colorspace_settings"); } +using namespace blender::realtime_compositor; + +class MovieClipOperation : public NodeOperation { + public: + using NodeOperation::NodeOperation; + + void execute() override + { + GPUTexture *movie_clip_texture = get_movie_clip_texture(); + + compute_image(movie_clip_texture); + compute_alpha(movie_clip_texture); + compute_stabilization_data(movie_clip_texture); + + free_movie_clip_texture(); + } + + void compute_image(GPUTexture *movie_clip_texture) + { + if (!should_compute_output("Image")) { + return; + } + + Result &result = get_result("Image"); + + /* The movie clip texture is invalid or missing, set an appropriate fallback value. 
*/ + if (!movie_clip_texture) { + result.allocate_invalid(); + return; + } + + const int2 size = int2(GPU_texture_width(movie_clip_texture), + GPU_texture_height(movie_clip_texture)); + result.allocate_texture(Domain(size)); + + GPUShader *shader = shader_manager().get("compositor_convert_color_to_half_color"); + GPU_shader_bind(shader); + + const int input_unit = GPU_shader_get_texture_binding(shader, "input_tx"); + GPU_texture_bind(movie_clip_texture, input_unit); + + result.bind_as_image(shader, "output_img"); + + compute_dispatch_threads_at_least(shader, size); + + GPU_shader_unbind(); + GPU_texture_unbind(movie_clip_texture); + result.unbind_as_image(); + } + + void compute_alpha(GPUTexture *movie_clip_texture) + { + if (!should_compute_output("Alpha")) { + return; + } + + Result &result = get_result("Alpha"); + + /* The movie clip texture is invalid or missing, set an appropriate fallback value. */ + if (!movie_clip_texture) { + result.allocate_single_value(); + result.set_float_value(1.0f); + return; + } + + const int2 size = int2(GPU_texture_width(movie_clip_texture), + GPU_texture_height(movie_clip_texture)); + result.allocate_texture(Domain(size)); + + GPUShader *shader = shader_manager().get("compositor_extract_alpha_from_color"); + GPU_shader_bind(shader); + + const int input_unit = GPU_shader_get_texture_binding(shader, "input_tx"); + GPU_texture_bind(movie_clip_texture, input_unit); + + result.bind_as_image(shader, "output_img"); + + compute_dispatch_threads_at_least(shader, size); + + GPU_shader_unbind(); + GPU_texture_unbind(movie_clip_texture); + result.unbind_as_image(); + } + + void compute_stabilization_data(GPUTexture *movie_clip_texture) + { + /* The movie clip texture is invalid or missing, set appropriate fallback values. 
*/ + if (!movie_clip_texture) { + if (should_compute_output("Offset X")) { + Result &result = get_result("Offset X"); + result.allocate_single_value(); + result.set_float_value(0.0f); + } + if (should_compute_output("Offset Y")) { + Result &result = get_result("Offset Y"); + result.allocate_single_value(); + result.set_float_value(0.0f); + } + if (should_compute_output("Scale")) { + Result &result = get_result("Scale"); + result.allocate_single_value(); + result.set_float_value(1.0f); + } + if (should_compute_output("Angle")) { + Result &result = get_result("Angle"); + result.allocate_single_value(); + result.set_float_value(0.0f); + } + return; + } + + MovieClip *movie_clip = get_movie_clip(); + const int frame_number = BKE_movieclip_remap_scene_to_clip_frame(movie_clip, + context().get_frame_number()); + const int width = GPU_texture_width(movie_clip_texture); + const int height = GPU_texture_height(movie_clip_texture); + + /* If the movie clip has no stabilization data, it will initialize the given values with + * fallback values regardless, so no need to handle that case. 
*/ + float2 offset; + float scale, angle; + BKE_tracking_stabilization_data_get( + movie_clip, frame_number, width, height, offset, &scale, &angle); + + if (should_compute_output("Offset X")) { + Result &result = get_result("Offset X"); + result.allocate_single_value(); + result.set_float_value(offset.x); + } + if (should_compute_output("Offset Y")) { + Result &result = get_result("Offset Y"); + result.allocate_single_value(); + result.set_float_value(offset.y); + } + if (should_compute_output("Scale")) { + Result &result = get_result("Scale"); + result.allocate_single_value(); + result.set_float_value(scale); + } + if (should_compute_output("Angle")) { + Result &result = get_result("Angle"); + result.allocate_single_value(); + result.set_float_value(angle); + } + } + + GPUTexture *get_movie_clip_texture() + { + MovieClip *movie_clip = get_movie_clip(); + MovieClipUser *movie_clip_user = static_cast<MovieClipUser *>(bnode().storage); + BKE_movieclip_user_set_frame(movie_clip_user, context().get_frame_number()); + return BKE_movieclip_get_gpu_texture(movie_clip, movie_clip_user); + } + + void free_movie_clip_texture() + { + MovieClip *movie_clip = get_movie_clip(); + return BKE_movieclip_free_gputexture(movie_clip); + } + + MovieClip *get_movie_clip() + { + return (MovieClip *)bnode().id; + } +}; + +static NodeOperation *get_compositor_operation(Context &context, DNode node) +{ + return new MovieClipOperation(context, node); +} + } // namespace blender::nodes::node_composite_movieclip_cc void register_node_type_cmp_movieclip() @@ -91,6 +273,7 @@ void register_node_type_cmp_movieclip() ntype.declare = file_ns::cmp_node_movieclip_declare; ntype.draw_buttons = file_ns::node_composit_buts_movieclip; ntype.draw_buttons_ex = file_ns::node_composit_buts_movieclip_ex; + ntype.get_compositor_operation = file_ns::get_compositor_operation; ntype.initfunc_api = file_ns::init; ntype.flag |= NODE_PREVIEW; node_type_storage( diff --git 
a/source/blender/nodes/composite/nodes/node_composite_moviedistortion.cc b/source/blender/nodes/composite/nodes/node_composite_moviedistortion.cc index 4d52a767b8a..88638586594 100644 --- a/source/blender/nodes/composite/nodes/node_composite_moviedistortion.cc +++ b/source/blender/nodes/composite/nodes/node_composite_moviedistortion.cc @@ -12,6 +12,8 @@ #include "UI_interface.h" #include "UI_resources.h" +#include "COM_node_operation.hh" + #include "node_composite_util.hh" /* **************** Translate ******************** */ @@ -81,6 +83,23 @@ static void node_composit_buts_moviedistortion(uiLayout *layout, bContext *C, Po uiItemR(layout, ptr, "distortion_type", UI_ITEM_R_SPLIT_EMPTY_NAME, "", ICON_NONE); } +using namespace blender::realtime_compositor; + +class MovieDistortionOperation : public NodeOperation { + public: + using NodeOperation::NodeOperation; + + void execute() override + { + get_input("Image").pass_through(get_result("Image")); + } +}; + +static NodeOperation *get_compositor_operation(Context &context, DNode node) +{ + return new MovieDistortionOperation(context, node); +} + } // namespace blender::nodes::node_composite_moviedistortion_cc void register_node_type_cmp_moviedistortion() @@ -95,6 +114,7 @@ void register_node_type_cmp_moviedistortion() ntype.labelfunc = file_ns::label; ntype.initfunc_api = file_ns::init; node_type_storage(&ntype, nullptr, file_ns::storage_free, file_ns::storage_copy); + ntype.get_compositor_operation = file_ns::get_compositor_operation; nodeRegisterType(&ntype); } diff --git a/source/blender/nodes/composite/nodes/node_composite_normal.cc b/source/blender/nodes/composite/nodes/node_composite_normal.cc index b4dd0bbacd0..f61ace01cfd 100644 --- a/source/blender/nodes/composite/nodes/node_composite_normal.cc +++ b/source/blender/nodes/composite/nodes/node_composite_normal.cc @@ -5,6 +5,10 @@ * \ingroup cmpnodes */ +#include "GPU_material.h" + +#include "COM_shader_node.hh" + #include "node_composite_util.hh" /* 
**************** NORMAL ******************** */ @@ -17,7 +21,8 @@ static void cmp_node_normal_declare(NodeDeclarationBuilder &b) .default_value({0.0f, 0.0f, 1.0f}) .min(-1.0f) .max(1.0f) - .subtype(PROP_DIRECTION); + .subtype(PROP_DIRECTION) + .compositor_domain_priority(0); b.add_output<decl::Vector>(N_("Normal")) .default_value({0.0f, 0.0f, 1.0f}) .min(-1.0f) @@ -26,6 +31,37 @@ static void cmp_node_normal_declare(NodeDeclarationBuilder &b) b.add_output<decl::Float>(N_("Dot")); } +using namespace blender::realtime_compositor; + +class NormalShaderNode : public ShaderNode { + public: + using ShaderNode::ShaderNode; + + void compile(GPUMaterial *material) override + { + GPUNodeStack *inputs = get_inputs_array(); + GPUNodeStack *outputs = get_outputs_array(); + + GPU_stack_link(material, + &bnode(), + "node_composite_normal", + inputs, + outputs, + GPU_uniform(get_vector_value())); + } + + /* The vector value is stored in the default value of the output socket. */ + float *get_vector_value() + { + return node().output_by_identifier("Normal")->default_value<bNodeSocketValueVector>()->value; + } +}; + +static ShaderNode *get_compositor_shader_node(DNode node) +{ + return new NormalShaderNode(node); +} + } // namespace blender::nodes::node_composite_normal_cc void register_node_type_cmp_normal() @@ -36,6 +72,7 @@ void register_node_type_cmp_normal() cmp_node_type_base(&ntype, CMP_NODE_NORMAL, "Normal", NODE_CLASS_OP_VECTOR); ntype.declare = file_ns::cmp_node_normal_declare; + ntype.get_compositor_shader_node = file_ns::get_compositor_shader_node; nodeRegisterType(&ntype); } diff --git a/source/blender/nodes/composite/nodes/node_composite_normalize.cc b/source/blender/nodes/composite/nodes/node_composite_normalize.cc index 49318279bdb..21765825468 100644 --- a/source/blender/nodes/composite/nodes/node_composite_normalize.cc +++ b/source/blender/nodes/composite/nodes/node_composite_normalize.cc @@ -5,6 +5,8 @@ * \ingroup cmpnodes */ +#include "COM_node_operation.hh" + 
#include "node_composite_util.hh" /* **************** NORMALIZE single channel, useful for Z buffer ******************** */ @@ -17,6 +19,23 @@ static void cmp_node_normalize_declare(NodeDeclarationBuilder &b) b.add_output<decl::Float>(N_("Value")); } +using namespace blender::realtime_compositor; + +class NormalizeOperation : public NodeOperation { + public: + using NodeOperation::NodeOperation; + + void execute() override + { + get_input("Value").pass_through(get_result("Value")); + } +}; + +static NodeOperation *get_compositor_operation(Context &context, DNode node) +{ + return new NormalizeOperation(context, node); +} + } // namespace blender::nodes::node_composite_normalize_cc void register_node_type_cmp_normalize() @@ -27,6 +46,7 @@ void register_node_type_cmp_normalize() cmp_node_type_base(&ntype, CMP_NODE_NORMALIZE, "Normalize", NODE_CLASS_OP_VECTOR); ntype.declare = file_ns::cmp_node_normalize_declare; + ntype.get_compositor_operation = file_ns::get_compositor_operation; nodeRegisterType(&ntype); } diff --git a/source/blender/nodes/composite/nodes/node_composite_output_file.cc b/source/blender/nodes/composite/nodes/node_composite_output_file.cc index 84235b085a4..5ed383977a5 100644 --- a/source/blender/nodes/composite/nodes/node_composite_output_file.cc +++ b/source/blender/nodes/composite/nodes/node_composite_output_file.cc @@ -24,6 +24,8 @@ #include "IMB_openexr.h" +#include "COM_node_operation.hh" + #include "node_composite_util.hh" /* **************** OUTPUT FILE ******************** */ @@ -439,6 +441,22 @@ static void node_composit_buts_file_output_ex(uiLayout *layout, bContext *C, Poi } } +using namespace blender::realtime_compositor; + +class OutputFileOperation : public NodeOperation { + public: + using NodeOperation::NodeOperation; + + void execute() override + { + } +}; + +static NodeOperation *get_compositor_operation(Context &context, DNode node) +{ + return new OutputFileOperation(context, node); +} + } // namespace 
blender::nodes::node_composite_output_file_cc void register_node_type_cmp_output_file() @@ -455,6 +473,7 @@ void register_node_type_cmp_output_file() node_type_storage( &ntype, "NodeImageMultiFile", file_ns::free_output_file, file_ns::copy_output_file); node_type_update(&ntype, file_ns::update_output_file); + ntype.get_compositor_operation = file_ns::get_compositor_operation; nodeRegisterType(&ntype); } diff --git a/source/blender/nodes/composite/nodes/node_composite_pixelate.cc b/source/blender/nodes/composite/nodes/node_composite_pixelate.cc index 529aa0f84de..4567464a547 100644 --- a/source/blender/nodes/composite/nodes/node_composite_pixelate.cc +++ b/source/blender/nodes/composite/nodes/node_composite_pixelate.cc @@ -5,6 +5,8 @@ * \ingroup cmpnodes */ +#include "COM_node_operation.hh" + #include "node_composite_util.hh" /* **************** Pixelate ******************** */ @@ -17,6 +19,23 @@ static void cmp_node_pixelate_declare(NodeDeclarationBuilder &b) b.add_output<decl::Color>(N_("Color")); } +using namespace blender::realtime_compositor; + +class PixelateOperation : public NodeOperation { + public: + using NodeOperation::NodeOperation; + + void execute() override + { + get_input("Color").pass_through(get_result("Color")); + } +}; + +static NodeOperation *get_compositor_operation(Context &context, DNode node) +{ + return new PixelateOperation(context, node); +} + } // namespace blender::nodes::node_composite_pixelate_cc void register_node_type_cmp_pixelate() @@ -27,6 +46,7 @@ void register_node_type_cmp_pixelate() cmp_node_type_base(&ntype, CMP_NODE_PIXELATE, "Pixelate", NODE_CLASS_OP_FILTER); ntype.declare = file_ns::cmp_node_pixelate_declare; + ntype.get_compositor_operation = file_ns::get_compositor_operation; nodeRegisterType(&ntype); } diff --git a/source/blender/nodes/composite/nodes/node_composite_planetrackdeform.cc b/source/blender/nodes/composite/nodes/node_composite_planetrackdeform.cc index 6557478fc4b..68dc020a02e 100644 --- 
a/source/blender/nodes/composite/nodes/node_composite_planetrackdeform.cc +++ b/source/blender/nodes/composite/nodes/node_composite_planetrackdeform.cc @@ -18,6 +18,8 @@ #include "UI_interface.h" #include "UI_resources.h" +#include "COM_node_operation.hh" + #include "node_composite_util.hh" namespace blender::nodes::node_composite_planetrackdeform_cc { @@ -107,6 +109,24 @@ static void node_composit_buts_planetrackdeform(uiLayout *layout, bContext *C, P } } +using namespace blender::realtime_compositor; + +class PlaneTrackDeformOperation : public NodeOperation { + public: + using NodeOperation::NodeOperation; + + void execute() override + { + get_input("Image").pass_through(get_result("Image")); + get_result("Plane").allocate_invalid(); + } +}; + +static NodeOperation *get_compositor_operation(Context &context, DNode node) +{ + return new PlaneTrackDeformOperation(context, node); +} + } // namespace blender::nodes::node_composite_planetrackdeform_cc void register_node_type_cmp_planetrackdeform() @@ -121,6 +141,7 @@ void register_node_type_cmp_planetrackdeform() ntype.initfunc_api = file_ns::init; node_type_storage( &ntype, "NodePlaneTrackDeformData", node_free_standard_storage, node_copy_standard_storage); + ntype.get_compositor_operation = file_ns::get_compositor_operation; nodeRegisterType(&ntype); } diff --git a/source/blender/nodes/composite/nodes/node_composite_posterize.cc b/source/blender/nodes/composite/nodes/node_composite_posterize.cc index c97035d55ea..1268219e7e2 100644 --- a/source/blender/nodes/composite/nodes/node_composite_posterize.cc +++ b/source/blender/nodes/composite/nodes/node_composite_posterize.cc @@ -5,6 +5,10 @@ * \ingroup cmpnodes */ +#include "GPU_material.h" + +#include "COM_shader_node.hh" + #include "node_composite_util.hh" /* **************** Posterize ******************** */ @@ -13,11 +17,37 @@ namespace blender::nodes::node_composite_posterize_cc { static void cmp_node_posterize_declare(NodeDeclarationBuilder &b) { - 
b.add_input<decl::Color>(N_("Image")).default_value({1.0f, 1.0f, 1.0f, 1.0f}); - b.add_input<decl::Float>(N_("Steps")).default_value(8.0f).min(2.0f).max(1024.0f); + b.add_input<decl::Color>(N_("Image")) + .default_value({1.0f, 1.0f, 1.0f, 1.0f}) + .compositor_domain_priority(0); + b.add_input<decl::Float>(N_("Steps")) + .default_value(8.0f) + .min(2.0f) + .max(1024.0f) + .compositor_domain_priority(1); b.add_output<decl::Color>(N_("Image")); } +using namespace blender::realtime_compositor; + +class PosterizeShaderNode : public ShaderNode { + public: + using ShaderNode::ShaderNode; + + void compile(GPUMaterial *material) override + { + GPUNodeStack *inputs = get_inputs_array(); + GPUNodeStack *outputs = get_outputs_array(); + + GPU_stack_link(material, &bnode(), "node_composite_posterize", inputs, outputs); + } +}; + +static ShaderNode *get_compositor_shader_node(DNode node) +{ + return new PosterizeShaderNode(node); +} + } // namespace blender::nodes::node_composite_posterize_cc void register_node_type_cmp_posterize() @@ -28,6 +58,7 @@ void register_node_type_cmp_posterize() cmp_node_type_base(&ntype, CMP_NODE_POSTERIZE, "Posterize", NODE_CLASS_OP_COLOR); ntype.declare = file_ns::cmp_node_posterize_declare; + ntype.get_compositor_shader_node = file_ns::get_compositor_shader_node; nodeRegisterType(&ntype); } diff --git a/source/blender/nodes/composite/nodes/node_composite_premulkey.cc b/source/blender/nodes/composite/nodes/node_composite_premulkey.cc index 000cc9df90a..c814ea5f738 100644 --- a/source/blender/nodes/composite/nodes/node_composite_premulkey.cc +++ b/source/blender/nodes/composite/nodes/node_composite_premulkey.cc @@ -8,6 +8,10 @@ #include "UI_interface.h" #include "UI_resources.h" +#include "GPU_material.h" + +#include "COM_shader_node.hh" + #include "node_composite_util.hh" /* **************** Premul and Key Alpha Convert ******************** */ @@ -16,7 +20,9 @@ namespace blender::nodes::node_composite_premulkey_cc { static void 
cmp_node_premulkey_declare(NodeDeclarationBuilder &b) { - b.add_input<decl::Color>(N_("Image")).default_value({1.0f, 1.0f, 1.0f, 1.0f}); + b.add_input<decl::Color>(N_("Image")) + .default_value({1.0f, 1.0f, 1.0f, 1.0f}) + .compositor_domain_priority(0); b.add_output<decl::Color>(N_("Image")); } @@ -25,6 +31,36 @@ static void node_composit_buts_premulkey(uiLayout *layout, bContext *UNUSED(C), uiItemR(layout, ptr, "mapping", UI_ITEM_R_SPLIT_EMPTY_NAME, "", ICON_NONE); } +using namespace blender::realtime_compositor; + +class AlphaConvertShaderNode : public ShaderNode { + public: + using ShaderNode::ShaderNode; + + void compile(GPUMaterial *material) override + { + GPUNodeStack *inputs = get_inputs_array(); + GPUNodeStack *outputs = get_outputs_array(); + + if (get_mode() == 0) { + GPU_stack_link(material, &bnode(), "color_alpha_premultiply", inputs, outputs); + return; + } + + GPU_stack_link(material, &bnode(), "color_alpha_unpremultiply", inputs, outputs); + } + + CMPNodeAlphaConvertMode get_mode() + { + return (CMPNodeAlphaConvertMode)bnode().custom1; + } +}; + +static ShaderNode *get_compositor_shader_node(DNode node) +{ + return new AlphaConvertShaderNode(node); +} + } // namespace blender::nodes::node_composite_premulkey_cc void register_node_type_cmp_premulkey() @@ -36,6 +72,7 @@ void register_node_type_cmp_premulkey() cmp_node_type_base(&ntype, CMP_NODE_PREMULKEY, "Alpha Convert", NODE_CLASS_CONVERTER); ntype.declare = file_ns::cmp_node_premulkey_declare; ntype.draw_buttons = file_ns::node_composit_buts_premulkey; + ntype.get_compositor_shader_node = file_ns::get_compositor_shader_node; nodeRegisterType(&ntype); } diff --git a/source/blender/nodes/composite/nodes/node_composite_rgb.cc b/source/blender/nodes/composite/nodes/node_composite_rgb.cc index 5bc4c67dd8e..6f3a00af7e3 100644 --- a/source/blender/nodes/composite/nodes/node_composite_rgb.cc +++ b/source/blender/nodes/composite/nodes/node_composite_rgb.cc @@ -5,6 +5,12 @@ * \ingroup cmpnodes */ +#include 
"BLI_math_vec_types.hh" + +#include "DNA_node_types.h" + +#include "COM_node_operation.hh" + #include "node_composite_util.hh" /* **************** RGB ******************** */ @@ -16,6 +22,29 @@ static void cmp_node_rgb_declare(NodeDeclarationBuilder &b) b.add_output<decl::Color>(N_("RGBA")).default_value({0.5f, 0.5f, 0.5f, 1.0f}); } +using namespace blender::realtime_compositor; + +class RGBOperation : public NodeOperation { + public: + using NodeOperation::NodeOperation; + + void execute() override + { + Result &result = get_result("RGBA"); + result.allocate_single_value(); + + const bNodeSocket *socket = static_cast<bNodeSocket *>(bnode().outputs.first); + float4 color = float4(static_cast<bNodeSocketValueRGBA *>(socket->default_value)->value); + + result.set_color_value(color); + } +}; + +static NodeOperation *get_compositor_operation(Context &context, DNode node) +{ + return new RGBOperation(context, node); +} + } // namespace blender::nodes::node_composite_rgb_cc void register_node_type_cmp_rgb() @@ -27,6 +56,7 @@ void register_node_type_cmp_rgb() cmp_node_type_base(&ntype, CMP_NODE_RGB, "RGB", NODE_CLASS_INPUT); ntype.declare = file_ns::cmp_node_rgb_declare; node_type_size_preset(&ntype, NODE_SIZE_SMALL); + ntype.get_compositor_operation = file_ns::get_compositor_operation; nodeRegisterType(&ntype); } diff --git a/source/blender/nodes/composite/nodes/node_composite_rotate.cc b/source/blender/nodes/composite/nodes/node_composite_rotate.cc index a083bc1837b..35caa3cd242 100644 --- a/source/blender/nodes/composite/nodes/node_composite_rotate.cc +++ b/source/blender/nodes/composite/nodes/node_composite_rotate.cc @@ -5,9 +5,14 @@ * \ingroup cmpnodes */ +#include "BLI_assert.h" +#include "BLI_float3x3.hh" + #include "UI_interface.h" #include "UI_resources.h" +#include "COM_node_operation.hh" + #include "node_composite_util.hh" /* **************** Rotate ******************** */ @@ -16,12 +21,15 @@ namespace blender::nodes::node_composite_rotate_cc { static void 
cmp_node_rotate_declare(NodeDeclarationBuilder &b) { - b.add_input<decl::Color>(N_("Image")).default_value({1.0f, 1.0f, 1.0f, 1.0f}); + b.add_input<decl::Color>(N_("Image")) + .default_value({1.0f, 1.0f, 1.0f, 1.0f}) + .compositor_domain_priority(0); b.add_input<decl::Float>(N_("Degr")) .default_value(0.0f) .min(-10000.0f) .max(10000.0f) - .subtype(PROP_ANGLE); + .subtype(PROP_ANGLE) + .compositor_expects_single_value(); b.add_output<decl::Color>(N_("Image")); } @@ -35,6 +43,47 @@ static void node_composit_buts_rotate(uiLayout *layout, bContext *UNUSED(C), Poi uiItemR(layout, ptr, "filter_type", UI_ITEM_R_SPLIT_EMPTY_NAME, "", ICON_NONE); } +using namespace blender::realtime_compositor; + +class RotateOperation : public NodeOperation { + public: + using NodeOperation::NodeOperation; + + void execute() override + { + Result &input = get_input("Image"); + Result &result = get_result("Image"); + input.pass_through(result); + + const float rotation = get_input("Degr").get_float_value_default(0.0f); + + const float3x3 transformation = float3x3::from_rotation(rotation); + + result.transform(transformation); + result.get_realization_options().interpolation = get_interpolation(); + } + + Interpolation get_interpolation() + { + switch (bnode().custom1) { + case 0: + return Interpolation::Nearest; + case 1: + return Interpolation::Bilinear; + case 2: + return Interpolation::Bicubic; + } + + BLI_assert_unreachable(); + return Interpolation::Nearest; + } +}; + +static NodeOperation *get_compositor_operation(Context &context, DNode node) +{ + return new RotateOperation(context, node); +} + } // namespace blender::nodes::node_composite_rotate_cc void register_node_type_cmp_rotate() @@ -47,6 +96,7 @@ void register_node_type_cmp_rotate() ntype.declare = file_ns::cmp_node_rotate_declare; ntype.draw_buttons = file_ns::node_composit_buts_rotate; node_type_init(&ntype, file_ns::node_composit_init_rotate); + ntype.get_compositor_operation = file_ns::get_compositor_operation; 
nodeRegisterType(&ntype); } diff --git a/source/blender/nodes/composite/nodes/node_composite_scale.cc b/source/blender/nodes/composite/nodes/node_composite_scale.cc index b2b42a3613c..8b43ae8c9ca 100644 --- a/source/blender/nodes/composite/nodes/node_composite_scale.cc +++ b/source/blender/nodes/composite/nodes/node_composite_scale.cc @@ -10,6 +10,8 @@ #include "UI_interface.h" #include "UI_resources.h" +#include "COM_node_operation.hh" + #include "node_composite_util.hh" /* **************** Scale ******************** */ @@ -55,6 +57,23 @@ static void node_composit_buts_scale(uiLayout *layout, bContext *UNUSED(C), Poin } } +using namespace blender::realtime_compositor; + +class ScaleOperation : public NodeOperation { + public: + using NodeOperation::NodeOperation; + + void execute() override + { + get_input("Image").pass_through(get_result("Image")); + } +}; + +static NodeOperation *get_compositor_operation(Context &context, DNode node) +{ + return new ScaleOperation(context, node); +} + } // namespace blender::nodes::node_composite_scale_cc void register_node_type_cmp_scale() @@ -67,6 +86,7 @@ void register_node_type_cmp_scale() ntype.declare = file_ns::cmp_node_scale_declare; ntype.draw_buttons = file_ns::node_composit_buts_scale; node_type_update(&ntype, file_ns::node_composite_update_scale); + ntype.get_compositor_operation = file_ns::get_compositor_operation; nodeRegisterType(&ntype); } diff --git a/source/blender/nodes/composite/nodes/node_composite_scene_time.cc b/source/blender/nodes/composite/nodes/node_composite_scene_time.cc index 20bafb0d3d4..1f5317378bb 100644 --- a/source/blender/nodes/composite/nodes/node_composite_scene_time.cc +++ b/source/blender/nodes/composite/nodes/node_composite_scene_time.cc @@ -3,6 +3,8 @@ * \ingroup cmpnodes */ +#include "COM_node_operation.hh" + #include "node_composite_util.hh" namespace blender::nodes { @@ -13,6 +15,38 @@ static void cmp_node_scene_time_declare(NodeDeclarationBuilder &b) 
b.add_output<decl::Float>(N_("Frame")); } +using namespace blender::realtime_compositor; + +class SceneTimeOperation : public NodeOperation { + public: + using NodeOperation::NodeOperation; + + void execute() override + { + execute_seconds(); + execute_frame(); + } + + void execute_seconds() + { + Result &result = get_result("Seconds"); + result.allocate_single_value(); + result.set_float_value(context().get_time()); + } + + void execute_frame() + { + Result &result = get_result("Frame"); + result.allocate_single_value(); + result.set_float_value(static_cast<float>(context().get_frame_number())); + } +}; + +static NodeOperation *get_compositor_operation(Context &context, DNode node) +{ + return new SceneTimeOperation(context, node); +} + } // namespace blender::nodes void register_node_type_cmp_scene_time() @@ -21,5 +55,7 @@ void register_node_type_cmp_scene_time() cmp_node_type_base(&ntype, CMP_NODE_SCENE_TIME, "Scene Time", NODE_CLASS_INPUT); ntype.declare = blender::nodes::cmp_node_scene_time_declare; + ntype.get_compositor_operation = blender::nodes::get_compositor_operation; + nodeRegisterType(&ntype); } diff --git a/source/blender/nodes/composite/nodes/node_composite_sepcomb_color.cc b/source/blender/nodes/composite/nodes/node_composite_sepcomb_color.cc index b253656a628..d1f0b7977f8 100644 --- a/source/blender/nodes/composite/nodes/node_composite_sepcomb_color.cc +++ b/source/blender/nodes/composite/nodes/node_composite_sepcomb_color.cc @@ -1,5 +1,11 @@ /* SPDX-License-Identifier: GPL-2.0-or-later */ +#include "BLI_assert.h" + +#include "GPU_material.h" + +#include "COM_shader_node.hh" + #include "node_composite_util.hh" static void node_cmp_combsep_color_init(bNodeTree *UNUSED(ntree), bNode *node) @@ -58,7 +64,9 @@ namespace blender::nodes::node_composite_separate_color_cc { static void cmp_node_separate_color_declare(NodeDeclarationBuilder &b) { - b.add_input<decl::Color>(N_("Image")).default_value({1.0f, 1.0f, 1.0f, 1.0f}); + 
b.add_input<decl::Color>(N_("Image")) + .default_value({1.0f, 1.0f, 1.0f, 1.0f}) + .compositor_domain_priority(0); b.add_output<decl::Float>(N_("Red")); b.add_output<decl::Float>(N_("Green")); b.add_output<decl::Float>(N_("Blue")); @@ -71,6 +79,57 @@ static void cmp_node_separate_color_update(bNodeTree *UNUSED(ntree), bNode *node node_cmp_combsep_color_label(&node->outputs, (CMPNodeCombSepColorMode)storage->mode); } +using namespace blender::realtime_compositor; + +class SeparateColorShaderNode : public ShaderNode { + public: + using ShaderNode::ShaderNode; + + void compile(GPUMaterial *material) override + { + GPUNodeStack *inputs = get_inputs_array(); + GPUNodeStack *outputs = get_outputs_array(); + + GPU_stack_link(material, &bnode(), get_shader_function_name(), inputs, outputs); + } + + NodeCMPCombSepColor *get_node_combine_separate_color() + { + return static_cast<NodeCMPCombSepColor *>(bnode().storage); + } + + const char *get_shader_function_name() + { + switch (get_node_combine_separate_color()->mode) { + case CMP_NODE_COMBSEP_COLOR_RGB: + return "node_composite_separate_rgba"; + case CMP_NODE_COMBSEP_COLOR_HSV: + return "node_composite_separate_hsva"; + case CMP_NODE_COMBSEP_COLOR_HSL: + return "node_composite_separate_hsla"; + case CMP_NODE_COMBSEP_COLOR_YUV: + return "node_composite_separate_yuva_itu_709"; + case CMP_NODE_COMBSEP_COLOR_YCC: + switch (get_node_combine_separate_color()->ycc_mode) { + case BLI_YCC_ITU_BT601: + return "node_composite_separate_ycca_itu_601"; + case BLI_YCC_ITU_BT709: + return "node_composite_separate_ycca_itu_709"; + case BLI_YCC_JFIF_0_255: + return "node_composite_separate_ycca_jpeg"; + } + } + + BLI_assert_unreachable(); + return nullptr; + } +}; + +static ShaderNode *get_compositor_shader_node(DNode node) +{ + return new SeparateColorShaderNode(node); +} + } // namespace blender::nodes::node_composite_separate_color_cc void register_node_type_cmp_separate_color() @@ -85,6 +144,7 @@ void 
register_node_type_cmp_separate_color() node_type_storage( &ntype, "NodeCMPCombSepColor", node_free_standard_storage, node_copy_standard_storage); node_type_update(&ntype, file_ns::cmp_node_separate_color_update); + ntype.get_compositor_shader_node = file_ns::get_compositor_shader_node; nodeRegisterType(&ntype); } @@ -95,22 +155,30 @@ namespace blender::nodes::node_composite_combine_color_cc { static void cmp_node_combine_color_declare(NodeDeclarationBuilder &b) { - b.add_input<decl::Float>(N_("Red")).default_value(0.0f).min(0.0f).max(1.0f).subtype(PROP_FACTOR); + b.add_input<decl::Float>(N_("Red")) + .default_value(0.0f) + .min(0.0f) + .max(1.0f) + .subtype(PROP_FACTOR) + .compositor_domain_priority(0); b.add_input<decl::Float>(N_("Green")) .default_value(0.0f) .min(0.0f) .max(1.0f) - .subtype(PROP_FACTOR); + .subtype(PROP_FACTOR) + .compositor_domain_priority(1); b.add_input<decl::Float>(N_("Blue")) .default_value(0.0f) .min(0.0f) .max(1.0f) - .subtype(PROP_FACTOR); + .subtype(PROP_FACTOR) + .compositor_domain_priority(2); b.add_input<decl::Float>(N_("Alpha")) .default_value(1.0f) .min(0.0f) .max(1.0f) - .subtype(PROP_FACTOR); + .subtype(PROP_FACTOR) + .compositor_domain_priority(3); b.add_output<decl::Color>(N_("Image")); } @@ -120,6 +188,57 @@ static void cmp_node_combine_color_update(bNodeTree *UNUSED(ntree), bNode *node) node_cmp_combsep_color_label(&node->inputs, (CMPNodeCombSepColorMode)storage->mode); } +using namespace blender::realtime_compositor; + +class CombineColorShaderNode : public ShaderNode { + public: + using ShaderNode::ShaderNode; + + void compile(GPUMaterial *material) override + { + GPUNodeStack *inputs = get_inputs_array(); + GPUNodeStack *outputs = get_outputs_array(); + + GPU_stack_link(material, &bnode(), get_shader_function_name(), inputs, outputs); + } + + NodeCMPCombSepColor *get_node_combine_separate_color() + { + return static_cast<NodeCMPCombSepColor *>(bnode().storage); + } + + const char *get_shader_function_name() + { + switch 
(get_node_combine_separate_color()->mode) { + case CMP_NODE_COMBSEP_COLOR_RGB: + return "node_composite_combine_rgba"; + case CMP_NODE_COMBSEP_COLOR_HSV: + return "node_composite_combine_hsva"; + case CMP_NODE_COMBSEP_COLOR_HSL: + return "node_composite_combine_hsla"; + case CMP_NODE_COMBSEP_COLOR_YUV: + return "node_composite_combine_yuva_itu_709"; + case CMP_NODE_COMBSEP_COLOR_YCC: + switch (get_node_combine_separate_color()->ycc_mode) { + case BLI_YCC_ITU_BT601: + return "node_composite_combine_ycca_itu_601"; + case BLI_YCC_ITU_BT709: + return "node_composite_combine_ycca_itu_709"; + case BLI_YCC_JFIF_0_255: + return "node_composite_combine_ycca_jpeg"; + } + } + + BLI_assert_unreachable(); + return nullptr; + } +}; + +static ShaderNode *get_compositor_shader_node(DNode node) +{ + return new CombineColorShaderNode(node); +} + } // namespace blender::nodes::node_composite_combine_color_cc void register_node_type_cmp_combine_color() @@ -134,6 +253,7 @@ void register_node_type_cmp_combine_color() node_type_storage( &ntype, "NodeCMPCombSepColor", node_free_standard_storage, node_copy_standard_storage); node_type_update(&ntype, file_ns::cmp_node_combine_color_update); + ntype.get_compositor_shader_node = file_ns::get_compositor_shader_node; nodeRegisterType(&ntype); } diff --git a/source/blender/nodes/composite/nodes/node_composite_sepcomb_hsva.cc b/source/blender/nodes/composite/nodes/node_composite_sepcomb_hsva.cc index a169f7e0dd3..b655c0db106 100644 --- a/source/blender/nodes/composite/nodes/node_composite_sepcomb_hsva.cc +++ b/source/blender/nodes/composite/nodes/node_composite_sepcomb_hsva.cc @@ -5,60 +5,112 @@ * \ingroup cmpnodes */ +#include "GPU_material.h" + +#include "COM_shader_node.hh" + #include "node_composite_util.hh" /* **************** SEPARATE HSVA ******************** */ -namespace blender::nodes::node_composite_sepcomb_hsva_cc { +namespace blender::nodes::node_composite_separate_hsva_cc { static void cmp_node_sephsva_declare(NodeDeclarationBuilder 
&b) { - b.add_input<decl::Color>(N_("Image")).default_value({1.0f, 1.0f, 1.0f, 1.0f}); + b.add_input<decl::Color>(N_("Image")) + .default_value({1.0f, 1.0f, 1.0f, 1.0f}) + .compositor_domain_priority(0); b.add_output<decl::Float>(N_("H")); b.add_output<decl::Float>(N_("S")); b.add_output<decl::Float>(N_("V")); b.add_output<decl::Float>(N_("A")); } -} // namespace blender::nodes::node_composite_sepcomb_hsva_cc +using namespace blender::realtime_compositor; + +class SeparateHSVAShaderNode : public ShaderNode { + public: + using ShaderNode::ShaderNode; + + void compile(GPUMaterial *material) override + { + GPUNodeStack *inputs = get_inputs_array(); + GPUNodeStack *outputs = get_outputs_array(); + + GPU_stack_link(material, &bnode(), "node_composite_separate_hsva", inputs, outputs); + } +}; + +static ShaderNode *get_compositor_shader_node(DNode node) +{ + return new SeparateHSVAShaderNode(node); +} + +} // namespace blender::nodes::node_composite_separate_hsva_cc void register_node_type_cmp_sephsva() { - namespace file_ns = blender::nodes::node_composite_sepcomb_hsva_cc; + namespace file_ns = blender::nodes::node_composite_separate_hsva_cc; static bNodeType ntype; cmp_node_type_base(&ntype, CMP_NODE_SEPHSVA_LEGACY, "Separate HSVA", NODE_CLASS_CONVERTER); ntype.declare = file_ns::cmp_node_sephsva_declare; ntype.gather_link_search_ops = nullptr; + ntype.get_compositor_shader_node = file_ns::get_compositor_shader_node; nodeRegisterType(&ntype); } /* **************** COMBINE HSVA ******************** */ -namespace blender::nodes::node_composite_sepcomb_hsva_cc { +namespace blender::nodes::node_composite_combine_hsva_cc { static void cmp_node_combhsva_declare(NodeDeclarationBuilder &b) { - b.add_input<decl::Float>(N_("H")).min(0.0f).max(1.0f); - b.add_input<decl::Float>(N_("S")).min(0.0f).max(1.0f); - b.add_input<decl::Float>(N_("V")).min(0.0f).max(1.0f); - b.add_input<decl::Float>(N_("A")).default_value(1.0f).min(0.0f).max(1.0f); + 
b.add_input<decl::Float>(N_("H")).min(0.0f).max(1.0f).compositor_domain_priority(0); + b.add_input<decl::Float>(N_("S")).min(0.0f).max(1.0f).compositor_domain_priority(1); + b.add_input<decl::Float>(N_("V")).min(0.0f).max(1.0f).compositor_domain_priority(2); + b.add_input<decl::Float>(N_("A")) + .default_value(1.0f) + .min(0.0f) + .max(1.0f) + .compositor_domain_priority(3); b.add_output<decl::Color>(N_("Image")); } -} // namespace blender::nodes::node_composite_sepcomb_hsva_cc +using namespace blender::realtime_compositor; + +class CombineHSVAShaderNode : public ShaderNode { + public: + using ShaderNode::ShaderNode; + + void compile(GPUMaterial *material) override + { + GPUNodeStack *inputs = get_inputs_array(); + GPUNodeStack *outputs = get_outputs_array(); + + GPU_stack_link(material, &bnode(), "node_composite_combine_hsva", inputs, outputs); + } +}; + +static ShaderNode *get_compositor_shader_node(DNode node) +{ + return new CombineHSVAShaderNode(node); +} + +} // namespace blender::nodes::node_composite_combine_hsva_cc void register_node_type_cmp_combhsva() { - namespace file_ns = blender::nodes::node_composite_sepcomb_hsva_cc; + namespace file_ns = blender::nodes::node_composite_combine_hsva_cc; static bNodeType ntype; cmp_node_type_base(&ntype, CMP_NODE_COMBHSVA_LEGACY, "Combine HSVA", NODE_CLASS_CONVERTER); ntype.declare = file_ns::cmp_node_combhsva_declare; ntype.gather_link_search_ops = nullptr; + ntype.get_compositor_shader_node = file_ns::get_compositor_shader_node; nodeRegisterType(&ntype); } diff --git a/source/blender/nodes/composite/nodes/node_composite_sepcomb_rgba.cc b/source/blender/nodes/composite/nodes/node_composite_sepcomb_rgba.cc index a243500b56d..1f4c9fd153f 100644 --- a/source/blender/nodes/composite/nodes/node_composite_sepcomb_rgba.cc +++ b/source/blender/nodes/composite/nodes/node_composite_sepcomb_rgba.cc @@ -5,59 +5,112 @@ * \ingroup cmpnodes */ +#include "GPU_material.h" + +#include "COM_shader_node.hh" + #include 
"node_composite_util.hh" /* **************** SEPARATE RGBA ******************** */ -namespace blender::nodes::node_composite_sepcomb_rgba_cc { + +namespace blender::nodes::node_composite_separate_rgba_cc { static void cmp_node_seprgba_declare(NodeDeclarationBuilder &b) { - b.add_input<decl::Color>(N_("Image")).default_value({1.0f, 1.0f, 1.0f, 1.0f}); + b.add_input<decl::Color>(N_("Image")) + .default_value({1.0f, 1.0f, 1.0f, 1.0f}) + .compositor_domain_priority(0); b.add_output<decl::Float>(N_("R")); b.add_output<decl::Float>(N_("G")); b.add_output<decl::Float>(N_("B")); b.add_output<decl::Float>(N_("A")); } -} // namespace blender::nodes::node_composite_sepcomb_rgba_cc +using namespace blender::realtime_compositor; + +class SeparateRGBAShaderNode : public ShaderNode { + public: + using ShaderNode::ShaderNode; + + void compile(GPUMaterial *material) override + { + GPUNodeStack *inputs = get_inputs_array(); + GPUNodeStack *outputs = get_outputs_array(); + + GPU_stack_link(material, &bnode(), "node_composite_separate_rgba", inputs, outputs); + } +}; + +static ShaderNode *get_compositor_shader_node(DNode node) +{ + return new SeparateRGBAShaderNode(node); +} + +} // namespace blender::nodes::node_composite_separate_rgba_cc void register_node_type_cmp_seprgba() { - namespace file_ns = blender::nodes::node_composite_sepcomb_rgba_cc; + namespace file_ns = blender::nodes::node_composite_separate_rgba_cc; static bNodeType ntype; cmp_node_type_base(&ntype, CMP_NODE_SEPRGBA_LEGACY, "Separate RGBA", NODE_CLASS_CONVERTER); ntype.declare = file_ns::cmp_node_seprgba_declare; ntype.gather_link_search_ops = nullptr; + ntype.get_compositor_shader_node = file_ns::get_compositor_shader_node; nodeRegisterType(&ntype); } /* **************** COMBINE RGBA ******************** */ -namespace blender::nodes::node_composite_sepcomb_rgba_cc { +namespace blender::nodes::node_composite_combine_rgba_cc { static void cmp_node_combrgba_declare(NodeDeclarationBuilder &b) { - 
b.add_input<decl::Float>(N_("R")).min(0.0f).max(1.0f); - b.add_input<decl::Float>(N_("G")).min(0.0f).max(1.0f); - b.add_input<decl::Float>(N_("B")).min(0.0f).max(1.0f); - b.add_input<decl::Float>(N_("A")).default_value(1.0f).min(0.0f).max(1.0f); + b.add_input<decl::Float>(N_("R")).min(0.0f).max(1.0f).compositor_domain_priority(0); + b.add_input<decl::Float>(N_("G")).min(0.0f).max(1.0f).compositor_domain_priority(1); + b.add_input<decl::Float>(N_("B")).min(0.0f).max(1.0f).compositor_domain_priority(2); + b.add_input<decl::Float>(N_("A")) + .default_value(1.0f) + .min(0.0f) + .max(1.0f) + .compositor_domain_priority(3); b.add_output<decl::Color>(N_("Image")); } -} // namespace blender::nodes::node_composite_sepcomb_rgba_cc +using namespace blender::realtime_compositor; + +class CombineRGBAShaderNode : public ShaderNode { + public: + using ShaderNode::ShaderNode; + + void compile(GPUMaterial *material) override + { + GPUNodeStack *inputs = get_inputs_array(); + GPUNodeStack *outputs = get_outputs_array(); + + GPU_stack_link(material, &bnode(), "node_composite_combine_rgba", inputs, outputs); + } +}; + +static ShaderNode *get_compositor_shader_node(DNode node) +{ + return new CombineRGBAShaderNode(node); +} + +} // namespace blender::nodes::node_composite_combine_rgba_cc void register_node_type_cmp_combrgba() { - namespace file_ns = blender::nodes::node_composite_sepcomb_rgba_cc; + namespace file_ns = blender::nodes::node_composite_combine_rgba_cc; static bNodeType ntype; cmp_node_type_base(&ntype, CMP_NODE_COMBRGBA_LEGACY, "Combine RGBA", NODE_CLASS_CONVERTER); ntype.declare = file_ns::cmp_node_combrgba_declare; ntype.gather_link_search_ops = nullptr; + ntype.get_compositor_shader_node = file_ns::get_compositor_shader_node; nodeRegisterType(&ntype); } diff --git a/source/blender/nodes/composite/nodes/node_composite_sepcomb_xyz.cc b/source/blender/nodes/composite/nodes/node_composite_sepcomb_xyz.cc index 4979c376cab..e288e698808 100644 --- 
a/source/blender/nodes/composite/nodes/node_composite_sepcomb_xyz.cc +++ b/source/blender/nodes/composite/nodes/node_composite_sepcomb_xyz.cc @@ -5,10 +5,15 @@ * \ingroup cmpnodes */ +#include "GPU_material.h" + +#include "COM_shader_node.hh" + #include "node_composite_util.hh" /* **************** SEPARATE XYZ ******************** */ -namespace blender::nodes { + +namespace blender::nodes::node_composite_separate_xyz_cc { static void cmp_node_separate_xyz_declare(NodeDeclarationBuilder &b) { @@ -18,21 +23,44 @@ static void cmp_node_separate_xyz_declare(NodeDeclarationBuilder &b) b.add_output<decl::Float>("Z"); } -} // namespace blender::nodes +using namespace blender::realtime_compositor; + +class SeparateXYZShaderNode : public ShaderNode { + public: + using ShaderNode::ShaderNode; + + void compile(GPUMaterial *material) override + { + GPUNodeStack *inputs = get_inputs_array(); + GPUNodeStack *outputs = get_outputs_array(); + + GPU_stack_link(material, &bnode(), "node_composite_separate_xyz", inputs, outputs); + } +}; + +static ShaderNode *get_compositor_shader_node(DNode node) +{ + return new SeparateXYZShaderNode(node); +} + +} // namespace blender::nodes::node_composite_separate_xyz_cc void register_node_type_cmp_separate_xyz() { + namespace file_ns = blender::nodes::node_composite_separate_xyz_cc; + static bNodeType ntype; cmp_node_type_base(&ntype, CMP_NODE_SEPARATE_XYZ, "Separate XYZ", NODE_CLASS_CONVERTER); - ntype.declare = blender::nodes::cmp_node_separate_xyz_declare; + ntype.declare = file_ns::cmp_node_separate_xyz_declare; + ntype.get_compositor_shader_node = file_ns::get_compositor_shader_node; nodeRegisterType(&ntype); } /* **************** COMBINE XYZ ******************** */ -namespace blender::nodes { +namespace blender::nodes::node_composite_combine_xyz_cc { static void cmp_node_combine_xyz_declare(NodeDeclarationBuilder &b) { @@ -42,14 +70,37 @@ static void cmp_node_combine_xyz_declare(NodeDeclarationBuilder &b) 
b.add_output<decl::Vector>("Vector"); } -} // namespace blender::nodes +using namespace blender::realtime_compositor; + +class CombineXYZShaderNode : public ShaderNode { + public: + using ShaderNode::ShaderNode; + + void compile(GPUMaterial *material) override + { + GPUNodeStack *inputs = get_inputs_array(); + GPUNodeStack *outputs = get_outputs_array(); + + GPU_stack_link(material, &bnode(), "node_composite_combine_xyz", inputs, outputs); + } +}; + +static ShaderNode *get_compositor_shader_node(DNode node) +{ + return new CombineXYZShaderNode(node); +} + +} // namespace blender::nodes::node_composite_combine_xyz_cc void register_node_type_cmp_combine_xyz() { + namespace file_ns = blender::nodes::node_composite_combine_xyz_cc; + static bNodeType ntype; cmp_node_type_base(&ntype, CMP_NODE_COMBINE_XYZ, "Combine XYZ", NODE_CLASS_CONVERTER); - ntype.declare = blender::nodes::cmp_node_combine_xyz_declare; + ntype.declare = file_ns::cmp_node_combine_xyz_declare; + ntype.get_compositor_shader_node = file_ns::get_compositor_shader_node; nodeRegisterType(&ntype); } diff --git a/source/blender/nodes/composite/nodes/node_composite_sepcomb_ycca.cc b/source/blender/nodes/composite/nodes/node_composite_sepcomb_ycca.cc index 51d3c18d238..bebe6abe115 100644 --- a/source/blender/nodes/composite/nodes/node_composite_sepcomb_ycca.cc +++ b/source/blender/nodes/composite/nodes/node_composite_sepcomb_ycca.cc @@ -5,15 +5,23 @@ * \ingroup cmpnodes */ +#include "BLI_assert.h" + +#include "GPU_material.h" + +#include "COM_shader_node.hh" + #include "node_composite_util.hh" /* **************** SEPARATE YCCA ******************** */ -namespace blender::nodes::node_composite_sepcomb_ycca_cc { +namespace blender::nodes::node_composite_separate_ycca_cc { static void cmp_node_sepycca_declare(NodeDeclarationBuilder &b) { - b.add_input<decl::Color>(N_("Image")).default_value({1.0f, 1.0f, 1.0f, 1.0f}); + b.add_input<decl::Color>(N_("Image")) + .default_value({1.0f, 1.0f, 1.0f, 1.0f}) + 
.compositor_domain_priority(0); b.add_output<decl::Float>(N_("Y")); b.add_output<decl::Float>(N_("Cb")); b.add_output<decl::Float>(N_("Cr")); @@ -25,11 +33,51 @@ static void node_composit_init_mode_sepycca(bNodeTree *UNUSED(ntree), bNode *nod node->custom1 = 1; /* BLI_YCC_ITU_BT709 */ } -} // namespace blender::nodes::node_composite_sepcomb_ycca_cc +using namespace blender::realtime_compositor; + +class SeparateYCCAShaderNode : public ShaderNode { + public: + using ShaderNode::ShaderNode; + + void compile(GPUMaterial *material) override + { + GPUNodeStack *inputs = get_inputs_array(); + GPUNodeStack *outputs = get_outputs_array(); + + GPU_stack_link(material, &bnode(), get_shader_function_name(), inputs, outputs); + } + + int get_mode() + { + return bnode().custom1; + } + + const char *get_shader_function_name() + { + switch (get_mode()) { + case BLI_YCC_ITU_BT601: + return "node_composite_separate_ycca_itu_601"; + case BLI_YCC_ITU_BT709: + return "node_composite_separate_ycca_itu_709"; + case BLI_YCC_JFIF_0_255: + return "node_composite_separate_ycca_jpeg"; + } + + BLI_assert_unreachable(); + return nullptr; + } +}; + +static ShaderNode *get_compositor_shader_node(DNode node) +{ + return new SeparateYCCAShaderNode(node); +} + +} // namespace blender::nodes::node_composite_separate_ycca_cc void register_node_type_cmp_sepycca() { - namespace file_ns = blender::nodes::node_composite_sepcomb_ycca_cc; + namespace file_ns = blender::nodes::node_composite_separate_ycca_cc; static bNodeType ntype; @@ -37,20 +85,33 @@ void register_node_type_cmp_sepycca() ntype.declare = file_ns::cmp_node_sepycca_declare; node_type_init(&ntype, file_ns::node_composit_init_mode_sepycca); ntype.gather_link_search_ops = nullptr; + ntype.get_compositor_shader_node = file_ns::get_compositor_shader_node; nodeRegisterType(&ntype); } /* **************** COMBINE YCCA ******************** */ -namespace blender::nodes::node_composite_sepcomb_ycca_cc { +namespace 
blender::nodes::node_composite_combine_ycca_cc { static void cmp_node_combycca_declare(NodeDeclarationBuilder &b) { - b.add_input<decl::Float>(N_("Y")).min(0.0f).max(1.0f); - b.add_input<decl::Float>(N_("Cb")).default_value(0.5f).min(0.0f).max(1.0f); - b.add_input<decl::Float>(N_("Cr")).default_value(0.5f).min(0.0f).max(1.0f); - b.add_input<decl::Float>(N_("A")).default_value(1.0f).min(0.0f).max(1.0f); + b.add_input<decl::Float>(N_("Y")).min(0.0f).max(1.0f).compositor_domain_priority(0); + b.add_input<decl::Float>(N_("Cb")) + .default_value(0.5f) + .min(0.0f) + .max(1.0f) + .compositor_domain_priority(1); + b.add_input<decl::Float>(N_("Cr")) + .default_value(0.5f) + .min(0.0f) + .max(1.0f) + .compositor_domain_priority(2); + b.add_input<decl::Float>(N_("A")) + .default_value(1.0f) + .min(0.0f) + .max(1.0f) + .compositor_domain_priority(3); b.add_output<decl::Color>(N_("Image")); } @@ -59,11 +120,51 @@ static void node_composit_init_mode_combycca(bNodeTree *UNUSED(ntree), bNode *no node->custom1 = 1; /* BLI_YCC_ITU_BT709 */ } -} // namespace blender::nodes::node_composite_sepcomb_ycca_cc +using namespace blender::realtime_compositor; + +class CombineYCCAShaderNode : public ShaderNode { + public: + using ShaderNode::ShaderNode; + + void compile(GPUMaterial *material) override + { + GPUNodeStack *inputs = get_inputs_array(); + GPUNodeStack *outputs = get_outputs_array(); + + GPU_stack_link(material, &bnode(), get_shader_function_name(), inputs, outputs); + } + + int get_mode() + { + return bnode().custom1; + } + + const char *get_shader_function_name() + { + switch (get_mode()) { + case BLI_YCC_ITU_BT601: + return "node_composite_combine_ycca_itu_601"; + case BLI_YCC_ITU_BT709: + return "node_composite_combine_ycca_itu_709"; + case BLI_YCC_JFIF_0_255: + return "node_composite_combine_ycca_jpeg"; + } + + BLI_assert_unreachable(); + return nullptr; + } +}; + +static ShaderNode *get_compositor_shader_node(DNode node) +{ + return new CombineYCCAShaderNode(node); +} + +} 
// namespace blender::nodes::node_composite_combine_ycca_cc void register_node_type_cmp_combycca() { - namespace file_ns = blender::nodes::node_composite_sepcomb_ycca_cc; + namespace file_ns = blender::nodes::node_composite_combine_ycca_cc; static bNodeType ntype; @@ -71,6 +172,7 @@ void register_node_type_cmp_combycca() ntype.declare = file_ns::cmp_node_combycca_declare; node_type_init(&ntype, file_ns::node_composit_init_mode_combycca); ntype.gather_link_search_ops = nullptr; + ntype.get_compositor_shader_node = file_ns::get_compositor_shader_node; nodeRegisterType(&ntype); } diff --git a/source/blender/nodes/composite/nodes/node_composite_sepcomb_yuva.cc b/source/blender/nodes/composite/nodes/node_composite_sepcomb_yuva.cc index 4acd2294114..1f0eb04cfc3 100644 --- a/source/blender/nodes/composite/nodes/node_composite_sepcomb_yuva.cc +++ b/source/blender/nodes/composite/nodes/node_composite_sepcomb_yuva.cc @@ -5,60 +5,112 @@ * \ingroup cmpnodes */ +#include "GPU_material.h" + +#include "COM_shader_node.hh" + #include "node_composite_util.hh" /* **************** SEPARATE YUVA ******************** */ -namespace blender::nodes::node_composite_sepcomb_yuva_cc { +namespace blender::nodes::node_composite_separate_yuva_cc { static void cmp_node_sepyuva_declare(NodeDeclarationBuilder &b) { - b.add_input<decl::Color>(N_("Image")).default_value({1.0f, 1.0f, 1.0f, 1.0f}); + b.add_input<decl::Color>(N_("Image")) + .default_value({1.0f, 1.0f, 1.0f, 1.0f}) + .compositor_domain_priority(0); b.add_output<decl::Float>(N_("Y")); b.add_output<decl::Float>(N_("U")); b.add_output<decl::Float>(N_("V")); b.add_output<decl::Float>(N_("A")); } -} // namespace blender::nodes::node_composite_sepcomb_yuva_cc +using namespace blender::realtime_compositor; + +class SeparateYUVAShaderNode : public ShaderNode { + public: + using ShaderNode::ShaderNode; + + void compile(GPUMaterial *material) override + { + GPUNodeStack *inputs = get_inputs_array(); + GPUNodeStack *outputs = get_outputs_array(); 
+ + GPU_stack_link(material, &bnode(), "node_composite_separate_yuva_itu_709", inputs, outputs); + } +}; + +static ShaderNode *get_compositor_shader_node(DNode node) +{ + return new SeparateYUVAShaderNode(node); +} + +} // namespace blender::nodes::node_composite_separate_yuva_cc void register_node_type_cmp_sepyuva() { - namespace file_ns = blender::nodes::node_composite_sepcomb_yuva_cc; + namespace file_ns = blender::nodes::node_composite_separate_yuva_cc; static bNodeType ntype; cmp_node_type_base(&ntype, CMP_NODE_SEPYUVA_LEGACY, "Separate YUVA", NODE_CLASS_CONVERTER); ntype.declare = file_ns::cmp_node_sepyuva_declare; ntype.gather_link_search_ops = nullptr; + ntype.get_compositor_shader_node = file_ns::get_compositor_shader_node; nodeRegisterType(&ntype); } /* **************** COMBINE YUVA ******************** */ -namespace blender::nodes::node_composite_sepcomb_yuva_cc { +namespace blender::nodes::node_composite_combine_yuva_cc { static void cmp_node_combyuva_declare(NodeDeclarationBuilder &b) { - b.add_input<decl::Float>(N_("Y")).min(0.0f).max(1.0f); - b.add_input<decl::Float>(N_("U")).min(0.0f).max(1.0f); - b.add_input<decl::Float>(N_("V")).min(0.0f).max(1.0f); - b.add_input<decl::Float>(N_("A")).default_value(1.0f).min(0.0f).max(1.0f); + b.add_input<decl::Float>(N_("Y")).min(0.0f).max(1.0f).compositor_domain_priority(0); + b.add_input<decl::Float>(N_("U")).min(0.0f).max(1.0f).compositor_domain_priority(1); + b.add_input<decl::Float>(N_("V")).min(0.0f).max(1.0f).compositor_domain_priority(2); + b.add_input<decl::Float>(N_("A")) + .default_value(1.0f) + .min(0.0f) + .max(1.0f) + .compositor_domain_priority(3); b.add_output<decl::Color>(N_("Image")); } -} // namespace blender::nodes::node_composite_sepcomb_yuva_cc +using namespace blender::realtime_compositor; + +class CombineYUVAShaderNode : public ShaderNode { + public: + using ShaderNode::ShaderNode; + + void compile(GPUMaterial *material) override + { + GPUNodeStack *inputs = get_inputs_array(); + 
GPUNodeStack *outputs = get_outputs_array(); + + GPU_stack_link(material, &bnode(), "node_composite_combine_yuva_itu_709", inputs, outputs); + } +}; + +static ShaderNode *get_compositor_shader_node(DNode node) +{ + return new CombineYUVAShaderNode(node); +} + +} // namespace blender::nodes::node_composite_combine_yuva_cc void register_node_type_cmp_combyuva() { - namespace file_ns = blender::nodes::node_composite_sepcomb_yuva_cc; + namespace file_ns = blender::nodes::node_composite_combine_yuva_cc; static bNodeType ntype; cmp_node_type_base(&ntype, CMP_NODE_COMBYUVA_LEGACY, "Combine YUVA", NODE_CLASS_CONVERTER); ntype.declare = file_ns::cmp_node_combyuva_declare; ntype.gather_link_search_ops = nullptr; + ntype.get_compositor_shader_node = file_ns::get_compositor_shader_node; nodeRegisterType(&ntype); } diff --git a/source/blender/nodes/composite/nodes/node_composite_setalpha.cc b/source/blender/nodes/composite/nodes/node_composite_setalpha.cc index 8aeaafbbf67..9930125aa70 100644 --- a/source/blender/nodes/composite/nodes/node_composite_setalpha.cc +++ b/source/blender/nodes/composite/nodes/node_composite_setalpha.cc @@ -8,6 +8,10 @@ #include "UI_interface.h" #include "UI_resources.h" +#include "GPU_material.h" + +#include "COM_shader_node.hh" + #include "node_composite_util.hh" /* **************** SET ALPHA ******************** */ @@ -16,8 +20,14 @@ namespace blender::nodes::node_composite_setalpha_cc { static void cmp_node_setalpha_declare(NodeDeclarationBuilder &b) { - b.add_input<decl::Color>(N_("Image")).default_value({1.0f, 1.0f, 1.0f, 1.0f}); - b.add_input<decl::Float>(N_("Alpha")).default_value(1.0f).min(0.0f).max(1.0f); + b.add_input<decl::Color>(N_("Image")) + .default_value({1.0f, 1.0f, 1.0f, 1.0f}) + .compositor_domain_priority(0); + b.add_input<decl::Float>(N_("Alpha")) + .default_value(1.0f) + .min(0.0f) + .max(1.0f) + .compositor_domain_priority(1); b.add_output<decl::Color>(N_("Image")); } @@ -33,6 +43,36 @@ static void 
node_composit_buts_set_alpha(uiLayout *layout, bContext *UNUSED(C), uiItemR(layout, ptr, "mode", UI_ITEM_R_SPLIT_EMPTY_NAME, nullptr, ICON_NONE); } +using namespace blender::realtime_compositor; + +class SetAlphaShaderNode : public ShaderNode { + public: + using ShaderNode::ShaderNode; + + void compile(GPUMaterial *material) override + { + GPUNodeStack *inputs = get_inputs_array(); + GPUNodeStack *outputs = get_outputs_array(); + + if (get_node_set_alpha()->mode == CMP_NODE_SETALPHA_MODE_APPLY) { + GPU_stack_link(material, &bnode(), "node_composite_set_alpha_apply", inputs, outputs); + return; + } + + GPU_stack_link(material, &bnode(), "node_composite_set_alpha_replace", inputs, outputs); + } + + NodeSetAlpha *get_node_set_alpha() + { + return static_cast<NodeSetAlpha *>(bnode().storage); + } +}; + +static ShaderNode *get_compositor_shader_node(DNode node) +{ + return new SetAlphaShaderNode(node); +} + } // namespace blender::nodes::node_composite_setalpha_cc void register_node_type_cmp_setalpha() @@ -47,6 +87,7 @@ void register_node_type_cmp_setalpha() node_type_init(&ntype, file_ns::node_composit_init_setalpha); node_type_storage( &ntype, "NodeSetAlpha", node_free_standard_storage, node_copy_standard_storage); + ntype.get_compositor_shader_node = file_ns::get_compositor_shader_node; nodeRegisterType(&ntype); } diff --git a/source/blender/nodes/composite/nodes/node_composite_split_viewer.cc b/source/blender/nodes/composite/nodes/node_composite_split_viewer.cc index ab325c4559f..085de69e63e 100644 --- a/source/blender/nodes/composite/nodes/node_composite_split_viewer.cc +++ b/source/blender/nodes/composite/nodes/node_composite_split_viewer.cc @@ -11,6 +11,12 @@ #include "UI_interface.h" #include "UI_resources.h" +#include "GPU_shader.h" +#include "GPU_texture.h" + +#include "COM_node_operation.hh" +#include "COM_utilities.hh" + #include "node_composite_util.hh" /* **************** SPLIT VIEWER ******************** */ @@ -43,6 +49,70 @@ static void 
node_composit_buts_splitviewer(uiLayout *layout, bContext *UNUSED(C) uiItemR(col, ptr, "factor", UI_ITEM_R_SPLIT_EMPTY_NAME, nullptr, ICON_NONE); } +using namespace blender::realtime_compositor; + +class ViewerOperation : public NodeOperation { + public: + using NodeOperation::NodeOperation; + + void execute() override + { + GPUShader *shader = get_split_viewer_shader(); + GPU_shader_bind(shader); + + const int2 size = compute_domain().size; + + GPU_shader_uniform_1f(shader, "split_ratio", get_split_ratio()); + GPU_shader_uniform_2iv(shader, "view_size", size); + + const Result &first_image = get_input("Image"); + first_image.bind_as_texture(shader, "first_image_tx"); + const Result &second_image = get_input("Image_001"); + second_image.bind_as_texture(shader, "second_image_tx"); + + GPUTexture *output_texture = context().get_output_texture(); + const int image_unit = GPU_shader_get_texture_binding(shader, "output_img"); + GPU_texture_image_bind(output_texture, image_unit); + + compute_dispatch_threads_at_least(shader, size); + + first_image.unbind_as_texture(); + second_image.unbind_as_texture(); + GPU_texture_image_unbind(output_texture); + GPU_shader_unbind(); + } + + /* The operation domain have the same dimensions of the output without any transformations. 
*/ + Domain compute_domain() override + { + return Domain(context().get_output_size()); + } + + GPUShader *get_split_viewer_shader() + { + if (get_split_axis() == CMP_NODE_SPLIT_VIEWER_HORIZONTAL) { + return shader_manager().get("compositor_split_viewer_horizontal"); + } + + return shader_manager().get("compositor_split_viewer_vertical"); + } + + CMPNodeSplitViewerAxis get_split_axis() + { + return (CMPNodeSplitViewerAxis)bnode().custom2; + } + + float get_split_ratio() + { + return bnode().custom1 / 100.0f; + } +}; + +static NodeOperation *get_compositor_operation(Context &context, DNode node) +{ + return new ViewerOperation(context, node); +} + } // namespace blender::nodes::node_composite_split_viewer_cc void register_node_type_cmp_splitviewer() @@ -57,6 +127,7 @@ void register_node_type_cmp_splitviewer() ntype.flag |= NODE_PREVIEW; node_type_init(&ntype, file_ns::node_composit_init_splitviewer); node_type_storage(&ntype, "ImageUser", node_free_standard_storage, node_copy_standard_storage); + ntype.get_compositor_operation = file_ns::get_compositor_operation; ntype.no_muting = true; diff --git a/source/blender/nodes/composite/nodes/node_composite_stabilize2d.cc b/source/blender/nodes/composite/nodes/node_composite_stabilize2d.cc index 63d00a0864b..75a96a05863 100644 --- a/source/blender/nodes/composite/nodes/node_composite_stabilize2d.cc +++ b/source/blender/nodes/composite/nodes/node_composite_stabilize2d.cc @@ -11,6 +11,8 @@ #include "BKE_context.h" #include "BKE_lib_id.h" +#include "COM_node_operation.hh" + #include "node_composite_util.hh" /* **************** Stabilize 2D ******************** */ @@ -58,6 +60,23 @@ static void node_composit_buts_stabilize2d(uiLayout *layout, bContext *C, Pointe uiItemR(layout, ptr, "invert", UI_ITEM_R_SPLIT_EMPTY_NAME, nullptr, ICON_NONE); } +using namespace blender::realtime_compositor; + +class Stabilize2DOperation : public NodeOperation { + public: + using NodeOperation::NodeOperation; + + void execute() override + { + 
get_input("Image").pass_through(get_result("Image")); + } +}; + +static NodeOperation *get_compositor_operation(Context &context, DNode node) +{ + return new Stabilize2DOperation(context, node); +} + } // namespace blender::nodes::node_composite_stabilize2d_cc void register_node_type_cmp_stabilize2d() @@ -70,6 +89,7 @@ void register_node_type_cmp_stabilize2d() ntype.declare = file_ns::cmp_node_stabilize2d_declare; ntype.draw_buttons = file_ns::node_composit_buts_stabilize2d; ntype.initfunc_api = file_ns::init; + ntype.get_compositor_operation = file_ns::get_compositor_operation; nodeRegisterType(&ntype); } diff --git a/source/blender/nodes/composite/nodes/node_composite_sunbeams.cc b/source/blender/nodes/composite/nodes/node_composite_sunbeams.cc index 766f26745ef..4b9264d7e35 100644 --- a/source/blender/nodes/composite/nodes/node_composite_sunbeams.cc +++ b/source/blender/nodes/composite/nodes/node_composite_sunbeams.cc @@ -8,6 +8,8 @@ #include "UI_interface.h" #include "UI_resources.h" +#include "COM_node_operation.hh" + #include "node_composite_util.hh" namespace blender::nodes::node_composite_sunbeams_cc { @@ -38,6 +40,23 @@ static void node_composit_buts_sunbeams(uiLayout *layout, bContext *UNUSED(C), P ICON_NONE); } +using namespace blender::realtime_compositor; + +class SunBeamsOperation : public NodeOperation { + public: + using NodeOperation::NodeOperation; + + void execute() override + { + get_input("Image").pass_through(get_result("Image")); + } +}; + +static NodeOperation *get_compositor_operation(Context &context, DNode node) +{ + return new SunBeamsOperation(context, node); +} + } // namespace blender::nodes::node_composite_sunbeams_cc void register_node_type_cmp_sunbeams() @@ -52,6 +71,7 @@ void register_node_type_cmp_sunbeams() node_type_init(&ntype, file_ns::init); node_type_storage( &ntype, "NodeSunBeams", node_free_standard_storage, node_copy_standard_storage); + ntype.get_compositor_operation = file_ns::get_compositor_operation; 
nodeRegisterType(&ntype); } diff --git a/source/blender/nodes/composite/nodes/node_composite_switch.cc b/source/blender/nodes/composite/nodes/node_composite_switch.cc index bda490572e9..767802cc442 100644 --- a/source/blender/nodes/composite/nodes/node_composite_switch.cc +++ b/source/blender/nodes/composite/nodes/node_composite_switch.cc @@ -8,6 +8,8 @@ #include "UI_interface.h" #include "UI_resources.h" +#include "COM_node_operation.hh" + #include "node_composite_util.hh" /* **************** Switch ******************** */ @@ -26,6 +28,30 @@ static void node_composit_buts_switch(uiLayout *layout, bContext *UNUSED(C), Poi uiItemR(layout, ptr, "check", UI_ITEM_R_SPLIT_EMPTY_NAME, nullptr, ICON_NONE); } +using namespace blender::realtime_compositor; + +class SwitchOperation : public NodeOperation { + public: + using NodeOperation::NodeOperation; + + void execute() override + { + Result &input = get_input(get_condition() ? "On" : "Off"); + Result &result = get_result("Image"); + input.pass_through(result); + } + + bool get_condition() + { + return bnode().custom1; + } +}; + +static NodeOperation *get_compositor_operation(Context &context, DNode node) +{ + return new SwitchOperation(context, node); +} + } // namespace blender::nodes::node_composite_switch_cc void register_node_type_cmp_switch() @@ -38,5 +64,7 @@ void register_node_type_cmp_switch() ntype.declare = file_ns::cmp_node_switch_declare; ntype.draw_buttons = file_ns::node_composit_buts_switch; node_type_size_preset(&ntype, NODE_SIZE_SMALL); + ntype.get_compositor_operation = file_ns::get_compositor_operation; + nodeRegisterType(&ntype); } diff --git a/source/blender/nodes/composite/nodes/node_composite_switchview.cc b/source/blender/nodes/composite/nodes/node_composite_switchview.cc index 2cf3da03a05..e74c3b6007a 100644 --- a/source/blender/nodes/composite/nodes/node_composite_switchview.cc +++ b/source/blender/nodes/composite/nodes/node_composite_switchview.cc @@ -11,6 +11,8 @@ #include "UI_interface.h" 
#include "UI_resources.h" +#include "COM_node_operation.hh" + #include "node_composite_util.hh" /* **************** SWITCH VIEW ******************** */ @@ -140,6 +142,25 @@ static void node_composit_buts_switch_view_ex(uiLayout *layout, nullptr); } +using namespace blender::realtime_compositor; + +class SwitchViewOperation : public NodeOperation { + public: + using NodeOperation::NodeOperation; + + void execute() override + { + Result &input = get_input(context().get_view_name()); + Result &result = get_result("Image"); + input.pass_through(result); + } +}; + +static NodeOperation *get_compositor_operation(Context &context, DNode node) +{ + return new SwitchViewOperation(context, node); +} + } // namespace blender::nodes::node_composite_switchview_cc void register_node_type_cmp_switch_view() @@ -153,6 +174,7 @@ void register_node_type_cmp_switch_view() ntype.draw_buttons_ex = file_ns::node_composit_buts_switch_view_ex; ntype.initfunc_api = file_ns::init_switch_view; node_type_update(&ntype, file_ns::cmp_node_switch_view_update); + ntype.get_compositor_operation = file_ns::get_compositor_operation; nodeRegisterType(&ntype); } diff --git a/source/blender/nodes/composite/nodes/node_composite_texture.cc b/source/blender/nodes/composite/nodes/node_composite_texture.cc index 7571e97a2cd..5a628aae7a7 100644 --- a/source/blender/nodes/composite/nodes/node_composite_texture.cc +++ b/source/blender/nodes/composite/nodes/node_composite_texture.cc @@ -5,6 +5,8 @@ * \ingroup cmpnodes */ +#include "COM_node_operation.hh" + #include "node_composite_util.hh" /* **************** TEXTURE ******************** */ @@ -23,6 +25,24 @@ static void cmp_node_texture_declare(NodeDeclarationBuilder &b) b.add_output<decl::Color>(N_("Color")); } +using namespace blender::realtime_compositor; + +class TextureOperation : public NodeOperation { + public: + using NodeOperation::NodeOperation; + + void execute() override + { + get_result("Value").allocate_invalid(); + 
get_result("Color").allocate_invalid(); + } +}; + +static NodeOperation *get_compositor_operation(Context &context, DNode node) +{ + return new TextureOperation(context, node); +} + } // namespace blender::nodes::node_composite_texture_cc void register_node_type_cmp_texture() @@ -34,6 +54,7 @@ void register_node_type_cmp_texture() cmp_node_type_base(&ntype, CMP_NODE_TEXTURE, "Texture", NODE_CLASS_INPUT); ntype.declare = file_ns::cmp_node_texture_declare; ntype.flag |= NODE_PREVIEW; + ntype.get_compositor_operation = file_ns::get_compositor_operation; nodeRegisterType(&ntype); } diff --git a/source/blender/nodes/composite/nodes/node_composite_tonemap.cc b/source/blender/nodes/composite/nodes/node_composite_tonemap.cc index cdfe97b038d..4cc3d4f32a3 100644 --- a/source/blender/nodes/composite/nodes/node_composite_tonemap.cc +++ b/source/blender/nodes/composite/nodes/node_composite_tonemap.cc @@ -10,6 +10,8 @@ #include "UI_interface.h" #include "UI_resources.h" +#include "COM_node_operation.hh" + #include "node_composite_util.hh" namespace blender::nodes::node_composite_tonemap_cc { @@ -58,6 +60,23 @@ static void node_composit_buts_tonemap(uiLayout *layout, bContext *UNUSED(C), Po } } +using namespace blender::realtime_compositor; + +class ToneMapOperation : public NodeOperation { + public: + using NodeOperation::NodeOperation; + + void execute() override + { + get_input("Image").pass_through(get_result("Image")); + } +}; + +static NodeOperation *get_compositor_operation(Context &context, DNode node) +{ + return new ToneMapOperation(context, node); +} + } // namespace blender::nodes::node_composite_tonemap_cc void register_node_type_cmp_tonemap() @@ -71,6 +90,7 @@ void register_node_type_cmp_tonemap() ntype.draw_buttons = file_ns::node_composit_buts_tonemap; node_type_init(&ntype, file_ns::node_composit_init_tonemap); node_type_storage(&ntype, "NodeTonemap", node_free_standard_storage, node_copy_standard_storage); + ntype.get_compositor_operation = 
file_ns::get_compositor_operation; nodeRegisterType(&ntype); } diff --git a/source/blender/nodes/composite/nodes/node_composite_trackpos.cc b/source/blender/nodes/composite/nodes/node_composite_trackpos.cc index 0e99ff59327..0e9bd800f44 100644 --- a/source/blender/nodes/composite/nodes/node_composite_trackpos.cc +++ b/source/blender/nodes/composite/nodes/node_composite_trackpos.cc @@ -18,6 +18,8 @@ #include "UI_interface.h" #include "UI_resources.h" +#include "COM_node_operation.hh" + #include "node_composite_util.hh" namespace blender::nodes::node_composite_trackpos_cc { @@ -102,6 +104,25 @@ static void node_composit_buts_trackpos(uiLayout *layout, bContext *C, PointerRN } } +using namespace blender::realtime_compositor; + +class TrackPositionOperation : public NodeOperation { + public: + using NodeOperation::NodeOperation; + + void execute() override + { + get_result("X").allocate_invalid(); + get_result("Y").allocate_invalid(); + get_result("Speed").allocate_invalid(); + } +}; + +static NodeOperation *get_compositor_operation(Context &context, DNode node) +{ + return new TrackPositionOperation(context, node); +} + } // namespace blender::nodes::node_composite_trackpos_cc void register_node_type_cmp_trackpos() @@ -116,6 +137,7 @@ void register_node_type_cmp_trackpos() ntype.initfunc_api = file_ns::init; node_type_storage( &ntype, "NodeTrackPosData", node_free_standard_storage, node_copy_standard_storage); + ntype.get_compositor_operation = file_ns::get_compositor_operation; nodeRegisterType(&ntype); } diff --git a/source/blender/nodes/composite/nodes/node_composite_transform.cc b/source/blender/nodes/composite/nodes/node_composite_transform.cc index fe72f5e33ca..7c5866d2d06 100644 --- a/source/blender/nodes/composite/nodes/node_composite_transform.cc +++ b/source/blender/nodes/composite/nodes/node_composite_transform.cc @@ -5,9 +5,15 @@ * \ingroup cmpnodes */ +#include "BLI_assert.h" +#include "BLI_float3x3.hh" +#include "BLI_math_vector.h" + #include 
"UI_interface.h" #include "UI_resources.h" +#include "COM_node_operation.hh" + #include "node_composite_util.hh" /* **************** Transform ******************** */ @@ -16,15 +22,30 @@ namespace blender::nodes::node_composite_transform_cc { static void cmp_node_transform_declare(NodeDeclarationBuilder &b) { - b.add_input<decl::Color>(N_("Image")).default_value({0.8f, 0.8f, 0.8f, 1.0f}); - b.add_input<decl::Float>(N_("X")).default_value(0.0f).min(-10000.0f).max(10000.0f); - b.add_input<decl::Float>(N_("Y")).default_value(0.0f).min(-10000.0f).max(10000.0f); + b.add_input<decl::Color>(N_("Image")) + .default_value({0.8f, 0.8f, 0.8f, 1.0f}) + .compositor_domain_priority(0); + b.add_input<decl::Float>(N_("X")) + .default_value(0.0f) + .min(-10000.0f) + .max(10000.0f) + .compositor_expects_single_value(); + b.add_input<decl::Float>(N_("Y")) + .default_value(0.0f) + .min(-10000.0f) + .max(10000.0f) + .compositor_expects_single_value(); b.add_input<decl::Float>(N_("Angle")) .default_value(0.0f) .min(-10000.0f) .max(10000.0f) - .subtype(PROP_ANGLE); - b.add_input<decl::Float>(N_("Scale")).default_value(1.0f).min(0.0001f).max(CMP_SCALE_MAX); + .subtype(PROP_ANGLE) + .compositor_expects_single_value(); + b.add_input<decl::Float>(N_("Scale")) + .default_value(1.0f) + .min(0.0001f) + .max(CMP_SCALE_MAX) + .compositor_expects_single_value(); b.add_output<decl::Color>(N_("Image")); } @@ -33,6 +54,51 @@ static void node_composit_buts_transform(uiLayout *layout, bContext *UNUSED(C), uiItemR(layout, ptr, "filter_type", UI_ITEM_R_SPLIT_EMPTY_NAME, "", ICON_NONE); } +using namespace blender::realtime_compositor; + +class TransformOperation : public NodeOperation { + public: + using NodeOperation::NodeOperation; + + void execute() override + { + Result &input = get_input("Image"); + Result &result = get_result("Image"); + input.pass_through(result); + + const float2 translation = float2(get_input("X").get_float_value_default(0.0f), + get_input("Y").get_float_value_default(0.0f)); + 
const float rotation = get_input("Angle").get_float_value_default(0.0f); + const float2 scale = float2(get_input("Scale").get_float_value_default(1.0f)); + + const float3x3 transformation = float3x3::from_translation_rotation_scale( + translation, rotation, scale); + + result.transform(transformation); + result.get_realization_options().interpolation = get_interpolation(); + } + + Interpolation get_interpolation() + { + switch (bnode().custom1) { + case 0: + return Interpolation::Nearest; + case 1: + return Interpolation::Bilinear; + case 2: + return Interpolation::Bicubic; + } + + BLI_assert_unreachable(); + return Interpolation::Nearest; + } +}; + +static NodeOperation *get_compositor_operation(Context &context, DNode node) +{ + return new TransformOperation(context, node); +} + } // namespace blender::nodes::node_composite_transform_cc void register_node_type_cmp_transform() @@ -44,6 +110,7 @@ void register_node_type_cmp_transform() cmp_node_type_base(&ntype, CMP_NODE_TRANSFORM, "Transform", NODE_CLASS_DISTORT); ntype.declare = file_ns::cmp_node_transform_declare; ntype.draw_buttons = file_ns::node_composit_buts_transform; + ntype.get_compositor_operation = file_ns::get_compositor_operation; nodeRegisterType(&ntype); } diff --git a/source/blender/nodes/composite/nodes/node_composite_translate.cc b/source/blender/nodes/composite/nodes/node_composite_translate.cc index bbdc8ca4d31..fbd53b8310f 100644 --- a/source/blender/nodes/composite/nodes/node_composite_translate.cc +++ b/source/blender/nodes/composite/nodes/node_composite_translate.cc @@ -5,9 +5,14 @@ * \ingroup cmpnodes */ +#include "BLI_float3x3.hh" +#include "BLI_math_vec_types.hh" + #include "UI_interface.h" #include "UI_resources.h" +#include "COM_node_operation.hh" + #include "node_composite_util.hh" /* **************** Translate ******************** */ @@ -16,9 +21,19 @@ namespace blender::nodes::node_composite_translate_cc { static void cmp_node_translate_declare(NodeDeclarationBuilder &b) { - 
b.add_input<decl::Color>(N_("Image")).default_value({1.0f, 1.0f, 1.0f, 1.0f}); - b.add_input<decl::Float>(N_("X")).default_value(0.0f).min(-10000.0f).max(10000.0f); - b.add_input<decl::Float>(N_("Y")).default_value(0.0f).min(-10000.0f).max(10000.0f); + b.add_input<decl::Color>(N_("Image")) + .default_value({1.0f, 1.0f, 1.0f, 1.0f}) + .compositor_domain_priority(0); + b.add_input<decl::Float>(N_("X")) + .default_value(0.0f) + .min(-10000.0f) + .max(10000.0f) + .compositor_expects_single_value(); + b.add_input<decl::Float>(N_("Y")) + .default_value(0.0f) + .min(-10000.0f) + .max(10000.0f) + .compositor_expects_single_value(); b.add_output<decl::Color>(N_("Image")); } @@ -34,6 +49,59 @@ static void node_composit_buts_translate(uiLayout *layout, bContext *UNUSED(C), uiItemR(layout, ptr, "wrap_axis", UI_ITEM_R_SPLIT_EMPTY_NAME, nullptr, ICON_NONE); } +using namespace blender::realtime_compositor; + +class TranslateOperation : public NodeOperation { + public: + using NodeOperation::NodeOperation; + + void execute() override + { + Result &input = get_input("Image"); + Result &result = get_result("Image"); + input.pass_through(result); + + float x = get_input("X").get_float_value_default(0.0f); + float y = get_input("Y").get_float_value_default(0.0f); + if (get_use_relative()) { + x *= input.domain().size.x; + y *= input.domain().size.y; + } + + const float2 translation = float2(x, y); + const float3x3 transformation = float3x3::from_translation(translation); + + result.transform(transformation); + result.get_realization_options().repeat_x = get_repeat_x(); + result.get_realization_options().repeat_y = get_repeat_y(); + } + + NodeTranslateData &get_node_translate() + { + return *static_cast<NodeTranslateData *>(bnode().storage); + } + + bool get_use_relative() + { + return get_node_translate().relative; + } + + bool get_repeat_x() + { + return ELEM(get_node_translate().wrap_axis, CMP_NODE_WRAP_X, CMP_NODE_WRAP_XY); + } + + bool get_repeat_y() + { + return 
ELEM(get_node_translate().wrap_axis, CMP_NODE_WRAP_Y, CMP_NODE_WRAP_XY); + } +}; + +static NodeOperation *get_compositor_operation(Context &context, DNode node) +{ + return new TranslateOperation(context, node); +} + } // namespace blender::nodes::node_composite_translate_cc void register_node_type_cmp_translate() @@ -48,6 +116,7 @@ void register_node_type_cmp_translate() node_type_init(&ntype, file_ns::node_composit_init_translate); node_type_storage( &ntype, "NodeTranslateData", node_free_standard_storage, node_copy_standard_storage); + ntype.get_compositor_operation = file_ns::get_compositor_operation; nodeRegisterType(&ntype); } diff --git a/source/blender/nodes/composite/nodes/node_composite_val_to_rgb.cc b/source/blender/nodes/composite/nodes/node_composite_val_to_rgb.cc index df669d5beda..03a7bc61924 100644 --- a/source/blender/nodes/composite/nodes/node_composite_val_to_rgb.cc +++ b/source/blender/nodes/composite/nodes/node_composite_val_to_rgb.cc @@ -5,18 +5,33 @@ * \ingroup cmpnodes */ +#include "BLI_assert.h" + +#include "IMB_colormanagement.h" + +#include "BKE_colorband.h" + +#include "GPU_material.h" + +#include "COM_shader_node.hh" + #include "node_composite_util.hh" #include "BKE_colorband.h" /* **************** VALTORGB ******************** */ -namespace blender::nodes::node_composite_val_to_rgb_cc { +namespace blender::nodes::node_composite_color_ramp_cc { static void cmp_node_valtorgb_declare(NodeDeclarationBuilder &b) { - b.add_input<decl::Float>(N_("Fac")).default_value(0.5f).min(0.0f).max(1.0f).subtype(PROP_FACTOR); - b.add_output<decl::Color>(N_("Image")); + b.add_input<decl::Float>(N_("Fac")) + .default_value(0.5f) + .min(0.0f) + .max(1.0f) + .subtype(PROP_FACTOR) + .compositor_domain_priority(1); + b.add_output<decl::Color>(N_("Image")).compositor_domain_priority(0); b.add_output<decl::Float>(N_("Alpha")); } @@ -25,11 +40,94 @@ static void node_composit_init_valtorgb(bNodeTree *UNUSED(ntree), bNode *node) node->storage = 
BKE_colorband_add(true); } -} // namespace blender::nodes::node_composite_val_to_rgb_cc +using namespace blender::realtime_compositor; + +class ColorRampShaderNode : public ShaderNode { + public: + using ShaderNode::ShaderNode; + + void compile(GPUMaterial *material) override + { + GPUNodeStack *inputs = get_inputs_array(); + GPUNodeStack *outputs = get_outputs_array(); + + struct ColorBand *color_band = get_color_band(); + + /* Common / easy case optimization. */ + if ((color_band->tot <= 2) && (color_band->color_mode == COLBAND_BLEND_RGB)) { + float mul_bias[2]; + switch (color_band->ipotype) { + case COLBAND_INTERP_LINEAR: + mul_bias[0] = 1.0f / (color_band->data[1].pos - color_band->data[0].pos); + mul_bias[1] = -mul_bias[0] * color_band->data[0].pos; + GPU_stack_link(material, + &bnode(), + "valtorgb_opti_linear", + inputs, + outputs, + GPU_uniform(mul_bias), + GPU_uniform(&color_band->data[0].r), + GPU_uniform(&color_band->data[1].r)); + return; + case COLBAND_INTERP_CONSTANT: + mul_bias[1] = max_ff(color_band->data[0].pos, color_band->data[1].pos); + GPU_stack_link(material, + &bnode(), + "valtorgb_opti_constant", + inputs, + outputs, + GPU_uniform(&mul_bias[1]), + GPU_uniform(&color_band->data[0].r), + GPU_uniform(&color_band->data[1].r)); + return; + case COLBAND_INTERP_EASE: + mul_bias[0] = 1.0f / (color_band->data[1].pos - color_band->data[0].pos); + mul_bias[1] = -mul_bias[0] * color_band->data[0].pos; + GPU_stack_link(material, + &bnode(), + "valtorgb_opti_ease", + inputs, + outputs, + GPU_uniform(mul_bias), + GPU_uniform(&color_band->data[0].r), + GPU_uniform(&color_band->data[1].r)); + return; + default: + BLI_assert_unreachable(); + return; + } + } + + float *array, layer; + int size; + BKE_colorband_evaluate_table_rgba(color_band, &array, &size); + GPUNodeLink *tex = GPU_color_band(material, size, array, &layer); + + if (color_band->ipotype == COLBAND_INTERP_CONSTANT) { + GPU_stack_link( + material, &bnode(), "valtorgb_nearest", inputs, outputs, 
tex, GPU_constant(&layer)); + return; + } + + GPU_stack_link(material, &bnode(), "valtorgb", inputs, outputs, tex, GPU_constant(&layer)); + } + + struct ColorBand *get_color_band() + { + return static_cast<struct ColorBand *>(bnode().storage); + } +}; + +static ShaderNode *get_compositor_shader_node(DNode node) +{ + return new ColorRampShaderNode(node); +} + +} // namespace blender::nodes::node_composite_color_ramp_cc void register_node_type_cmp_valtorgb() { - namespace file_ns = blender::nodes::node_composite_val_to_rgb_cc; + namespace file_ns = blender::nodes::node_composite_color_ramp_cc; static bNodeType ntype; @@ -38,31 +136,63 @@ void register_node_type_cmp_valtorgb() node_type_size(&ntype, 240, 200, 320); node_type_init(&ntype, file_ns::node_composit_init_valtorgb); node_type_storage(&ntype, "ColorBand", node_free_standard_storage, node_copy_standard_storage); + ntype.get_compositor_shader_node = file_ns::get_compositor_shader_node; nodeRegisterType(&ntype); } /* **************** RGBTOBW ******************** */ -namespace blender::nodes::node_composite_val_to_rgb_cc { +namespace blender::nodes::node_composite_rgb_to_bw_cc { static void cmp_node_rgbtobw_declare(NodeDeclarationBuilder &b) { - b.add_input<decl::Color>(N_("Image")).default_value({0.8f, 0.8f, 0.8f, 1.0f}); + b.add_input<decl::Color>(N_("Image")) + .default_value({0.8f, 0.8f, 0.8f, 1.0f}) + .compositor_domain_priority(0); b.add_output<decl::Float>(N_("Val")); } -} // namespace blender::nodes::node_composite_val_to_rgb_cc +using namespace blender::realtime_compositor; + +class RGBToBWShaderNode : public ShaderNode { + public: + using ShaderNode::ShaderNode; + + void compile(GPUMaterial *material) override + { + GPUNodeStack *inputs = get_inputs_array(); + GPUNodeStack *outputs = get_outputs_array(); + + float luminance_coefficients[3]; + IMB_colormanagement_get_luminance_coefficients(luminance_coefficients); + + GPU_stack_link(material, + &bnode(), + "color_to_luminance", + inputs, + outputs, + 
GPU_constant(luminance_coefficients)); + } +}; + +static ShaderNode *get_compositor_shader_node(DNode node) +{ + return new RGBToBWShaderNode(node); +} + +} // namespace blender::nodes::node_composite_rgb_to_bw_cc void register_node_type_cmp_rgbtobw() { - namespace file_ns = blender::nodes::node_composite_val_to_rgb_cc; + namespace file_ns = blender::nodes::node_composite_rgb_to_bw_cc; static bNodeType ntype; cmp_node_type_base(&ntype, CMP_NODE_RGBTOBW, "RGB to BW", NODE_CLASS_CONVERTER); ntype.declare = file_ns::cmp_node_rgbtobw_declare; node_type_size_preset(&ntype, NODE_SIZE_SMALL); + ntype.get_compositor_shader_node = file_ns::get_compositor_shader_node; nodeRegisterType(&ntype); } diff --git a/source/blender/nodes/composite/nodes/node_composite_value.cc b/source/blender/nodes/composite/nodes/node_composite_value.cc index a3269d3d1c2..a96e1db14ad 100644 --- a/source/blender/nodes/composite/nodes/node_composite_value.cc +++ b/source/blender/nodes/composite/nodes/node_composite_value.cc @@ -5,6 +5,8 @@ * \ingroup cmpnodes */ +#include "COM_node_operation.hh" + #include "node_composite_util.hh" /* **************** VALUE ******************** */ @@ -16,6 +18,29 @@ static void cmp_node_value_declare(NodeDeclarationBuilder &b) b.add_output<decl::Float>(N_("Value")).default_value(0.5f); } +using namespace blender::realtime_compositor; + +class ValueOperation : public NodeOperation { + public: + using NodeOperation::NodeOperation; + + void execute() override + { + Result &result = get_result("Value"); + result.allocate_single_value(); + + const bNodeSocket *socket = static_cast<bNodeSocket *>(bnode().outputs.first); + float value = static_cast<bNodeSocketValueFloat *>(socket->default_value)->value; + + result.set_float_value(value); + } +}; + +static NodeOperation *get_compositor_operation(Context &context, DNode node) +{ + return new ValueOperation(context, node); +} + } // namespace blender::nodes::node_composite_value_cc void register_node_type_cmp_value() @@ -27,6 
+52,7 @@ void register_node_type_cmp_value() cmp_node_type_base(&ntype, CMP_NODE_VALUE, "Value", NODE_CLASS_INPUT); ntype.declare = file_ns::cmp_node_value_declare; node_type_size_preset(&ntype, NODE_SIZE_SMALL); + ntype.get_compositor_operation = file_ns::get_compositor_operation; nodeRegisterType(&ntype); } diff --git a/source/blender/nodes/composite/nodes/node_composite_vec_blur.cc b/source/blender/nodes/composite/nodes/node_composite_vec_blur.cc index 741f2e0e816..515478da75d 100644 --- a/source/blender/nodes/composite/nodes/node_composite_vec_blur.cc +++ b/source/blender/nodes/composite/nodes/node_composite_vec_blur.cc @@ -8,6 +8,8 @@ #include "UI_interface.h" #include "UI_resources.h" +#include "COM_node_operation.hh" + #include "node_composite_util.hh" /* **************** VECTOR BLUR ******************** */ @@ -51,6 +53,23 @@ static void node_composit_buts_vecblur(uiLayout *layout, bContext *UNUSED(C), Po uiItemR(layout, ptr, "use_curved", UI_ITEM_R_SPLIT_EMPTY_NAME, nullptr, ICON_NONE); } +using namespace blender::realtime_compositor; + +class VectorBlurOperation : public NodeOperation { + public: + using NodeOperation::NodeOperation; + + void execute() override + { + get_input("Image").pass_through(get_result("Image")); + } +}; + +static NodeOperation *get_compositor_operation(Context &context, DNode node) +{ + return new VectorBlurOperation(context, node); +} + } // namespace blender::nodes::node_composite_vec_blur_cc void register_node_type_cmp_vecblur() @@ -65,6 +84,7 @@ void register_node_type_cmp_vecblur() node_type_init(&ntype, file_ns::node_composit_init_vecblur); node_type_storage( &ntype, "NodeBlurData", node_free_standard_storage, node_copy_standard_storage); + ntype.get_compositor_operation = file_ns::get_compositor_operation; nodeRegisterType(&ntype); } diff --git a/source/blender/nodes/composite/nodes/node_composite_viewer.cc b/source/blender/nodes/composite/nodes/node_composite_viewer.cc index 05f395183b5..4e82b31ca47 100644 --- 
a/source/blender/nodes/composite/nodes/node_composite_viewer.cc +++ b/source/blender/nodes/composite/nodes/node_composite_viewer.cc @@ -5,6 +5,8 @@ * \ingroup cmpnodes */ +#include "BLI_math_vec_types.hh" + #include "BKE_global.h" #include "BKE_image.h" @@ -13,6 +15,13 @@ #include "UI_interface.h" #include "UI_resources.h" +#include "GPU_shader.h" +#include "GPU_state.h" +#include "GPU_texture.h" + +#include "COM_node_operation.hh" +#include "COM_utilities.hh" + #include "node_composite_util.hh" /* **************** VIEWER ******************** */ @@ -55,6 +64,125 @@ static void node_composit_buts_viewer_ex(uiLayout *layout, bContext *UNUSED(C), } } +using namespace blender::realtime_compositor; + +class ViewerOperation : public NodeOperation { + public: + using NodeOperation::NodeOperation; + + void execute() override + { + const Result &image = get_input("Image"); + const Result &alpha = get_input("Alpha"); + + if (image.is_single_value() && alpha.is_single_value()) { + execute_clear(); + } + else if (ignore_alpha()) { + execute_ignore_alpha(); + } + else if (!node().input_by_identifier("Alpha")->is_logically_linked()) { + execute_copy(); + } + else { + execute_set_alpha(); + } + } + + /* Executes when all inputs are single values, in which case, the output texture can just be + * cleared to the appropriate color. */ + void execute_clear() + { + const Result &image = get_input("Image"); + const Result &alpha = get_input("Alpha"); + + float4 color = image.get_color_value(); + if (ignore_alpha()) { + color.w = 1.0f; + } + else if (node().input_by_identifier("Alpha")->is_logically_linked()) { + color.w = alpha.get_float_value(); + } + + GPU_texture_clear(context().get_output_texture(), GPU_DATA_FLOAT, color); + } + + /* Executes when the alpha channel of the image is ignored. 
*/ + void execute_ignore_alpha() + { + GPUShader *shader = shader_manager().get("compositor_convert_color_to_opaque"); + GPU_shader_bind(shader); + + const Result &image = get_input("Image"); + image.bind_as_texture(shader, "input_tx"); + + GPUTexture *output_texture = context().get_output_texture(); + const int image_unit = GPU_shader_get_texture_binding(shader, "output_img"); + GPU_texture_image_bind(output_texture, image_unit); + + compute_dispatch_threads_at_least(shader, compute_domain().size); + + image.unbind_as_texture(); + GPU_texture_image_unbind(output_texture); + GPU_shader_unbind(); + } + + /* Executes when the image texture is written with no adjustments and can thus be copied directly + * to the output texture. */ + void execute_copy() + { + const Result &image = get_input("Image"); + + /* Make sure any prior writes to the texture are reflected before copying it. */ + GPU_memory_barrier(GPU_BARRIER_TEXTURE_UPDATE); + + GPU_texture_copy(context().get_output_texture(), image.texture()); + } + + /* Executes when the alpha channel of the image is set as the value of the input alpha. */ + void execute_set_alpha() + { + GPUShader *shader = shader_manager().get("compositor_set_alpha"); + GPU_shader_bind(shader); + + const Result &image = get_input("Image"); + image.bind_as_texture(shader, "image_tx"); + + const Result &alpha = get_input("Alpha"); + alpha.bind_as_texture(shader, "alpha_tx"); + + GPUTexture *output_texture = context().get_output_texture(); + const int image_unit = GPU_shader_get_texture_binding(shader, "output_img"); + GPU_texture_image_bind(output_texture, image_unit); + + compute_dispatch_threads_at_least(shader, compute_domain().size); + + image.unbind_as_texture(); + alpha.unbind_as_texture(); + GPU_texture_image_unbind(output_texture); + GPU_shader_unbind(); + } + + /* If true, the alpha channel of the image is set to 1, that is, it becomes opaque. 
If false, the + * alpha channel of the image is retained, but only if the alpha input is not linked. If the + * alpha input is linked, it the value of that input will be used as the alpha of the image. */ + bool ignore_alpha() + { + return bnode().custom2 & CMP_NODE_OUTPUT_IGNORE_ALPHA; + } + + /* The operation domain have the same dimensions of the output without any transformations. */ + Domain compute_domain() override + { + return Domain(context().get_output_size()); + } +}; + +static NodeOperation *get_compositor_operation(Context &context, DNode node) +{ + return new ViewerOperation(context, node); +} + } // namespace blender::nodes::node_composite_viewer_cc void register_node_type_cmp_viewer() @@ -70,6 +198,7 @@ void register_node_type_cmp_viewer() ntype.flag |= NODE_PREVIEW; node_type_init(&ntype, file_ns::node_composit_init_viewer); node_type_storage(&ntype, "ImageUser", node_free_standard_storage, node_copy_standard_storage); + ntype.get_compositor_operation = file_ns::get_compositor_operation; ntype.no_muting = true; diff --git a/source/blender/nodes/composite/nodes/node_composite_zcombine.cc b/source/blender/nodes/composite/nodes/node_composite_zcombine.cc index be90aeb7acc..e5f460099e9 100644 --- a/source/blender/nodes/composite/nodes/node_composite_zcombine.cc +++ b/source/blender/nodes/composite/nodes/node_composite_zcombine.cc @@ -8,6 +8,8 @@ #include "UI_interface.h" #include "UI_resources.h" +#include "COM_node_operation.hh" + #include "node_composite_util.hh" /* **************** Z COMBINE ******************** */ @@ -33,6 +35,24 @@ static void node_composit_buts_zcombine(uiLayout *layout, bContext *UNUSED(C), P uiItemR(col, ptr, "use_antialias_z", UI_ITEM_R_SPLIT_EMPTY_NAME, nullptr, ICON_NONE); } +using namespace blender::realtime_compositor; + +class ZCombineOperation : public NodeOperation { + public: + using NodeOperation::NodeOperation; + + void execute() override + { + get_input("Image").pass_through(get_result("Image")); + 
get_result("Z").allocate_invalid(); + } +}; + +static NodeOperation *get_compositor_operation(Context &context, DNode node) +{ + return new ZCombineOperation(context, node); +} + } // namespace blender::nodes::node_composite_zcombine_cc void register_node_type_cmp_zcombine() @@ -44,6 +64,7 @@ void register_node_type_cmp_zcombine() cmp_node_type_base(&ntype, CMP_NODE_ZCOMBINE, "Z Combine", NODE_CLASS_OP_COLOR); ntype.declare = file_ns::cmp_node_zcombine_declare; ntype.draw_buttons = file_ns::node_composit_buts_zcombine; + ntype.get_compositor_operation = file_ns::get_compositor_operation; nodeRegisterType(&ntype); } diff --git a/source/blender/nodes/geometry/nodes/node_geo_distribute_points_on_faces.cc b/source/blender/nodes/geometry/nodes/node_geo_distribute_points_on_faces.cc index cf29c752257..a6c67cac916 100644 --- a/source/blender/nodes/geometry/nodes/node_geo_distribute_points_on_faces.cc +++ b/source/blender/nodes/geometry/nodes/node_geo_distribute_points_on_faces.cc @@ -221,7 +221,7 @@ BLI_NOINLINE static void update_elimination_mask_based_on_density_factors( const float v2_density_factor = std::max(0.0f, density_factors[v2_loop]); const float probability = v0_density_factor * bary_coord.x + v1_density_factor * bary_coord.y + - v2_density_factor * bary_coord.z; + v2_density_factor * bary_coord.z; const float hash = noise::hash_float_to_float(bary_coord); if (hash > probability) { diff --git a/source/blender/nodes/shader/nodes/node_shader_mix_rgb.cc b/source/blender/nodes/shader/nodes/node_shader_mix_rgb.cc index 12707623049..478a6812c36 100644 --- a/source/blender/nodes/shader/nodes/node_shader_mix_rgb.cc +++ b/source/blender/nodes/shader/nodes/node_shader_mix_rgb.cc @@ -32,7 +32,7 @@ static const char *gpu_shader_get_name(int mode) case MA_RAMP_SCREEN: return "mix_screen"; case MA_RAMP_DIV: - return "mix_div"; + return "mix_div_fallback"; case MA_RAMP_DIFF: return "mix_diff"; case MA_RAMP_DARK: @@ -70,18 +70,23 @@ static int gpu_shader_mix_rgb(GPUMaterial 
*mat, { const char *name = gpu_shader_get_name(node->custom1); - if (name != nullptr) { - int ret = GPU_stack_link(mat, node, name, in, out); - if (ret && node->custom2 & SHD_MIXRGB_CLAMP) { - const float min[3] = {0.0f, 0.0f, 0.0f}; - const float max[3] = {1.0f, 1.0f, 1.0f}; - GPU_link( - mat, "clamp_color", out[0].link, GPU_constant(min), GPU_constant(max), &out[0].link); - } - return ret; + if (name == nullptr) { + return 0; } - return 0; + const float min = 0.0f; + const float max = 1.0f; + const GPUNodeLink *factor_link = in[0].link ? in[0].link : GPU_uniform(in[0].vec); + GPU_link(mat, "clamp_value", factor_link, GPU_constant(&min), GPU_constant(&max), &in[0].link); + + int ret = GPU_stack_link(mat, node, name, in, out); + + if (ret && node->custom2 & SHD_MIXRGB_CLAMP) { + const float min[3] = {0.0f, 0.0f, 0.0f}; + const float max[3] = {1.0f, 1.0f, 1.0f}; + GPU_link(mat, "clamp_color", out[0].link, GPU_constant(min), GPU_constant(max), &out[0].link); + } + return ret; } class MixRGBFunction : public fn::MultiFunction { diff --git a/source/blender/windowmanager/intern/wm_event_system.cc b/source/blender/windowmanager/intern/wm_event_system.cc index d90259c0cde..77f6b3c861f 100644 --- a/source/blender/windowmanager/intern/wm_event_system.cc +++ b/source/blender/windowmanager/intern/wm_event_system.cc @@ -5214,6 +5214,13 @@ void wm_event_add_ghostevent(wmWindowManager *wm, wmWindow *win, int type, void event.prev_type = event.type; event.prev_val = event.val; + /* Always use modifiers from the active window since + changes to modifiers aren't sent to inactive windows, see: T66088. */ + if ((wm->winactive != win) && (wm->winactive && wm->winactive->eventstate)) { + event.modifier = wm->winactive->eventstate->modifier; + event.keymodifier = wm->winactive->eventstate->keymodifier; + } + /* Ensure the event state is correct, any deviation from this may cause bugs. 
* * NOTE: #EVENT_NONE is set when unknown keys are pressed, @@ -5256,6 +5263,10 @@ void wm_event_add_ghostevent(wmWindowManager *wm, wmWindow *win, int type, void if (win_other) { wmEvent event_other = *win_other->eventstate; + /* Use the modifier state of this window. */ + event_other.modifier = event.modifier; + event_other.keymodifier = event.keymodifier; + /* See comment for this operation on `event` for details. */ event_other.prev_type = event_other.type; event_other.prev_val = event_other.val; @@ -5345,6 +5356,10 @@ void wm_event_add_ghostevent(wmWindowManager *wm, wmWindow *win, int type, void if (win_other) { wmEvent event_other = *win_other->eventstate; + /* Use the modifier state of this window. */ + event_other.modifier = event.modifier; + event_other.keymodifier = event.keymodifier; + /* See comment for this operation on `event` for details. */ event_other.prev_type = event_other.type; event_other.prev_val = event_other.val; diff --git a/source/blender/windowmanager/intern/wm_window.c b/source/blender/windowmanager/intern/wm_window.c index a5690b52a5a..0c31ff87fdd 100644 --- a/source/blender/windowmanager/intern/wm_window.c +++ b/source/blender/windowmanager/intern/wm_window.c @@ -1113,14 +1113,22 @@ static bool ghost_event_proc(GHOST_EventHandle evt, GHOST_TUserDataPtr C_void_pt } wmWindow *win = GHOST_GetWindowUserData(ghostwin); + /* Win23/GHOST modifier bug, see T40317 */ +#ifndef WIN32 +//# define USE_WIN_ACTIVATE +#endif + switch (type) { case GHOST_kEventWindowDeactivate: wm_event_add_ghostevent(wm, win, type, data); win->active = 0; /* XXX */ - /* clear modifiers for inactive windows */ + /* When window activation is enabled, these modifiers are set with window activation. + * Otherwise leave them set so re-activation doesn't loose keys which are held. 
*/ +#ifdef USE_WIN_ACTIVATE win->eventstate->modifier = 0; win->eventstate->keymodifier = 0; +#endif break; case GHOST_kEventWindowActivate: { @@ -1128,11 +1136,6 @@ static bool ghost_event_proc(GHOST_EventHandle evt, GHOST_TUserDataPtr C_void_pt (query_qual(CONTROL) ? KM_CTRL : 0) | (query_qual(ALT) ? KM_ALT : 0) | (query_qual(OS) ? KM_OSKEY : 0)); - /* Win23/GHOST modifier bug, see T40317 */ -#ifndef WIN32 -//# define USE_WIN_ACTIVATE -#endif - /* No context change! C->wm->windrawable is drawable, or for area queues. */ wm->winactive = win; |