git.blender.org/blender.git

Diffstat (limited to 'source/blender/nodes/composite/nodes')

 source/blender/nodes/composite/nodes/node_composite_alpha_over.cc | 4
 source/blender/nodes/composite/nodes/node_composite_bilateralblur.cc | 55
 source/blender/nodes/composite/nodes/node_composite_blur.cc | 401
 source/blender/nodes/composite/nodes/node_composite_bokehblur.cc | 103
 source/blender/nodes/composite/nodes/node_composite_bokehimage.cc | 48
 source/blender/nodes/composite/nodes/node_composite_boxmask.cc | 13
 source/blender/nodes/composite/nodes/node_composite_channel_matte.cc | 15
 source/blender/nodes/composite/nodes/node_composite_chroma_matte.cc | 13
 source/blender/nodes/composite/nodes/node_composite_color_matte.cc | 13
 source/blender/nodes/composite/nodes/node_composite_color_spill.cc | 21
 source/blender/nodes/composite/nodes/node_composite_colorbalance.cc | 23
 source/blender/nodes/composite/nodes/node_composite_colorcorrection.cc | 53
 source/blender/nodes/composite/nodes/node_composite_crop.cc | 9
 source/blender/nodes/composite/nodes/node_composite_curves.cc | 18
 source/blender/nodes/composite/nodes/node_composite_despeckle.cc | 54
 source/blender/nodes/composite/nodes/node_composite_diff_matte.cc | 11
 source/blender/nodes/composite/nodes/node_composite_dilate.cc | 468
 source/blender/nodes/composite/nodes/node_composite_directionalblur.cc | 122
 source/blender/nodes/composite/nodes/node_composite_distance_matte.cc | 13
 source/blender/nodes/composite/nodes/node_composite_ellipsemask.cc | 13
 source/blender/nodes/composite/nodes/node_composite_filter.cc | 112
 source/blender/nodes/composite/nodes/node_composite_huecorrect.cc | 6
 source/blender/nodes/composite/nodes/node_composite_image.cc | 20
 source/blender/nodes/composite/nodes/node_composite_lensdist.cc | 13
 source/blender/nodes/composite/nodes/node_composite_luma_matte.cc | 11
 source/blender/nodes/composite/nodes/node_composite_map_value.cc | 21
 source/blender/nodes/composite/nodes/node_composite_movieclip.cc | 11
 source/blender/nodes/composite/nodes/node_composite_normal.cc | 7
 source/blender/nodes/composite/nodes/node_composite_pixelate.cc | 29
 source/blender/nodes/composite/nodes/node_composite_rgb.cc | 4
 source/blender/nodes/composite/nodes/node_composite_scale.cc | 149
 source/blender/nodes/composite/nodes/node_composite_sepcomb_color.cc | 22
 source/blender/nodes/composite/nodes/node_composite_setalpha.cc | 9
 source/blender/nodes/composite/nodes/node_composite_translate.cc | 13

 34 files changed, 1671 insertions(+), 226 deletions(-)
diff --git a/source/blender/nodes/composite/nodes/node_composite_alpha_over.cc b/source/blender/nodes/composite/nodes/node_composite_alpha_over.cc
index 64c59eb24e3..12f81da3d1c 100644
--- a/source/blender/nodes/composite/nodes/node_composite_alpha_over.cc
+++ b/source/blender/nodes/composite/nodes/node_composite_alpha_over.cc
@@ -18,6 +18,8 @@
namespace blender::nodes::node_composite_alpha_over_cc {
+NODE_STORAGE_FUNCS(NodeTwoFloats)
+
static void cmp_node_alphaover_declare(NodeDeclarationBuilder &b)
{
b.add_input<decl::Float>(N_("Fac"))
@@ -86,7 +88,7 @@ class AlphaOverShaderNode : public ShaderNode {
float get_premultiply_factor()
{
- return ((NodeTwoFloats *)bnode().storage)->x;
+ return node_storage(bnode()).x;
}
};
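
The recurring pattern in this commit replaces per-node static_cast helpers with a node_storage() accessor declared once per file through NODE_STORAGE_FUNCS. A rough sketch of what such a macro is assumed to expand to is shown below; the real definition lives in the shared node headers and may differ in detail:

#define NODE_STORAGE_FUNCS(StorageT) \
  /* Typed accessors for the node's DNA storage, sketched here for illustration only. */ \
  static StorageT &node_storage(bNode &node) \
  { \
    return *static_cast<StorageT *>(node.storage); \
  } \
  static const StorageT &node_storage(const bNode &node) \
  { \
    return *static_cast<const StorageT *>(node.storage); \
  }

With such accessors in place, node_storage(bnode()).x above reads the NodeTwoFloats storage directly instead of casting at every call site.
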
diff --git a/source/blender/nodes/composite/nodes/node_composite_bilateralblur.cc b/source/blender/nodes/composite/nodes/node_composite_bilateralblur.cc
index 66a321eb088..ac9a6c89aa4 100644
--- a/source/blender/nodes/composite/nodes/node_composite_bilateralblur.cc
+++ b/source/blender/nodes/composite/nodes/node_composite_bilateralblur.cc
@@ -5,10 +5,15 @@
* \ingroup cmpnodes
*/
+#include "BLI_math_base.hh"
+
#include "UI_interface.h"
#include "UI_resources.h"
+#include "GPU_shader.h"
+
#include "COM_node_operation.hh"
+#include "COM_utilities.hh"
#include "node_composite_util.hh"
@@ -16,10 +21,16 @@
namespace blender::nodes::node_composite_bilateralblur_cc {
+NODE_STORAGE_FUNCS(NodeBilateralBlurData)
+
static void cmp_node_bilateralblur_declare(NodeDeclarationBuilder &b)
{
- b.add_input<decl::Color>(N_("Image")).default_value({1.0f, 1.0f, 1.0f, 1.0f});
- b.add_input<decl::Color>(N_("Determinator")).default_value({1.0f, 1.0f, 1.0f, 1.0f});
+ b.add_input<decl::Color>(N_("Image"))
+ .default_value({1.0f, 1.0f, 1.0f, 1.0f})
+ .compositor_domain_priority(0);
+ b.add_input<decl::Color>(N_("Determinator"))
+ .default_value({1.0f, 1.0f, 1.0f, 1.0f})
+ .compositor_domain_priority(1);
b.add_output<decl::Color>(N_("Image"));
}
@@ -52,7 +63,45 @@ class BilateralBlurOperation : public NodeOperation {
void execute() override
{
- get_input("Image").pass_through(get_result("Image"));
+ const Result &input_image = get_input("Image");
+ /* Single value inputs can't be blurred and are returned as is. */
+ if (input_image.is_single_value()) {
+ get_input("Image").pass_through(get_result("Image"));
+ return;
+ }
+
+ GPUShader *shader = shader_manager().get("compositor_bilateral_blur");
+ GPU_shader_bind(shader);
+
+ GPU_shader_uniform_1i(shader, "radius", get_blur_radius());
+ GPU_shader_uniform_1f(shader, "threshold", get_threshold());
+
+ input_image.bind_as_texture(shader, "input_tx");
+
+ const Result &determinator_image = get_input("Determinator");
+ determinator_image.bind_as_texture(shader, "determinator_tx");
+
+ const Domain domain = compute_domain();
+ Result &output_image = get_result("Image");
+ output_image.allocate_texture(domain);
+ output_image.bind_as_image(shader, "output_img");
+
+ compute_dispatch_threads_at_least(shader, domain.size);
+
+ GPU_shader_unbind();
+ output_image.unbind_as_image();
+ input_image.unbind_as_texture();
+ determinator_image.unbind_as_texture();
+ }
+
+ int get_blur_radius()
+ {
+ return math::ceil(node_storage(bnode()).iter + node_storage(bnode()).sigma_space);
+ }
+
+ float get_threshold()
+ {
+ return node_storage(bnode()).sigma_color;
}
};
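
Reading get_blur_radius() and get_threshold() together: the blur radius is the ceiling of the sum of the node's iteration count and space sigma, and the color sigma is forwarded unchanged as the threshold uniform. For hypothetical settings of 2 iterations and a space sigma of 5.0, for example, the shader would be dispatched with a radius of ceil(2 + 5.0) = 7 pixels.
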
diff --git a/source/blender/nodes/composite/nodes/node_composite_blur.cc b/source/blender/nodes/composite/nodes/node_composite_blur.cc
index cb1d93fe10b..630f18361e3 100644
--- a/source/blender/nodes/composite/nodes/node_composite_blur.cc
+++ b/source/blender/nodes/composite/nodes/node_composite_blur.cc
@@ -5,12 +5,27 @@
* \ingroup cmpnodes
*/
+#include <cstdint>
+
+#include "BLI_array.hh"
+#include "BLI_assert.h"
+#include "BLI_index_range.hh"
+#include "BLI_math_base.hh"
+#include "BLI_math_vec_types.hh"
+#include "BLI_math_vector.hh"
+
#include "RNA_access.h"
#include "UI_interface.h"
#include "UI_resources.h"
+#include "RE_pipeline.h"
+
+#include "GPU_state.h"
+#include "GPU_texture.h"
+
#include "COM_node_operation.hh"
+#include "COM_utilities.hh"
#include "node_composite_util.hh"
@@ -18,6 +33,8 @@
namespace blender::nodes::node_composite_blur_cc {
+NODE_STORAGE_FUNCS(NodeBlurData)
+
static void cmp_node_blur_declare(NodeDeclarationBuilder &b)
{
b.add_input<decl::Color>(N_("Image")).default_value({1.0f, 1.0f, 1.0f, 1.0f});
@@ -75,13 +92,395 @@ static void node_composit_buts_blur(uiLayout *layout, bContext *UNUSED(C), Point
using namespace blender::realtime_compositor;
+/* A helper class that computes and caches a 1D GPU texture containing the weights of the separable
+ * filter of the given type and radius. The filter is assumed to be symmetric, because the filter
+ * functions are all even functions. Consequently, only the positive half of the filter is computed
+ * and the shader takes that into consideration. */
+class SymmetricSeparableBlurWeights {
+ private:
+ float radius_ = 1.0f;
+ int type_ = R_FILTER_GAUSS;
+ GPUTexture *texture_ = nullptr;
+
+ public:
+ ~SymmetricSeparableBlurWeights()
+ {
+ if (texture_) {
+ GPU_texture_free(texture_);
+ }
+ }
+
+  /* Check if a texture containing the weights was already computed for the given filter type and
+   * radius. If such a texture exists, do nothing; otherwise, free the already computed texture
+   * and recompute it with the given filter type and radius. */
+ void update(float radius, int type)
+ {
+ if (texture_ && type == type_ && radius == radius_) {
+ return;
+ }
+
+ if (texture_) {
+ GPU_texture_free(texture_);
+ }
+
+    /* The size of the filter is double the radius plus 1, but since the filter is symmetric, we
+     * only compute half of it and no doubling happens. We add 1 to make sure the filter size is
+     * always odd and there is a center weight. */
+ const int size = math::ceil(radius) + 1;
+ Array<float> weights(size);
+
+ float sum = 0.0f;
+
+ /* First, compute the center weight. */
+ const float center_weight = RE_filter_value(type, 0.0f);
+ weights[0] = center_weight;
+ sum += center_weight;
+
+ /* Second, compute the other weights in the positive direction, making sure to add double the
+ * weight to the sum of weights because the filter is symmetric and we only loop over half of
+ * it. Skip the center weight already computed by dropping the front index. */
+ const float scale = radius > 0.0f ? 1.0f / radius : 0.0f;
+ for (const int i : weights.index_range().drop_front(1)) {
+ const float weight = RE_filter_value(type, i * scale);
+ weights[i] = weight;
+ sum += weight * 2.0f;
+ }
+
+ /* Finally, normalize the weights. */
+ for (const int i : weights.index_range()) {
+ weights[i] /= sum;
+ }
+
+ texture_ = GPU_texture_create_1d("Weights", size, 1, GPU_R16F, weights.data());
+
+ type_ = type;
+ radius_ = radius;
+ }
+
+ void bind_as_texture(GPUShader *shader, const char *texture_name)
+ {
+ const int texture_image_unit = GPU_shader_get_texture_binding(shader, texture_name);
+ GPU_texture_bind(texture_, texture_image_unit);
+ }
+
+ void unbind_as_texture()
+ {
+ GPU_texture_unbind(texture_);
+ }
+};
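
To make the half-filter normalization above concrete, here is a small standalone CPU sketch of the same bookkeeping, using a plain Gaussian as a stand-in for RE_filter_value (an assumption made only to keep the sketch self-contained): the center weight is counted once, every other weight twice, and the mirrored kernel then sums to one.

#include <cmath>
#include <cstdio>
#include <vector>

int main()
{
  const float radius = 4.0f;
  const int size = int(std::ceil(radius)) + 1; /* Positive half plus the center. */
  std::vector<float> weights(size);

  /* Stand-in for RE_filter_value(R_FILTER_GAUSS, x). */
  auto gauss = [](float x) { return std::exp(-2.0f * x * x); };

  float sum = weights[0] = gauss(0.0f);
  const float scale = radius > 0.0f ? 1.0f / radius : 0.0f;
  for (int i = 1; i < size; i++) {
    weights[i] = gauss(i * scale);
    sum += weights[i] * 2.0f; /* Counted twice because the filter is mirrored. */
  }
  for (float &weight : weights) {
    weight /= sum;
  }

  /* Mirroring the normalized half back into a full kernel sums to ~1. */
  float full_sum = weights[0];
  for (int i = 1; i < size; i++) {
    full_sum += 2.0f * weights[i];
  }
  std::printf("full kernel sum = %f\n", full_sum);
  return 0;
}
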
+
+/* A helper class that computes and caches a 2D GPU texture containing the weights of the filter of
+ * the given type and radius. The filter is assumed to be symmetric, because the filter functions
+ * are evaluated on the normalized distance to the center. Consequently, only the upper right
+ * quadrant is computed and the shader takes that into consideration. */
+class SymmetricBlurWeights {
+ private:
+ int type_ = R_FILTER_GAUSS;
+ float2 radius_ = float2(1.0f);
+ GPUTexture *texture_ = nullptr;
+
+ public:
+ ~SymmetricBlurWeights()
+ {
+ if (texture_) {
+ GPU_texture_free(texture_);
+ }
+ }
+
+  /* Check if a texture containing the weights was already computed for the given filter type and
+   * radius. If such a texture exists, do nothing; otherwise, free the already computed texture
+   * and recompute it with the given filter type and radius. */
+ void update(float2 radius, int type)
+ {
+ if (texture_ && type == type_ && radius == radius_) {
+ return;
+ }
+
+ if (texture_) {
+ GPU_texture_free(texture_);
+ }
+
+    /* The full size of the filter is double the radius plus 1, but since the filter is symmetric,
+     * we only compute a single quadrant of it and so no doubling happens. We add 1 to make sure
+     * the filter size is always odd and there is a center weight. */
+ const float2 scale = math::safe_divide(float2(1.0f), radius);
+ const int2 size = int2(math::ceil(radius)) + int2(1);
+ Array<float> weights(size.x * size.y);
+
+ float sum = 0.0f;
+
+ /* First, compute the center weight. */
+ const float center_weight = RE_filter_value(type, 0.0f);
+ weights[0] = center_weight;
+ sum += center_weight;
+
+ /* Then, compute the weights along the positive x axis, making sure to add double the weight to
+ * the sum of weights because the filter is symmetric and we only loop over the positive half
+ * of the x axis. Skip the center weight already computed by dropping the front index. */
+ for (const int x : IndexRange(size.x).drop_front(1)) {
+ const float weight = RE_filter_value(type, x * scale.x);
+ weights[x] = weight;
+ sum += weight * 2.0f;
+ }
+
+ /* Then, compute the weights along the positive y axis, making sure to add double the weight to
+ * the sum of weights because the filter is symmetric and we only loop over the positive half
+ * of the y axis. Skip the center weight already computed by dropping the front index. */
+ for (const int y : IndexRange(size.y).drop_front(1)) {
+ const float weight = RE_filter_value(type, y * scale.y);
+ weights[size.x * y] = weight;
+ sum += weight * 2.0f;
+ }
+
+ /* Then, compute the other weights in the upper right quadrant, making sure to add quadruple
+ * the weight to the sum of weights because the filter is symmetric and we only loop over one
+ * quadrant of it. Skip the weights along the y and x axis already computed by dropping the
+ * front index. */
+ for (const int y : IndexRange(size.y).drop_front(1)) {
+ for (const int x : IndexRange(size.x).drop_front(1)) {
+ const float weight = RE_filter_value(type, math::length(float2(x, y) * scale));
+ weights[size.x * y + x] = weight;
+ sum += weight * 4.0f;
+ }
+ }
+
+ /* Finally, normalize the weights. */
+ for (const int y : IndexRange(size.y)) {
+ for (const int x : IndexRange(size.x)) {
+ weights[size.x * y + x] /= sum;
+ }
+ }
+
+ texture_ = GPU_texture_create_2d("Weights", size.x, size.y, 1, GPU_R16F, weights.data());
+
+ type_ = type;
+ radius_ = radius;
+ }
+
+ void bind_as_texture(GPUShader *shader, const char *texture_name)
+ {
+ const int texture_image_unit = GPU_shader_get_texture_binding(shader, texture_name);
+ GPU_texture_bind(texture_, texture_image_unit);
+ }
+
+ void unbind_as_texture()
+ {
+ GPU_texture_unbind(texture_);
+ }
+};
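
For reference, the quadrant-only accounting above normalizes against the full kernel through

  sum = w(0, 0) + 2 * Σ_{x > 0} w(x, 0) + 2 * Σ_{y > 0} w(0, y) + 4 * Σ_{x > 0, y > 0} w(x, y)

since the center weight is unique, each axis weight is mirrored once across the other axis, and each interior weight is mirrored into all four quadrants.
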
+
class BlurOperation : public NodeOperation {
+ private:
+ /* Cached symmetric blur weights. */
+ SymmetricBlurWeights blur_weights_;
+ /* Cached symmetric blur weights for the separable horizontal pass. */
+ SymmetricSeparableBlurWeights blur_horizontal_weights_;
+ /* Cached symmetric blur weights for the separable vertical pass. */
+ SymmetricSeparableBlurWeights blur_vertical_weights_;
+
public:
using NodeOperation::NodeOperation;
void execute() override
{
- get_input("Image").pass_through(get_result("Image"));
+ if (is_identity()) {
+ get_input("Image").pass_through(get_result("Image"));
+ return;
+ }
+
+ if (use_separable_filter()) {
+ GPUTexture *horizontal_pass_result = execute_separable_blur_horizontal_pass();
+ execute_separable_blur_vertical_pass(horizontal_pass_result);
+ }
+ else {
+ execute_blur();
+ }
+ }
+
+ void execute_blur()
+ {
+ GPUShader *shader = shader_manager().get("compositor_symmetric_blur");
+ GPU_shader_bind(shader);
+
+ GPU_shader_uniform_1b(shader, "extend_bounds", get_extend_bounds());
+ GPU_shader_uniform_1b(shader, "gamma_correct", node_storage(bnode()).gamma);
+
+ const Result &input_image = get_input("Image");
+ input_image.bind_as_texture(shader, "input_tx");
+
+ blur_weights_.update(compute_blur_radius(), node_storage(bnode()).filtertype);
+ blur_weights_.bind_as_texture(shader, "weights_tx");
+
+ Domain domain = compute_domain();
+ if (get_extend_bounds()) {
+      /* Add a radius amount of pixels on both sides of the image, hence the multiply by 2. */
+ domain.size += int2(math::ceil(compute_blur_radius())) * 2;
+ }
+
+ Result &output_image = get_result("Image");
+ output_image.allocate_texture(domain);
+ output_image.bind_as_image(shader, "output_img");
+
+ compute_dispatch_threads_at_least(shader, domain.size);
+
+ GPU_shader_unbind();
+ output_image.unbind_as_image();
+ input_image.unbind_as_texture();
+ blur_weights_.unbind_as_texture();
+ }
+
+ GPUTexture *execute_separable_blur_horizontal_pass()
+ {
+ GPUShader *shader = shader_manager().get("compositor_symmetric_separable_blur");
+ GPU_shader_bind(shader);
+
+ GPU_shader_uniform_1b(shader, "extend_bounds", get_extend_bounds());
+ GPU_shader_uniform_1b(shader, "gamma_correct_input", node_storage(bnode()).gamma);
+ GPU_shader_uniform_1b(shader, "gamma_uncorrect_output", false);
+
+ const Result &input_image = get_input("Image");
+ input_image.bind_as_texture(shader, "input_tx");
+
+ blur_horizontal_weights_.update(compute_blur_radius().x, node_storage(bnode()).filtertype);
+ blur_horizontal_weights_.bind_as_texture(shader, "weights_tx");
+
+ Domain domain = compute_domain();
+ if (get_extend_bounds()) {
+ domain.size.x += static_cast<int>(math::ceil(compute_blur_radius().x)) * 2;
+ }
+
+ /* We allocate an output image of a transposed size, that is, with a height equivalent to the
+ * width of the input and vice versa. This is done as a performance optimization. The shader
+ * will blur the image horizontally and write it to the intermediate output transposed. Then
+ * the vertical pass will execute the same horizontal blur shader, but since its input is
+ * transposed, it will effectively do a vertical blur and write to the output transposed,
+ * effectively undoing the transposition in the horizontal pass. This is done to improve
+ * spatial cache locality in the shader and to avoid having two separate shaders for each blur
+ * pass. */
+ const int2 transposed_domain = int2(domain.size.y, domain.size.x);
+
+ GPUTexture *horizontal_pass_result = texture_pool().acquire_color(transposed_domain);
+ const int image_unit = GPU_shader_get_texture_binding(shader, "output_img");
+ GPU_texture_image_bind(horizontal_pass_result, image_unit);
+
+ compute_dispatch_threads_at_least(shader, domain.size);
+
+ GPU_shader_unbind();
+ input_image.unbind_as_texture();
+ blur_horizontal_weights_.unbind_as_texture();
+ GPU_texture_image_unbind(horizontal_pass_result);
+
+ return horizontal_pass_result;
+ }
+
+ void execute_separable_blur_vertical_pass(GPUTexture *horizontal_pass_result)
+ {
+ GPUShader *shader = shader_manager().get("compositor_symmetric_separable_blur");
+ GPU_shader_bind(shader);
+
+ GPU_shader_uniform_1b(shader, "extend_bounds", get_extend_bounds());
+ GPU_shader_uniform_1b(shader, "gamma_correct_input", false);
+ GPU_shader_uniform_1b(shader, "gamma_uncorrect_output", node_storage(bnode()).gamma);
+
+ GPU_memory_barrier(GPU_BARRIER_TEXTURE_FETCH);
+ const int texture_image_unit = GPU_shader_get_texture_binding(shader, "input_tx");
+ GPU_texture_bind(horizontal_pass_result, texture_image_unit);
+
+ blur_vertical_weights_.update(compute_blur_radius().y, node_storage(bnode()).filtertype);
+ blur_vertical_weights_.bind_as_texture(shader, "weights_tx");
+
+ Domain domain = compute_domain();
+ if (get_extend_bounds()) {
+      /* Add a radius amount of pixels on both sides of the image, hence the multiply by 2. */
+ domain.size += int2(math::ceil(compute_blur_radius())) * 2;
+ }
+
+ Result &output_image = get_result("Image");
+ output_image.allocate_texture(domain);
+ output_image.bind_as_image(shader, "output_img");
+
+ /* Notice that the domain is transposed, see the note on the horizontal pass method for more
+ * information on the reasoning behind this. */
+ compute_dispatch_threads_at_least(shader, int2(domain.size.y, domain.size.x));
+
+ GPU_shader_unbind();
+ output_image.unbind_as_image();
+ blur_vertical_weights_.unbind_as_texture();
+ GPU_texture_unbind(horizontal_pass_result);
+ }
+
+ float2 compute_blur_radius()
+ {
+ const float size = math::clamp(get_input("Size").get_float_value_default(1.0f), 0.0f, 1.0f);
+
+ if (!node_storage(bnode()).relative) {
+ return float2(node_storage(bnode()).sizex, node_storage(bnode()).sizey) * size;
+ }
+
+ int2 image_size = get_input("Image").domain().size;
+ switch (node_storage(bnode()).aspect) {
+ case CMP_NODE_BLUR_ASPECT_Y:
+ image_size.y = image_size.x;
+ break;
+ case CMP_NODE_BLUR_ASPECT_X:
+ image_size.x = image_size.y;
+ break;
+ default:
+ BLI_assert(node_storage(bnode()).aspect == CMP_NODE_BLUR_ASPECT_NONE);
+ break;
+ }
+
+ return float2(image_size) * get_size_factor() * size;
+ }
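
As a worked example of the relative branch above: for a 1920x1080 input with both size factors at 10% and the Size input at 1.0, an aspect of None gives a radius of (192, 108), while an aspect of Y measures the y radius against the image width as well and gives (192, 192).
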
+
+ /* Returns true if the operation does nothing and the input can be passed through. */
+ bool is_identity()
+ {
+ const Result &input = get_input("Image");
+ /* Single value inputs can't be blurred and are returned as is. */
+ if (input.is_single_value()) {
+ return true;
+ }
+
+ /* Zero blur radius. The operation does nothing and the input can be passed through. */
+ if (compute_blur_radius() == float2(0.0)) {
+ return true;
+ }
+
+ return false;
+ }
+
+ /* The blur node can operate with different filter types, evaluated on the normalized distance to
+ * the center of the filter. Some of those filters are separable and can be computed as such. If
+ * the bokeh member is disabled in the node, then the filter is always computed as separable even
+ * if it is not in fact separable, in which case, the used filter is a cheaper approximation to
+ * the actual filter. If the bokeh member is enabled, then the filter is computed as separable if
+ * it is in fact separable and as a normal 2D filter otherwise. */
+ bool use_separable_filter()
+ {
+ if (!node_storage(bnode()).bokeh) {
+ return true;
+ }
+
+ /* Both Box and Gaussian filters are separable. The rest is not. */
+ switch (node_storage(bnode()).filtertype) {
+ case R_FILTER_BOX:
+ case R_FILTER_GAUSS:
+ case R_FILTER_FAST_GAUSS:
+ return true;
+ default:
+ return false;
+ }
+ }
+
+ float2 get_size_factor()
+ {
+ return float2(node_storage(bnode()).percentx, node_storage(bnode()).percenty) / 100.0f;
+ }
+
+ bool get_extend_bounds()
+ {
+ return bnode().custom1 & CMP_NODEFLAG_BLUR_EXTEND_BOUNDS;
}
};
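
The transposed-output trick used by both separable passes can be stated independently of the GPU code: a horizontal blur that writes its result transposed, applied twice, is a full separable blur, because the second application sees the first pass's columns as rows and its own transposed write restores the original orientation. Below is a minimal CPU sketch under that assumption, with clamp-to-edge sampling and a hypothetical precomputed symmetric half-kernel.

#include <algorithm>
#include <vector>

/* Blur each row with the symmetric half-kernel and write the blurred pixel at (x, y)
 * to the transposed position (y, x) of the output. */
static std::vector<float> blur_rows_transposed(const std::vector<float> &input,
                                               int width,
                                               int height,
                                               const std::vector<float> &weights)
{
  std::vector<float> output(input.size());
  for (int y = 0; y < height; y++) {
    for (int x = 0; x < width; x++) {
      float sum = weights[0] * input[y * width + x];
      for (int i = 1; i < int(weights.size()); i++) {
        const int left = std::max(x - i, 0);
        const int right = std::min(x + i, width - 1);
        sum += weights[i] * (input[y * width + left] + input[y * width + right]);
      }
      /* Transposed write: output rows are the input's columns. */
      output[x * height + y] = sum;
    }
  }
  return output;
}

/* Running the same routine twice blurs the rows, then the original columns, and ends
 * up back in the original orientation. */
static std::vector<float> separable_blur(const std::vector<float> &image,
                                         int width,
                                         int height,
                                         const std::vector<float> &weights)
{
  const std::vector<float> rows = blur_rows_transposed(image, width, height, weights);
  return blur_rows_transposed(rows, height, width, weights);
}
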
diff --git a/source/blender/nodes/composite/nodes/node_composite_bokehblur.cc b/source/blender/nodes/composite/nodes/node_composite_bokehblur.cc
index 538f00af12d..9c0617ee8c3 100644
--- a/source/blender/nodes/composite/nodes/node_composite_bokehblur.cc
+++ b/source/blender/nodes/composite/nodes/node_composite_bokehblur.cc
@@ -5,10 +5,16 @@
* \ingroup cmpnodes
*/
+#include "BLI_math_base.hh"
+#include "BLI_math_vec_types.hh"
+
#include "UI_interface.h"
#include "UI_resources.h"
+#include "GPU_texture.h"
+
#include "COM_node_operation.hh"
+#include "COM_utilities.hh"
#include "node_composite_util.hh"
@@ -18,10 +24,22 @@ namespace blender::nodes::node_composite_bokehblur_cc {
static void cmp_node_bokehblur_declare(NodeDeclarationBuilder &b)
{
- b.add_input<decl::Color>(N_("Image")).default_value({0.8f, 0.8f, 0.8f, 1.0f});
- b.add_input<decl::Color>(N_("Bokeh")).default_value({1.0f, 1.0f, 1.0f, 1.0f});
- b.add_input<decl::Float>(N_("Size")).default_value(1.0f).min(0.0f).max(10.0f);
- b.add_input<decl::Float>(N_("Bounding box")).default_value(1.0f).min(0.0f).max(1.0f);
+ b.add_input<decl::Color>(N_("Image"))
+ .default_value({0.8f, 0.8f, 0.8f, 1.0f})
+ .compositor_domain_priority(0);
+ b.add_input<decl::Color>(N_("Bokeh"))
+ .default_value({1.0f, 1.0f, 1.0f, 1.0f})
+ .compositor_skip_realization();
+ b.add_input<decl::Float>(N_("Size"))
+ .default_value(1.0f)
+ .min(0.0f)
+ .max(10.0f)
+ .compositor_domain_priority(1);
+ b.add_input<decl::Float>(N_("Bounding box"))
+ .default_value(1.0f)
+ .min(0.0f)
+ .max(1.0f)
+ .compositor_domain_priority(2);
b.add_output<decl::Color>(N_("Image"));
}
@@ -47,7 +65,82 @@ class BokehBlurOperation : public NodeOperation {
void execute() override
{
- get_input("Image").pass_through(get_result("Image"));
+ if (is_identity()) {
+ get_input("Image").pass_through(get_result("Image"));
+ return;
+ }
+
+ GPUShader *shader = shader_manager().get("compositor_blur");
+ GPU_shader_bind(shader);
+
+ GPU_shader_uniform_1i(shader, "radius", compute_blur_radius());
+ GPU_shader_uniform_1b(shader, "extend_bounds", get_extend_bounds());
+
+ const Result &input_image = get_input("Image");
+ input_image.bind_as_texture(shader, "input_tx");
+
+ const Result &input_weights = get_input("Bokeh");
+ input_weights.bind_as_texture(shader, "weights_tx");
+
+ const Result &input_mask = get_input("Bounding box");
+ input_mask.bind_as_texture(shader, "mask_tx");
+
+ Domain domain = compute_domain();
+ if (get_extend_bounds()) {
+      /* Add a radius amount of pixels on both sides of the image, hence the multiply by 2. */
+ domain.size += int2(compute_blur_radius() * 2);
+ }
+
+ Result &output_image = get_result("Image");
+ output_image.allocate_texture(domain);
+ output_image.bind_as_image(shader, "output_img");
+
+ compute_dispatch_threads_at_least(shader, domain.size);
+
+ GPU_shader_unbind();
+ output_image.unbind_as_image();
+ input_image.unbind_as_texture();
+ input_weights.unbind_as_texture();
+ input_mask.unbind_as_texture();
+ }
+
+ int compute_blur_radius()
+ {
+ const int2 image_size = get_input("Image").domain().size;
+ const int max_size = math::max(image_size.x, image_size.y);
+
+ /* The [0, 10] range of the size is arbitrary and is merely in place to avoid very long
+ * computations of the bokeh blur. */
+ const float size = math::clamp(get_input("Size").get_float_value_default(1.0f), 0.0f, 10.0f);
+
+ /* The 100 divisor is arbitrary and was chosen using visual judgment. */
+ return size * (max_size / 100.0f);
+ }
+
+ bool is_identity()
+ {
+ const Result &input = get_input("Image");
+ if (input.is_single_value()) {
+ return true;
+ }
+
+ if (compute_blur_radius() == 0) {
+ return true;
+ }
+
+ /* This input is, in fact, a boolean mask. If it is zero, no blurring will take place.
+ * Otherwise, the blurring will take place ignoring the value of the input entirely. */
+ const Result &bounding_box = get_input("Bounding box");
+ if (bounding_box.is_single_value() && bounding_box.get_float_value() == 0.0) {
+ return true;
+ }
+
+ return false;
+ }
+
+ bool get_extend_bounds()
+ {
+ return bnode().custom1 & CMP_NODEFLAG_BLUR_EXTEND_BOUNDS;
}
};
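
Reading compute_blur_radius() above with concrete numbers: for a 1920x1080 input and the default Size of 1.0, the radius is 1.0 * (1920 / 100) = 19 pixels after truncation to int, and the clamped maximum Size of 10 yields 192 pixels.
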
diff --git a/source/blender/nodes/composite/nodes/node_composite_bokehimage.cc b/source/blender/nodes/composite/nodes/node_composite_bokehimage.cc
index a11cba37191..81cc8990d35 100644
--- a/source/blender/nodes/composite/nodes/node_composite_bokehimage.cc
+++ b/source/blender/nodes/composite/nodes/node_composite_bokehimage.cc
@@ -5,10 +5,16 @@
* \ingroup cmpnodes
*/
+#include "BLI_math_base.h"
+#include "BLI_math_vec_types.hh"
+
#include "UI_interface.h"
#include "UI_resources.h"
+#include "GPU_shader.h"
+
#include "COM_node_operation.hh"
+#include "COM_utilities.hh"
#include "node_composite_util.hh"
@@ -16,6 +22,8 @@
namespace blender::nodes::node_composite_bokehimage_cc {
+NODE_STORAGE_FUNCS(NodeBokehImage)
+
static void cmp_node_bokehimage_declare(NodeDeclarationBuilder &b)
{
b.add_output<decl::Color>(N_("Image"));
@@ -55,7 +63,45 @@ class BokehImageOperation : public NodeOperation {
void execute() override
{
- get_result("Image").allocate_invalid();
+ GPUShader *shader = shader_manager().get("compositor_bokeh_image");
+ GPU_shader_bind(shader);
+
+ GPU_shader_uniform_1f(shader, "exterior_angle", get_exterior_angle());
+ GPU_shader_uniform_1f(shader, "rotation", get_rotation());
+ GPU_shader_uniform_1f(shader, "roundness", node_storage(bnode()).rounding);
+ GPU_shader_uniform_1f(shader, "catadioptric", node_storage(bnode()).catadioptric);
+ GPU_shader_uniform_1f(shader, "lens_shift", node_storage(bnode()).lensshift);
+
+ Result &output = get_result("Image");
+ const Domain domain = compute_domain();
+ output.allocate_texture(domain);
+ output.bind_as_image(shader, "output_img");
+
+ compute_dispatch_threads_at_least(shader, domain.size);
+
+ output.unbind_as_image();
+ GPU_shader_unbind();
+ }
+
+ Domain compute_domain() override
+ {
+ return Domain(int2(512));
+ }
+
+  /* The exterior angle is the angle subtended by two consecutive vertices of the regular polygon
+   * as seen from its center. */
+ float get_exterior_angle()
+ {
+ return (M_PI * 2.0f) / node_storage(bnode()).flaps;
+ }
+
+ float get_rotation()
+ {
+ /* Offset the rotation such that the second vertex of the regular polygon lies on the positive
+ * y axis, which is 90 degrees minus the angle that it makes with the positive x axis assuming
+ * the first vertex lies on the positive x axis. */
+ const float offset = M_PI_2 - get_exterior_angle();
+ return node_storage(bnode()).angle - offset;
}
};
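
Worked through for a hypothetical 6-flap bokeh: the exterior angle is 2*pi/6 = 60 degrees, the offset is 90 - 60 = 30 degrees, and with the node's Angle at zero get_rotation() returns -30 degrees, which is exactly the rotation that puts the polygon's second vertex on the positive y axis.
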
diff --git a/source/blender/nodes/composite/nodes/node_composite_boxmask.cc b/source/blender/nodes/composite/nodes/node_composite_boxmask.cc
index 9c7bb6432cb..3cf0932e1b3 100644
--- a/source/blender/nodes/composite/nodes/node_composite_boxmask.cc
+++ b/source/blender/nodes/composite/nodes/node_composite_boxmask.cc
@@ -23,6 +23,8 @@
namespace blender::nodes::node_composite_boxmask_cc {
+NODE_STORAGE_FUNCS(NodeBoxMask)
+
static void cmp_node_boxmask_declare(NodeDeclarationBuilder &b)
{
b.add_input<decl::Float>(N_("Mask")).default_value(0.0f).min(0.0f).max(1.0f);
@@ -123,24 +125,19 @@ class BoxMaskOperation : public NodeOperation {
}
}
- NodeBoxMask &get_node_box_mask()
- {
- return *static_cast<NodeBoxMask *>(bnode().storage);
- }
-
float2 get_location()
{
- return float2(get_node_box_mask().x, get_node_box_mask().y);
+ return float2(node_storage(bnode()).x, node_storage(bnode()).y);
}
float2 get_size()
{
- return float2(get_node_box_mask().width, get_node_box_mask().height);
+ return float2(node_storage(bnode()).width, node_storage(bnode()).height);
}
float get_angle()
{
- return get_node_box_mask().rotation;
+ return node_storage(bnode()).rotation;
}
};
diff --git a/source/blender/nodes/composite/nodes/node_composite_channel_matte.cc b/source/blender/nodes/composite/nodes/node_composite_channel_matte.cc
index 018632f776c..3b825017da8 100644
--- a/source/blender/nodes/composite/nodes/node_composite_channel_matte.cc
+++ b/source/blender/nodes/composite/nodes/node_composite_channel_matte.cc
@@ -20,6 +20,8 @@
namespace blender::nodes::node_composite_channel_matte_cc {
+NODE_STORAGE_FUNCS(NodeChroma)
+
static void cmp_node_channel_matte_declare(NodeDeclarationBuilder &b)
{
b.add_input<decl::Color>(N_("Image"))
@@ -130,15 +132,10 @@ class ChannelMatteShaderNode : public ShaderNode {
return bnode().custom2 - 1;
}
- NodeChroma *get_node_chroma()
- {
- return static_cast<NodeChroma *>(bnode().storage);
- }
-
/* Get the index of the channel used to compute the limit value. */
int get_limit_channel()
{
- return get_node_chroma()->channel - 1;
+ return node_storage(bnode()).channel - 1;
}
/* Get the indices of the channels used to compute the limit value. We always assume the limit
@@ -146,7 +143,7 @@ class ChannelMatteShaderNode : public ShaderNode {
* the maximum of two identical values is the same value. */
void get_limit_channels(float limit_channels[2])
{
- if (get_node_chroma()->algorithm == CMP_NODE_CHANNEL_MATTE_LIMIT_ALGORITHM_MAX) {
+ if (node_storage(bnode()).algorithm == CMP_NODE_CHANNEL_MATTE_LIMIT_ALGORITHM_MAX) {
/* If the algorithm is Max, store the indices of the other two channels other than the matte
* channel. */
limit_channels[0] = (get_matte_channel() + 1) % 3;
@@ -161,12 +158,12 @@ class ChannelMatteShaderNode : public ShaderNode {
float get_max_limit()
{
- return get_node_chroma()->t1;
+ return node_storage(bnode()).t1;
}
float get_min_limit()
{
- return get_node_chroma()->t2;
+ return node_storage(bnode()).t2;
}
};
diff --git a/source/blender/nodes/composite/nodes/node_composite_chroma_matte.cc b/source/blender/nodes/composite/nodes/node_composite_chroma_matte.cc
index cb3648c5680..e5ce87169d4 100644
--- a/source/blender/nodes/composite/nodes/node_composite_chroma_matte.cc
+++ b/source/blender/nodes/composite/nodes/node_composite_chroma_matte.cc
@@ -22,6 +22,8 @@
namespace blender::nodes::node_composite_chroma_matte_cc {
+NODE_STORAGE_FUNCS(NodeChroma)
+
static void cmp_node_chroma_matte_declare(NodeDeclarationBuilder &b)
{
b.add_input<decl::Color>(N_("Image"))
@@ -86,24 +88,19 @@ class ChromaMatteShaderNode : public ShaderNode {
GPU_uniform(&falloff));
}
- NodeChroma *get_node_chroma()
- {
- return static_cast<NodeChroma *>(bnode().storage);
- }
-
float get_acceptance()
{
- return std::tan(get_node_chroma()->t1) / 2.0f;
+ return std::tan(node_storage(bnode()).t1) / 2.0f;
}
float get_cutoff()
{
- return get_node_chroma()->t2;
+ return node_storage(bnode()).t2;
}
float get_falloff()
{
- return get_node_chroma()->fstrength;
+ return node_storage(bnode()).fstrength;
}
};
diff --git a/source/blender/nodes/composite/nodes/node_composite_color_matte.cc b/source/blender/nodes/composite/nodes/node_composite_color_matte.cc
index 5e3aaf512e6..08329601f14 100644
--- a/source/blender/nodes/composite/nodes/node_composite_color_matte.cc
+++ b/source/blender/nodes/composite/nodes/node_composite_color_matte.cc
@@ -18,6 +18,8 @@
namespace blender::nodes::node_composite_color_matte_cc {
+NODE_STORAGE_FUNCS(NodeChroma)
+
static void cmp_node_color_matte_declare(NodeDeclarationBuilder &b)
{
b.add_input<decl::Color>(N_("Image"))
@@ -83,25 +85,20 @@ class ColorMatteShaderNode : public ShaderNode {
GPU_uniform(&value_epsilon));
}
- NodeChroma *get_node_chroma()
- {
- return static_cast<NodeChroma *>(bnode().storage);
- }
-
float get_hue_epsilon()
{
/* Divide by 2 because the hue wraps around. */
- return get_node_chroma()->t1 / 2.0f;
+ return node_storage(bnode()).t1 / 2.0f;
}
float get_saturation_epsilon()
{
- return get_node_chroma()->t2;
+ return node_storage(bnode()).t2;
}
float get_value_epsilon()
{
- return get_node_chroma()->t3;
+ return node_storage(bnode()).t3;
}
};
diff --git a/source/blender/nodes/composite/nodes/node_composite_color_spill.cc b/source/blender/nodes/composite/nodes/node_composite_color_spill.cc
index 9744c01a256..29401d7b20f 100644
--- a/source/blender/nodes/composite/nodes/node_composite_color_spill.cc
+++ b/source/blender/nodes/composite/nodes/node_composite_color_spill.cc
@@ -20,6 +20,8 @@
namespace blender::nodes::node_composite_color_spill_cc {
+NODE_STORAGE_FUNCS(NodeColorspill)
+
static void cmp_node_color_spill_declare(NodeDeclarationBuilder &b)
{
b.add_input<decl::Color>(N_("Image"))
@@ -131,18 +133,13 @@ class ColorSpillShaderNode : public ShaderNode {
return (CMPNodeColorSpillLimitAlgorithm)bnode().custom2;
}
- NodeColorspill *get_node_color_spill()
- {
- return static_cast<NodeColorspill *>(bnode().storage);
- }
-
void get_spill_scale(float spill_scale[3])
{
- const NodeColorspill *node_color_spill = get_node_color_spill();
- if (node_color_spill->unspill) {
- spill_scale[0] = node_color_spill->uspillr;
- spill_scale[1] = node_color_spill->uspillg;
- spill_scale[2] = node_color_spill->uspillb;
+ const NodeColorspill &node_color_spill = node_storage(bnode());
+ if (node_color_spill.unspill) {
+ spill_scale[0] = node_color_spill.uspillr;
+ spill_scale[1] = node_color_spill.uspillg;
+ spill_scale[2] = node_color_spill.uspillb;
spill_scale[get_spill_channel()] *= -1.0f;
}
else {
@@ -156,7 +153,7 @@ class ColorSpillShaderNode : public ShaderNode {
/* Get the index of the channel used for limiting. */
int get_limit_channel()
{
- return get_node_color_spill()->limchan;
+ return node_storage(bnode()).limchan;
}
/* Get the indices of the channels used to compute the limit value. We always assume the limit
@@ -179,7 +176,7 @@ class ColorSpillShaderNode : public ShaderNode {
float get_limit_scale()
{
- return get_node_color_spill()->limscale;
+ return node_storage(bnode()).limscale;
}
};
diff --git a/source/blender/nodes/composite/nodes/node_composite_colorbalance.cc b/source/blender/nodes/composite/nodes/node_composite_colorbalance.cc
index 95675169c76..e05fbf00a25 100644
--- a/source/blender/nodes/composite/nodes/node_composite_colorbalance.cc
+++ b/source/blender/nodes/composite/nodes/node_composite_colorbalance.cc
@@ -48,6 +48,8 @@ void ntreeCompositColorBalanceSyncFromCDL(bNodeTree *UNUSED(ntree), bNode *node)
namespace blender::nodes::node_composite_colorbalance_cc {
+NODE_STORAGE_FUNCS(NodeColorBalance)
+
static void cmp_node_colorbalance_declare(NodeDeclarationBuilder &b)
{
b.add_input<decl::Float>(N_("Fac"))
@@ -161,7 +163,7 @@ class ColorBalanceShaderNode : public ShaderNode {
GPUNodeStack *inputs = get_inputs_array();
GPUNodeStack *outputs = get_outputs_array();
- const NodeColorBalance *node_color_balance = get_node_color_balance();
+ const NodeColorBalance &node_color_balance = node_storage(bnode());
if (get_color_balance_method() == CMP_NODE_COLOR_BALANCE_LGG) {
GPU_stack_link(material,
@@ -169,9 +171,9 @@ class ColorBalanceShaderNode : public ShaderNode {
"node_composite_color_balance_lgg",
inputs,
outputs,
- GPU_uniform(node_color_balance->lift),
- GPU_uniform(node_color_balance->gamma),
- GPU_uniform(node_color_balance->gain));
+ GPU_uniform(node_color_balance.lift),
+ GPU_uniform(node_color_balance.gamma),
+ GPU_uniform(node_color_balance.gain));
return;
}
@@ -180,21 +182,16 @@ class ColorBalanceShaderNode : public ShaderNode {
"node_composite_color_balance_asc_cdl",
inputs,
outputs,
- GPU_uniform(node_color_balance->offset),
- GPU_uniform(node_color_balance->power),
- GPU_uniform(node_color_balance->slope),
- GPU_uniform(&node_color_balance->offset_basis));
+ GPU_uniform(node_color_balance.offset),
+ GPU_uniform(node_color_balance.power),
+ GPU_uniform(node_color_balance.slope),
+ GPU_uniform(&node_color_balance.offset_basis));
}
CMPNodeColorBalanceMethod get_color_balance_method()
{
return (CMPNodeColorBalanceMethod)bnode().custom1;
}
-
- NodeColorBalance *get_node_color_balance()
- {
- return static_cast<NodeColorBalance *>(bnode().storage);
- }
};
static ShaderNode *get_compositor_shader_node(DNode node)
diff --git a/source/blender/nodes/composite/nodes/node_composite_colorcorrection.cc b/source/blender/nodes/composite/nodes/node_composite_colorcorrection.cc
index 36e6672ce1c..92b10fc1877 100644
--- a/source/blender/nodes/composite/nodes/node_composite_colorcorrection.cc
+++ b/source/blender/nodes/composite/nodes/node_composite_colorcorrection.cc
@@ -20,6 +20,8 @@
namespace blender::nodes::node_composite_colorcorrection_cc {
+NODE_STORAGE_FUNCS(NodeColorCorrection)
+
static void cmp_node_colorcorrection_declare(NodeDeclarationBuilder &b)
{
b.add_input<decl::Color>(N_("Image"))
@@ -294,7 +296,7 @@ class ColorCorrectionShaderNode : public ShaderNode {
float luminance_coefficients[3];
IMB_colormanagement_get_luminance_coefficients(luminance_coefficients);
- const NodeColorCorrection *node_color_correction = get_node_color_correction();
+ const NodeColorCorrection &node_color_correction = node_storage(bnode());
GPU_stack_link(material,
&bnode(),
@@ -302,28 +304,28 @@ class ColorCorrectionShaderNode : public ShaderNode {
inputs,
outputs,
GPU_constant(enabled_channels),
- GPU_uniform(&node_color_correction->startmidtones),
- GPU_uniform(&node_color_correction->endmidtones),
- GPU_uniform(&node_color_correction->master.saturation),
- GPU_uniform(&node_color_correction->master.contrast),
- GPU_uniform(&node_color_correction->master.gamma),
- GPU_uniform(&node_color_correction->master.gain),
- GPU_uniform(&node_color_correction->master.lift),
- GPU_uniform(&node_color_correction->shadows.saturation),
- GPU_uniform(&node_color_correction->shadows.contrast),
- GPU_uniform(&node_color_correction->shadows.gamma),
- GPU_uniform(&node_color_correction->shadows.gain),
- GPU_uniform(&node_color_correction->shadows.lift),
- GPU_uniform(&node_color_correction->midtones.saturation),
- GPU_uniform(&node_color_correction->midtones.contrast),
- GPU_uniform(&node_color_correction->midtones.gamma),
- GPU_uniform(&node_color_correction->midtones.gain),
- GPU_uniform(&node_color_correction->midtones.lift),
- GPU_uniform(&node_color_correction->highlights.saturation),
- GPU_uniform(&node_color_correction->highlights.contrast),
- GPU_uniform(&node_color_correction->highlights.gamma),
- GPU_uniform(&node_color_correction->highlights.gain),
- GPU_uniform(&node_color_correction->highlights.lift),
+ GPU_uniform(&node_color_correction.startmidtones),
+ GPU_uniform(&node_color_correction.endmidtones),
+ GPU_uniform(&node_color_correction.master.saturation),
+ GPU_uniform(&node_color_correction.master.contrast),
+ GPU_uniform(&node_color_correction.master.gamma),
+ GPU_uniform(&node_color_correction.master.gain),
+ GPU_uniform(&node_color_correction.master.lift),
+ GPU_uniform(&node_color_correction.shadows.saturation),
+ GPU_uniform(&node_color_correction.shadows.contrast),
+ GPU_uniform(&node_color_correction.shadows.gamma),
+ GPU_uniform(&node_color_correction.shadows.gain),
+ GPU_uniform(&node_color_correction.shadows.lift),
+ GPU_uniform(&node_color_correction.midtones.saturation),
+ GPU_uniform(&node_color_correction.midtones.contrast),
+ GPU_uniform(&node_color_correction.midtones.gamma),
+ GPU_uniform(&node_color_correction.midtones.gain),
+ GPU_uniform(&node_color_correction.midtones.lift),
+ GPU_uniform(&node_color_correction.highlights.saturation),
+ GPU_uniform(&node_color_correction.highlights.contrast),
+ GPU_uniform(&node_color_correction.highlights.gamma),
+ GPU_uniform(&node_color_correction.highlights.gain),
+ GPU_uniform(&node_color_correction.highlights.lift),
GPU_constant(luminance_coefficients));
}
@@ -333,11 +335,6 @@ class ColorCorrectionShaderNode : public ShaderNode {
enabled_channels[i] = (bnode().custom1 & (1 << i)) ? 1.0f : 0.0f;
}
}
-
- NodeColorCorrection *get_node_color_correction()
- {
- return static_cast<NodeColorCorrection *>(bnode().storage);
- }
};
static ShaderNode *get_compositor_shader_node(DNode node)
diff --git a/source/blender/nodes/composite/nodes/node_composite_crop.cc b/source/blender/nodes/composite/nodes/node_composite_crop.cc
index d7331732fc7..13d02a707be 100644
--- a/source/blender/nodes/composite/nodes/node_composite_crop.cc
+++ b/source/blender/nodes/composite/nodes/node_composite_crop.cc
@@ -27,6 +27,8 @@
namespace blender::nodes::node_composite_crop_cc {
+NODE_STORAGE_FUNCS(NodeTwoXYs)
+
static void cmp_node_crop_declare(NodeDeclarationBuilder &b)
{
b.add_input<decl::Color>(N_("Image"))
@@ -163,11 +165,6 @@ class CropOperation : public NodeOperation {
return bnode().custom2;
}
- NodeTwoXYs &get_node_two_xys()
- {
- return *static_cast<NodeTwoXYs *>(bnode().storage);
- }
-
/* Returns true if the operation does nothing and the input can be passed through. */
bool is_identity()
{
@@ -190,7 +187,7 @@ class CropOperation : public NodeOperation {
void compute_cropping_bounds(int2 &lower_bound, int2 &upper_bound)
{
- const NodeTwoXYs &node_two_xys = get_node_two_xys();
+ const NodeTwoXYs &node_two_xys = node_storage(bnode());
const int2 input_size = get_input("Image").domain().size;
if (get_is_relative()) {
diff --git a/source/blender/nodes/composite/nodes/node_composite_curves.cc b/source/blender/nodes/composite/nodes/node_composite_curves.cc
index c5d303c576a..bf45e219730 100644
--- a/source/blender/nodes/composite/nodes/node_composite_curves.cc
+++ b/source/blender/nodes/composite/nodes/node_composite_curves.cc
@@ -47,15 +47,15 @@ class TimeCurveOperation : public NodeOperation {
Result &result = get_result("Fac");
result.allocate_single_value();
- CurveMapping *curve_mapping = get_curve_mapping();
+ CurveMapping *curve_mapping = const_cast<CurveMapping *>(get_curve_mapping());
BKE_curvemapping_init(curve_mapping);
const float time = BKE_curvemapping_evaluateF(curve_mapping, 0, compute_normalized_time());
result.set_float_value(clamp_f(time, 0.0f, 1.0f));
}
- CurveMapping *get_curve_mapping()
+ const CurveMapping *get_curve_mapping()
{
- return static_cast<CurveMapping *>(bnode().storage);
+ return static_cast<const CurveMapping *>(bnode().storage);
}
int get_start_time()
@@ -143,7 +143,7 @@ class VectorCurvesShaderNode : public ShaderNode {
GPUNodeStack *inputs = get_inputs_array();
GPUNodeStack *outputs = get_outputs_array();
- CurveMapping *curve_mapping = get_curve_mapping();
+ CurveMapping *curve_mapping = const_cast<CurveMapping *>(get_curve_mapping());
BKE_curvemapping_init(curve_mapping);
float *band_values;
@@ -173,9 +173,9 @@ class VectorCurvesShaderNode : public ShaderNode {
GPU_uniform(end_slopes));
}
- CurveMapping *get_curve_mapping()
+ const CurveMapping *get_curve_mapping()
{
- return static_cast<CurveMapping *>(bnode().storage);
+ return static_cast<const CurveMapping *>(bnode().storage);
}
};
@@ -239,7 +239,7 @@ class RGBCurvesShaderNode : public ShaderNode {
GPUNodeStack *inputs = get_inputs_array();
GPUNodeStack *outputs = get_outputs_array();
- CurveMapping *curve_mapping = get_curve_mapping();
+ CurveMapping *curve_mapping = const_cast<CurveMapping *>(get_curve_mapping());
BKE_curvemapping_init(curve_mapping);
float *band_values;
@@ -311,9 +311,9 @@ class RGBCurvesShaderNode : public ShaderNode {
GPU_uniform(end_slopes));
}
- CurveMapping *get_curve_mapping()
+ const CurveMapping *get_curve_mapping()
{
- return static_cast<CurveMapping *>(bnode().storage);
+ return static_cast<const CurveMapping *>(bnode().storage);
}
};
diff --git a/source/blender/nodes/composite/nodes/node_composite_despeckle.cc b/source/blender/nodes/composite/nodes/node_composite_despeckle.cc
index 0b9f9c8f76d..aa6725b8750 100644
--- a/source/blender/nodes/composite/nodes/node_composite_despeckle.cc
+++ b/source/blender/nodes/composite/nodes/node_composite_despeckle.cc
@@ -8,7 +8,10 @@
#include "UI_interface.h"
#include "UI_resources.h"
+#include "GPU_shader.h"
+
#include "COM_node_operation.hh"
+#include "COM_utilities.hh"
#include "node_composite_util.hh"
@@ -18,8 +21,15 @@ namespace blender::nodes::node_composite_despeckle_cc {
static void cmp_node_despeckle_declare(NodeDeclarationBuilder &b)
{
- b.add_input<decl::Float>(N_("Fac")).default_value(1.0f).min(0.0f).max(1.0f).subtype(PROP_FACTOR);
- b.add_input<decl::Color>(N_("Image")).default_value({1.0f, 1.0f, 1.0f, 1.0f});
+ b.add_input<decl::Float>(N_("Fac"))
+ .default_value(1.0f)
+ .min(0.0f)
+ .max(1.0f)
+ .subtype(PROP_FACTOR)
+ .compositor_domain_priority(1);
+ b.add_input<decl::Color>(N_("Image"))
+ .default_value({1.0f, 1.0f, 1.0f, 1.0f})
+ .compositor_domain_priority(0);
b.add_output<decl::Color>(N_("Image"));
}
@@ -46,7 +56,45 @@ class DespeckleOperation : public NodeOperation {
void execute() override
{
- get_input("Image").pass_through(get_result("Image"));
+ const Result &input_image = get_input("Image");
+ /* Single value inputs can't be despeckled and are returned as is. */
+ if (input_image.is_single_value()) {
+ get_input("Image").pass_through(get_result("Image"));
+ return;
+ }
+
+ GPUShader *shader = shader_manager().get("compositor_despeckle");
+ GPU_shader_bind(shader);
+
+ GPU_shader_uniform_1f(shader, "threshold", get_threshold());
+ GPU_shader_uniform_1f(shader, "neighbor_threshold", get_neighbor_threshold());
+
+ input_image.bind_as_texture(shader, "input_tx");
+
+ const Result &factor_image = get_input("Fac");
+ factor_image.bind_as_texture(shader, "factor_tx");
+
+ const Domain domain = compute_domain();
+ Result &output_image = get_result("Image");
+ output_image.allocate_texture(domain);
+ output_image.bind_as_image(shader, "output_img");
+
+ compute_dispatch_threads_at_least(shader, domain.size);
+
+ GPU_shader_unbind();
+ output_image.unbind_as_image();
+ input_image.unbind_as_texture();
+ factor_image.unbind_as_texture();
+ }
+
+ float get_threshold()
+ {
+ return bnode().custom3;
+ }
+
+ float get_neighbor_threshold()
+ {
+ return bnode().custom4;
}
};
diff --git a/source/blender/nodes/composite/nodes/node_composite_diff_matte.cc b/source/blender/nodes/composite/nodes/node_composite_diff_matte.cc
index e129dcaa6ef..8912d00a9be 100644
--- a/source/blender/nodes/composite/nodes/node_composite_diff_matte.cc
+++ b/source/blender/nodes/composite/nodes/node_composite_diff_matte.cc
@@ -18,6 +18,8 @@
namespace blender::nodes::node_composite_diff_matte_cc {
+NODE_STORAGE_FUNCS(NodeChroma)
+
static void cmp_node_diff_matte_declare(NodeDeclarationBuilder &b)
{
b.add_input<decl::Color>(N_("Image 1"))
@@ -71,19 +73,14 @@ class DifferenceMatteShaderNode : public ShaderNode {
GPU_uniform(&falloff));
}
- NodeChroma *get_node_chroma()
- {
- return static_cast<NodeChroma *>(bnode().storage);
- }
-
float get_tolerance()
{
- return get_node_chroma()->t1;
+ return node_storage(bnode()).t1;
}
float get_falloff()
{
- return get_node_chroma()->t2;
+ return node_storage(bnode()).t2;
}
};
diff --git a/source/blender/nodes/composite/nodes/node_composite_dilate.cc b/source/blender/nodes/composite/nodes/node_composite_dilate.cc
index 46199d3ff04..551dfacb276 100644
--- a/source/blender/nodes/composite/nodes/node_composite_dilate.cc
+++ b/source/blender/nodes/composite/nodes/node_composite_dilate.cc
@@ -5,12 +5,27 @@
* \ingroup cmpnodes
*/
+#include <cmath>
+
+#include "BLI_array.hh"
+#include "BLI_assert.h"
+#include "BLI_math_base.hh"
+
+#include "DNA_scene_types.h"
+
#include "RNA_access.h"
#include "UI_interface.h"
#include "UI_resources.h"
+#include "RE_pipeline.h"
+
+#include "GPU_shader.h"
+#include "GPU_state.h"
+#include "GPU_texture.h"
+
#include "COM_node_operation.hh"
+#include "COM_utilities.hh"
#include "node_composite_util.hh"
@@ -18,6 +33,8 @@
namespace blender::nodes::node_composite_dilate_cc {
+NODE_STORAGE_FUNCS(NodeDilateErode)
+
static void cmp_node_dilate_declare(NodeDeclarationBuilder &b)
{
b.add_input<decl::Float>(N_("Mask")).default_value(0.0f).min(0.0f).max(1.0f);
@@ -36,10 +53,10 @@ static void node_composit_buts_dilateerode(uiLayout *layout, bContext *UNUSED(C)
uiItemR(layout, ptr, "mode", UI_ITEM_R_SPLIT_EMPTY_NAME, nullptr, ICON_NONE);
uiItemR(layout, ptr, "distance", UI_ITEM_R_SPLIT_EMPTY_NAME, nullptr, ICON_NONE);
switch (RNA_enum_get(ptr, "mode")) {
- case CMP_NODE_DILATEERODE_DISTANCE_THRESH:
+ case CMP_NODE_DILATE_ERODE_DISTANCE_THRESHOLD:
uiItemR(layout, ptr, "edge", UI_ITEM_R_SPLIT_EMPTY_NAME, nullptr, ICON_NONE);
break;
- case CMP_NODE_DILATEERODE_DISTANCE_FEATHER:
+ case CMP_NODE_DILATE_ERODE_DISTANCE_FEATHER:
uiItemR(layout, ptr, "falloff", UI_ITEM_R_SPLIT_EMPTY_NAME, nullptr, ICON_NONE);
break;
}
@@ -47,13 +64,458 @@ static void node_composit_buts_dilateerode(uiLayout *layout, bContext *UNUSED(C)
using namespace blender::realtime_compositor;
+/* Computes a falloff that is equal to 1 at an input of zero and decreases to zero at an input of
+ * 1, with the rate of decrease depending on the falloff type. */
+static float compute_distance_falloff(float x, int falloff_type)
+{
+ x = 1.0f - x;
+
+ switch (falloff_type) {
+ case PROP_SMOOTH:
+ return 3.0f * x * x - 2.0f * x * x * x;
+ case PROP_SPHERE:
+ return std::sqrt(2.0f * x - x * x);
+ case PROP_ROOT:
+ return std::sqrt(x);
+ case PROP_SHARP:
+ return x * x;
+ case PROP_INVSQUARE:
+ return x * (2.0f - x);
+ case PROP_LIN:
+ return x;
+ default:
+ BLI_assert_unreachable();
+ return x;
+ }
+}
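
As a sanity check of the branches above: after the x = 1 - x flip, every falloff returns 1.0 at an input of 0 and 0.0 at an input of 1; at the midpoint (input 0.5) they evaluate to 0.5 for Smooth and Linear, roughly 0.87 for Sphere, roughly 0.71 for Root, 0.75 for Inverse Square and 0.25 for Sharp, which is what gives each type its characteristic softness.
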
+
+/* A helper class that computes and caches 1D GPU textures containing the weights of the separable
+ * Gaussian filter of the given radius as well as an inverse distance falloff of the given type and
+ * radius. The weights and falloffs are symmetric, because the Gaussian and falloff functions are
+ * all even functions. Consequently, only the positive half of the filter is computed and the
+ * shader takes that into consideration. */
+class SymmetricSeparableMorphologicalDistanceFeatherWeights {
+ private:
+ int radius_ = 1;
+ int falloff_type_ = PROP_SMOOTH;
+ GPUTexture *weights_texture_ = nullptr;
+ GPUTexture *distance_falloffs_texture_ = nullptr;
+
+ public:
+ ~SymmetricSeparableMorphologicalDistanceFeatherWeights()
+ {
+ if (weights_texture_) {
+ GPU_texture_free(weights_texture_);
+ }
+
+ if (distance_falloffs_texture_) {
+ GPU_texture_free(distance_falloffs_texture_);
+ }
+ }
+
+  /* Check if textures containing the weights and distance falloffs were already computed for the
+   * given distance falloff type and radius. If such textures exist, do nothing; otherwise, free
+   * the already computed textures and recompute them with the given distance falloff type and
+   * radius. */
+ void update(int radius, int falloff_type)
+ {
+ if (weights_texture_ && distance_falloffs_texture_ && falloff_type == falloff_type_ &&
+ radius == radius_) {
+ return;
+ }
+
+ radius_ = radius;
+ falloff_type_ = falloff_type;
+
+ compute_weights();
+ compute_distance_falloffs();
+ }
+
+ void compute_weights()
+ {
+ if (weights_texture_) {
+ GPU_texture_free(weights_texture_);
+ }
+
+    /* The size of the filter is double the radius plus 1, but since the filter is symmetric, we
+     * only compute half of it and no doubling happens. We add 1 to make sure the filter size is
+     * always odd and there is a center weight. */
+ const int size = radius_ + 1;
+ Array<float> weights(size);
+
+ float sum = 0.0f;
+
+ /* First, compute the center weight. */
+ const float center_weight = RE_filter_value(R_FILTER_GAUSS, 0.0f);
+ weights[0] = center_weight;
+ sum += center_weight;
+
+ /* Second, compute the other weights in the positive direction, making sure to add double the
+ * weight to the sum of weights because the filter is symmetric and we only loop over half of
+ * it. Skip the center weight already computed by dropping the front index. */
+ const float scale = radius_ > 0.0f ? 1.0f / radius_ : 0.0f;
+ for (const int i : weights.index_range().drop_front(1)) {
+ const float weight = RE_filter_value(R_FILTER_GAUSS, i * scale);
+ weights[i] = weight;
+ sum += weight * 2.0f;
+ }
+
+ /* Finally, normalize the weights. */
+ for (const int i : weights.index_range()) {
+ weights[i] /= sum;
+ }
+
+ weights_texture_ = GPU_texture_create_1d("Weights", size, 1, GPU_R16F, weights.data());
+ }
+
+ void compute_distance_falloffs()
+ {
+ if (distance_falloffs_texture_) {
+ GPU_texture_free(distance_falloffs_texture_);
+ }
+
+ /* The size of the distance falloffs is double the radius plus 1, but since the falloffs are
+ * symmetric, we only compute half of them and no doubling happens. We add 1 to make sure the
+     * falloff count is always odd and there is a center falloff. */
+ const int size = radius_ + 1;
+ Array<float> falloffs(size);
+
+ /* Compute the distance falloffs in the positive direction only, because the falloffs are
+ * symmetric. */
+ const float scale = radius_ > 0.0f ? 1.0f / radius_ : 0.0f;
+ for (const int i : falloffs.index_range()) {
+ falloffs[i] = compute_distance_falloff(i * scale, falloff_type_);
+ }
+
+ distance_falloffs_texture_ = GPU_texture_create_1d(
+ "Distance Factors", size, 1, GPU_R16F, falloffs.data());
+ }
+
+ void bind_weights_as_texture(GPUShader *shader, const char *texture_name)
+ {
+ const int texture_image_unit = GPU_shader_get_texture_binding(shader, texture_name);
+ GPU_texture_bind(weights_texture_, texture_image_unit);
+ }
+
+ void unbind_weights_as_texture()
+ {
+ GPU_texture_unbind(weights_texture_);
+ }
+
+ void bind_distance_falloffs_as_texture(GPUShader *shader, const char *texture_name)
+ {
+ const int texture_image_unit = GPU_shader_get_texture_binding(shader, texture_name);
+ GPU_texture_bind(distance_falloffs_texture_, texture_image_unit);
+ }
+
+ void unbind_distance_falloffs_as_texture()
+ {
+ GPU_texture_unbind(distance_falloffs_texture_);
+ }
+};
+
class DilateErodeOperation : public NodeOperation {
+ private:
+  /* Cached symmetric blur weights and distance falloffs for the distance feather method. */
+ SymmetricSeparableMorphologicalDistanceFeatherWeights distance_feather_weights_;
+
public:
using NodeOperation::NodeOperation;
void execute() override
{
- get_input("Mask").pass_through(get_result("Mask"));
+ if (is_identity()) {
+ get_input("Mask").pass_through(get_result("Mask"));
+ return;
+ }
+
+ switch (get_method()) {
+ case CMP_NODE_DILATE_ERODE_STEP:
+ execute_step();
+ return;
+ case CMP_NODE_DILATE_ERODE_DISTANCE:
+ execute_distance();
+ return;
+ case CMP_NODE_DILATE_ERODE_DISTANCE_THRESHOLD:
+ execute_distance_threshold();
+ return;
+ case CMP_NODE_DILATE_ERODE_DISTANCE_FEATHER:
+ execute_distance_feather();
+ return;
+ default:
+ BLI_assert_unreachable();
+ return;
+ }
+ }
+
+ /* ----------------------------
+ * Step Morphological Operator.
+ * ---------------------------- */
+
+ void execute_step()
+ {
+ GPUTexture *horizontal_pass_result = execute_step_horizontal_pass();
+ execute_step_vertical_pass(horizontal_pass_result);
+ }
+
+ GPUTexture *execute_step_horizontal_pass()
+ {
+ GPUShader *shader = shader_manager().get(get_morphological_step_shader_name());
+ GPU_shader_bind(shader);
+
+ /* Pass the absolute value of the distance. We have specialized shaders for each sign. */
+ GPU_shader_uniform_1i(shader, "radius", math::abs(get_distance()));
+
+ const Result &input_mask = get_input("Mask");
+ input_mask.bind_as_texture(shader, "input_tx");
+
+ /* We allocate an output image of a transposed size, that is, with a height equivalent to the
+ * width of the input and vice versa. This is done as a performance optimization. The shader
+ * will process the image horizontally and write it to the intermediate output transposed. Then
+ * the vertical pass will execute the same horizontal pass shader, but since its input is
+ * transposed, it will effectively do a vertical pass and write to the output transposed,
+ * effectively undoing the transposition in the horizontal pass. This is done to improve
+ * spatial cache locality in the shader and to avoid having two separate shaders for each of
+ * the passes. */
+ const Domain domain = compute_domain();
+ const int2 transposed_domain = int2(domain.size.y, domain.size.x);
+
+ GPUTexture *horizontal_pass_result = texture_pool().acquire_color(transposed_domain);
+ const int image_unit = GPU_shader_get_texture_binding(shader, "output_img");
+ GPU_texture_image_bind(horizontal_pass_result, image_unit);
+
+ compute_dispatch_threads_at_least(shader, domain.size);
+
+ GPU_shader_unbind();
+ input_mask.unbind_as_texture();
+ GPU_texture_image_unbind(horizontal_pass_result);
+
+ return horizontal_pass_result;
+ }
+
+ void execute_step_vertical_pass(GPUTexture *horizontal_pass_result)
+ {
+ GPUShader *shader = shader_manager().get(get_morphological_step_shader_name());
+ GPU_shader_bind(shader);
+
+ /* Pass the absolute value of the distance. We have specialized shaders for each sign. */
+ GPU_shader_uniform_1i(shader, "radius", math::abs(get_distance()));
+
+ GPU_memory_barrier(GPU_BARRIER_TEXTURE_FETCH);
+ const int texture_image_unit = GPU_shader_get_texture_binding(shader, "input_tx");
+ GPU_texture_bind(horizontal_pass_result, texture_image_unit);
+
+ const Domain domain = compute_domain();
+ Result &output_mask = get_result("Mask");
+ output_mask.allocate_texture(domain);
+ output_mask.bind_as_image(shader, "output_img");
+
+ /* Notice that the domain is transposed, see the note on the horizontal pass method for more
+ * information on the reasoning behind this. */
+ compute_dispatch_threads_at_least(shader, int2(domain.size.y, domain.size.x));
+
+ GPU_shader_unbind();
+ output_mask.unbind_as_image();
+ GPU_texture_unbind(horizontal_pass_result);
+ }
+
+ const char *get_morphological_step_shader_name()
+ {
+ if (get_distance() > 0) {
+ return "compositor_morphological_step_dilate";
+ }
+ return "compositor_morphological_step_erode";
+ }
+
+ /* --------------------------------
+ * Distance Morphological Operator.
+ * -------------------------------- */
+
+ void execute_distance()
+ {
+ GPUShader *shader = shader_manager().get(get_morphological_distance_shader_name());
+ GPU_shader_bind(shader);
+
+ /* Pass the absolute value of the distance. We have specialized shaders for each sign. */
+ GPU_shader_uniform_1i(shader, "radius", math::abs(get_distance()));
+
+ const Result &input_mask = get_input("Mask");
+ input_mask.bind_as_texture(shader, "input_tx");
+
+ const Domain domain = compute_domain();
+ Result &output_mask = get_result("Mask");
+ output_mask.allocate_texture(domain);
+ output_mask.bind_as_image(shader, "output_img");
+
+ compute_dispatch_threads_at_least(shader, domain.size);
+
+ GPU_shader_unbind();
+ output_mask.unbind_as_image();
+ input_mask.unbind_as_texture();
+ }
+
+ const char *get_morphological_distance_shader_name()
+ {
+ if (get_distance() > 0) {
+ return "compositor_morphological_distance_dilate";
+ }
+ return "compositor_morphological_distance_erode";
+ }
+
+ /* ------------------------------------------
+ * Distance Threshold Morphological Operator.
+ * ------------------------------------------ */
+
+ void execute_distance_threshold()
+ {
+ GPUShader *shader = shader_manager().get("compositor_morphological_distance_threshold");
+ GPU_shader_bind(shader);
+
+ GPU_shader_uniform_1f(shader, "inset", get_inset());
+ GPU_shader_uniform_1i(shader, "radius", get_morphological_distance_threshold_radius());
+ GPU_shader_uniform_1i(shader, "distance", get_distance());
+
+ const Result &input_mask = get_input("Mask");
+ input_mask.bind_as_texture(shader, "input_tx");
+
+ const Domain domain = compute_domain();
+ Result &output_mask = get_result("Mask");
+ output_mask.allocate_texture(domain);
+ output_mask.bind_as_image(shader, "output_img");
+
+ compute_dispatch_threads_at_least(shader, domain.size);
+
+ GPU_shader_unbind();
+ output_mask.unbind_as_image();
+ input_mask.unbind_as_texture();
+ }
+
+ /* See the discussion in the implementation for more information. */
+ int get_morphological_distance_threshold_radius()
+ {
+ return static_cast<int>(math::ceil(get_inset())) + math::abs(get_distance());
+ }
+
+ /* ----------------------------------------
+ * Distance Feather Morphological Operator.
+ * ---------------------------------------- */
+
+ void execute_distance_feather()
+ {
+ GPUTexture *horizontal_pass_result = execute_distance_feather_horizontal_pass();
+ execute_distance_feather_vertical_pass(horizontal_pass_result);
+ }
+
+ GPUTexture *execute_distance_feather_horizontal_pass()
+ {
+ GPUShader *shader = shader_manager().get(get_morphological_distance_feather_shader_name());
+ GPU_shader_bind(shader);
+
+ const Result &input_image = get_input("Mask");
+ input_image.bind_as_texture(shader, "input_tx");
+
+ distance_feather_weights_.update(math::abs(get_distance()), node_storage(bnode()).falloff);
+ distance_feather_weights_.bind_weights_as_texture(shader, "weights_tx");
+ distance_feather_weights_.bind_distance_falloffs_as_texture(shader, "falloffs_tx");
+
+ /* We allocate an output image of a transposed size, that is, with a height equivalent to the
+ * width of the input and vice versa. This is done as a performance optimization. The shader
+ * will process the image horizontally and write it to the intermediate output transposed. Then
+ * the vertical pass will execute the same horizontal pass shader, but since its input is
+ * transposed, it will effectively do a vertical pass and write to the output transposed,
+     * undoing the transposition done in the horizontal pass. This is done to improve spatial
+     * cache locality in the shader and to avoid writing a separate shader for each of the two
+     * passes. */
+ const Domain domain = compute_domain();
+ const int2 transposed_domain = int2(domain.size.y, domain.size.x);
+
+ GPUTexture *horizontal_pass_result = texture_pool().acquire_color(transposed_domain);
+ const int image_unit = GPU_shader_get_texture_binding(shader, "output_img");
+ GPU_texture_image_bind(horizontal_pass_result, image_unit);
+
+ compute_dispatch_threads_at_least(shader, domain.size);
+
+ GPU_shader_unbind();
+ input_image.unbind_as_texture();
+ distance_feather_weights_.unbind_weights_as_texture();
+ distance_feather_weights_.unbind_distance_falloffs_as_texture();
+ GPU_texture_image_unbind(horizontal_pass_result);
+
+ return horizontal_pass_result;
+ }
+
+ void execute_distance_feather_vertical_pass(GPUTexture *horizontal_pass_result)
+ {
+ GPUShader *shader = shader_manager().get(get_morphological_distance_feather_shader_name());
+ GPU_shader_bind(shader);
+
+ GPU_memory_barrier(GPU_BARRIER_TEXTURE_FETCH);
+ const int texture_image_unit = GPU_shader_get_texture_binding(shader, "input_tx");
+ GPU_texture_bind(horizontal_pass_result, texture_image_unit);
+
+ distance_feather_weights_.update(math::abs(get_distance()), node_storage(bnode()).falloff);
+ distance_feather_weights_.bind_weights_as_texture(shader, "weights_tx");
+ distance_feather_weights_.bind_distance_falloffs_as_texture(shader, "falloffs_tx");
+
+ const Domain domain = compute_domain();
+ Result &output_image = get_result("Mask");
+ output_image.allocate_texture(domain);
+ output_image.bind_as_image(shader, "output_img");
+
+ /* Notice that the domain is transposed, see the note on the horizontal pass method for more
+ * information on the reasoning behind this. */
+ compute_dispatch_threads_at_least(shader, int2(domain.size.y, domain.size.x));
+
+ GPU_shader_unbind();
+ output_image.unbind_as_image();
+ distance_feather_weights_.unbind_weights_as_texture();
+ distance_feather_weights_.unbind_distance_falloffs_as_texture();
+ GPU_texture_unbind(horizontal_pass_result);
+ }
+
+ const char *get_morphological_distance_feather_shader_name()
+ {
+ if (get_distance() > 0) {
+ return "compositor_morphological_distance_feather_dilate";
+ }
+ return "compositor_morphological_distance_feather_erode";
+ }
+
+ /* ---------------
+ * Common Methods.
+ * --------------- */
+
+ bool is_identity()
+ {
+ const Result &input = get_input("Mask");
+ if (input.is_single_value()) {
+ return true;
+ }
+
+ if (get_method() == CMP_NODE_DILATE_ERODE_DISTANCE_THRESHOLD && get_inset() != 0.0f) {
+ return false;
+ }
+
+ if (get_distance() == 0) {
+ return true;
+ }
+
+ return false;
+ }
+
+ int get_distance()
+ {
+ return bnode().custom2;
+ }
+
+ float get_inset()
+ {
+ return bnode().custom3;
+ }
+
+ CMPNodeDilateErodeMethod get_method()
+ {
+ return (CMPNodeDilateErodeMethod)bnode().custom1;
}
};
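
Editor's note (not part of the patch): the comments above describe the transposed two-pass scheme
shared by the step and distance-feather operators. Below is a minimal CPU sketch of that idea; the
Image type, the box average standing in for the real morphological and feather kernels, and the
function names are illustrative only.

#include <algorithm>
#include <vector>

struct Image {
  int width = 0, height = 0;
  std::vector<float> pixels; /* Row-major, width * height. */
  float &at(int x, int y) { return pixels[y * width + x]; }
  float at(int x, int y) const { return pixels[y * width + x]; }
};

/* Filter the rows of `input` and write the result transposed: output(x, y) = filtered(y, x). */
static Image horizontal_pass_transposed(const Image &input, const int radius)
{
  Image output{input.height, input.width, std::vector<float>(input.pixels.size(), 0.0f)};
  for (int y = 0; y < input.height; y++) {
    for (int x = 0; x < input.width; x++) {
      float sum = 0.0f;
      for (int i = -radius; i <= radius; i++) {
        sum += input.at(std::clamp(x + i, 0, input.width - 1), y);
      }
      output.at(y, x) = sum / (2 * radius + 1); /* Note the swapped coordinates. */
    }
  }
  return output;
}

static Image separable_filter(const Image &input, const int radius)
{
  /* Two identical passes: the second one undoes the transposition of the first, so a single
   * routine (or shader) covers both axes. */
  return horizontal_pass_transposed(horizontal_pass_transposed(input, radius), radius);
}
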
diff --git a/source/blender/nodes/composite/nodes/node_composite_directionalblur.cc b/source/blender/nodes/composite/nodes/node_composite_directionalblur.cc
index eacba5ad12d..6e6bec70283 100644
--- a/source/blender/nodes/composite/nodes/node_composite_directionalblur.cc
+++ b/source/blender/nodes/composite/nodes/node_composite_directionalblur.cc
@@ -5,18 +5,30 @@
* \ingroup cmpnodes
*/
+#include "BLI_float3x3.hh"
+#include "BLI_math_base.hh"
+#include "BLI_math_vec_types.hh"
+#include "BLI_math_vector.hh"
+
#include "UI_interface.h"
#include "UI_resources.h"
+#include "GPU_shader.h"
+
#include "COM_node_operation.hh"
+#include "COM_utilities.hh"
#include "node_composite_util.hh"
namespace blender::nodes::node_composite_directionalblur_cc {
+NODE_STORAGE_FUNCS(NodeDBlurData)
+
static void cmp_node_directional_blur_declare(NodeDeclarationBuilder &b)
{
- b.add_input<decl::Color>(N_("Image")).default_value({1.0f, 1.0f, 1.0f, 1.0f});
+ b.add_input<decl::Color>(N_("Image"))
+ .default_value({1.0f, 1.0f, 1.0f, 1.0f})
+ .compositor_domain_priority(0);
b.add_output<decl::Color>(N_("Image"));
}
@@ -61,7 +73,113 @@ class DirectionalBlurOperation : public NodeOperation {
void execute() override
{
- get_input("Image").pass_through(get_result("Image"));
+ if (is_identity()) {
+ get_input("Image").pass_through(get_result("Image"));
+ return;
+ }
+
+ GPUShader *shader = shader_manager().get("compositor_directional_blur");
+ GPU_shader_bind(shader);
+
+ /* The number of iterations does not cover the original image, that is, the image with no
+     * transformation. So add an extra iteration for the original image and take that into
+     * account in the shader. */
+ GPU_shader_uniform_1i(shader, "iterations", get_iterations() + 1);
+ GPU_shader_uniform_mat3_as_mat4(shader, "inverse_transformation", get_transformation().ptr());
+
+ const Result &input_image = get_input("Image");
+ input_image.bind_as_texture(shader, "input_tx");
+
+ GPU_texture_filter_mode(input_image.texture(), true);
+ GPU_texture_wrap_mode(input_image.texture(), false, false);
+
+ const Domain domain = compute_domain();
+ Result &output_image = get_result("Image");
+ output_image.allocate_texture(domain);
+ output_image.bind_as_image(shader, "output_img");
+
+ compute_dispatch_threads_at_least(shader, domain.size);
+
+ GPU_shader_unbind();
+ output_image.unbind_as_image();
+ input_image.unbind_as_texture();
+ }
+
+ /* Get the amount of translation that will be applied on each iteration. The translation is in
+   * the negative x direction rotated in the clockwise direction, hence the negative sign for
+   * both the rotation and the translation vector. */
+ float2 get_translation()
+ {
+ const float diagonal_length = math::length(float2(get_input("Image").domain().size));
+ const float translation_amount = diagonal_length * node_storage(bnode()).distance;
+ const float3x3 rotation = float3x3::from_rotation(-node_storage(bnode()).angle);
+ return rotation * float2(-translation_amount / get_iterations(), 0.0f);
+ }
+
+ /* Get the amount of rotation that will be applied on each iteration. */
+ float get_rotation()
+ {
+ return node_storage(bnode()).spin / get_iterations();
+ }
+
+ /* Get the amount of scale that will be applied on each iteration. The scale is identity when the
+ * user supplies 0, so we add 1. */
+ float2 get_scale()
+ {
+ return float2(1.0f + node_storage(bnode()).zoom / get_iterations());
+ }
+
+ float2 get_origin()
+ {
+ const float2 center = float2(node_storage(bnode()).center_x, node_storage(bnode()).center_y);
+ return float2(get_input("Image").domain().size) * center;
+ }
+
+ float3x3 get_transformation()
+ {
+ /* Construct the transformation that will be applied on each iteration. */
+ const float3x3 transformation = float3x3::from_translation_rotation_scale(
+ get_translation(), get_rotation(), get_scale());
+ /* Change the origin of the transformation to the user-specified origin. */
+ const float3x3 origin_transformation = float3x3::from_origin_transformation(transformation,
+ get_origin());
+ /* The shader will transform the coordinates, not the image itself, so take the inverse. */
+ return origin_transformation.inverted();
+ }
+
+  /* The actual number of iterations is 2 to the power of the user-supplied iterations. The power
+   * is implemented using a bit shift, but also make sure it doesn't exceed the upper limit, which
+   * is the number of pixels along the diagonal. */
+ int get_iterations()
+ {
+ const int iterations = 2 << (node_storage(bnode()).iter - 1);
+ const int upper_limit = math::ceil(math::length(float2(get_input("Image").domain().size)));
+ return math::min(iterations, upper_limit);
+ }
+
+ /* Returns true if the operation does nothing and the input can be passed through. */
+ bool is_identity()
+ {
+ const Result &input = get_input("Image");
+ /* Single value inputs can't be blurred and are returned as is. */
+ if (input.is_single_value()) {
+ return true;
+ }
+
+ /* If any of the following options are non-zero, then the operation is not an identity. */
+ if (node_storage(bnode()).distance != 0.0f) {
+ return false;
+ }
+
+ if (node_storage(bnode()).spin != 0.0f) {
+ return false;
+ }
+
+ if (node_storage(bnode()).zoom != 0.0f) {
+ return false;
+ }
+
+ return true;
}
};
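
Editor's note (not part of the patch): a small sketch of the iteration math described above,
assuming the node's iteration count is at least 1. The helper names and the PerIterationTransform
struct are illustrative; the real operation packs these values into a float3x3 and inverts it for
the shader.

#include <algorithm>
#include <cmath>

static int directional_blur_iteration_count(const int user_iterations,
                                            const int width,
                                            const int height)
{
  /* 2 << (n - 1) == 2^n, computed with a bit shift (assumes user_iterations >= 1). */
  const int iterations = 2 << (user_iterations - 1);
  /* Clamp to the number of pixels along the diagonal; more samples add no visible detail. */
  const int upper_limit = int(std::ceil(std::hypot(float(width), float(height))));
  return std::min(iterations, upper_limit);
}

struct PerIterationTransform {
  float rotation;    /* Spin divided across the iterations. */
  float scale;       /* Zoom divided across the iterations, around identity (1). */
  float translation; /* Offset along the blur direction per iteration, before rotation. */
};

/* The user-specified spin, zoom and distance are divided across the iterations so that the last
 * accumulated sample lands on the full transformation. The distance is a fraction of the image
 * diagonal, mirroring get_translation() above. */
static PerIterationTransform per_iteration_transform(const float spin,
                                                     const float zoom,
                                                     const float distance,
                                                     const float diagonal,
                                                     const int iterations)
{
  return {spin / iterations, 1.0f + zoom / iterations, -distance * diagonal / iterations};
}
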
diff --git a/source/blender/nodes/composite/nodes/node_composite_distance_matte.cc b/source/blender/nodes/composite/nodes/node_composite_distance_matte.cc
index 9d910b3f409..6a786571f43 100644
--- a/source/blender/nodes/composite/nodes/node_composite_distance_matte.cc
+++ b/source/blender/nodes/composite/nodes/node_composite_distance_matte.cc
@@ -18,6 +18,8 @@
namespace blender::nodes::node_composite_distance_matte_cc {
+NODE_STORAGE_FUNCS(NodeChroma)
+
static void cmp_node_distance_matte_declare(NodeDeclarationBuilder &b)
{
b.add_input<decl::Color>(N_("Image"))
@@ -90,24 +92,19 @@ class DistanceMatteShaderNode : public ShaderNode {
GPU_uniform(&falloff));
}
- NodeChroma *get_node_chroma()
- {
- return static_cast<NodeChroma *>(bnode().storage);
- }
-
CMPNodeDistanceMatteColorSpace get_color_space()
{
- return (CMPNodeDistanceMatteColorSpace)get_node_chroma()->channel;
+ return (CMPNodeDistanceMatteColorSpace)node_storage(bnode()).channel;
}
float get_tolerance()
{
- return get_node_chroma()->t1;
+ return node_storage(bnode()).t1;
}
float get_falloff()
{
- return get_node_chroma()->t2;
+ return node_storage(bnode()).t2;
}
};
diff --git a/source/blender/nodes/composite/nodes/node_composite_ellipsemask.cc b/source/blender/nodes/composite/nodes/node_composite_ellipsemask.cc
index 54dfa00eadd..7c031b354e5 100644
--- a/source/blender/nodes/composite/nodes/node_composite_ellipsemask.cc
+++ b/source/blender/nodes/composite/nodes/node_composite_ellipsemask.cc
@@ -23,6 +23,8 @@
namespace blender::nodes::node_composite_ellipsemask_cc {
+NODE_STORAGE_FUNCS(NodeEllipseMask)
+
static void cmp_node_ellipsemask_declare(NodeDeclarationBuilder &b)
{
b.add_input<decl::Float>(N_("Mask")).default_value(0.0f).min(0.0f).max(1.0f);
@@ -121,24 +123,19 @@ class EllipseMaskOperation : public NodeOperation {
}
}
- NodeEllipseMask &get_node_ellipse_mask()
- {
- return *static_cast<NodeEllipseMask *>(bnode().storage);
- }
-
float2 get_location()
{
- return float2(get_node_ellipse_mask().x, get_node_ellipse_mask().y);
+ return float2(node_storage(bnode()).x, node_storage(bnode()).y);
}
float2 get_size()
{
- return float2(get_node_ellipse_mask().width, get_node_ellipse_mask().height);
+ return float2(node_storage(bnode()).width, node_storage(bnode()).height);
}
float get_angle()
{
- return get_node_ellipse_mask().rotation;
+ return node_storage(bnode()).rotation;
}
};
diff --git a/source/blender/nodes/composite/nodes/node_composite_filter.cc b/source/blender/nodes/composite/nodes/node_composite_filter.cc
index 854cf684806..bd7b443e17e 100644
--- a/source/blender/nodes/composite/nodes/node_composite_filter.cc
+++ b/source/blender/nodes/composite/nodes/node_composite_filter.cc
@@ -5,10 +5,13 @@
* \ingroup cmpnodes
*/
+#include "BLI_float3x3.hh"
+
#include "UI_interface.h"
#include "UI_resources.h"
#include "COM_node_operation.hh"
+#include "COM_utilities.hh"
#include "node_composite_util.hh"
@@ -18,8 +21,15 @@ namespace blender::nodes::node_composite_filter_cc {
static void cmp_node_filter_declare(NodeDeclarationBuilder &b)
{
- b.add_input<decl::Float>(N_("Fac")).default_value(1.0f).min(0.0f).max(1.0f).subtype(PROP_FACTOR);
- b.add_input<decl::Color>(N_("Image")).default_value({1.0f, 1.0f, 1.0f, 1.0f});
+ b.add_input<decl::Float>(N_("Fac"))
+ .default_value(1.0f)
+ .min(0.0f)
+ .max(1.0f)
+ .subtype(PROP_FACTOR)
+ .compositor_domain_priority(1);
+ b.add_input<decl::Color>(N_("Image"))
+ .default_value({1.0f, 1.0f, 1.0f, 1.0f})
+ .compositor_domain_priority(0);
b.add_output<decl::Color>(N_("Image"));
}
@@ -36,7 +46,103 @@ class FilterOperation : public NodeOperation {
void execute() override
{
- get_input("Image").pass_through(get_result("Image"));
+ GPUShader *shader = shader_manager().get(get_shader_name());
+ GPU_shader_bind(shader);
+
+ GPU_shader_uniform_mat3_as_mat4(shader, "kernel", get_filter_kernel().ptr());
+
+ const Result &input_image = get_input("Image");
+ input_image.bind_as_texture(shader, "input_tx");
+
+ const Result &factor = get_input("Fac");
+ factor.bind_as_texture(shader, "factor_tx");
+
+ const Domain domain = compute_domain();
+
+ Result &output_image = get_result("Image");
+ output_image.allocate_texture(domain);
+ output_image.bind_as_image(shader, "output_img");
+
+ compute_dispatch_threads_at_least(shader, domain.size);
+
+ input_image.unbind_as_texture();
+ factor.unbind_as_texture();
+ output_image.unbind_as_image();
+ GPU_shader_unbind();
+ }
+
+ CMPNodeFilterMethod get_filter_method()
+ {
+ return (CMPNodeFilterMethod)bnode().custom1;
+ }
+
+ float3x3 get_filter_kernel()
+ {
+ /* Initialize the kernels as arrays of rows with the top row first. Edge detection kernels
+ * return the kernel in the X direction, while the kernel in the Y direction will be computed
+ * inside the shader by transposing the kernel in the X direction. */
+ switch (get_filter_method()) {
+ case CMP_NODE_FILTER_SOFT: {
+ const float kernel[3][3] = {{1.0f / 16.0f, 2.0f / 16.0f, 1.0f / 16.0f},
+ {2.0f / 16.0f, 4.0f / 16.0f, 2.0f / 16.0f},
+ {1.0f / 16.0f, 2.0f / 16.0f, 1.0f / 16.0f}};
+ return float3x3(kernel);
+ }
+ case CMP_NODE_FILTER_SHARP_BOX: {
+ const float kernel[3][3] = {
+ {-1.0f, -1.0f, -1.0f}, {-1.0f, 9.0f, -1.0f}, {-1.0f, -1.0f, -1.0f}};
+ return float3x3(kernel);
+ }
+ case CMP_NODE_FILTER_LAPLACE: {
+ const float kernel[3][3] = {{-1.0f / 8.0f, -1.0f / 8.0f, -1.0f / 8.0f},
+ {-1.0f / 8.0f, 1.0f, -1.0f / 8.0f},
+ {-1.0f / 8.0f, -1.0f / 8.0f, -1.0f / 8.0f}};
+ return float3x3(kernel);
+ }
+ case CMP_NODE_FILTER_SOBEL: {
+ const float kernel[3][3] = {{1.0f, 0.0f, -1.0f}, {2.0f, 0.0f, -2.0f}, {1.0f, 0.0f, -1.0f}};
+ return float3x3(kernel);
+ }
+ case CMP_NODE_FILTER_PREWITT: {
+ const float kernel[3][3] = {{1.0f, 0.0f, -1.0f}, {1.0f, 0.0f, -1.0f}, {1.0f, 0.0f, -1.0f}};
+ return float3x3(kernel);
+ }
+ case CMP_NODE_FILTER_KIRSCH: {
+ const float kernel[3][3] = {
+ {5.0f, -3.0f, -2.0f}, {5.0f, -3.0f, -2.0f}, {5.0f, -3.0f, -2.0f}};
+ return float3x3(kernel);
+ }
+ case CMP_NODE_FILTER_SHADOW: {
+ const float kernel[3][3] = {{1.0f, 2.0f, 1.0f}, {0.0f, 1.0f, 0.0f}, {-1.0f, -2.0f, -1.0f}};
+ return float3x3(kernel);
+ }
+ case CMP_NODE_FILTER_SHARP_DIAMOND: {
+ const float kernel[3][3] = {
+ {0.0f, -1.0f, 0.0f}, {-1.0f, 5.0f, -1.0f}, {0.0f, -1.0f, 0.0f}};
+ return float3x3(kernel);
+ }
+ default: {
+ const float kernel[3][3] = {{0.0f, 0.0f, 0.0f}, {0.0f, 1.0f, 0.0f}, {0.0f, 0.0f, 0.0f}};
+ return float3x3(kernel);
+ }
+ }
+ }
+
+ const char *get_shader_name()
+ {
+ switch (get_filter_method()) {
+ case CMP_NODE_FILTER_LAPLACE:
+ case CMP_NODE_FILTER_SOBEL:
+ case CMP_NODE_FILTER_PREWITT:
+ case CMP_NODE_FILTER_KIRSCH:
+ return "compositor_edge_filter";
+ case CMP_NODE_FILTER_SOFT:
+ case CMP_NODE_FILTER_SHARP_BOX:
+ case CMP_NODE_FILTER_SHADOW:
+ case CMP_NODE_FILTER_SHARP_DIAMOND:
+ default:
+ return "compositor_filter";
+ }
}
};
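
Editor's note (not part of the patch): a CPU reference for what the two shader variants are
expected to do with the 3x3 kernel, based on the comments above. The mix with the Fac input and
the gradient-magnitude form of the edge filter are assumptions about the shader code, which is
not part of this diff.

#include <cmath>

static float convolve_3x3(const float neighborhood[3][3], const float kernel[3][3])
{
  float sum = 0.0f;
  for (int j = 0; j < 3; j++) {
    for (int i = 0; i < 3; i++) {
      sum += neighborhood[j][i] * kernel[j][i];
    }
  }
  return sum;
}

/* Regular filter: convolve and blend with the center value by the Fac input. */
static float filter_pixel(const float neighborhood[3][3],
                          const float kernel[3][3],
                          const float factor)
{
  const float center = neighborhood[1][1];
  return center + factor * (convolve_3x3(neighborhood, kernel) - center);
}

/* Edge filter: the Y kernel is the transpose of the X kernel, and the result is the magnitude of
 * the two responses. */
static float edge_filter_pixel(const float neighborhood[3][3], const float kernel_x[3][3])
{
  float kernel_y[3][3];
  for (int j = 0; j < 3; j++) {
    for (int i = 0; i < 3; i++) {
      kernel_y[j][i] = kernel_x[i][j];
    }
  }
  return std::hypot(convolve_3x3(neighborhood, kernel_x), convolve_3x3(neighborhood, kernel_y));
}
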
diff --git a/source/blender/nodes/composite/nodes/node_composite_huecorrect.cc b/source/blender/nodes/composite/nodes/node_composite_huecorrect.cc
index a84420231aa..6333860a19b 100644
--- a/source/blender/nodes/composite/nodes/node_composite_huecorrect.cc
+++ b/source/blender/nodes/composite/nodes/node_composite_huecorrect.cc
@@ -59,7 +59,7 @@ class HueCorrectShaderNode : public ShaderNode {
GPUNodeStack *inputs = get_inputs_array();
GPUNodeStack *outputs = get_outputs_array();
- CurveMapping *curve_mapping = get_curve_mapping();
+ CurveMapping *curve_mapping = const_cast<CurveMapping *>(get_curve_mapping());
BKE_curvemapping_init(curve_mapping);
float *band_values;
@@ -84,9 +84,9 @@ class HueCorrectShaderNode : public ShaderNode {
GPU_uniform(range_dividers));
}
- CurveMapping *get_curve_mapping()
+ const CurveMapping *get_curve_mapping()
{
- return static_cast<CurveMapping *>(bnode().storage);
+ return static_cast<const CurveMapping *>(bnode().storage);
}
};
diff --git a/source/blender/nodes/composite/nodes/node_composite_image.cc b/source/blender/nodes/composite/nodes/node_composite_image.cc
index d8852e9333f..4d1eff0b940 100644
--- a/source/blender/nodes/composite/nodes/node_composite_image.cc
+++ b/source/blender/nodes/composite/nodes/node_composite_image.cc
@@ -457,8 +457,8 @@ class ImageOperation : public NodeOperation {
update_image_frame_number();
- for (const OutputSocketRef *output : node()->outputs()) {
- compute_output(output->identifier());
+ for (const bNodeSocket *output : this->node()->output_sockets()) {
+ compute_output(output->identifier);
}
}
@@ -488,12 +488,12 @@ class ImageOperation : public NodeOperation {
/* Allocate all needed outputs as invalid. This should be called when is_valid returns false. */
void allocate_invalid()
{
- for (const OutputSocketRef *output : node()->outputs()) {
- if (!should_compute_output(output->identifier())) {
+ for (const bNodeSocket *output : this->node()->output_sockets()) {
+ if (!should_compute_output(output->identifier)) {
continue;
}
- Result &result = get_result(output->identifier());
+ Result &result = get_result(output->identifier);
result.allocate_invalid();
}
}
@@ -535,7 +535,7 @@ class ImageOperation : public NodeOperation {
/* Get a copy of the image user that is appropriate to retrieve the image buffer for the output
* with the given identifier. This essentially sets the appropriate pass and view indices that
- * corresponds to the output. */
+   * correspond to the output. */
ImageUser compute_image_user_for_output(StringRef identifier)
{
ImageUser image_user = *get_image_user();
@@ -594,7 +594,7 @@ class ImageOperation : public NodeOperation {
const char *get_pass_name(StringRef identifier)
{
DOutputSocket output = node().output_by_identifier(identifier);
- return static_cast<NodeImageLayer *>(output->bsocket()->storage)->pass_name;
+ return static_cast<NodeImageLayer *>(output->storage)->pass_name;
}
/* Get the index of the pass with the given name in the selected render layer's passes list
@@ -850,9 +850,9 @@ class RenderLayerOperation : public NodeOperation {
alpha_result.unbind_as_image();
/* Other output passes are not supported for now, so allocate them as invalid. */
- for (const OutputSocketRef *output : node()->outputs()) {
- if (output->identifier() != "Image" && output->identifier() != "Alpha") {
- get_result(output->identifier()).allocate_invalid();
+ for (const bNodeSocket *output : this->node()->output_sockets()) {
+ if (!STREQ(output->identifier, "Image") && !STREQ(output->identifier, "Alpha")) {
+ get_result(output->identifier).allocate_invalid();
}
}
}
diff --git a/source/blender/nodes/composite/nodes/node_composite_lensdist.cc b/source/blender/nodes/composite/nodes/node_composite_lensdist.cc
index 2d4c0afcda7..260fccf66d0 100644
--- a/source/blender/nodes/composite/nodes/node_composite_lensdist.cc
+++ b/source/blender/nodes/composite/nodes/node_composite_lensdist.cc
@@ -32,6 +32,8 @@
namespace blender::nodes::node_composite_lensdist_cc {
+NODE_STORAGE_FUNCS(NodeLensDist)
+
static void cmp_node_lensdist_declare(NodeDeclarationBuilder &b)
{
b.add_input<decl::Color>(N_("Image"))
@@ -197,22 +199,17 @@ class LensDistortionOperation : public NodeOperation {
bool get_is_projector()
{
- return get_node_lens_distortion().proj;
+ return node_storage(bnode()).proj;
}
bool get_is_jitter()
{
- return get_node_lens_distortion().jit;
+ return node_storage(bnode()).jit;
}
bool get_is_fit()
{
- return get_node_lens_distortion().fit;
- }
-
- NodeLensDist &get_node_lens_distortion()
- {
- return *static_cast<NodeLensDist *>(bnode().storage);
+ return node_storage(bnode()).fit;
}
/* Returns true if the operation does nothing and the input can be passed through. */
diff --git a/source/blender/nodes/composite/nodes/node_composite_luma_matte.cc b/source/blender/nodes/composite/nodes/node_composite_luma_matte.cc
index 092a12a7ea4..59ae62ec411 100644
--- a/source/blender/nodes/composite/nodes/node_composite_luma_matte.cc
+++ b/source/blender/nodes/composite/nodes/node_composite_luma_matte.cc
@@ -20,6 +20,8 @@
namespace blender::nodes::node_composite_luma_matte_cc {
+NODE_STORAGE_FUNCS(NodeChroma)
+
static void cmp_node_luma_matte_declare(NodeDeclarationBuilder &b)
{
b.add_input<decl::Color>(N_("Image"))
@@ -74,19 +76,14 @@ class LuminanceMatteShaderNode : public ShaderNode {
GPU_constant(luminance_coefficients));
}
- NodeChroma *get_node_chroma()
- {
- return static_cast<NodeChroma *>(bnode().storage);
- }
-
float get_high()
{
- return get_node_chroma()->t1;
+ return node_storage(bnode()).t1;
}
float get_low()
{
- return get_node_chroma()->t2;
+ return node_storage(bnode()).t2;
}
};
diff --git a/source/blender/nodes/composite/nodes/node_composite_map_value.cc b/source/blender/nodes/composite/nodes/node_composite_map_value.cc
index ec9b2d56636..e30de39605d 100644
--- a/source/blender/nodes/composite/nodes/node_composite_map_value.cc
+++ b/source/blender/nodes/composite/nodes/node_composite_map_value.cc
@@ -22,6 +22,8 @@
namespace blender::nodes::node_composite_map_value_cc {
+NODE_STORAGE_FUNCS(TexMapping)
+
static void cmp_node_map_value_declare(NodeDeclarationBuilder &b)
{
b.add_input<decl::Float>(N_("Value"))
@@ -69,7 +71,7 @@ class MapValueShaderNode : public ShaderNode {
GPUNodeStack *inputs = get_inputs_array();
GPUNodeStack *outputs = get_outputs_array();
- const TexMapping *texture_mapping = get_texture_mapping();
+ const TexMapping &texture_mapping = node_storage(bnode());
const float use_min = get_use_min();
const float use_max = get_use_max();
@@ -79,27 +81,22 @@ class MapValueShaderNode : public ShaderNode {
"node_composite_map_value",
inputs,
outputs,
- GPU_uniform(texture_mapping->loc),
- GPU_uniform(texture_mapping->size),
+ GPU_uniform(texture_mapping.loc),
+ GPU_uniform(texture_mapping.size),
GPU_constant(&use_min),
- GPU_uniform(texture_mapping->min),
+ GPU_uniform(texture_mapping.min),
GPU_constant(&use_max),
- GPU_uniform(texture_mapping->max));
- }
-
- TexMapping *get_texture_mapping()
- {
- return static_cast<TexMapping *>(bnode().storage);
+ GPU_uniform(texture_mapping.max));
}
bool get_use_min()
{
- return get_texture_mapping()->flag & TEXMAP_CLIP_MIN;
+ return node_storage(bnode()).flag & TEXMAP_CLIP_MIN;
}
bool get_use_max()
{
- return get_texture_mapping()->flag & TEXMAP_CLIP_MAX;
+ return node_storage(bnode()).flag & TEXMAP_CLIP_MAX;
}
};
diff --git a/source/blender/nodes/composite/nodes/node_composite_movieclip.cc b/source/blender/nodes/composite/nodes/node_composite_movieclip.cc
index ec95de3da18..b9d9620a214 100644
--- a/source/blender/nodes/composite/nodes/node_composite_movieclip.cc
+++ b/source/blender/nodes/composite/nodes/node_composite_movieclip.cc
@@ -239,7 +239,7 @@ class MovieClipOperation : public NodeOperation {
GPUTexture *get_movie_clip_texture()
{
MovieClip *movie_clip = get_movie_clip();
- MovieClipUser *movie_clip_user = static_cast<MovieClipUser *>(bnode().storage);
+ MovieClipUser *movie_clip_user = get_movie_clip_user();
BKE_movieclip_user_set_frame(movie_clip_user, context().get_frame_number());
return BKE_movieclip_get_gpu_texture(movie_clip, movie_clip_user);
}
@@ -247,13 +247,20 @@ class MovieClipOperation : public NodeOperation {
void free_movie_clip_texture()
{
MovieClip *movie_clip = get_movie_clip();
- return BKE_movieclip_free_gputexture(movie_clip);
+ if (movie_clip) {
+ BKE_movieclip_free_gputexture(movie_clip);
+ }
}
MovieClip *get_movie_clip()
{
return (MovieClip *)bnode().id;
}
+
+ MovieClipUser *get_movie_clip_user()
+ {
+ return static_cast<MovieClipUser *>(bnode().storage);
+ }
};
static NodeOperation *get_compositor_operation(Context &context, DNode node)
diff --git a/source/blender/nodes/composite/nodes/node_composite_normal.cc b/source/blender/nodes/composite/nodes/node_composite_normal.cc
index f61ace01cfd..a1a6303e21b 100644
--- a/source/blender/nodes/composite/nodes/node_composite_normal.cc
+++ b/source/blender/nodes/composite/nodes/node_composite_normal.cc
@@ -51,9 +51,12 @@ class NormalShaderNode : public ShaderNode {
}
/* The vector value is stored in the default value of the output socket. */
- float *get_vector_value()
+ const float *get_vector_value()
{
- return node().output_by_identifier("Normal")->default_value<bNodeSocketValueVector>()->value;
+ return node()
+ .output_by_identifier("Normal")
+ ->default_value_typed<bNodeSocketValueVector>()
+ ->value;
}
};
diff --git a/source/blender/nodes/composite/nodes/node_composite_pixelate.cc b/source/blender/nodes/composite/nodes/node_composite_pixelate.cc
index 4567464a547..c4e42f8247d 100644
--- a/source/blender/nodes/composite/nodes/node_composite_pixelate.cc
+++ b/source/blender/nodes/composite/nodes/node_composite_pixelate.cc
@@ -5,6 +5,9 @@
* \ingroup cmpnodes
*/
+#include "BLI_math_vec_types.hh"
+#include "BLI_math_vector.hh"
+
#include "COM_node_operation.hh"
#include "node_composite_util.hh"
@@ -27,8 +30,34 @@ class PixelateOperation : public NodeOperation {
void execute() override
{
+    /* It might seem strange that the input is passed through without any processing, but note
+ * that the actual processing happens inside the domain realization input processor of the
+ * input. Indeed, the pixelate node merely realizes its input on a smaller-sized domain that
+ * matches its apparent size, that is, its size after the domain transformation. The pixelate
+     * node has no effect if the input is scaled up. See the compute_domain method for more
+ * information. */
get_input("Color").pass_through(get_result("Color"));
}
+
+ /* Compute a smaller-sized domain that matches the apparent size of the input while having a unit
+ * scale transformation, see the execute method for more information. */
+ Domain compute_domain() override
+ {
+ Domain domain = get_input("Color").domain();
+
+ /* Get the scaling component of the domain transformation, but make sure it doesn't exceed 1,
+ * because pixelation should only happen if the input is scaled down. */
+ const float2 scale = math::min(float2(1.0f), domain.transformation.scale_2d());
+
+ /* Multiply the size of the domain by its scale to match its apparent size, but make sure it is
+     * at least 1 pixel in both axes. */
+ domain.size = math::max(int2(float2(domain.size) * scale), int2(1));
+
+ /* Reset the scale of the transformation by transforming it with the inverse of the scale. */
+ domain.transformation *= float3x3::from_scale(math::safe_divide(float2(1.0f), scale));
+
+ return domain;
+ }
};
static NodeOperation *get_compositor_operation(Context &context, DNode node)
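
Editor's note (not part of the patch): the domain arithmetic of compute_domain() above, restated
with plain scalars so the clamping and compensation steps are easier to follow. SimpleDomain and
the function name are illustrative stand-ins, not Blender types.

#include <algorithm>

struct SimpleDomain {
  int width, height;
  float scale_x, scale_y; /* Scaling component of the domain transformation. */
};

static SimpleDomain pixelate_domain(SimpleDomain domain)
{
  /* Pixelation only applies when the input is scaled down, so clamp the scale to at most 1. */
  const float sx = std::min(1.0f, domain.scale_x);
  const float sy = std::min(1.0f, domain.scale_y);

  /* Shrink the size to the apparent size, keeping at least one pixel per axis. */
  domain.width = std::max(1, int(domain.width * sx));
  domain.height = std::max(1, int(domain.height * sy));

  /* Compensate in the transformation so the apparent size after realization is unchanged. */
  domain.scale_x = sx > 0.0f ? domain.scale_x / sx : 0.0f;
  domain.scale_y = sy > 0.0f ? domain.scale_y / sy : 0.0f;
  return domain;
}
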
diff --git a/source/blender/nodes/composite/nodes/node_composite_rgb.cc b/source/blender/nodes/composite/nodes/node_composite_rgb.cc
index 6f3a00af7e3..f107961f301 100644
--- a/source/blender/nodes/composite/nodes/node_composite_rgb.cc
+++ b/source/blender/nodes/composite/nodes/node_composite_rgb.cc
@@ -33,8 +33,8 @@ class RGBOperation : public NodeOperation {
Result &result = get_result("RGBA");
result.allocate_single_value();
- const bNodeSocket *socket = static_cast<bNodeSocket *>(bnode().outputs.first);
- float4 color = float4(static_cast<bNodeSocketValueRGBA *>(socket->default_value)->value);
+ const bNodeSocket *socket = static_cast<const bNodeSocket *>(bnode().outputs.first);
+ float4 color = float4(static_cast<const bNodeSocketValueRGBA *>(socket->default_value)->value);
result.set_color_value(color);
}
diff --git a/source/blender/nodes/composite/nodes/node_composite_scale.cc b/source/blender/nodes/composite/nodes/node_composite_scale.cc
index 8b43ae8c9ca..eb2d7162c69 100644
--- a/source/blender/nodes/composite/nodes/node_composite_scale.cc
+++ b/source/blender/nodes/composite/nodes/node_composite_scale.cc
@@ -5,6 +5,11 @@
* \ingroup cmpnodes
*/
+#include "BLI_assert.h"
+#include "BLI_float3x3.hh"
+#include "BLI_math_base.hh"
+#include "BLI_math_vec_types.hh"
+
#include "RNA_access.h"
#include "UI_interface.h"
@@ -20,16 +25,26 @@ namespace blender::nodes::node_composite_scale_cc {
static void cmp_node_scale_declare(NodeDeclarationBuilder &b)
{
- b.add_input<decl::Color>(N_("Image")).default_value({1.0f, 1.0f, 1.0f, 1.0f});
- b.add_input<decl::Float>(N_("X")).default_value(1.0f).min(0.0001f).max(CMP_SCALE_MAX);
- b.add_input<decl::Float>(N_("Y")).default_value(1.0f).min(0.0001f).max(CMP_SCALE_MAX);
+ b.add_input<decl::Color>(N_("Image"))
+ .default_value({1.0f, 1.0f, 1.0f, 1.0f})
+ .compositor_domain_priority(0);
+ b.add_input<decl::Float>(N_("X"))
+ .default_value(1.0f)
+ .min(0.0001f)
+ .max(CMP_SCALE_MAX)
+ .compositor_expects_single_value();
+ b.add_input<decl::Float>(N_("Y"))
+ .default_value(1.0f)
+ .min(0.0001f)
+ .max(CMP_SCALE_MAX)
+ .compositor_expects_single_value();
b.add_output<decl::Color>(N_("Image"));
}
static void node_composite_update_scale(bNodeTree *ntree, bNode *node)
{
bNodeSocket *sock;
- bool use_xy_scale = ELEM(node->custom1, CMP_SCALE_RELATIVE, CMP_SCALE_ABSOLUTE);
+ bool use_xy_scale = ELEM(node->custom1, CMP_NODE_SCALE_RELATIVE, CMP_NODE_SCALE_ABSOLUTE);
/* Only show X/Y scale factor inputs for modes using them! */
for (sock = (bNodeSocket *)node->inputs.first; sock; sock = sock->next) {
@@ -43,7 +58,7 @@ static void node_composit_buts_scale(uiLayout *layout, bContext *UNUSED(C), Poin
{
uiItemR(layout, ptr, "space", UI_ITEM_R_SPLIT_EMPTY_NAME, "", ICON_NONE);
- if (RNA_enum_get(ptr, "space") == CMP_SCALE_RENDERPERCENT) {
+ if (RNA_enum_get(ptr, "space") == CMP_NODE_SCALE_RENDER_SIZE) {
uiLayout *row;
uiItemR(layout,
ptr,
@@ -65,7 +80,129 @@ class ScaleOperation : public NodeOperation {
void execute() override
{
- get_input("Image").pass_through(get_result("Image"));
+ Result &input = get_input("Image");
+ Result &result = get_result("Image");
+ input.pass_through(result);
+
+ const float3x3 transformation = float3x3::from_translation_rotation_scale(
+ get_translation(), 0.0f, get_scale());
+
+ result.transform(transformation);
+ result.get_realization_options().interpolation = Interpolation::Bilinear;
+ }
+
+ float2 get_scale()
+ {
+ switch (get_scale_method()) {
+ case CMP_NODE_SCALE_RELATIVE:
+ return get_scale_relative();
+ case CMP_NODE_SCALE_ABSOLUTE:
+ return get_scale_absolute();
+ case CMP_NODE_SCALE_RENDER_PERCENT:
+ return get_scale_render_percent();
+ case CMP_NODE_SCALE_RENDER_SIZE:
+ return get_scale_render_size();
+ default:
+ BLI_assert_unreachable();
+ return float2(1.0f);
+ }
+ }
+
+ /* Scale by the input factors. */
+ float2 get_scale_relative()
+ {
+ return float2(get_input("X").get_float_value_default(1.0f),
+ get_input("Y").get_float_value_default(1.0f));
+ }
+
+ /* Scale such that the new size matches the input absolute size. */
+ float2 get_scale_absolute()
+ {
+ const float2 input_size = float2(get_input("Image").domain().size);
+ const float2 absolute_size = float2(get_input("X").get_float_value_default(1.0f),
+ get_input("Y").get_float_value_default(1.0f));
+ return absolute_size / input_size;
+ }
+
+ /* Scale by the render resolution percentage. */
+ float2 get_scale_render_percent()
+ {
+ return float2(context().get_scene()->r.size / 100.0f);
+ }
+
+ float2 get_scale_render_size()
+ {
+ switch (get_scale_render_size_method()) {
+ case CMP_NODE_SCALE_RENDER_SIZE_STRETCH:
+ return get_scale_render_size_stretch();
+ case CMP_NODE_SCALE_RENDER_SIZE_FIT:
+ return get_scale_render_size_fit();
+ case CMP_NODE_SCALE_RENDER_SIZE_CROP:
+ return get_scale_render_size_crop();
+ default:
+ BLI_assert_unreachable();
+ return float2(1.0f);
+ }
+ }
+
+ /* Scale such that the new size matches the render size. Since the input is freely scaled, it is
+ * potentially stretched, hence the name. */
+ float2 get_scale_render_size_stretch()
+ {
+ const float2 input_size = float2(get_input("Image").domain().size);
+ const float2 render_size = float2(context().get_output_size());
+ return render_size / input_size;
+ }
+
+ /* Scale such that the dimension with the smaller scaling factor matches that of the render size
+ * while maintaining the input's aspect ratio. Since the other dimension is guaranteed not to
+ * exceed the render size region due to its larger scaling factor, the image is said to be fit
+ * inside that region, hence the name. */
+ float2 get_scale_render_size_fit()
+ {
+ const float2 input_size = float2(get_input("Image").domain().size);
+ const float2 render_size = float2(context().get_output_size());
+ const float2 scale = render_size / input_size;
+ return float2(math::min(scale.x, scale.y));
+ }
+
+ /* Scale such that the dimension with the larger scaling factor matches that of the render size
+ * while maintaining the input's aspect ratio. Since the other dimension is guaranteed to exceed
+ * the render size region due to its lower scaling factor, the image will be cropped inside that
+ * region, hence the name. */
+ float2 get_scale_render_size_crop()
+ {
+ const float2 input_size = float2(get_input("Image").domain().size);
+ const float2 render_size = float2(context().get_output_size());
+ const float2 scale = render_size / input_size;
+ return float2(math::max(scale.x, scale.y));
+ }
+
+ float2 get_translation()
+ {
+ /* Only the render size option supports offset translation. */
+ if (get_scale_method() != CMP_NODE_SCALE_RENDER_SIZE) {
+ return float2(0.0f);
+ }
+
+ /* Translate by the offset factor relative to the new size. */
+ const float2 input_size = float2(get_input("Image").domain().size);
+ return get_offset() * input_size * get_scale();
+ }
+
+ CMPNodeScaleMethod get_scale_method()
+ {
+ return (CMPNodeScaleMethod)bnode().custom1;
+ }
+
+ CMPNodeScaleRenderSizeMethod get_scale_render_size_method()
+ {
+ return (CMPNodeScaleRenderSizeMethod)bnode().custom2;
+ }
+
+ float2 get_offset()
+ {
+ return float2(bnode().custom3, bnode().custom4);
}
};
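
Editor's note (not part of the patch): the three render-size policies above reduced to their
scale math. Stretch scales each axis independently, fit takes the smaller factor so the whole
image stays inside the render region, and crop takes the larger one so the region is fully
covered. The Scale2D struct and function names are illustrative only.

#include <algorithm>

struct Scale2D {
  float x, y;
};

static Scale2D render_size_stretch(const float input_w, const float input_h,
                                   const float render_w, const float render_h)
{
  return {render_w / input_w, render_h / input_h};
}

static Scale2D render_size_fit(const float input_w, const float input_h,
                               const float render_w, const float render_h)
{
  const float s = std::min(render_w / input_w, render_h / input_h);
  return {s, s}; /* Uniform scale: the aspect ratio is preserved. */
}

static Scale2D render_size_crop(const float input_w, const float input_h,
                                const float render_w, const float render_h)
{
  const float s = std::max(render_w / input_w, render_h / input_h);
  return {s, s}; /* Uniform scale: the overflowing axis is cropped by the render region. */
}
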
diff --git a/source/blender/nodes/composite/nodes/node_composite_sepcomb_color.cc b/source/blender/nodes/composite/nodes/node_composite_sepcomb_color.cc
index d1f0b7977f8..f6792d7ce61 100644
--- a/source/blender/nodes/composite/nodes/node_composite_sepcomb_color.cc
+++ b/source/blender/nodes/composite/nodes/node_composite_sepcomb_color.cc
@@ -62,6 +62,8 @@ static void node_cmp_combsep_color_label(const ListBase *sockets, CMPNodeCombSep
namespace blender::nodes::node_composite_separate_color_cc {
+NODE_STORAGE_FUNCS(NodeCMPCombSepColor)
+
static void cmp_node_separate_color_declare(NodeDeclarationBuilder &b)
{
b.add_input<decl::Color>(N_("Image"))
@@ -93,14 +95,9 @@ class SeparateColorShaderNode : public ShaderNode {
GPU_stack_link(material, &bnode(), get_shader_function_name(), inputs, outputs);
}
- NodeCMPCombSepColor *get_node_combine_separate_color()
- {
- return static_cast<NodeCMPCombSepColor *>(bnode().storage);
- }
-
const char *get_shader_function_name()
{
- switch (get_node_combine_separate_color()->mode) {
+ switch (node_storage(bnode()).mode) {
case CMP_NODE_COMBSEP_COLOR_RGB:
return "node_composite_separate_rgba";
case CMP_NODE_COMBSEP_COLOR_HSV:
@@ -110,7 +107,7 @@ class SeparateColorShaderNode : public ShaderNode {
case CMP_NODE_COMBSEP_COLOR_YUV:
return "node_composite_separate_yuva_itu_709";
case CMP_NODE_COMBSEP_COLOR_YCC:
- switch (get_node_combine_separate_color()->ycc_mode) {
+ switch (node_storage(bnode()).ycc_mode) {
case BLI_YCC_ITU_BT601:
return "node_composite_separate_ycca_itu_601";
case BLI_YCC_ITU_BT709:
@@ -153,6 +150,8 @@ void register_node_type_cmp_separate_color()
namespace blender::nodes::node_composite_combine_color_cc {
+NODE_STORAGE_FUNCS(NodeCMPCombSepColor)
+
static void cmp_node_combine_color_declare(NodeDeclarationBuilder &b)
{
b.add_input<decl::Float>(N_("Red"))
@@ -202,14 +201,9 @@ class CombineColorShaderNode : public ShaderNode {
GPU_stack_link(material, &bnode(), get_shader_function_name(), inputs, outputs);
}
- NodeCMPCombSepColor *get_node_combine_separate_color()
- {
- return static_cast<NodeCMPCombSepColor *>(bnode().storage);
- }
-
const char *get_shader_function_name()
{
- switch (get_node_combine_separate_color()->mode) {
+ switch (node_storage(bnode()).mode) {
case CMP_NODE_COMBSEP_COLOR_RGB:
return "node_composite_combine_rgba";
case CMP_NODE_COMBSEP_COLOR_HSV:
@@ -219,7 +213,7 @@ class CombineColorShaderNode : public ShaderNode {
case CMP_NODE_COMBSEP_COLOR_YUV:
return "node_composite_combine_yuva_itu_709";
case CMP_NODE_COMBSEP_COLOR_YCC:
- switch (get_node_combine_separate_color()->ycc_mode) {
+ switch (node_storage(bnode()).ycc_mode) {
case BLI_YCC_ITU_BT601:
return "node_composite_combine_ycca_itu_601";
case BLI_YCC_ITU_BT709:
diff --git a/source/blender/nodes/composite/nodes/node_composite_setalpha.cc b/source/blender/nodes/composite/nodes/node_composite_setalpha.cc
index 9930125aa70..df3aca2c665 100644
--- a/source/blender/nodes/composite/nodes/node_composite_setalpha.cc
+++ b/source/blender/nodes/composite/nodes/node_composite_setalpha.cc
@@ -18,6 +18,8 @@
namespace blender::nodes::node_composite_setalpha_cc {
+NODE_STORAGE_FUNCS(NodeSetAlpha)
+
static void cmp_node_setalpha_declare(NodeDeclarationBuilder &b)
{
b.add_input<decl::Color>(N_("Image"))
@@ -54,18 +56,13 @@ class SetAlphaShaderNode : public ShaderNode {
GPUNodeStack *inputs = get_inputs_array();
GPUNodeStack *outputs = get_outputs_array();
- if (get_node_set_alpha()->mode == CMP_NODE_SETALPHA_MODE_APPLY) {
+ if (node_storage(bnode()).mode == CMP_NODE_SETALPHA_MODE_APPLY) {
GPU_stack_link(material, &bnode(), "node_composite_set_alpha_apply", inputs, outputs);
return;
}
GPU_stack_link(material, &bnode(), "node_composite_set_alpha_replace", inputs, outputs);
}
-
- NodeSetAlpha *get_node_set_alpha()
- {
- return static_cast<NodeSetAlpha *>(bnode().storage);
- }
};
static ShaderNode *get_compositor_shader_node(DNode node)
diff --git a/source/blender/nodes/composite/nodes/node_composite_translate.cc b/source/blender/nodes/composite/nodes/node_composite_translate.cc
index fbd53b8310f..e0f87ff604a 100644
--- a/source/blender/nodes/composite/nodes/node_composite_translate.cc
+++ b/source/blender/nodes/composite/nodes/node_composite_translate.cc
@@ -19,6 +19,8 @@
namespace blender::nodes::node_composite_translate_cc {
+NODE_STORAGE_FUNCS(NodeTranslateData)
+
static void cmp_node_translate_declare(NodeDeclarationBuilder &b)
{
b.add_input<decl::Color>(N_("Image"))
@@ -76,24 +78,19 @@ class TranslateOperation : public NodeOperation {
result.get_realization_options().repeat_y = get_repeat_y();
}
- NodeTranslateData &get_node_translate()
- {
- return *static_cast<NodeTranslateData *>(bnode().storage);
- }
-
bool get_use_relative()
{
- return get_node_translate().relative;
+ return node_storage(bnode()).relative;
}
bool get_repeat_x()
{
- return ELEM(get_node_translate().wrap_axis, CMP_NODE_WRAP_X, CMP_NODE_WRAP_XY);
+ return ELEM(node_storage(bnode()).wrap_axis, CMP_NODE_WRAP_X, CMP_NODE_WRAP_XY);
}
bool get_repeat_y()
{
- return ELEM(get_node_translate().wrap_axis, CMP_NODE_WRAP_Y, CMP_NODE_WRAP_XY);
+ return ELEM(node_storage(bnode()).wrap_axis, CMP_NODE_WRAP_Y, CMP_NODE_WRAP_XY);
}
};