
git.blender.org/blender.git
author    Omar Emara <mail@OmarEmara.dev>  2022-08-18 13:20:18 +0300
committer Omar Emara <mail@OmarEmara.dev>  2022-08-18 13:20:18 +0300
commit    1854d313218b9cb1be1a67e256783cda2703db8f (patch)
tree      2eb043e18d636c3b9c4e07a45b14f091dde68ef2 /source/blender/nodes
parent    b828d453e93c30e81d3cb0d5aa3edc553ea9a333 (diff)
Realtime Compositor: Implement directional blur node
This patch implements the directional blur node for the realtime compositor.

Differential Revision: https://developer.blender.org/D15672

Reviewed By: Clement Foucault
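For orientation, directional blur here works by averaging the image with a number of progressively transformed copies of itself. Below is a minimal CPU-side sketch of that accumulation idea, assuming a hypothetical single-channel Image type and a plain translation step; none of these names come from the patch, and the actual implementation runs in a GPU shader and builds the per-iteration transform from the node's translation, rotation, and scale settings.

#include <algorithm>
#include <cmath>
#include <vector>

/* Hypothetical image type for illustration; the real patch works on GPU textures. */
struct Image {
  int width = 0, height = 0;
  std::vector<float> pixels; /* Single channel for brevity. */

  float sample(float x, float y) const
  {
    /* Nearest-neighbor sampling clamped to the image bounds; the real shader
     * samples bilinearly with a clamped wrap mode. */
    const int xi = std::min(std::max(int(std::round(x)), 0), width - 1);
    const int yi = std::min(std::max(int(std::round(y)), 0), height - 1);
    return pixels[yi * width + xi];
  }
};

/* Average the original image with `iterations` progressively translated copies of
 * it. The patch additionally rotates and scales around a user-given origin and
 * inverts the transform, since the shader maps output coordinates back into the
 * input rather than transforming the image itself. */
Image directional_blur(const Image &input, float dx, float dy, int iterations)
{
  Image output = input;
  for (int y = 0; y < input.height; y++) {
    for (int x = 0; x < input.width; x++) {
      float sum = 0.0f;
      float sx = float(x), sy = float(y);
      /* iterations + 1 samples: the untransformed image plus one sample per
       * iteration, matching the "extra iteration" noted in the patch. */
      for (int i = 0; i < iterations + 1; i++) {
        sum += input.sample(sx, sy);
        sx += dx;
        sy += dy;
      }
      output.pixels[y * input.width + x] = sum / float(iterations + 1);
    }
  }
  return output;
}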
Diffstat (limited to 'source/blender/nodes')
-rw-r--r--  source/blender/nodes/composite/nodes/node_composite_directionalblur.cc  122
1 file changed, 121 insertions(+), 1 deletion(-)
diff --git a/source/blender/nodes/composite/nodes/node_composite_directionalblur.cc b/source/blender/nodes/composite/nodes/node_composite_directionalblur.cc
index eacba5ad12d..d317c442ab3 100644
--- a/source/blender/nodes/composite/nodes/node_composite_directionalblur.cc
+++ b/source/blender/nodes/composite/nodes/node_composite_directionalblur.cc
@@ -5,10 +5,18 @@
* \ingroup cmpnodes
*/
+#include "BLI_float3x3.hh"
+#include "BLI_math_base.hh"
+#include "BLI_math_vec_types.hh"
+#include "BLI_math_vector.hh"
+
#include "UI_interface.h"
#include "UI_resources.h"
+#include "GPU_shader.h"
+
#include "COM_node_operation.hh"
+#include "COM_utilities.hh"
#include "node_composite_util.hh"
@@ -61,7 +69,119 @@ class DirectionalBlurOperation : public NodeOperation {
void execute() override
{
- get_input("Image").pass_through(get_result("Image"));
+ if (is_identity()) {
+ get_input("Image").pass_through(get_result("Image"));
+ return;
+ }
+
+ GPUShader *shader = shader_manager().get("compositor_directional_blur");
+ GPU_shader_bind(shader);
+
+ /* The number of iterations does not cover the original image, that is, the image with no
+ * transformation applied. So add an extra iteration for the original image and take that
+ * into account in the shader. */
+ GPU_shader_uniform_1i(shader, "iterations", get_iterations() + 1);
+ GPU_shader_uniform_mat3_as_mat4(shader, "inverse_transformation", get_transformation().ptr());
+
+ const Result &input_image = get_input("Image");
+ input_image.bind_as_texture(shader, "input_tx");
+
+ GPU_texture_filter_mode(input_image.texture(), true);
+ GPU_texture_wrap_mode(input_image.texture(), false, false);
+
+ const Domain domain = compute_domain();
+ Result &output_image = get_result("Image");
+ output_image.allocate_texture(domain);
+ output_image.bind_as_image(shader, "output_img");
+
+ compute_dispatch_threads_at_least(shader, domain.size);
+
+ GPU_shader_unbind();
+ output_image.unbind_as_image();
+ input_image.unbind_as_texture();
+ }
+
+ /* Get the amount of translation that will be applied on each iteration. The translation is in
+ * the negative x direction rotated in the clockwise direction, hence the negative sign for both
+ * the rotation angle and the translation vector. */
+ float2 get_translation()
+ {
+ const float diagonal_length = math::length(float2(get_input("Image").domain().size));
+ const float translation_amount = diagonal_length * get_node_directional_blur_data().distance;
+ const float3x3 rotation = float3x3::from_rotation(-get_node_directional_blur_data().angle);
+ return rotation * float2(-translation_amount / get_iterations(), 0.0f);
+ }
+
+ /* Get the amount of rotation that will be applied on each iteration. */
+ float get_rotation()
+ {
+ return get_node_directional_blur_data().spin / get_iterations();
+ }
+
+ /* Get the amount of scale that will be applied on each iteration. The scale is identity when the
+ * user supplies 0, so we add 1. */
+ float2 get_scale()
+ {
+ return float2(1.0f + get_node_directional_blur_data().zoom / get_iterations());
+ }
+
+ float2 get_origin()
+ {
+ const float2 center = float2(get_node_directional_blur_data().center_x,
+ get_node_directional_blur_data().center_y);
+ return float2(get_input("Image").domain().size) * center;
+ }
+
+ float3x3 get_transformation()
+ {
+ /* Construct the transformation that will be applied on each iteration. */
+ const float3x3 transformation = float3x3::from_translation_rotation_scale(
+ get_translation(), get_rotation(), get_scale());
+ /* Change the origin of the transformation to the user-specified origin. */
+ const float3x3 origin_transformation = float3x3::from_origin_transformation(transformation,
+ get_origin());
+ /* The shader will transform the coordinates, not the image itself, so take the inverse. */
+ return origin_transformation.inverted();
+ }
+
+ /* The actual number of iterations is 2 to the power of the user-supplied iteration count. The
+ * power is implemented using a bit shift, but also make sure it doesn't exceed the upper limit,
+ * which is the number of diagonal pixels. */
+ int get_iterations()
+ {
+ const int iterations = 2 << (get_node_directional_blur_data().iter - 1);
+ const int upper_limit = math::ceil(math::length(float2(get_input("Image").domain().size)));
+ return math::min(iterations, upper_limit);
+ }
+
+ /* Returns true if the operation does nothing and the input can be passed through. */
+ bool is_identity()
+ {
+ const Result &input = get_input("Image");
+ /* Single value inputs can't be blurred and are returned as is. */
+ if (input.is_single_value()) {
+ return true;
+ }
+
+ /* If any of the following options are non-zero, then the operation is not an identity. */
+ if (get_node_directional_blur_data().distance != 0.0f) {
+ return false;
+ }
+
+ if (get_node_directional_blur_data().spin != 0.0f) {
+ return false;
+ }
+
+ if (get_node_directional_blur_data().zoom != 0.0f) {
+ return false;
+ }
+
+ return true;
+ }
+
+ NodeDBlurData &get_node_directional_blur_data()
+ {
+ return *static_cast<NodeDBlurData *>(bnode().storage);
}
};
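Two details of the patch worth calling out: get_transformation() returns the inverse because the shader transforms output sampling coordinates back into the input image rather than transforming the image itself, and get_iterations() maps the node's user-facing iteration count to 2 << (iter - 1), i.e. 2^iter, clamped to the length of the image diagonal in pixels. A small standalone sketch of that mapping follows; the helper name and the clamp value of 100 are illustrative only, standing in for the real diagonal length.

#include <algorithm>
#include <cassert>

/* Mirrors the logic of get_iterations(): 2 << (iter - 1) equals 2^iter,
 * clamped to an upper limit (the number of diagonal pixels in the node). */
static int effective_iterations(int iter, int upper_limit)
{
  return std::min(2 << (iter - 1), upper_limit);
}

int main()
{
  assert(effective_iterations(1, 100) == 2);   /* 2^1 */
  assert(effective_iterations(2, 100) == 4);   /* 2^2 */
  assert(effective_iterations(3, 100) == 8);   /* 2^3 */
  assert(effective_iterations(8, 100) == 100); /* 2^8 = 256, clamped to the stand-in diagonal. */
  return 0;
}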