
git.blender.org/blender.git
author     Manuel Castilla <manzanillawork@gmail.com>  2021-08-23 16:30:31 +0300
committer  Manuel Castilla <manzanillawork@gmail.com>  2021-08-23 18:08:45 +0300
commit     daa7c59e38c8fe464004b3becd6956b880c38c92 (patch)
tree       c579e1eb7908bc24429fad527b9ae58fda4c67d7 /source/blender/compositor/operations/COM_VariableSizeBokehBlurOperation.cc
parent     344aca3b1bf2718904455ea6cef1ffd8bedf51a6 (diff)
Compositor: Full frame Bokeh Blur and Blur nodes
Adds full frame implementation to these nodes' operations.

When enabling the "extend bounds" node option, the tiled implementation result is slightly different because it uses `TranslateOperation` with bilinear sampling for centering. Full frame always uses nearest sampling so as not to lose image quality. This has the disadvantage of causing image jiggling on the backdrop when switching size values, as it's not pixel perfect; this is fixed by rounding to even.

No functional changes.

Part of T88150.

Reviewed By: jbakker

Differential Revision: https://developer.blender.org/D12167
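As an illustration only (not part of this patch), a minimal sketch of the "rounding to even" idea mentioned above: the canvas dimension is snapped to the nearest even number of pixels, presumably so the half-offset used for centering stays a whole pixel and the nearest-sampled result does not jiggle when the size changes. The helper name and usage are hypothetical.

#include <cmath>

/* Round a canvas dimension to the nearest even pixel count. */
static int round_dimension_to_even(const float dim)
{
  /* Halve, round to the nearest integer, then double: the result is always even. */
  return 2 * static_cast<int>(std::lround(dim * 0.5f));
}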
Diffstat (limited to 'source/blender/compositor/operations/COM_VariableSizeBokehBlurOperation.cc')
-rw-r--r--  source/blender/compositor/operations/COM_VariableSizeBokehBlurOperation.cc | 161
1 file changed, 161 insertions(+), 0 deletions(-)
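For context, a worked example (all values assumed for illustration) of the input padding that the `get_area_of_interest()` hunk below computes for the image and size inputs:

#include <algorithm>
#include <cstdio>

int main()
{
  const float width = 1920.0f, height = 1080.0f; /* assumed canvas size */
  const float max_blur = 10.0f;                  /* assumed node "max blur" value */
  const bool do_size_scale = true;               /* assumed "size scale" option enabled */

  const float max_dim = std::max(width, height);                   /* 1920 */
  const float scalar = do_size_scale ? (max_dim / 100.0f) : 1.0f;  /* 19.2 */
  const int max_blur_scalar = static_cast<int>(max_blur * scalar); /* 192 */

  /* Each side of the requested output area is padded by the maximum blur radius plus 2. */
  std::printf("input area padding: %d pixels per side\n", max_blur_scalar + 2); /* 194 */
  return 0;
}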
diff --git a/source/blender/compositor/operations/COM_VariableSizeBokehBlurOperation.cc b/source/blender/compositor/operations/COM_VariableSizeBokehBlurOperation.cc
index 19cd5a53084..6af6f5a6244 100644
--- a/source/blender/compositor/operations/COM_VariableSizeBokehBlurOperation.cc
+++ b/source/blender/compositor/operations/COM_VariableSizeBokehBlurOperation.cc
@@ -18,6 +18,7 @@
#include "COM_VariableSizeBokehBlurOperation.h"
#include "BLI_math.h"
+#include "COM_ExecutionSystem.h"
#include "COM_OpenCLDevice.h"
#include "RE_pipeline.h"
@@ -276,6 +277,166 @@ bool VariableSizeBokehBlurOperation::determineDependingAreaOfInterest(
  return false;
}
+void VariableSizeBokehBlurOperation::get_area_of_interest(const int input_idx,
+                                                          const rcti &output_area,
+                                                          rcti &r_input_area)
+{
+  switch (input_idx) {
+    case IMAGE_INPUT_INDEX:
+    case SIZE_INPUT_INDEX: {
+      const float max_dim = MAX2(getWidth(), getHeight());
+      const float scalar = m_do_size_scale ? (max_dim / 100.0f) : 1.0f;
+      const int max_blur_scalar = m_maxBlur * scalar;
+      r_input_area.xmax = output_area.xmax + max_blur_scalar + 2;
+      r_input_area.xmin = output_area.xmin - max_blur_scalar - 2;
+      r_input_area.ymax = output_area.ymax + max_blur_scalar + 2;
+      r_input_area.ymin = output_area.ymin - max_blur_scalar - 2;
+      break;
+    }
+    case BOKEH_INPUT_INDEX: {
+      r_input_area.xmax = COM_BLUR_BOKEH_PIXELS;
+      r_input_area.xmin = 0;
+      r_input_area.ymax = COM_BLUR_BOKEH_PIXELS;
+      r_input_area.ymin = 0;
+      break;
+    }
+#ifdef COM_DEFOCUS_SEARCH
+    case DEFOCUS_INPUT_INDEX: {
+      r_input_area.xmax = (output_area.xmax / InverseSearchRadiusOperation::DIVIDER) + 1;
+      r_input_area.xmin = (output_area.xmin / InverseSearchRadiusOperation::DIVIDER) - 1;
+      r_input_area.ymax = (output_area.ymax / InverseSearchRadiusOperation::DIVIDER) + 1;
+      r_input_area.ymin = (output_area.ymin / InverseSearchRadiusOperation::DIVIDER) - 1;
+      break;
+    }
+#endif
+  }
+}
+
+struct PixelData {
+  float multiplier_accum[4];
+  float color_accum[4];
+  float threshold;
+  float scalar;
+  float size_center;
+  int max_blur_scalar;
+  int step;
+  MemoryBuffer *bokeh_input;
+  MemoryBuffer *size_input;
+  MemoryBuffer *image_input;
+  int image_width;
+  int image_height;
+};
+
+static void blur_pixel(int x, int y, PixelData &p)
+{
+  BLI_assert(p.bokeh_input->getWidth() == COM_BLUR_BOKEH_PIXELS);
+  BLI_assert(p.bokeh_input->getHeight() == COM_BLUR_BOKEH_PIXELS);
+
+#ifdef COM_DEFOCUS_SEARCH
+  float search[4];
+  inputs[DEFOCUS_INPUT_INDEX]->read_elem_checked(x / InverseSearchRadiusOperation::DIVIDER,
+                                                 y / InverseSearchRadiusOperation::DIVIDER,
+                                                 search);
+  const int minx = search[0];
+  const int miny = search[1];
+  const int maxx = search[2];
+  const int maxy = search[3];
+#else
+  const int minx = MAX2(x - p.max_blur_scalar, 0);
+  const int miny = MAX2(y - p.max_blur_scalar, 0);
+  const int maxx = MIN2(x + p.max_blur_scalar, p.image_width);
+  const int maxy = MIN2(y + p.max_blur_scalar, p.image_height);
+#endif
+
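+  /* The quality step skips pixels: the image and size buffer strides are pre-scaled so the
+   * row and element pointers below advance by `p.step` pixels at a time. */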
+  const int color_row_stride = p.image_input->row_stride * p.step;
+  const int color_elem_stride = p.image_input->elem_stride * p.step;
+  const int size_row_stride = p.size_input->row_stride * p.step;
+  const int size_elem_stride = p.size_input->elem_stride * p.step;
+  const float *row_color = p.image_input->get_elem(minx, miny);
+  const float *row_size = p.size_input->get_elem(minx, miny);
+  for (int ny = miny; ny < maxy;
+       ny += p.step, row_size += size_row_stride, row_color += color_row_stride) {
+    const float dy = ny - y;
+    const float *size_elem = row_size;
+    const float *color = row_color;
+    for (int nx = minx; nx < maxx;
+         nx += p.step, size_elem += size_elem_stride, color += color_elem_stride) {
+      if (nx == x && ny == y) {
+        continue;
+      }
+      const float size = MIN2(size_elem[0] * p.scalar, p.size_center);
+      if (size <= p.threshold) {
+        continue;
+      }
+      const float dx = nx - x;
+      if (size <= fabsf(dx) || size <= fabsf(dy)) {
+        continue;
+      }
+
+      /* XXX: There is no way to ensure bokeh input is an actual bokeh with #COM_BLUR_BOKEH_PIXELS
+       * size, anything may be connected. Use the real input size and remove asserts? */
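+      /* The checks above guarantee |dx| < size and |dy| < size, so the normalized offset
+       * lies in (-1, 1); map it to bokeh image coordinates around the center pixel. */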
+      const float u = (float)(COM_BLUR_BOKEH_PIXELS / 2) +
+                      (dx / size) * (float)((COM_BLUR_BOKEH_PIXELS / 2) - 1);
+      const float v = (float)(COM_BLUR_BOKEH_PIXELS / 2) +
+                      (dy / size) * (float)((COM_BLUR_BOKEH_PIXELS / 2) - 1);
+      float bokeh[4];
+      p.bokeh_input->read_elem_checked(u, v, bokeh);
+      madd_v4_v4v4(p.color_accum, bokeh, color);
+      add_v4_v4(p.multiplier_accum, bokeh);
+    }
+  }
+}
+
+void VariableSizeBokehBlurOperation::update_memory_buffer_partial(MemoryBuffer *output,
+                                                                  const rcti &area,
+                                                                  Span<MemoryBuffer *> inputs)
+{
+  PixelData p;
+  p.bokeh_input = inputs[BOKEH_INPUT_INDEX];
+  p.size_input = inputs[SIZE_INPUT_INDEX];
+  p.image_input = inputs[IMAGE_INPUT_INDEX];
+  p.step = QualityStepHelper::getStep();
+  p.threshold = m_threshold;
+  p.image_width = this->getWidth();
+  p.image_height = this->getHeight();
+
+  rcti scalar_area;
+  this->get_area_of_interest(SIZE_INPUT_INDEX, area, scalar_area);
+  BLI_rcti_isect(&scalar_area, &p.size_input->get_rect(), &scalar_area);
+  const float max_size = p.size_input->get_max_value(scalar_area);
+
+  const float max_dim = MAX2(this->getWidth(), this->getHeight());
+  p.scalar = m_do_size_scale ? (max_dim / 100.0f) : 1.0f;
+  p.max_blur_scalar = static_cast<int>(max_size * p.scalar);
+  CLAMP(p.max_blur_scalar, 1, m_maxBlur);
+
+  for (BuffersIterator<float> it = output->iterate_with({p.image_input, p.size_input}, area);
+       !it.is_end();
+       ++it) {
+    const float *color = it.in(0);
+    const float size = *it.in(1);
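+    /* Start the accumulators with the center pixel at full weight; blur_pixel() adds
+     * the bokeh-weighted contributions of the neighborhood. */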
+    copy_v4_v4(p.color_accum, color);
+    copy_v4_fl(p.multiplier_accum, 1.0f);
+    p.size_center = size * p.scalar;
+
+    if (p.size_center > p.threshold) {
+      blur_pixel(it.x, it.y, p);
+    }
+
+    it.out[0] = p.color_accum[0] / p.multiplier_accum[0];
+    it.out[1] = p.color_accum[1] / p.multiplier_accum[1];
+    it.out[2] = p.color_accum[2] / p.multiplier_accum[2];
+    it.out[3] = p.color_accum[3] / p.multiplier_accum[3];
+
+    /* Blend in out values over the threshold, otherwise we get sharp, ugly transitions. */
+    if ((p.size_center > p.threshold) && (p.size_center < p.threshold * 2.0f)) {
+      /* Factor from 0-1. */
+      const float fac = (p.size_center - p.threshold) / p.threshold;
+      interp_v4_v4v4(it.out, color, it.out, fac);
+    }
+  }
+}
+
#ifdef COM_DEFOCUS_SEARCH
// InverseSearchRadiusOperation
InverseSearchRadiusOperation::InverseSearchRadiusOperation()