
git.blender.org/blender.git
author	Lukas Tönne <lukas.toenne@gmail.com>	2013-12-04 19:05:56 +0400
committer	Lukas Tönne <lukas.toenne@gmail.com>	2013-12-04 19:05:56 +0400
commit	67134a7bf689279785e2e40b29cd24243813998b (patch)
tree	6c3a117459901d455d52726bd7bddd3e931e9650	/source/blender/compositor/operations/COM_PlaneTrackWarpImageOperation.cpp
parent	04e434cd81edf942289f7094bc5fdc3ab8846259 (diff)
Fix for EWA (elliptical weighted average) sampling in the compositor.
EWA sampling is designed for downsampling images, i.e. scaling down the size of input image pixels, which happens regularly in compositing. While the standard sampling methods (linear, cubic) work reasonably well for linear transformations, they don't yield good results in non-linear cases like perspective projection or arbitrary displacement. EWA sampling is comparable to mipmapping, but avoids problems with discontinuities.

To work correctly the EWA algorithm needs the partial derivatives of the mapping functions which convert output pixel coordinates back into the input image space (the 2x2 Jacobian matrix). With these derivatives the EWA algorithm projects ellipses into the input space and accumulates colors over their area. This calculation was not done correctly in the compositor: only the derivatives du/dx and dv/dy were calculated, which basically means it only worked for non-rotated input images.

The patch introduces full derivative calculations (du/dx, du/dy, dv/dx, dv/dy) for the 3 nodes which currently use EWA sampling: PlaneTrackWarp, MapUV and Displace. In addition, the calculation of the ellipse area and of the axis-aligned bounding boxes has been fixed.

For the MapUV and Displace nodes the derivatives have to be estimated by evaluating the UV/displacement inputs with 1-pixel offsets, which can still have problems with discontinuities and sub-pixel variations. These potential problems can only be alleviated by more radical design changes in the compositor functions, which are out of scope for now. Basically, the values passed to the UV/displacement inputs would need to be associated with their first-order derivatives, which requires a general approach to derivatives in all nodes.
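For the MapUV/Displace case, a finite-difference estimate of the full 2x2 Jacobian along the lines described above might look roughly like the sketch below. This is illustrative only and not code from this patch; SampleUVFn and estimateDeriv are hypothetical names, and the deriv layout matches the one used in the diff further down: deriv[0] = (du/dx, du/dy), deriv[1] = (dv/dx, dv/dy).

/* Illustrative sketch (not part of this patch): estimating the full 2x2 Jacobian of a
 * UV mapping by 1-pixel central differences, as described above for MapUV/Displace.
 * sampleUV(x, y, uv) is assumed to return the input-space UV coordinates that output
 * pixel (x, y) maps to. */
#include <functional>

typedef std::function<void(float x, float y, float uv[2])> SampleUVFn;

static void estimateDeriv(const SampleUVFn &sampleUV, float x, float y,
                          float r_uv[2], float r_deriv[2][2])
{
	float uv_l[2], uv_r[2], uv_d[2], uv_u[2];

	sampleUV(x, y, r_uv);
	sampleUV(x - 1.0f, y, uv_l);
	sampleUV(x + 1.0f, y, uv_r);
	sampleUV(x, y - 1.0f, uv_d);
	sampleUV(x, y + 1.0f, uv_u);

	/* Central differences; discontinuities and sub-pixel variation in the
	 * UV/displacement input are not handled, as noted above. */
	r_deriv[0][0] = 0.5f * (uv_r[0] - uv_l[0]);  /* du/dx */
	r_deriv[1][0] = 0.5f * (uv_r[1] - uv_l[1]);  /* dv/dx */
	r_deriv[0][1] = 0.5f * (uv_u[0] - uv_d[0]);  /* du/dy */
	r_deriv[1][1] = 0.5f * (uv_u[1] - uv_d[1]);  /* dv/dy */
}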
Diffstat (limited to 'source/blender/compositor/operations/COM_PlaneTrackWarpImageOperation.cpp')
-rw-r--r--	source/blender/compositor/operations/COM_PlaneTrackWarpImageOperation.cpp	101
1 file changed, 22 insertions, 79 deletions
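The ellipse-area and bounding-box remark above can be made concrete as follows (a general property of EWA footprints, not text from the patch): a unit pixel footprint mapped through the Jacobian J = [[du/dx, du/dy], [dv/dx, dv/dy]] becomes an ellipse in input space with

    area = pi * |det J| = pi * |du/dx * dv/dy - du/dy * dv/dx|

and an axis-aligned bounding box with half-extents sqrt((du/dx)^2 + (du/dy)^2) along u and sqrt((dv/dx)^2 + (dv/dy)^2) along v. With only du/dx and dv/dy available, both quantities degenerate to the axis-aligned case, which is why rotated or perspective-warped inputs were sampled incorrectly before this fix.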
diff --git a/source/blender/compositor/operations/COM_PlaneTrackWarpImageOperation.cpp b/source/blender/compositor/operations/COM_PlaneTrackWarpImageOperation.cpp
index dba3a6f3505..7780023c465 100644
--- a/source/blender/compositor/operations/COM_PlaneTrackWarpImageOperation.cpp
+++ b/source/blender/compositor/operations/COM_PlaneTrackWarpImageOperation.cpp
@@ -36,60 +36,17 @@ extern "C" {
#include "BKE_tracking.h"
}
-BLI_INLINE bool isPointInsideQuad(const float x, const float y, const float corners[4][2])
-{
- float point[2];
-
- point[0] = x;
- point[1] = y;
-
- return isect_point_tri_v2(point, corners[0], corners[1], corners[2]) ||
- isect_point_tri_v2(point, corners[0], corners[2], corners[3]);
-}
-
-BLI_INLINE void warpCoord(float x, float y, float matrix[3][3], float uv[2])
+BLI_INLINE void warpCoord(float x, float y, float matrix[3][3], float uv[2], float deriv[2][2])
{
float vec[3] = {x, y, 1.0f};
mul_m3_v3(matrix, vec);
- vec[0] /= vec[2];
- vec[1] /= vec[2];
+ uv[0] = vec[0] / vec[2];
+ uv[1] = vec[1] / vec[2];
- copy_v2_v2(uv, vec);
-}
-
-BLI_INLINE void resolveUVAndDxDy(const float x, const float y, float matrix[3][3],
- float *u_r, float *v_r, float *dx_r, float *dy_r)
-{
- float inputUV[2];
- float uv_a[2], uv_b[2];
-
- float dx, dy;
- float uv_l, uv_r;
- float uv_u, uv_d;
-
- warpCoord(x, y, matrix, inputUV);
-
- /* adaptive sampling, red (U) channel */
- warpCoord(x - 1, y, matrix, uv_a);
- warpCoord(x + 1, y, matrix, uv_b);
- uv_l = fabsf(inputUV[0] - uv_a[0]);
- uv_r = fabsf(inputUV[0] - uv_b[0]);
-
- dx = 0.5f * (uv_l + uv_r);
-
- /* adaptive sampling, green (V) channel */
- warpCoord(x, y - 1, matrix, uv_a);
- warpCoord(x, y + 1, matrix, uv_b);
- uv_u = fabsf(inputUV[1] - uv_a[1]);
- uv_d = fabsf(inputUV[1] - uv_b[1]);
-
- dy = 0.5f * (uv_u + uv_d);
-
- *dx_r = dx;
- *dy_r = dy;
-
- *u_r = inputUV[0];
- *v_r = inputUV[1];
+ deriv[0][0] = (matrix[0][0] - matrix[0][2] * uv[0]) / vec[2];
+ deriv[1][0] = (matrix[0][1] - matrix[0][2] * uv[1]) / vec[2];
+ deriv[0][1] = (matrix[1][0] - matrix[1][2] * uv[0]) / vec[2];
+ deriv[1][1] = (matrix[1][1] - matrix[1][2] * uv[1]) / vec[2];
}
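The analytic derivatives added to warpCoord above follow from the quotient rule, assuming Blender's mul_m3_v3 convention of matrix[column][row] acting on column vectors: with X = matrix[0][0]*x + matrix[1][0]*y + matrix[2][0] and W = matrix[0][2]*x + matrix[1][2]*y + matrix[2][2], u = X/W gives

    du/dx = (dX/dx - u * dW/dx) / W = (matrix[0][0] - matrix[0][2] * u) / vec[2]

which is deriv[0][0]; the remaining entries deriv[0][1] = du/dy, deriv[1][0] = dv/dx and deriv[1][1] = dv/dy follow the same pattern using the corresponding matrix column and v = Y/W.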
PlaneTrackWarpImageOperation::PlaneTrackWarpImageOperation() : PlaneTrackCommonOperation()
@@ -98,9 +55,6 @@ PlaneTrackWarpImageOperation::PlaneTrackWarpImageOperation() : PlaneTrackCommonO
this->addOutputSocket(COM_DT_COLOR);
this->m_pixelReader = NULL;
this->setComplex(true);
-
- /* Currently hardcoded to 8 samples. */
- this->m_osa = 8;
}
void PlaneTrackWarpImageOperation::initExecution()
@@ -109,8 +63,6 @@ void PlaneTrackWarpImageOperation::initExecution()
this->m_pixelReader = this->getInputSocketReader(0);
- BLI_jitter_init(this->m_jitter[0], this->m_osa);
-
const int width = this->m_pixelReader->getWidth();
const int height = this->m_pixelReader->getHeight();
float frame_corners[4][2] = {{0.0f, 0.0f},
@@ -127,41 +79,32 @@ void PlaneTrackWarpImageOperation::deinitExecution()
this->m_pixelReader = NULL;
}
-void PlaneTrackWarpImageOperation::executePixelSampled(float output[4], float x_, float y_, PixelSampler sampler)
+void PlaneTrackWarpImageOperation::executePixelSampled(float output[4], float x, float y, PixelSampler sampler)
{
- float color_accum[4];
+ float xy[2] = {x, y};
+ float uv[2];
+ float deriv[2][2];
- zero_v4(color_accum);
- for (int sample = 0; sample < this->m_osa; sample++) {
- float current_x = x_ + this->m_jitter[sample][0],
- current_y = y_ + this->m_jitter[sample][1];
- if (isPointInsideQuad(current_x, current_y, this->m_frameSpaceCorners)) {
- float current_color[4];
- float u, v, dx, dy;
+ pixelTransform(xy, uv, deriv);
- resolveUVAndDxDy(current_x, current_y, m_perspectiveMatrix, &u, &v, &dx, &dy);
-
- /* derivatives are to be in normalized space.. */
- dx /= this->m_pixelReader->getWidth();
- dy /= this->m_pixelReader->getHeight();
-
- this->m_pixelReader->readFiltered(current_color, u, v, dx, dy, COM_PS_NEAREST);
- add_v4_v4(color_accum, current_color);
- }
- }
+ m_pixelReader->readFiltered(output, uv[0], uv[1], deriv[0], deriv[1], COM_PS_BILINEAR);
+}
- mul_v4_v4fl(output, color_accum, 1.0f / this->m_osa);
+void PlaneTrackWarpImageOperation::pixelTransform(const float xy[2], float r_uv[2], float r_deriv[2][2])
+{
+ warpCoord(xy[0], xy[1], m_perspectiveMatrix, r_uv, r_deriv);
}
bool PlaneTrackWarpImageOperation::determineDependingAreaOfInterest(rcti *input, ReadBufferOperation *readOperation, rcti *output)
{
float UVs[4][2];
+ float deriv[2][2];
/* TODO(sergey): figure out proper way to do this. */
- warpCoord(input->xmin - 2, input->ymin - 2, this->m_perspectiveMatrix, UVs[0]);
- warpCoord(input->xmax + 2, input->ymin - 2, this->m_perspectiveMatrix, UVs[1]);
- warpCoord(input->xmax + 2, input->ymax + 2, this->m_perspectiveMatrix, UVs[2]);
- warpCoord(input->xmin - 2, input->ymax + 2, this->m_perspectiveMatrix, UVs[3]);
+ warpCoord(input->xmin - 2, input->ymin - 2, this->m_perspectiveMatrix, UVs[0], deriv);
+ warpCoord(input->xmax + 2, input->ymin - 2, this->m_perspectiveMatrix, UVs[1], deriv);
+ warpCoord(input->xmax + 2, input->ymax + 2, this->m_perspectiveMatrix, UVs[2], deriv);
+ warpCoord(input->xmin - 2, input->ymax + 2, this->m_perspectiveMatrix, UVs[3], deriv);
float min[2], max[2];
INIT_MINMAX2(min, max);