git.blender.org/blender.git
author    Sergey Sharybin <sergey.vfx@gmail.com>  2019-03-08 19:00:11 +0300
committer Sergey Sharybin <sergey.vfx@gmail.com>  2019-03-08 19:02:22 +0300
commit    bfebb494f634ce3b8c6c468bac72d2abc51af45b (patch)
tree      474304916556fa3d9b62d2b35fd6f100e75fdb53 /source/blender/blenkernel/intern/subdiv_ccg.c
parent    aac7d53d58fa825d1135f96d9c73fd199084af44 (diff)
Fix T61961: Smooth brush distorts multires
The issue was caused by a lack of proper accumulation for averaging: doing the average incrementally introduced a bias toward the most recently visited adjacent faces.
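
To see the bias concretely: chaining pairwise averages r = (r + v_i) / 2 halves the weight of everything accumulated so far at every step, so with n adjacent faces the first one ends up weighted 1/2^(n-1) rather than 1/n. A toy check in C (illustrative only, not code from this commit):

    #include <stdio.h>

    int main(void)
    {
      const float v[3] = {0.0f, 0.0f, 3.0f};
      /* Old scheme: incremental pairwise averaging. Every step halves
       * the combined weight of all previously visited values. */
      float incremental = v[0];
      for (int i = 1; i < 3; i++) {
        incremental = (incremental + v[i]) / 2.0f;
      }
      /* New scheme: accumulate everything, then divide once. */
      float accumulated = 0.0f;
      for (int i = 0; i < 3; i++) {
        accumulated += v[i];
      }
      accumulated /= 3.0f;
      printf("incremental: %.2f, accumulated: %.2f\n", incremental, accumulated);
      return 0;
    }

It prints "incremental: 1.50, accumulated: 1.00": the pairwise result gives the last value weight 1/2 instead of 1/3.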
Diffstat (limited to 'source/blender/blenkernel/intern/subdiv_ccg.c')
-rw-r--r--  source/blender/blenkernel/intern/subdiv_ccg.c | 174
1 file changed, 114 insertions(+), 60 deletions(-)
diff --git a/source/blender/blenkernel/intern/subdiv_ccg.c b/source/blender/blenkernel/intern/subdiv_ccg.c
index 7d9dc3288a8..c262ccfe16f 100644
--- a/source/blender/blenkernel/intern/subdiv_ccg.c
+++ b/source/blender/blenkernel/intern/subdiv_ccg.c
@@ -1007,17 +1007,53 @@ static void average_grid_element(SubdivCCG *subdiv_ccg,
   }
 }
 
-static void copy_grid_element(SubdivCCG *subdiv_ccg,
-                              CCGKey *key,
-                              CCGElem *destination,
-                              CCGElem *source)
+/* Accumulator to hold data during averaging. */
+typedef struct GridElementAccumulator {
+  float co[3];
+  float no[3];
+  float mask;
+} GridElementAccumulator;
+
+static void element_accumulator_init(GridElementAccumulator *accumulator)
 {
-  copy_v3_v3(CCG_elem_co(key, destination), CCG_elem_co(key, source));
+  zero_v3(accumulator->co);
+  zero_v3(accumulator->no);
+  accumulator->mask = 0.0f;
+}
+
+static void element_accumulator_add(GridElementAccumulator *accumulator,
+                                    const SubdivCCG *subdiv_ccg,
+                                    CCGKey *key,
+                                    /*const*/ CCGElem *grid_element)
+{
+  add_v3_v3(accumulator->co, CCG_elem_co(key, grid_element));
+  if (subdiv_ccg->has_normal) {
+    add_v3_v3(accumulator->no, CCG_elem_no(key, grid_element));
+  }
+  if (subdiv_ccg->has_mask) {
+    accumulator->mask += *CCG_elem_mask(key, grid_element);
+  }
+}
+
+static void element_accumulator_mul_fl(GridElementAccumulator *accumulator,
+                                       const float f)
+{
+  mul_v3_fl(accumulator->co, f);
+  mul_v3_fl(accumulator->no, f);
+  accumulator->mask *= f;
+}
+
+static void element_accumulator_copy(SubdivCCG *subdiv_ccg,
+                                     CCGKey *key,
+                                     CCGElem *destination,
+                                     const GridElementAccumulator *accumulator)
+{
+  copy_v3_v3(CCG_elem_co(key, destination), accumulator->co);
   if (subdiv_ccg->has_normal) {
-    copy_v3_v3(CCG_elem_no(key, destination), CCG_elem_no(key, source));
+    copy_v3_v3(CCG_elem_no(key, destination), accumulator->no);
   }
   if (subdiv_ccg->has_mask) {
-    *CCG_elem_mask(key, destination) = *CCG_elem_mask(key, source);
+    *CCG_elem_mask(key, destination) = accumulator->mask;
   }
 }
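
Taken together, the helpers above replace the removed copy_grid_element() with a four-step accumulate-then-divide pattern: init, add once per adjacent element, mul_fl by the reciprocal of the element count, then copy the mean back out. A minimal sketch of that calling order, using a hypothetical scalar Elem in place of the real CCGElem/CCGKey pair (whose layout is not part of this diff):

    #include <stdio.h>

    /* Stand-in scalar element; the real code reads CCGElem fields via CCGKey. */
    typedef struct Elem { float co; } Elem;
    typedef struct Accumulator { float co; } Accumulator;

    static void accumulator_init(Accumulator *acc) { acc->co = 0.0f; }
    static void accumulator_add(Accumulator *acc, const Elem *elem) { acc->co += elem->co; }
    static void accumulator_mul_fl(Accumulator *acc, const float f) { acc->co *= f; }
    static void accumulator_copy(Elem *elem, const Accumulator *acc) { elem->co = acc->co; }

    int main(void)
    {
      Elem elems[3] = {{0.0f}, {0.0f}, {3.0f}};
      Accumulator acc;
      /* Same order as the patch: init, add every adjacent element,
       * divide once, then write the result back to every element. */
      accumulator_init(&acc);
      for (int i = 0; i < 3; i++) {
        accumulator_add(&acc, &elems[i]);
      }
      accumulator_mul_fl(&acc, 1.0f / 3.0f);
      for (int i = 0; i < 3; i++) {
        accumulator_copy(&elems[i], &acc);
      }
      printf("%.2f %.2f %.2f\n", elems[0].co, elems[1].co, elems[2].co);
      return 0;
    }

Running it prints "1.00 1.00 1.00": every element receives the same unbiased mean.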
@@ -1061,10 +1097,15 @@ typedef struct AverageGridsBoundariesData {
   CCGKey *key;
 } AverageGridsBoundariesData;
 
+typedef struct AverageGridsBoundariesTLSData {
+  GridElementAccumulator *accumulators;
+} AverageGridsBoundariesTLSData;
+
 static void subdiv_ccg_average_grids_boundary(
     SubdivCCG *subdiv_ccg,
     CCGKey *key,
-    SubdivCCGAdjacentEdge *adjacent_edge)
+    SubdivCCGAdjacentEdge *adjacent_edge,
+    AverageGridsBoundariesTLSData *tls)
 {
   const int num_adjacent_faces = adjacent_edge->num_adjacent_faces;
   const int grid_size2 = subdiv_ccg->grid_size * 2;
@@ -1072,39 +1113,35 @@ static void subdiv_ccg_average_grids_boundary(
     /* Nothing to average with. */
     return;
   }
-  /* Incremental average result to elements of a first adjacent face.
-   *
-   * Arguably, this is less precise than accumulating and then dividing once,
-   * but on another hand this is more stable when coordinates are big. */
-  for (int face_index = 1; face_index < num_adjacent_faces; face_index++) {
-    /* NOTE: We ignore very first and very last elements, they correspond
-     * to corner vertices, and they can belong to multiple edges.
-     * The fact that they can belong to multiple edges means we can't
-     * safely average them.
-     * The fact that they correspond to corner elements means they will
-     * be handled at the upcoming pass over corner elements. */
+  if (tls->accumulators == NULL) {
+    tls->accumulators = MEM_calloc_arrayN(sizeof(GridElementAccumulator),
+                                          grid_size2,
+                                          "average accumulators");
+  }
+  else {
     for (int i = 1; i < grid_size2 - 1; i++) {
-      CCGElem *grid_element_0 =
-          adjacent_edge->boundary_elements[0][i];
-      CCGElem *grid_element_face_index =
+      element_accumulator_init(&tls->accumulators[i]);
+    }
+  }
+  for (int face_index = 0; face_index < num_adjacent_faces; face_index++) {
+    for (int i = 1; i < grid_size2 - 1; i++) {
+      CCGElem *grid_element =
           adjacent_edge->boundary_elements[face_index][i];
-      average_grid_element(subdiv_ccg,
-                           key,
-                           grid_element_0,
-                           grid_element_face_index);
+      element_accumulator_add(
+          &tls->accumulators[i], subdiv_ccg, key, grid_element);
     }
   }
+  for (int i = 1; i < grid_size2 - 1; i++) {
+    element_accumulator_mul_fl(
+        &tls->accumulators[i], 1.0f / (float)num_adjacent_faces);
+  }
   /* Copy averaged value to all the other faces. */
-  for (int face_index = 1; face_index < num_adjacent_faces; face_index++) {
-    for (int i = 1; i < grid_size2 - 1; i++) {
-      CCGElem *grid_element_0 =
-          adjacent_edge->boundary_elements[0][i];
-      CCGElem *grid_element_face_index =
+  for (int face_index = 0; face_index < num_adjacent_faces; face_index++) {
+    for (int i = 1; i < grid_size2 - 1; i++) {
+      CCGElem *grid_element =
           adjacent_edge->boundary_elements[face_index][i];
-      copy_grid_element(subdiv_ccg,
-                        key,
-                        grid_element_face_index,
-                        grid_element_0);
+      element_accumulator_copy(
+          subdiv_ccg, key, grid_element, &tls->accumulators[i]);
     }
   }
 }
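
The new tls argument carries per-thread scratch space: the accumulator array is allocated lazily on the first edge a worker thread handles (MEM_calloc_arrayN returns zeroed memory, so no separate init pass is needed then), re-initialized in place for every subsequent edge on the same thread, and freed once per thread by the finalize callback added below. A standalone sketch of that lifecycle with hypothetical names, using plain calloc/free instead of Blender's guarded MEM_* allocators:

    #include <stdlib.h>
    #include <string.h>

    typedef struct TLSData {
      float *accumulators; /* NULL until this thread handles its first edge */
    } TLSData;

    static void process_edge(TLSData *tls, const int buffer_size)
    {
      if (tls->accumulators == NULL) {
        /* First edge on this thread: calloc gives a zero-initialized buffer. */
        tls->accumulators = calloc(buffer_size, sizeof(float));
      }
      else {
        /* The buffer survives from the previous edge: reset it in place
         * instead of paying for another allocation. */
        memset(tls->accumulators, 0, sizeof(float) * buffer_size);
      }
      /* ... accumulate boundary elements, divide once, copy back ... */
    }

    static void finalize_thread(TLSData *tls)
    {
      /* Mirrors subdiv_ccg_average_grids_boundaries_finalize(): one free per thread. */
      free(tls->accumulators);
      tls->accumulators = NULL;
    }

    int main(void)
    {
      TLSData tls = {NULL};
      process_edge(&tls, 16); /* allocates */
      process_edge(&tls, 16); /* reuses the same buffer */
      finalize_thread(&tls);
      return 0;
    }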
@@ -1112,14 +1149,23 @@ static void subdiv_ccg_average_grids_boundary(
 static void subdiv_ccg_average_grids_boundaries_task(
     void *__restrict userdata_v,
     const int adjacent_edge_index,
-    const ParallelRangeTLS *__restrict UNUSED(tls_v))
+    const ParallelRangeTLS *__restrict tls_v)
 {
   AverageGridsBoundariesData *data = userdata_v;
+  AverageGridsBoundariesTLSData *tls = tls_v->userdata_chunk;
   SubdivCCG *subdiv_ccg = data->subdiv_ccg;
   CCGKey *key = data->key;
   SubdivCCGAdjacentEdge *adjacent_edge =
       &subdiv_ccg->adjacent_edges[adjacent_edge_index];
-  subdiv_ccg_average_grids_boundary(subdiv_ccg, key, adjacent_edge);
+  subdiv_ccg_average_grids_boundary(subdiv_ccg, key, adjacent_edge, tls);
+}
+
+static void subdiv_ccg_average_grids_boundaries_finalize(
+    void *__restrict UNUSED(userdata),
+    void *__restrict tls_v)
+{
+  AverageGridsBoundariesTLSData *tls = tls_v;
+  MEM_SAFE_FREE(tls->accumulators);
 }
 
 typedef struct AverageGridsCornerData {
@@ -1137,28 +1183,17 @@ static void subdiv_ccg_average_grids_corners(
     /* Nothing to average with. */
     return;
   }
-  /* Incremental average result to elements of a first adjacent face.
-   * See comment to the boundary averaging. */
-  for (int face_index = 1; face_index < num_adjacent_faces; face_index++) {
-    CCGElem *grid_element_0 =
-        adjacent_vertex->corner_elements[0];
-    CCGElem *grid_element_face_index =
-        adjacent_vertex->corner_elements[face_index];
-    average_grid_element(subdiv_ccg,
-                         key,
-                         grid_element_0,
-                         grid_element_face_index);
+  GridElementAccumulator accumulator;
+  element_accumulator_init(&accumulator);
+  for (int face_index = 0; face_index < num_adjacent_faces; face_index++) {
+    CCGElem *grid_element = adjacent_vertex->corner_elements[face_index];
+    element_accumulator_add(&accumulator, subdiv_ccg, key, grid_element);
   }
+  element_accumulator_mul_fl(&accumulator, 1.0f / (float)num_adjacent_faces);
   /* Copy averaged value to all the other faces. */
-  for (int face_index = 1; face_index < num_adjacent_faces; face_index++) {
-    CCGElem *grid_element_0 =
-        adjacent_vertex->corner_elements[0];
-    CCGElem *grid_element_face_index =
-        adjacent_vertex->corner_elements[face_index];
-    copy_grid_element(subdiv_ccg,
-                      key,
-                      grid_element_face_index,
-                      grid_element_0);
+  for (int face_index = 0; face_index < num_adjacent_faces; face_index++) {
+    CCGElem *grid_element = adjacent_vertex->corner_elements[face_index];
+    element_accumulator_copy(subdiv_ccg, key, grid_element, &accumulator);
   }
 }
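
Written out for a corner shared by n faces, the removed pass computed (((v_0 + v_1) / 2 + v_2) / 2 + ...) / 2, i.e. weights w_0 = w_1 = 2^-(n-1) and w_k = 2^-(n-k) for k >= 2, while the new pass computes (v_0 + ... + v_(n-1)) / n, giving every adjacent face the intended weight 1/n.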
@@ -1175,22 +1210,33 @@ static void subdiv_ccg_average_grids_corners_task(
   subdiv_ccg_average_grids_corners(subdiv_ccg, key, adjacent_vertex);
 }
 
-static void subdiv_ccg_average_all_boundaries_and_corners(
+static void subdiv_ccg_average_all_boundaries(
     SubdivCCG *subdiv_ccg,
     CCGKey *key)
 {
   ParallelRangeSettings parallel_range_settings;
   BLI_parallel_range_settings_defaults(&parallel_range_settings);
-  /* Average grids across coarse edges. */
   AverageGridsBoundariesData boundaries_data = {
       .subdiv_ccg = subdiv_ccg,
       .key = key,
   };
+  AverageGridsBoundariesTLSData tls_data = {NULL};
+  parallel_range_settings.userdata_chunk = &tls_data;
+  parallel_range_settings.userdata_chunk_size = sizeof(tls_data);
+  parallel_range_settings.func_finalize =
+      subdiv_ccg_average_grids_boundaries_finalize;
   BLI_task_parallel_range(0, subdiv_ccg->num_adjacent_edges,
                           &boundaries_data,
                           subdiv_ccg_average_grids_boundaries_task,
                           &parallel_range_settings);
-  /* Average grids at coarse vertices. */
+}
+
+static void subdiv_ccg_average_all_corners(
+    SubdivCCG *subdiv_ccg,
+    CCGKey *key)
+{
+  ParallelRangeSettings parallel_range_settings;
+  BLI_parallel_range_settings_defaults(&parallel_range_settings);
   AverageGridsCornerData corner_data = {
       .subdiv_ccg = subdiv_ccg,
       .key = key,
@@ -1201,6 +1247,14 @@ static void subdiv_ccg_average_all_boundaries_and_corners(
                           &parallel_range_settings);
 }
 
+static void subdiv_ccg_average_all_boundaries_and_corners(
+    SubdivCCG *subdiv_ccg,
+    CCGKey *key)
+{
+  subdiv_ccg_average_all_boundaries(subdiv_ccg, key);
+  subdiv_ccg_average_all_corners(subdiv_ccg, key);
+}
+
 void BKE_subdiv_ccg_average_grids(SubdivCCG *subdiv_ccg)
 {
   CCGKey key;