author     Sergey Sharybin <sergey.vfx@gmail.com>    2018-09-18 18:09:08 +0300
committer  Sergey Sharybin <sergey.vfx@gmail.com>    2018-09-18 18:09:08 +0300
commit     9bf3e0299b18d65cf0da77a7a0a8a907055a3328 (patch)
tree       7464274008c6a71b31bb14915c556ecd05cca9ae /source
parent     41c039d6e90d7182042ec5f22386b8cf941c187f (diff)
Subdiv: CCG, initial grids stitching implementation
Currently this only works on the "inner" grid boundaries; averaging across face edges still needs to be implemented.
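
Before the diff itself, a minimal, self-contained C sketch of the stitching idea (not code from this commit; GRID_SIZE, the grid arrays and average_v3 are made up for illustration): two grids that belong to the same coarse face meet along a shared boundary row, and averaging the matching elements pairwise makes both copies agree again.

#include <stdio.h>

#define GRID_SIZE 4

/* Average two 3D points in place, so both copies end up identical. */
static void average_v3(float a[3], float b[3])
{
	for (int i = 0; i < 3; i++) {
		a[i] = b[i] = 0.5f * (a[i] + b[i]);
	}
}

int main(void)
{
	/* Hypothetical coordinate grids of two neighbouring corners, grid[y][x][xyz]. */
	float prev_grid[GRID_SIZE][GRID_SIZE][3] = {{{0.0f}}};
	float curr_grid[GRID_SIZE][GRID_SIZE][3] = {{{0.0f}}};
	/* Pretend evaluation left a mismatch along the shared boundary. */
	for (int i = 0; i < GRID_SIZE; i++) {
		prev_grid[0][i][2] = 0.1f;
	}
	/* Element (x=i, y=0) of the previous grid coincides with element
	 * (x=0, y=i) of the current grid; stitch them pairwise. */
	for (int i = 0; i < GRID_SIZE; i++) {
		average_v3(prev_grid[0][i], curr_grid[i][0]);
	}
	printf("stitched z: %f\n", prev_grid[0][1][2]); /* prints 0.050000 */
	return 0;
}
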
Diffstat (limited to 'source')
-rw-r--r--  source/blender/blenkernel/BKE_subdiv_ccg.h      21
-rw-r--r--  source/blender/blenkernel/intern/multires.c     37
-rw-r--r--  source/blender/blenkernel/intern/paint.c         2
-rw-r--r--  source/blender/blenkernel/intern/subdiv_ccg.c  110
4 files changed, 149 insertions(+), 21 deletions(-)
diff --git a/source/blender/blenkernel/BKE_subdiv_ccg.h b/source/blender/blenkernel/BKE_subdiv_ccg.h
index ed5ef0e19f5..99037a25a64 100644
--- a/source/blender/blenkernel/BKE_subdiv_ccg.h
+++ b/source/blender/blenkernel/BKE_subdiv_ccg.h
@@ -55,6 +55,18 @@ typedef struct SubdivToCCGSettings {
bool need_mask;
} SubdivToCCGSettings;
+/* This is actually a coarse face, which consists of multiple CCG grids. */
+typedef struct SubdivCCGFace {
+ /* Total number of grids in this face.
+ *
+ * This corresponds 1:1 to the number of corners (or loops) of the coarse
+ * face.
+ */
+ int num_grids;
+ /* Index of first grid from this face in SubdivCCG->grids array. */
+ int start_grid_index;
+} SubdivCCGFace;
+
/* Representation of subdivision surface which uses CCG grids. */
typedef struct SubdivCCG {
/* This is a subdivision surface this CCG was created for.
@@ -104,6 +116,12 @@ typedef struct SubdivCCG {
int normal_offset;
int mask_offset;
+ /* Faces from which grids are emitted. */
+ int num_faces;
+ SubdivCCGFace *faces;
+ /* Indexed by grid index, points to corresponding face from `faces`. */
+ SubdivCCGFace **grid_faces;
+
struct DMFlagMat *grid_flag_mats;
BLI_bitmap **grid_hidden;
@@ -161,4 +179,7 @@ void BKE_subdiv_ccg_key_top_level(
/* Recalculate all normals based on grid element coordinates. */
void BKE_subdiv_ccg_recalc_normals(SubdivCCG *subdiv_ccg);
+/* Average grid coordinates and normals along the grid boundaries. */
+void BKE_subdiv_ccg_average_grids(SubdivCCG *subdiv_ccg);
+
#endif /* __BKE_SUBDIV_CCG_H__ */
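
A hedged usage sketch of the new face data (assuming only the declarations added to BKE_subdiv_ccg.h above; print_face_grid_ranges and the printf are illustrative, not part of the API): each coarse face owns one grid per corner, stored contiguously starting at start_grid_index, and grid_faces is the reverse mapping.

#include <stdio.h>

#include "BKE_subdiv_ccg.h"

static void print_face_grid_ranges(const SubdivCCG *subdiv_ccg)
{
	for (int face_index = 0; face_index < subdiv_ccg->num_faces; face_index++) {
		const SubdivCCGFace *face = &subdiv_ccg->faces[face_index];
		/* One grid per corner (loop) of the coarse face, laid out contiguously. */
		for (int corner = 0; corner < face->num_grids; corner++) {
			const int grid_index = face->start_grid_index + corner;
			/* Reverse mapping: grid index -> owning face.
			 * BLI_assert(subdiv_ccg->grid_faces[grid_index] == face); */
			printf("face %d owns grid %d\n", face_index, grid_index);
		}
	}
}
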
diff --git a/source/blender/blenkernel/intern/multires.c b/source/blender/blenkernel/intern/multires.c
index 4dd64e3d8b2..e8b1a5faa67 100644
--- a/source/blender/blenkernel/intern/multires.c
+++ b/source/blender/blenkernel/intern/multires.c
@@ -1338,20 +1338,29 @@ void multires_modifier_update_hidden(DerivedMesh *dm)
void multires_stitch_grids(Object *ob)
{
- /* utility for smooth brush */
- if (ob && ob->derivedFinal) {
- CCGDerivedMesh *ccgdm = (CCGDerivedMesh *)ob->derivedFinal;
- CCGFace **faces;
- int totface;
-
- if (ccgdm->pbvh) {
- BKE_pbvh_get_grid_updates(ccgdm->pbvh, false, (void ***)&faces, &totface);
-
- if (totface) {
- ccgSubSurf_stitchFaces(ccgdm->ss, 0, faces, totface);
- MEM_freeN(faces);
- }
- }
+ if (ob == NULL) {
+ return;
+ }
+ SculptSession *sculpt_session = ob->sculpt;
+ if (sculpt_session == NULL) {
+ return;
+ }
+ PBVH *pbvh = sculpt_session->pbvh;
+ SubdivCCG *subdiv_ccg = sculpt_session->subdiv_ccg;
+ if (pbvh == NULL || subdiv_ccg == NULL) {
+ return;
+ }
+ BLI_assert(BKE_pbvh_type(pbvh) == PBVH_GRIDS);
+ /* NOTE: Currently CCG does not keep track of faces, making it impossible
+ * to use BKE_pbvh_get_grid_updates().
+ */
+ CCGFace **faces;
+ int num_faces;
+ BKE_pbvh_get_grid_updates(pbvh, false, (void ***)&faces, &num_faces);
+ if (num_faces) {
+ /* TODO(sergey): Only average actually affected faces. */
+ BKE_subdiv_ccg_average_grids(subdiv_ccg);
+ MEM_freeN(faces);
}
}
diff --git a/source/blender/blenkernel/intern/paint.c b/source/blender/blenkernel/intern/paint.c
index 926ef6a7a54..d843ae5173f 100644
--- a/source/blender/blenkernel/intern/paint.c
+++ b/source/blender/blenkernel/intern/paint.c
@@ -1197,7 +1197,7 @@ static PBVH *build_pbvh_from_ccg(Object *ob, SubdivCCG *subdiv_ccg)
pbvh,
subdiv_ccg->grids, subdiv_ccg->num_grids,
&key,
- NULL,
+ (void **)subdiv_ccg->grid_faces,
subdiv_ccg->grid_flag_mats,
subdiv_ccg->grid_hidden);
pbvh_show_diffuse_color_set(pbvh, ob->sculpt->show_diffuse_color);
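
A hedged sketch of what the change above enables (the function name example_consume_grid_updates, the cast, and the loop body are assumptions, not code from this commit): since the PBVH is now built with subdiv_ccg->grid_faces, the opaque face pointers returned by BKE_pbvh_get_grid_updates() can be read back as SubdivCCGFace pointers.

#include "MEM_guardedalloc.h"

#include "BKE_pbvh.h"
#include "BKE_subdiv_ccg.h"

void example_consume_grid_updates(PBVH *pbvh)
{
	void **gridfaces;
	int totface;
	BKE_pbvh_get_grid_updates(pbvh, false, &gridfaces, &totface);
	for (int i = 0; i < totface; i++) {
		SubdivCCGFace *face = (SubdivCCGFace *)gridfaces[i];
		(void)face; /* e.g. collect the faces that need re-averaging. */
	}
	if (totface) {
		MEM_freeN(gridfaces);
	}
}
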
diff --git a/source/blender/blenkernel/intern/subdiv_ccg.c b/source/blender/blenkernel/intern/subdiv_ccg.c
index d9c0c721cd5..c6554203cf8 100644
--- a/source/blender/blenkernel/intern/subdiv_ccg.c
+++ b/source/blender/blenkernel/intern/subdiv_ccg.c
@@ -44,6 +44,8 @@
#include "BKE_subdiv.h"
#include "BKE_subdiv_eval.h"
+#include "opensubdiv_topology_refiner_capi.h"
+
/* =============================================================================
* Generally useful internal helpers.
*/
@@ -122,6 +124,7 @@ static void subdiv_ccg_alloc_elements(SubdivCCG *subdiv_ccg,
{
const int element_size = element_size_bytes_get(subdiv_ccg);
/* Allocate memory for surface grids. */
+ const int num_faces = coarse_mesh->totpoly;
const int num_grids = coarse_mesh->totloop;
const int grid_size = grid_size_for_level_get(
subdiv_ccg, subdiv_ccg->level);
@@ -149,6 +152,14 @@ static void subdiv_ccg_alloc_elements(SubdivCCG *subdiv_ccg,
BLI_BITMAP_NEW(grid_area, "ccg grid hidden");
}
/* TODO(sergey): Allocate memory for loose elements. */
+ /* Allocate memory for faces. */
+ subdiv_ccg->num_faces = num_faces;
+ if (num_faces) {
+ subdiv_ccg->faces = MEM_calloc_arrayN(
+ num_faces, sizeof(SubdivCCGFace), "Subdiv CCG faces");
+ subdiv_ccg->grid_faces = MEM_calloc_arrayN(
+ num_grids, sizeof(SubdivCCGFace*), "Subdiv CCG grid faces");
+ }
}
/* =============================================================================
@@ -219,9 +230,11 @@ static void subdiv_ccg_eval_regular_grid(CCGEvalGridsData *data,
const int grid_size = subdiv_ccg->grid_size;
const float grid_size_1_inv = 1.0f / (float)(grid_size - 1);
const int element_size = element_size_bytes_get(subdiv_ccg);
+ SubdivCCGFace *faces = subdiv_ccg->faces;
+ SubdivCCGFace **grid_faces = subdiv_ccg->grid_faces;
for (int corner = 0; corner < coarse_poly->totloop; corner++) {
- unsigned char *grid = (unsigned char *)subdiv_ccg->grids[
- coarse_poly->loopstart + corner];
+ const int grid_index = coarse_poly->loopstart + corner;
+ unsigned char *grid = (unsigned char *)subdiv_ccg->grids[grid_index];
for (int y = 0; y < grid_size; y++) {
const float grid_v = (float)y * grid_size_1_inv;
for (int x = 0; x < grid_size; x++) {
@@ -237,6 +250,8 @@ static void subdiv_ccg_eval_regular_grid(CCGEvalGridsData *data,
&grid[grid_element_offset]);
}
}
+ /* Assign grid's face. */
+ grid_faces[grid_index] = &faces[coarse_poly_index];
}
}
@@ -248,9 +263,11 @@ static void subdiv_ccg_eval_special_grid(CCGEvalGridsData *data,
const int grid_size = subdiv_ccg->grid_size;
const float grid_size_1_inv = 1.0f / (float)(grid_size - 1);
const int element_size = element_size_bytes_get(subdiv_ccg);
+ SubdivCCGFace *faces = subdiv_ccg->faces;
+ SubdivCCGFace **grid_faces = subdiv_ccg->grid_faces;
for (int corner = 0; corner < coarse_poly->totloop; corner++) {
- unsigned char *grid = (unsigned char *)subdiv_ccg->grids[
- coarse_poly->loopstart + corner];
+ const int grid_index = coarse_poly->loopstart + corner;
+ unsigned char *grid = (unsigned char *)subdiv_ccg->grids[grid_index];
for (int y = 0; y < grid_size; y++) {
const float u = 1.0f - ((float)y * grid_size_1_inv);
for (int x = 0; x < grid_size; x++) {
@@ -266,6 +283,8 @@ static void subdiv_ccg_eval_special_grid(CCGEvalGridsData *data,
&grid[grid_element_offset]);
}
}
+ /* Assign grid's face. */
+ grid_faces[grid_index] = &faces[coarse_poly_index];
}
}
@@ -275,6 +294,7 @@ static void subdiv_ccg_eval_grids_task(
const ParallelRangeTLS *__restrict UNUSED(tls))
{
CCGEvalGridsData *data = userdata_v;
+ SubdivCCG *subdiv_ccg = data->subdiv_ccg;
const Mesh *coarse_mesh = data->coarse_mesh;
const MPoly *coarse_mpoly = coarse_mesh->mpoly;
const MPoly *coarse_poly = &coarse_mpoly[coarse_poly_index];
@@ -284,6 +304,10 @@ static void subdiv_ccg_eval_grids_task(
else {
subdiv_ccg_eval_special_grid(data, coarse_poly);
}
+ /* Assign information in the faces. */
+ subdiv_ccg->faces[coarse_poly_index].num_grids = coarse_poly->totloop;
+ subdiv_ccg->faces[coarse_poly_index].start_grid_index =
+ coarse_poly->loopstart;
}
static bool subdiv_ccg_evaluate_grids(
@@ -330,6 +354,7 @@ SubdivCCG *BKE_subdiv_to_ccg(
{
BKE_subdiv_stats_begin(&subdiv->stats, SUBDIV_STATS_SUBDIV_TO_CCG);
SubdivCCG *subdiv_ccg = MEM_callocN(sizeof(SubdivCCG), "subdiv ccg");
+ subdiv_ccg->subdiv = subdiv;
subdiv_ccg->level = bitscan_forward_i(settings->resolution - 1);
subdiv_ccg->grid_size =
grid_size_for_level_get(subdiv_ccg, subdiv_ccg->level);
@@ -341,7 +366,6 @@ SubdivCCG *BKE_subdiv_to_ccg(
return NULL;
}
BKE_subdiv_stats_end(&subdiv->stats, SUBDIV_STATS_SUBDIV_TO_CCG);
- subdiv_ccg->subdiv = subdiv;
return subdiv_ccg;
}
@@ -378,6 +402,7 @@ void BKE_subdiv_ccg_destroy(SubdivCCG *subdiv_ccg)
if (subdiv_ccg->subdiv != NULL) {
BKE_subdiv_free(subdiv_ccg->subdiv);
}
+ MEM_SAFE_FREE(subdiv_ccg->faces);
+ MEM_SAFE_FREE(subdiv_ccg->grid_faces);
MEM_freeN(subdiv_ccg);
}
@@ -528,7 +553,8 @@ static void subdiv_ccg_recalc_inner_grid_normals(SubdivCCG *subdiv_ccg)
BKE_subdiv_ccg_key_top_level(&key, subdiv_ccg);
RecalcInnerNormalsData data = {
.subdiv_ccg = subdiv_ccg,
- .key = &key};
+ .key = &key
+ };
RecalcInnerNormalsTLSData tls_data = {NULL};
ParallelRangeSettings parallel_range_settings;
BLI_parallel_range_settings_defaults(&parallel_range_settings);
@@ -549,4 +575,76 @@ void BKE_subdiv_ccg_recalc_normals(SubdivCCG *subdiv_ccg)
return;
}
subdiv_ccg_recalc_inner_grid_normals(subdiv_ccg);
+ BKE_subdiv_ccg_average_grids(subdiv_ccg);
+}
+
+/* =============================================================================
+ * Boundary averaging/stitching.
+ */
+
+typedef struct AverageInnerGridsData {
+ SubdivCCG *subdiv_ccg;
+ CCGKey *key;
+} AverageInnerGridsData;
+
+static void average_grid_element_value_v3(float a[3], float b[3])
+{
+ add_v3_v3(a, b);
+ mul_v3_fl(a, 0.5f);
+ copy_v3_v3(b, a);
+}
+
+static void average_grid_element(SubdivCCG *subdiv_ccg,
+ CCGKey *key,
+ CCGElem *grid_element_a,
+ CCGElem *grid_element_b)
+{
+ average_grid_element_value_v3(CCG_elem_co(key, grid_element_a),
+ CCG_elem_co(key, grid_element_b));
+ if (subdiv_ccg->has_normal) {
+ average_grid_element_value_v3(CCG_elem_no(key, grid_element_a),
+ CCG_elem_no(key, grid_element_b));
+ }
+}
+
+static void subdiv_ccg_average_inner_grids_task(
+ void *__restrict userdata_v,
+ const int face_index,
+ const ParallelRangeTLS *__restrict UNUSED(tls_v))
+{
+ AverageInnerGridsData *data = userdata_v;
+ SubdivCCG *subdiv_ccg = data->subdiv_ccg;
+ CCGKey *key = data->key;
+ CCGElem **grids = subdiv_ccg->grids;
+ SubdivCCGFace *faces = subdiv_ccg->faces;
+ SubdivCCGFace *face = &faces[face_index];
+ const int num_face_grids = face->num_grids;
+ const int grid_size = subdiv_ccg->grid_size;
+ CCGElem *prev_grid = grids[face->start_grid_index + num_face_grids - 1];
+ for (int corner = 0; corner < num_face_grids; corner++) {
+ CCGElem *grid = grids[face->start_grid_index + corner];
+ for (int i = 0; i < grid_size; i++) {
+ CCGElem *prev_grid_element = CCG_grid_elem(key, prev_grid, i, 0);
+ CCGElem *grid_element = CCG_grid_elem(key, grid, 0, i);
+ average_grid_element(
+ subdiv_ccg, key, prev_grid_element, grid_element);
+ }
+ prev_grid = grid;
+ }
+}
+
+void BKE_subdiv_ccg_average_grids(SubdivCCG *subdiv_ccg)
+{
+ CCGKey key;
+ BKE_subdiv_ccg_key_top_level(&key, subdiv_ccg);
+ AverageInnerGridsData data = {
+ .subdiv_ccg = subdiv_ccg,
+ .key = &key,
+ };
+ ParallelRangeSettings parallel_range_settings;
+ BLI_parallel_range_settings_defaults(&parallel_range_settings);
+ BLI_task_parallel_range(0, subdiv_ccg->num_faces,
+ &data,
+ subdiv_ccg_average_inner_grids_task,
+ &parallel_range_settings);
}