
git.blender.org/blender.git
author     Sergey Sharybin <sergey.vfx@gmail.com>  2015-07-20 17:08:06 +0300
committer  Sergey Sharybin <sergey.vfx@gmail.com>  2015-07-20 23:29:26 +0300
commit     3d364896725db8336d785ba6cf977b62c0f2c0ce (patch)
tree       b189cdc4956b557d19b0c2ea976c6f48affa1ed2 /source/blender/blenkernel/intern
parent     2466c4f8cebd3977f29524d79050feff44b40fff (diff)
OpenSubdiv: Commit of OpenSubdiv integration into Blender
This commit contains all the remaining parts needed for the initial integration of OpenSubdiv into Blender's subdivision surface code. It includes both GPU and CPU backends, which work in the following way:

- When the SubSurf modifier is the last one in the modifier stack, the GPU pipeline of OpenSubdiv is used, making viewport performance as fast as possible. This also requires a graphics card with GLSL 1.5 support. If this requirement is not met, no GPU pipeline is used at all.

- If SubSurf is not the last modifier, or if the DerivedMesh is being evaluated for rendering, the CPU limit evaluation API from OpenSubdiv is used. This only replaces the legacy evaluation code from CCGSubSurf_legacy, but keeps the CCG structures exactly the same as they have been for ages now.

This integration is fully covered with ifdefs and not enabled by default, because there are several TODOs to be solved first:

- Face varying data interpolation is not really cleanly implemented for the GPU in OpenSubdiv 3.0, and it is not implemented at all for the limit evaluation API. This basically means we will have a really hard time supporting UVs.

- Limit evaluation only works with adaptively subdivided meshes so far, which basically means all the points of the CCG are pushed to the limit. This gives a different result from the old code.

- There are some serious optimizations possible in topology refiner creation, which would speed up initial OpenSubdiv mesh creation.

- There are some hardcoded assumptions in the GPU and DerivedMesh areas which could be generalized. That is something where Antony and Campbell can help, making the code structured in a way that is reusable by all planned viewport projects.

- There are also some workarounds in the dependency graph to make sure OpenGL buffers are only freed from the main thread.

Those wanting to experiment with this code should grab the dev branch (NOT master) from https://github.com/Nazg-Gul/OpenSubdiv/tree/dev. There are some patches applied in there which we are working on getting into upstream.
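
In rough terms, the backend selection described above boils down to the sketch below. This is illustrative only and not part of the patch: the enum and the three boolean flags are hypothetical names that summarize the rule, not symbols from Blender's code.

#include <stdbool.h>

typedef enum {
	SUBSURF_BACKEND_OSD_GPU,        /* GPU refine + draw, viewport only. */
	SUBSURF_BACKEND_OSD_CPU_LIMIT,  /* CPU limit evaluation into CCG grids. */
} SubsurfBackend;

/* Hypothetical summary of the rule above; the three flags stand in for
 * checks that the real code performs elsewhere in the modifier stack. */
static SubsurfBackend choose_subsurf_backend(bool subsurf_is_last_modifier,
                                             bool gpu_has_glsl_150,
                                             bool evaluating_for_render)
{
	if (!evaluating_for_render && subsurf_is_last_modifier && gpu_has_glsl_150) {
		/* Viewport with a capable GPU: OpenSubdiv refines and draws on the GPU. */
		return SUBSURF_BACKEND_OSD_GPU;
	}
	/* Rendering, or SubSurf is not last in the stack: evaluate the limit
	 * surface on the CPU and keep filling the existing CCG structures. */
	return SUBSURF_BACKEND_OSD_CPU_LIMIT;
}
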
Diffstat (limited to 'source/blender/blenkernel/intern')
-rw-r--r--  source/blender/blenkernel/intern/CCGSubSurf.c          98
-rw-r--r--  source/blender/blenkernel/intern/CCGSubSurf.h          46
-rw-r--r--  source/blender/blenkernel/intern/CCGSubSurf_intern.h   57
-rw-r--r--  source/blender/blenkernel/intern/DerivedMesh.c         66
-rw-r--r--  source/blender/blenkernel/intern/scene.c               45
-rw-r--r--  source/blender/blenkernel/intern/subsurf_ccg.c        609
6 files changed, 792 insertions, 129 deletions
diff --git a/source/blender/blenkernel/intern/CCGSubSurf.c b/source/blender/blenkernel/intern/CCGSubSurf.c
index a60e1fa3076..9ac6166606e 100644
--- a/source/blender/blenkernel/intern/CCGSubSurf.c
+++ b/source/blender/blenkernel/intern/CCGSubSurf.c
@@ -37,6 +37,11 @@
#include "CCGSubSurf_intern.h"
#include "BKE_subsurf.h"
+#ifdef WITH_OPENSUBDIV
+# include "opensubdiv_capi.h"
+# include "opensubdiv_converter_capi.h"
+#endif
+
#include "GL/glew.h"
/***/
@@ -299,6 +304,23 @@ CCGSubSurf *ccgSubSurf_new(CCGMeshIFC *ifc, int subdivLevels, CCGAllocatorIFC *a
ss->tempVerts = NULL;
ss->tempEdges = NULL;
+#ifdef WITH_OPENSUBDIV
+ ss->osd_evaluator = NULL;
+ ss->osd_mesh = NULL;
+ ss->osd_topology_refiner = NULL;
+ ss->osd_mesh_invalid = false;
+ ss->osd_coarse_coords_invalid = false;
+ ss->osd_vao = 0;
+ ss->skip_grids = false;
+ ss->osd_compute = 0;
+ ss->osd_uvs_invalid = true;
+ ss->osd_subsurf_uv = 0;
+ ss->osd_uv_index = -1;
+ ss->osd_next_face_ptex_index = 0;
+ ss->osd_coarse_coords = NULL;
+ ss->osd_num_coarse_coords = 0;
+#endif
+
return ss;
}
}
@@ -307,6 +329,24 @@ void ccgSubSurf_free(CCGSubSurf *ss)
{
CCGAllocatorIFC allocatorIFC = ss->allocatorIFC;
CCGAllocatorHDL allocator = ss->allocator;
+#ifdef WITH_OPENSUBDIV
+ if (ss->osd_evaluator != NULL) {
+ openSubdiv_deleteEvaluatorDescr(ss->osd_evaluator);
+ }
+ if (ss->osd_mesh != NULL) {
+ /* TODO(sergey): Make sure free happens from the main thread! */
+ openSubdiv_deleteOsdGLMesh(ss->osd_mesh);
+ }
+ if (ss->osd_vao != 0) {
+ glDeleteVertexArrays(1, &ss->osd_vao);
+ }
+ if (ss->osd_coarse_coords != NULL) {
+ MEM_freeN(ss->osd_coarse_coords);
+ }
+ if (ss->osd_topology_refiner != NULL) {
+ openSubdiv_deleteTopologyRefinerDescr(ss->osd_topology_refiner);
+ }
+#endif
if (ss->syncState) {
ccg_ehash_free(ss->oldFMap, (EHEntryFreeFP) _face_free, ss);
@@ -467,6 +507,9 @@ CCGError ccgSubSurf_initFullSync(CCGSubSurf *ss)
ss->tempEdges = MEM_mallocN(sizeof(*ss->tempEdges) * ss->lenTempArrays, "CCGSubsurf tempEdges");
ss->syncState = eSyncState_Vert;
+#ifdef WITH_OPENSUBDIV
+ ss->osd_next_face_ptex_index = 0;
+#endif
return eCCGError_None;
}
@@ -607,6 +650,9 @@ CCGError ccgSubSurf_syncVert(CCGSubSurf *ss, CCGVertHDL vHDL, const void *vertDa
ccg_ehash_insert(ss->vMap, (EHEntry *) v);
v->flags = 0;
}
+#ifdef WITH_OPENSUBDIV
+ v->osd_index = ss->vMap->numEntries - 1;
+#endif
}
if (v_r) *v_r = v;
@@ -789,6 +835,15 @@ CCGError ccgSubSurf_syncFace(CCGSubSurf *ss, CCGFaceHDL fHDL, int numVerts, CCGV
}
}
}
+#ifdef WITH_OPENSUBDIV
+ f->osd_index = ss->osd_next_face_ptex_index;
+ if (numVerts == 4) {
+ ss->osd_next_face_ptex_index++;
+ }
+ else {
+ ss->osd_next_face_ptex_index += numVerts;
+ }
+#endif
}
if (f_r) *f_r = f;
@@ -797,7 +852,18 @@ CCGError ccgSubSurf_syncFace(CCGSubSurf *ss, CCGFaceHDL fHDL, int numVerts, CCGV
static void ccgSubSurf__sync(CCGSubSurf *ss)
{
- ccgSubSurf__sync_legacy(ss);
+#ifdef WITH_OPENSUBDIV
+ /* TODO(sergey): This is because OSD evaluator does not support
+ * bilinear subdivision scheme at this moment.
+ */
+ if (ss->meshIFC.simpleSubdiv == false || ss->skip_grids == true) {
+ ccgSubSurf__sync_opensubdiv(ss);
+ }
+ else
+#endif
+ {
+ ccgSubSurf__sync_legacy(ss);
+ }
}
CCGError ccgSubSurf_processSync(CCGSubSurf *ss)
@@ -1128,15 +1194,39 @@ CCGError ccgSubSurf_stitchFaces(CCGSubSurf *ss, int lvl, CCGFace **effectedF, in
int ccgSubSurf_getNumVerts(const CCGSubSurf *ss)
{
- return ss->vMap->numEntries;
+#ifdef WITH_OPENSUBDIV
+ if (ss->skip_grids) {
+ return ccgSubSurf__getNumOsdBaseVerts(ss);
+ }
+ else
+#endif
+ {
+ return ss->vMap->numEntries;
+ }
}
int ccgSubSurf_getNumEdges(const CCGSubSurf *ss)
{
- return ss->eMap->numEntries;
+#ifdef WITH_OPENSUBDIV
+ if (ss->skip_grids) {
+ return ccgSubSurf__getNumOsdBaseEdges(ss);
+ }
+ else
+#endif
+ {
+ return ss->eMap->numEntries;
+ }
}
int ccgSubSurf_getNumFaces(const CCGSubSurf *ss)
{
- return ss->fMap->numEntries;
+#ifdef WITH_OPENSUBDIV
+ if (ss->skip_grids) {
+ return ccgSubSurf__getNumOsdBaseFaces(ss);
+ }
+ else
+#endif
+ {
+ return ss->fMap->numEntries;
+ }
}
CCGVert *ccgSubSurf_getVert(CCGSubSurf *ss, CCGVertHDL v)
diff --git a/source/blender/blenkernel/intern/CCGSubSurf.h b/source/blender/blenkernel/intern/CCGSubSurf.h
index 2b86a2a66b2..23f7e71a311 100644
--- a/source/blender/blenkernel/intern/CCGSubSurf.h
+++ b/source/blender/blenkernel/intern/CCGSubSurf.h
@@ -80,6 +80,9 @@ void ccgSubSurf_free (CCGSubSurf *ss);
CCGError ccgSubSurf_initFullSync (CCGSubSurf *ss);
CCGError ccgSubSurf_initPartialSync (CCGSubSurf *ss);
+#ifdef WITH_OPENSUBDIV
+CCGError ccgSubSurf_initOpenSubdivSync (CCGSubSurf *ss);
+#endif
CCGError ccgSubSurf_syncVert (CCGSubSurf *ss, CCGVertHDL vHDL, const void *vertData, int seam, CCGVert **v_r);
CCGError ccgSubSurf_syncEdge (CCGSubSurf *ss, CCGEdgeHDL eHDL, CCGVertHDL e_vHDL0, CCGVertHDL e_vHDL1, float crease, CCGEdge **e_r);
@@ -190,4 +193,47 @@ CCGFace* ccgFaceIterator_getCurrent (CCGFaceIterator *fi);
int ccgFaceIterator_isStopped (CCGFaceIterator *fi);
void ccgFaceIterator_next (CCGFaceIterator *fi);
+#ifdef WITH_OPENSUBDIV
+struct DerivedMesh;
+
+/* Check if topology changed and evaluators are to be re-created. */
+void ccgSubSurf_checkTopologyChanged(CCGSubSurf *ss, struct DerivedMesh *dm);
+
+/* Create topology refiner from the given derived mesh, which will later be
+ * used for GL mesh creation.
+ */
+void ccgSubSurf_prepareTopologyRefiner(CCGSubSurf *ss, struct DerivedMesh *dm);
+
+/* Make sure the GL mesh exists, is up to date and is ready to draw. */
+bool ccgSubSurf_prepareGLMesh(CCGSubSurf *ss, bool use_osd_glsl);
+
+/* Draw given partitions of the GL mesh.
+ *
+ * TODO(sergey): fill_quads is actually an invariant and should be part
+ * of the prepare routine.
+ */
+void ccgSubSurf_drawGLMesh(CCGSubSurf *ss, bool fill_quads,
+ int start_partition, int num_partitions);
+
+/* Controls whether CCG is needed (meaning CPU evaluation) or fully GPU compute
+ * and draw is allowed.
+ */
+void ccgSubSurf_setSkipGrids(CCGSubSurf *ss, bool skip_grids);
+bool ccgSubSurf_needGrids(CCGSubSurf *ss);
+
+/* Set evaluator's face varying data from UV coordinates.
+ * Used for CPU evaluation.
+ */
+void ccgSubSurf_evaluatorSetFVarUV(CCGSubSurf *ss,
+ struct DerivedMesh *dm,
+ int layer_index);
+
+/* TODO(sergey): Temporary call to test things. */
+void ccgSubSurf_evaluatorFVarUV(CCGSubSurf *ss,
+ int face_index, int S,
+ float grid_u, float grid_v,
+ float uv[2]);
+
+#endif
+
#endif /* __CCGSUBSURF_H__ */
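
Taken together, the GPU-side draw path is expected to use the API declared above roughly as follows. This is a minimal usage sketch assuming WITH_OPENSUBDIV is defined and CCGSubSurf.h is included; the real call sites are in the ccgDM_draw* hunks of subsurf_ccg.c further down in this diff.

#include "CCGSubSurf.h"

#ifdef WITH_OPENSUBDIV
/* Minimal usage sketch of the new GL mesh API (illustrative only). */
static void example_draw_subsurf_gpu(CCGSubSurf *ss, bool use_osd_glsl)
{
	/* Build or refresh the OpenSubdiv GL mesh; bail out if the GPU
	 * pipeline is not available. */
	if (!ccgSubSurf_prepareGLMesh(ss, use_osd_glsl)) {
		return;
	}
	/* Draw all partitions as filled quads; (-1, -1) means "everything",
	 * matching how the draw callbacks below call it. */
	ccgSubSurf_drawGLMesh(ss, true, -1, -1);
}
#endif
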
diff --git a/source/blender/blenkernel/intern/CCGSubSurf_intern.h b/source/blender/blenkernel/intern/CCGSubSurf_intern.h
index 1689ac482ef..d80bdcdb7fc 100644
--- a/source/blender/blenkernel/intern/CCGSubSurf_intern.h
+++ b/source/blender/blenkernel/intern/CCGSubSurf_intern.h
@@ -161,6 +161,9 @@ typedef enum {
eSyncState_Edge,
eSyncState_Face,
eSyncState_Partial,
+#ifdef WITH_OPENSUBDIV
+ eSyncState_OpenSubdiv,
+#endif
} SyncState;
struct CCGSubSurf {
@@ -203,6 +206,60 @@ struct CCGSubSurf {
int lenTempArrays;
CCGVert **tempVerts;
CCGEdge **tempEdges;
+
+#ifdef WITH_OPENSUBDIV
+ /* Skip grids means no CCG geometry is created and subsurf can be
+ * done completely on the GPU.
+ */
+ bool skip_grids;
+
+ /* ** GPU backend. ** */
+
+ /* Compute device used by GL mesh. */
+ short osd_compute;
+ /* Coarse (base mesh) vertex coordinates.
+ *
+ * Filled in from the modifier stack and passed to OpenSubdiv compute
+ * on mesh display.
+ */
+ float (*osd_coarse_coords)[3];
+ int osd_num_coarse_coords;
+ /* Denotes whether coarse positions in the GL mesh are invalid.
+ * Used to avoid updating GL mesh coords on every redraw.
+ */
+ bool osd_coarse_coords_invalid;
+
+ /* GL mesh descriptor, used for refinement and drawing. */
+ struct OpenSubdiv_GLMesh *osd_mesh;
+ /* Refiner which is used to create GL mesh.
+ *
+ * Refiner is created from the modifier stack and used later from the main
+ * thread to construct GL mesh to avoid threaded access to GL.
+ */
+ struct OpenSubdiv_TopologyRefinerDescr *osd_topology_refiner; /* Only used at synchronization stage. */
+ /* Denotes whether osd_mesh is invalid now due to topology changes and needs
+ * to be reconstructed.
+ *
+ * Reconstruction happens from main thread due to OpenGL communication.
+ */
+ bool osd_mesh_invalid;
+ /* Vertex array used for osd_mesh draw. */
+ unsigned int osd_vao;
+
+ /* ** CPU backend. ** */
+
+ /* Limit evaluator, used to evaluate CCG. */
+ struct OpenSubdiv_EvaluatorDescr *osd_evaluator;
+ /* Next PTex face index, used during CCG synchronization
+ * to fill in the PTex index of CCGFace.
+ */
+ int osd_next_face_ptex_index;
+
+ /* ** Needs review. ** */
+ bool osd_subsurf_uv;
+ int osd_uv_index;
+ bool osd_uvs_invalid;
+#endif
};
/* ** Utility macros ** */
diff --git a/source/blender/blenkernel/intern/DerivedMesh.c b/source/blender/blenkernel/intern/DerivedMesh.c
index 7582eb2324a..05ec83e70a9 100644
--- a/source/blender/blenkernel/intern/DerivedMesh.c
+++ b/source/blender/blenkernel/intern/DerivedMesh.c
@@ -1621,6 +1621,7 @@ static void mesh_calc_modifiers(
const bool useRenderParams, int useDeform,
const bool need_mapping, CustomDataMask dataMask,
const int index, const bool useCache, const bool build_shapekey_layers,
+ const bool allow_gpu,
/* return args */
DerivedMesh **r_deform, DerivedMesh **r_final)
{
@@ -1663,6 +1664,8 @@ static void mesh_calc_modifiers(
if (useCache)
app_flags |= MOD_APPLY_USECACHE;
+ if (allow_gpu)
+ app_flags |= MOD_APPLY_ALLOW_GPU;
if (useDeform)
deform_app_flags |= MOD_APPLY_USECACHE;
@@ -2327,9 +2330,9 @@ static void editbmesh_calc_modifiers(
}
if (mti->applyModifierEM)
- ndm = modwrap_applyModifierEM(md, ob, em, dm, MOD_APPLY_USECACHE);
+ ndm = modwrap_applyModifierEM(md, ob, em, dm, MOD_APPLY_USECACHE | MOD_APPLY_ALLOW_GPU);
else
- ndm = modwrap_applyModifier(md, ob, dm, MOD_APPLY_USECACHE);
+ ndm = modwrap_applyModifier(md, ob, dm, MOD_APPLY_USECACHE | MOD_APPLY_ALLOW_GPU);
ASSERT_IS_VALID_DM(ndm);
if (ndm) {
@@ -2449,6 +2452,23 @@ static void editbmesh_calc_modifiers(
MEM_freeN(deformedVerts);
}
+#ifdef WITH_OPENSUBDIV
+/* The idea is to skip CPU-side ORCO calculation when
+ * we'll be using the GPU backend of OpenSubdiv. This is so
+ * playback performance is kept as high as possible.
+ */
+static bool calc_modifiers_skip_orco(const Object *ob)
+{
+ const ModifierData *last_md = ob->modifiers.last;
+ if (last_md != NULL &&
+ last_md->type == eModifierType_Subsurf)
+ {
+ return true;
+ }
+ return false;
+}
+#endif
+
static void mesh_build_data(
Scene *scene, Object *ob, CustomDataMask dataMask,
const bool build_shapekey_layers, const bool need_mapping)
@@ -2458,8 +2478,15 @@ static void mesh_build_data(
BKE_object_free_derived_caches(ob);
BKE_object_sculpt_modifiers_changed(ob);
+#ifdef WITH_OPENSUBDIV
+ if (calc_modifiers_skip_orco(ob)) {
+ dataMask &= ~CD_MASK_ORCO;
+ }
+#endif
+
mesh_calc_modifiers(
scene, ob, NULL, false, 1, need_mapping, dataMask, -1, true, build_shapekey_layers,
+ true,
&ob->derivedDeform, &ob->derivedFinal);
DM_set_object_boundbox(ob, ob->derivedFinal);
@@ -2486,6 +2513,12 @@ static void editbmesh_build_data(Scene *scene, Object *obedit, BMEditMesh *em, C
BKE_editmesh_free_derivedmesh(em);
+#ifdef WITH_OPENSUBDIV
+ if (calc_modifiers_skip_orco(obedit)) {
+ dataMask &= ~CD_MASK_ORCO;
+ }
+#endif
+
editbmesh_calc_modifiers(
scene, obedit, em, dataMask,
&em->derivedCage, &em->derivedFinal);
@@ -2597,7 +2630,7 @@ DerivedMesh *mesh_create_derived_render(Scene *scene, Object *ob, CustomDataMask
DerivedMesh *final;
mesh_calc_modifiers(
- scene, ob, NULL, true, 1, false, dataMask, -1, false, false,
+ scene, ob, NULL, true, 1, false, dataMask, -1, false, false, false,
NULL, &final);
return final;
@@ -2608,7 +2641,7 @@ DerivedMesh *mesh_create_derived_index_render(Scene *scene, Object *ob, CustomDa
DerivedMesh *final;
mesh_calc_modifiers(
- scene, ob, NULL, true, 1, false, dataMask, index, false, false,
+ scene, ob, NULL, true, 1, false, dataMask, index, false, false, false,
NULL, &final);
return final;
@@ -2627,7 +2660,7 @@ DerivedMesh *mesh_create_derived_view(
ob->transflag |= OB_NO_PSYS_UPDATE;
mesh_calc_modifiers(
- scene, ob, NULL, false, 1, false, dataMask, -1, false, false,
+ scene, ob, NULL, false, 1, false, dataMask, -1, false, false, false,
NULL, &final);
ob->transflag &= ~OB_NO_PSYS_UPDATE;
@@ -2642,7 +2675,7 @@ DerivedMesh *mesh_create_derived_no_deform(
DerivedMesh *final;
mesh_calc_modifiers(
- scene, ob, vertCos, false, 0, false, dataMask, -1, false, false,
+ scene, ob, vertCos, false, 0, false, dataMask, -1, false, false, false,
NULL, &final);
return final;
@@ -2655,7 +2688,7 @@ DerivedMesh *mesh_create_derived_no_virtual(
DerivedMesh *final;
mesh_calc_modifiers(
- scene, ob, vertCos, false, -1, false, dataMask, -1, false, false,
+ scene, ob, vertCos, false, -1, false, dataMask, -1, false, false, false,
NULL, &final);
return final;
@@ -2668,7 +2701,7 @@ DerivedMesh *mesh_create_derived_physics(
DerivedMesh *final;
mesh_calc_modifiers(
- scene, ob, vertCos, false, -1, true, dataMask, -1, false, false,
+ scene, ob, vertCos, false, -1, true, dataMask, -1, false, false, false,
NULL, &final);
return final;
@@ -2682,7 +2715,7 @@ DerivedMesh *mesh_create_derived_no_deform_render(
DerivedMesh *final;
mesh_calc_modifiers(
- scene, ob, vertCos, true, 0, false, dataMask, -1, false, false,
+ scene, ob, vertCos, true, 0, false, dataMask, -1, false, false, false,
NULL, &final);
return final;
@@ -3400,9 +3433,18 @@ void DM_set_object_boundbox(Object *ob, DerivedMesh *dm)
{
float min[3], max[3];
- INIT_MINMAX(min, max);
-
- dm->getMinMax(dm, min, max);
+#ifdef WITH_OPENSUBDIV
+ /* TODO(sergey): Currently no way to access bounding box from hi-res mesh. */
+ if (dm->type == DM_TYPE_CCGDM) {
+ copy_v3_fl3(min, -1.0f, -1.0f, -1.0f);
+ copy_v3_fl3(max, 1.0f, 1.0f, 1.0f);
+ }
+ else
+#endif
+ {
+ INIT_MINMAX(min, max);
+ dm->getMinMax(dm, min, max);
+ }
if (!ob->bb)
ob->bb = MEM_callocN(sizeof(BoundBox), "DM-BoundBox");
diff --git a/source/blender/blenkernel/intern/scene.c b/source/blender/blenkernel/intern/scene.c
index 8f3a99cc051..19c2ff10901 100644
--- a/source/blender/blenkernel/intern/scene.c
+++ b/source/blender/blenkernel/intern/scene.c
@@ -89,6 +89,11 @@
#include "BKE_unit.h"
#include "BKE_world.h"
+#ifdef WITH_OPENSUBDIV
+# include "BKE_modifier.h"
+# include "CCGSubSurf.h"
+#endif
+
#include "DEG_depsgraph.h"
#include "RE_engine.h"
@@ -1345,6 +1350,11 @@ static void scene_do_rb_simulation_recursive(Scene *scene, float ctime)
*/
#define MBALL_SINGLETHREAD_HACK
+/* Need this because CCGDM holds some OpenGL resources. */
+#ifdef WITH_OPENSUBDIV
+# define OPENSUBDIV_GL_WORKAROUND
+#endif
+
#ifdef WITH_LEGACY_DEPSGRAPH
typedef struct StatisicsEntry {
struct StatisicsEntry *next, *prev;
@@ -1546,6 +1556,37 @@ static bool scene_need_update_objects(Main *bmain)
DAG_id_type_tagged(bmain, ID_AR); /* Armature */
}
+#ifdef OPENSUBDIV_GL_WORKAROUND
+/* CCG DerivedMesh currently holds some OpenGL handles, which can only be
+ * released from the main thread.
+ *
+ * Ideally we need to use gpu_buffer_free, but it's a bit tricky because
+ * some buffers are only accessible from OpenSubdiv side.
+ */
+static void scene_free_unused_opensubdiv_cache(Scene *scene)
+{
+ Base *base;
+ for (base = scene->base.first; base; base = base->next) {
+ Object *object = base->object;
+ if (object->type == OB_MESH && object->recalc & OB_RECALC_DATA) {
+ ModifierData *md = object->modifiers.last;
+ if (md != NULL && md->type == eModifierType_Subsurf) {
+ SubsurfModifierData *smd = (SubsurfModifierData *) md;
+ bool object_in_editmode = object->mode == OB_MODE_EDIT;
+ if (object_in_editmode && smd->mCache != NULL) {
+ ccgSubSurf_free(smd->mCache);
+ smd->mCache = NULL;
+ }
+ if (!object_in_editmode && smd->emCache != NULL) {
+ ccgSubSurf_free(smd->emCache);
+ smd->emCache = NULL;
+ }
+ }
+ }
+ }
+}
+#endif
+
static void scene_update_objects(EvaluationContext *eval_ctx, Main *bmain, Scene *scene, Scene *scene_parent)
{
TaskScheduler *task_scheduler = BLI_task_scheduler_get();
@@ -1564,6 +1605,10 @@ static void scene_update_objects(EvaluationContext *eval_ctx, Main *bmain, Scene
return;
}
+#ifdef OPENSUBDIV_GL_WORKAROUND
+ scene_free_unused_opensubdiv_cache(scene);
+#endif
+
state.eval_ctx = eval_ctx;
state.scene = scene;
state.scene_parent = scene_parent;
diff --git a/source/blender/blenkernel/intern/subsurf_ccg.c b/source/blender/blenkernel/intern/subsurf_ccg.c
index d419fc70be9..7d21b3304cd 100644
--- a/source/blender/blenkernel/intern/subsurf_ccg.c
+++ b/source/blender/blenkernel/intern/subsurf_ccg.c
@@ -77,6 +77,10 @@
#include "CCGSubSurf.h"
+#ifdef WITH_OPENSUBDIV
+# include "opensubdiv_capi.h"
+#endif
+
/* assumes MLoop's are layed out 4 for each poly, in order */
#define USE_LOOP_LAYOUT_FAST
@@ -88,7 +92,8 @@ static ThreadRWMutex origindex_cache_rwlock = BLI_RWLOCK_INITIALIZER;
static CCGDerivedMesh *getCCGDerivedMesh(CCGSubSurf *ss,
int drawInteriorEdges,
int useSubsurfUv,
- DerivedMesh *dm);
+ DerivedMesh *dm,
+ bool use_gpu_backend);
static int ccgDM_use_grid_pbvh(CCGDerivedMesh *ccgdm);
///
@@ -268,6 +273,7 @@ static int getFaceIndex(CCGSubSurf *ss, CCGFace *f, int S, int x, int y, int edg
}
}
+#ifndef WITH_OPENSUBDIV
static void get_face_uv_map_vert(UvVertMap *vmap, struct MPoly *mpoly, struct MLoop *ml, int fi, CCGVertHDL *fverts)
{
UvMapVert *v, *nv;
@@ -409,7 +415,96 @@ static int ss_sync_from_uv(CCGSubSurf *ss, CCGSubSurf *origss, DerivedMesh *dm,
return 1;
}
+#endif /* WITH_OPENSUBDIV */
+
+#ifdef WITH_OPENSUBDIV
+static void set_subsurf_ccg_uv(CCGSubSurf *ss,
+ DerivedMesh *dm,
+ DerivedMesh *result,
+ int layer_index)
+{
+ CCGFace **faceMap;
+ MTFace *tf;
+ MLoopUV *mluv;
+ CCGFaceIterator fi;
+ int index, gridSize, gridFaces, totface, x, y, S;
+ MLoopUV *dmloopuv = CustomData_get_layer_n(&dm->loopData, CD_MLOOPUV, layer_index);
+ /* need to update both CD_MTFACE & CD_MLOOPUV, hrmf, we could get away with
+ * just tface except applying the modifier then loses subsurf UV */
+ MTFace *tface = CustomData_get_layer_n(&result->faceData, CD_MTFACE, layer_index);
+ MLoopUV *mloopuv = CustomData_get_layer_n(&result->loopData, CD_MLOOPUV, layer_index);
+
+ if (dmloopuv == NULL || (tface == NULL && mloopuv == NULL)) {
+ return;
+ }
+
+ ccgSubSurf_evaluatorSetFVarUV(ss, dm, layer_index);
+
+ /* get some info from CCGSubSurf */
+ totface = ccgSubSurf_getNumFaces(ss);
+ gridSize = ccgSubSurf_getGridSize(ss);
+ gridFaces = gridSize - 1;
+
+ /* make a map from original faces to CCGFaces */
+ faceMap = MEM_mallocN(totface * sizeof(*faceMap), "facemapuv");
+ for (ccgSubSurf_initFaceIterator(ss, &fi); !ccgFaceIterator_isStopped(&fi); ccgFaceIterator_next(&fi)) {
+ CCGFace *f = ccgFaceIterator_getCurrent(&fi);
+ faceMap[GET_INT_FROM_POINTER(ccgSubSurf_getFaceFaceHandle(f))] = f;
+ }
+
+ /* load coordinates from uvss into tface */
+ tf = tface;
+ mluv = mloopuv;
+ for (index = 0; index < totface; index++) {
+ CCGFace *f = faceMap[index];
+ int numVerts = ccgSubSurf_getFaceNumVerts(f);
+ for (S = 0; S < numVerts; S++) {
+ for (y = 0; y < gridFaces; y++) {
+ for (x = 0; x < gridFaces; x++) {
+ float grid_u = ((float)(x)) / (gridSize - 1),
+ grid_v = ((float)(y)) / (gridSize - 1);
+ float uv[2];
+ /* TODO(sergey): Evaluate all 4 corners. */
+ ccgSubSurf_evaluatorFVarUV(ss,
+ index,
+ S,
+ grid_u, grid_v,
+ uv);
+ if (tf) {
+ copy_v2_v2(tf->uv[0], uv);
+ copy_v2_v2(tf->uv[1], uv);
+ copy_v2_v2(tf->uv[2], uv);
+ copy_v2_v2(tf->uv[3], uv);
+ tf++;
+ }
+ if (mluv) {
+ copy_v2_v2(mluv[0].uv, uv);
+ copy_v2_v2(mluv[1].uv, uv);
+ copy_v2_v2(mluv[2].uv, uv);
+ copy_v2_v2(mluv[3].uv, uv);
+ mluv += 4;
+ }
+ }
+ }
+ }
+ }
+ MEM_freeN(faceMap);
+}
+static void set_subsurf_uv(CCGSubSurf *ss,
+ DerivedMesh *dm,
+ DerivedMesh *result,
+ int layer_index)
+{
+ if (!ccgSubSurf_needGrids(ss)) {
+ /* GPU backend is used, no need to evaluate UVs on CPU. */
+ /* TODO(sergey): Think of how to support edit mode of UVs. */
+ }
+ else {
+ set_subsurf_ccg_uv(ss, dm, result, layer_index);
+ }
+}
+#else /* WITH_OPENSUBDIV */
static void set_subsurf_uv(CCGSubSurf *ss, DerivedMesh *dm, DerivedMesh *result, int n)
{
CCGSubSurf *uvss;
@@ -490,6 +585,7 @@ static void set_subsurf_uv(CCGSubSurf *ss, DerivedMesh *dm, DerivedMesh *result,
ccgSubSurf_free(uvss);
MEM_freeN(faceMap);
}
+#endif /* WITH_OPENSUBDIV */
/* face weighting */
typedef struct FaceVertWeightEntry {
@@ -571,8 +667,10 @@ static void free_ss_weights(WeightTable *wtable)
MEM_freeN(wtable->weight_table);
}
-static void ss_sync_from_derivedmesh(CCGSubSurf *ss, DerivedMesh *dm,
- float (*vertexCos)[3], int useFlatSubdiv)
+static void ss_sync_ccg_from_derivedmesh(CCGSubSurf *ss,
+ DerivedMesh *dm,
+ float (*vertexCos)[3],
+ int useFlatSubdiv)
{
float creaseFactor = (float) ccgSubSurf_getSubdivisionLevels(ss);
#ifndef USE_DYNSIZE
@@ -672,6 +770,37 @@ static void ss_sync_from_derivedmesh(CCGSubSurf *ss, DerivedMesh *dm,
#endif
}
+#ifdef WITH_OPENSUBDIV
+static void ss_sync_osd_from_derivedmesh(CCGSubSurf *ss,
+ DerivedMesh *dm)
+{
+ ccgSubSurf_initFullSync(ss);
+ ccgSubSurf_prepareTopologyRefiner(ss, dm);
+ ccgSubSurf_processSync(ss);
+}
+#endif /* WITH_OPENSUBDIV */
+
+static void ss_sync_from_derivedmesh(CCGSubSurf *ss,
+ DerivedMesh *dm,
+ float (*vertexCos)[3],
+ int use_flat_subdiv)
+{
+#ifdef WITH_OPENSUBDIV
+ /* Reset all related descriptors if actual mesh topology changed or if
+ * other evaluation-related settings changed.
+ */
+ ccgSubSurf_checkTopologyChanged(ss, dm);
+ if (!ccgSubSurf_needGrids(ss)) {
+ /* TODO(sergey): Use vertex coordinates and flat subdiv flag. */
+ ss_sync_osd_from_derivedmesh(ss, dm);
+ }
+ else
+#endif
+ {
+ ss_sync_ccg_from_derivedmesh(ss, dm, vertexCos, use_flat_subdiv);
+ }
+}
+
/***/
static int ccgDM_getVertMapIndex(CCGSubSurf *ss, CCGVert *v)
@@ -1651,6 +1780,16 @@ static void ccgDM_drawEdges(DerivedMesh *dm, bool drawLooseEdges, bool drawAllEd
int gridSize = ccgSubSurf_getGridSize(ss);
int useAging;
+#ifdef WITH_OPENSUBDIV
+ if (ccgdm->useGpuBackend) {
+ /* TODO(sergey): We currently only support drawing all edges. */
+ if (ccgSubSurf_prepareGLMesh(ss, true)) {
+ ccgSubSurf_drawGLMesh(ss, false, -1, -1);
+ }
+ return;
+ }
+#endif
+
CCG_key_top_level(&key, ss);
ccgdm_pbvh_update(ccgdm);
@@ -1722,6 +1861,13 @@ static void ccgDM_drawLooseEdges(DerivedMesh *dm)
int totedge = ccgSubSurf_getNumEdges(ss);
int i, j, edgeSize = ccgSubSurf_getEdgeSize(ss);
+#ifdef WITH_OPENSUBDIV
+ if (ccgdm->useGpuBackend) {
+ /* TODO(sergey): Needs implementation. */
+ return;
+ }
+#endif
+
CCG_key_top_level(&key, ss);
for (j = 0; j < totedge; j++) {
@@ -2301,7 +2447,34 @@ static void ccgDM_drawFacesSolid(DerivedMesh *dm, float (*partial_redraw_planes)
return;
}
-
+
+#ifdef WITH_OPENSUBDIV
+ if (ccgdm->useGpuBackend) {
+ CCGSubSurf *ss = ccgdm->ss;
+ DMFlagMat *faceFlags = ccgdm->faceFlags;
+ int new_matnr;
+ bool draw_smooth;
+ if (UNLIKELY(ccgSubSurf_prepareGLMesh(ss, setMaterial != NULL) == false)) {
+ return;
+ }
+ /* TODO(sergey): Single material currently. */
+ if (faceFlags) {
+ draw_smooth = (faceFlags[0].flag & ME_SMOOTH);
+ new_matnr = (faceFlags[0].mat_nr + 1);
+ }
+ else {
+ draw_smooth = true;
+ new_matnr = 1;
+ }
+ if (setMaterial) {
+ setMaterial(new_matnr, NULL);
+ }
+ glShadeModel(draw_smooth ? GL_SMOOTH : GL_FLAT);
+ ccgSubSurf_drawGLMesh(ss, true, -1, -1);
+ return;
+ }
+#endif
+
GPU_vertex_setup(dm);
GPU_normal_setup(dm);
GPU_triangle_setup(dm);
@@ -2334,6 +2507,30 @@ static void ccgDM_drawMappedFacesGLSL(DerivedMesh *dm,
short (*lnors)[4][3] = dm->getTessFaceDataArray(dm, CD_TESSLOOPNORMAL);
int a, i, do_draw, numVerts, matnr, new_matnr, totface;
+#ifdef WITH_OPENSUBDIV
+ if (ccgdm->useGpuBackend) {
+ int new_matnr;
+ bool draw_smooth;
+ GPU_draw_update_fvar_offset(dm);
+ if (UNLIKELY(ccgSubSurf_prepareGLMesh(ss, false) == false)) {
+ return;
+ }
+ /* TODO(sergey): Single material currently. */
+ if (faceFlags) {
+ draw_smooth = (faceFlags[0].flag & ME_SMOOTH);
+ new_matnr = (faceFlags[0].mat_nr + 1);
+ }
+ else {
+ draw_smooth = true;
+ new_matnr = 1;
+ }
+ glShadeModel(draw_smooth ? GL_SMOOTH : GL_FLAT);
+ setMaterial(new_matnr, &gattribs);
+ ccgSubSurf_drawGLMesh(ss, true, -1, -1);
+ return;
+ }
+#endif
+
CCG_key_top_level(&key, ss);
ccgdm_pbvh_update(ccgdm);
@@ -2506,6 +2703,13 @@ static void ccgDM_drawMappedFacesMat(DerivedMesh *dm,
short (*lnors)[4][3] = dm->getTessFaceDataArray(dm, CD_TESSLOOPNORMAL);
int a, i, numVerts, matnr, new_matnr, totface;
+#ifdef WITH_OPENSUBDIV
+ if (ccgdm->useGpuBackend) {
+ BLI_assert(!"Not currently supported");
+ return;
+ }
+#endif
+
CCG_key_top_level(&key, ss);
ccgdm_pbvh_update(ccgdm);
@@ -2678,6 +2882,16 @@ static void ccgDM_drawFacesTex_common(DerivedMesh *dm,
int mat_index;
int tot_element, start_element, tot_drawn;
+#ifdef WITH_OPENSUBDIV
+ if (ccgdm->useGpuBackend) {
+ if (ccgSubSurf_prepareGLMesh(ss, true) == false) {
+ return;
+ }
+ ccgSubSurf_drawGLMesh(ss, true, -1, -1);
+ return;
+ }
+#endif
+
CCG_key_top_level(&key, ss);
ccgdm_pbvh_update(ccgdm);
@@ -2847,6 +3061,26 @@ static void ccgDM_drawMappedFaces(DerivedMesh *dm,
int gridFaces = gridSize - 1, totface;
int prev_mat_nr = -1;
+#ifdef WITH_OPENSUBDIV
+ if (ccgdm->useGpuBackend) {
+ /* TODO(sergey): This is for cases when vertex colors or weights
+ * are being visualized. Currently we don't have CD layers for this data
+ * and here we only make it so there's no garbage displayed.
+ *
+ * In the future we'll either need to have CD for this data or pass
+ * this data as face-varying or vertex-varying data in OSD mesh.
+ */
+ if (setDrawOptions == NULL) {
+ glColor3f(0.8f, 0.8f, 0.8f);
+ }
+ if (UNLIKELY(ccgSubSurf_prepareGLMesh(ss, true) == false)) {
+ return;
+ }
+ ccgSubSurf_drawGLMesh(ss, true, -1, -1);
+ return;
+ }
+#endif
+
CCG_key_top_level(&key, ss);
/* currently unused -- each original face is handled separately */
@@ -3016,6 +3250,13 @@ static void ccgDM_drawMappedEdges(DerivedMesh *dm,
CCGKey key;
int i, useAging, edgeSize = ccgSubSurf_getEdgeSize(ss);
+#ifdef WITH_OPENSUBDIV
+ if (ccgdm->useGpuBackend) {
+ BLI_assert(!"Not currently supported");
+ return;
+ }
+#endif
+
CCG_key_top_level(&key, ss);
ccgSubSurf_getUseAgeCounts(ss, &useAging, NULL, NULL, NULL);
@@ -3051,6 +3292,13 @@ static void ccgDM_drawMappedEdgesInterp(DerivedMesh *dm,
CCGEdgeIterator ei;
int i, useAging, edgeSize = ccgSubSurf_getEdgeSize(ss);
+#ifdef WITH_OPENSUBDIV
+ if (ccgdm->useGpuBackend) {
+ BLI_assert(!"Not currently supported");
+ return;
+ }
+#endif
+
CCG_key_top_level(&key, ss);
ccgSubSurf_getUseAgeCounts(ss, &useAging, NULL, NULL, NULL);
@@ -3145,9 +3393,11 @@ static void ccgDM_release(DerivedMesh *dm)
if (ccgdm->pmap_mem) MEM_freeN(ccgdm->pmap_mem);
MEM_freeN(ccgdm->edgeFlags);
MEM_freeN(ccgdm->faceFlags);
- MEM_freeN(ccgdm->vertMap);
- MEM_freeN(ccgdm->edgeMap);
- MEM_freeN(ccgdm->faceMap);
+ if (ccgdm->useGpuBackend == false) {
+ MEM_freeN(ccgdm->vertMap);
+ MEM_freeN(ccgdm->edgeMap);
+ MEM_freeN(ccgdm->faceMap);
+ }
MEM_freeN(ccgdm);
}
}
@@ -3681,74 +3931,8 @@ static void ccgDM_calcNormals(DerivedMesh *dm)
dm->dirty &= ~DM_DIRTY_NORMALS;
}
-static CCGDerivedMesh *getCCGDerivedMesh(CCGSubSurf *ss,
- int drawInteriorEdges,
- int useSubsurfUv,
- DerivedMesh *dm)
+static void set_default_ccgdm_callbacks(CCGDerivedMesh *ccgdm)
{
- CCGDerivedMesh *ccgdm = MEM_callocN(sizeof(*ccgdm), "ccgdm");
- CCGVertIterator vi;
- CCGEdgeIterator ei;
- CCGFaceIterator fi;
- int index, totvert, totedge, totface;
- int i;
- int vertNum, edgeNum, faceNum;
- int *vertOrigIndex, *faceOrigIndex, *polyOrigIndex, *base_polyOrigIndex, *edgeOrigIndex;
- short *edgeFlags;
- DMFlagMat *faceFlags;
- int *polyidx = NULL;
-#ifndef USE_DYNSIZE
- int *loopidx = NULL, *vertidx = NULL;
- BLI_array_declare(loopidx);
- BLI_array_declare(vertidx);
-#endif
- int loopindex, loopindex2;
- int edgeSize;
- int gridSize;
- int gridFaces, gridCuts;
- /*int gridSideVerts;*/
- int gridSideEdges;
- int numTex, numCol;
- int hasPCol, hasOrigSpace;
- int gridInternalEdges;
- WeightTable wtable = {NULL};
- /* MCol *mcol; */ /* UNUSED */
- MEdge *medge = NULL;
- /* MFace *mface = NULL; */
- MPoly *mpoly = NULL;
- bool has_edge_cd;
-
- DM_from_template(&ccgdm->dm, dm, DM_TYPE_CCGDM,
- ccgSubSurf_getNumFinalVerts(ss),
- ccgSubSurf_getNumFinalEdges(ss),
- ccgSubSurf_getNumFinalFaces(ss),
- ccgSubSurf_getNumFinalFaces(ss) * 4,
- ccgSubSurf_getNumFinalFaces(ss));
-
- CustomData_free_layer_active(&ccgdm->dm.polyData, CD_NORMAL,
- ccgdm->dm.numPolyData);
-
- numTex = CustomData_number_of_layers(&ccgdm->dm.loopData, CD_MLOOPUV);
- numCol = CustomData_number_of_layers(&ccgdm->dm.loopData, CD_MLOOPCOL);
- hasPCol = CustomData_has_layer(&ccgdm->dm.loopData, CD_PREVIEW_MLOOPCOL);
- hasOrigSpace = CustomData_has_layer(&ccgdm->dm.loopData, CD_ORIGSPACE_MLOOP);
-
- if (
- (numTex && CustomData_number_of_layers(&ccgdm->dm.faceData, CD_MTFACE) != numTex) ||
- (numCol && CustomData_number_of_layers(&ccgdm->dm.faceData, CD_MCOL) != numCol) ||
- (hasPCol && !CustomData_has_layer(&ccgdm->dm.faceData, CD_PREVIEW_MCOL)) ||
- (hasOrigSpace && !CustomData_has_layer(&ccgdm->dm.faceData, CD_ORIGSPACE)) )
- {
- CustomData_from_bmeshpoly(&ccgdm->dm.faceData,
- &ccgdm->dm.polyData,
- &ccgdm->dm.loopData,
- ccgSubSurf_getNumFinalFaces(ss));
- }
-
- /* We absolutely need that layer, else it's no valid tessellated data! */
- polyidx = CustomData_add_layer(&ccgdm->dm.faceData, CD_ORIGINDEX, CD_CALLOC,
- NULL, ccgSubSurf_getNumFinalFaces(ss));
-
ccgdm->dm.getMinMax = ccgDM_getMinMax;
ccgdm->dm.getNumVerts = ccgDM_getNumVerts;
ccgdm->dm.getNumEdges = ccgDM_getNumEdges;
@@ -3801,7 +3985,7 @@ static CCGDerivedMesh *getCCGDerivedMesh(CCGSubSurf *ss,
ccgdm->dm.foreachMappedEdge = ccgDM_foreachMappedEdge;
ccgdm->dm.foreachMappedLoop = ccgDM_foreachMappedLoop;
ccgdm->dm.foreachMappedFaceCenter = ccgDM_foreachMappedFaceCenter;
-
+
ccgdm->dm.drawVerts = ccgDM_drawVerts;
ccgdm->dm.drawEdges = ccgDM_drawEdges;
ccgdm->dm.drawLooseEdges = ccgDM_drawLooseEdges;
@@ -3820,14 +4004,22 @@ static CCGDerivedMesh *getCCGDerivedMesh(CCGSubSurf *ss,
ccgdm->dm.copy_gpu_data = ccgDM_copy_gpu_data;
ccgdm->dm.release = ccgDM_release;
-
- ccgdm->ss = ss;
- ccgdm->drawInteriorEdges = drawInteriorEdges;
- ccgdm->useSubsurfUv = useSubsurfUv;
+}
+
+static void create_ccgdm_maps(CCGDerivedMesh *ccgdm,
+ CCGSubSurf *ss)
+{
+ CCGVertIterator vi;
+ CCGEdgeIterator ei;
+ CCGFaceIterator fi;
+ int totvert, totedge, totface;
totvert = ccgSubSurf_getNumVerts(ss);
ccgdm->vertMap = MEM_mallocN(totvert * sizeof(*ccgdm->vertMap), "vertMap");
- for (ccgSubSurf_initVertIterator(ss, &vi); !ccgVertIterator_isStopped(&vi); ccgVertIterator_next(&vi)) {
+ for (ccgSubSurf_initVertIterator(ss, &vi);
+ !ccgVertIterator_isStopped(&vi);
+ ccgVertIterator_next(&vi))
+ {
CCGVert *v = ccgVertIterator_getCurrent(&vi);
ccgdm->vertMap[GET_INT_FROM_POINTER(ccgSubSurf_getVertVertHandle(v))].vert = v;
@@ -3835,7 +4027,10 @@ static CCGDerivedMesh *getCCGDerivedMesh(CCGSubSurf *ss,
totedge = ccgSubSurf_getNumEdges(ss);
ccgdm->edgeMap = MEM_mallocN(totedge * sizeof(*ccgdm->edgeMap), "edgeMap");
- for (ccgSubSurf_initEdgeIterator(ss, &ei); !ccgEdgeIterator_isStopped(&ei); ccgEdgeIterator_next(&ei)) {
+ for (ccgSubSurf_initEdgeIterator(ss, &ei);
+ !ccgEdgeIterator_isStopped(&ei);
+ ccgEdgeIterator_next(&ei))
+ {
CCGEdge *e = ccgEdgeIterator_getCurrent(&ei);
ccgdm->edgeMap[GET_INT_FROM_POINTER(ccgSubSurf_getEdgeEdgeHandle(e))].edge = e;
@@ -3843,13 +4038,60 @@ static CCGDerivedMesh *getCCGDerivedMesh(CCGSubSurf *ss,
totface = ccgSubSurf_getNumFaces(ss);
ccgdm->faceMap = MEM_mallocN(totface * sizeof(*ccgdm->faceMap), "faceMap");
- for (ccgSubSurf_initFaceIterator(ss, &fi); !ccgFaceIterator_isStopped(&fi); ccgFaceIterator_next(&fi)) {
+ for (ccgSubSurf_initFaceIterator(ss, &fi);
+ !ccgFaceIterator_isStopped(&fi);
+ ccgFaceIterator_next(&fi))
+ {
CCGFace *f = ccgFaceIterator_getCurrent(&fi);
ccgdm->faceMap[GET_INT_FROM_POINTER(ccgSubSurf_getFaceFaceHandle(f))].face = f;
}
+}
+
+/* Fill in all geometry arrays making it possible to access any
+ * hires data from the CPU.
+ */
+static void set_ccgdm_all_geometry(CCGDerivedMesh *ccgdm,
+ CCGSubSurf *ss,
+ DerivedMesh *dm,
+ bool useSubsurfUv)
+{
+ const int totvert = ccgSubSurf_getNumVerts(ss);
+ const int totedge = ccgSubSurf_getNumEdges(ss);
+ const int totface = ccgSubSurf_getNumFaces(ss);
+ int index;
+ int i;
+ int vertNum = 0, edgeNum = 0, faceNum = 0;
+ int *vertOrigIndex, *faceOrigIndex, *polyOrigIndex, *base_polyOrigIndex, *edgeOrigIndex;
+ short *edgeFlags = ccgdm->edgeFlags;
+ DMFlagMat *faceFlags = ccgdm->faceFlags;
+ int *polyidx = NULL;
+#ifndef USE_DYNSIZE
+ int *loopidx = NULL, *vertidx = NULL;
+ BLI_array_declare(loopidx);
+ BLI_array_declare(vertidx);
+#endif
+ int loopindex, loopindex2;
+ int edgeSize;
+ int gridSize;
+ int gridFaces, gridCuts;
+ int gridSideEdges;
+ int numTex, numCol;
+ int hasPCol, hasOrigSpace;
+ int gridInternalEdges;
+ WeightTable wtable = {NULL};
+ MEdge *medge = NULL;
+ MPoly *mpoly = NULL;
+ bool has_edge_cd;
- ccgdm->reverseFaceMap = MEM_callocN(sizeof(int) * ccgSubSurf_getNumFinalFaces(ss), "reverseFaceMap");
+ numTex = CustomData_number_of_layers(&ccgdm->dm.loopData, CD_MLOOPUV);
+ numCol = CustomData_number_of_layers(&ccgdm->dm.loopData, CD_MLOOPCOL);
+ hasPCol = CustomData_has_layer(&ccgdm->dm.loopData, CD_PREVIEW_MLOOPCOL);
+ hasOrigSpace = CustomData_has_layer(&ccgdm->dm.loopData, CD_ORIGSPACE_MLOOP);
+
+ /* We absolutely need that layer, else it's no valid tessellated data! */
+ polyidx = CustomData_add_layer(&ccgdm->dm.faceData, CD_ORIGINDEX, CD_CALLOC,
+ NULL, ccgSubSurf_getNumFinalFaces(ss));
edgeSize = ccgSubSurf_getEdgeSize(ss);
gridSize = ccgSubSurf_getGridSize(ss);
@@ -3857,11 +4099,7 @@ static CCGDerivedMesh *getCCGDerivedMesh(CCGSubSurf *ss,
gridCuts = gridSize - 2;
/*gridInternalVerts = gridSideVerts * gridSideVerts; - as yet, unused */
gridSideEdges = gridSize - 1;
- gridInternalEdges = (gridSideEdges - 1) * gridSideEdges * 2;
-
- vertNum = 0;
- edgeNum = 0;
- faceNum = 0;
+ gridInternalEdges = (gridSideEdges - 1) * gridSideEdges * 2;
/* mvert = dm->getVertArray(dm); */ /* UNUSED */
medge = dm->getEdgeArray(dm);
@@ -3869,10 +4107,6 @@ static CCGDerivedMesh *getCCGDerivedMesh(CCGSubSurf *ss,
mpoly = CustomData_get_layer(&dm->polyData, CD_MPOLY);
base_polyOrigIndex = CustomData_get_layer(&dm->polyData, CD_ORIGINDEX);
-
- /*CDDM hack*/
- edgeFlags = ccgdm->edgeFlags = MEM_callocN(sizeof(short) * totedge, "edgeFlags");
- faceFlags = ccgdm->faceFlags = MEM_callocN(sizeof(DMFlagMat) * totface, "faceFlags");
vertOrigIndex = DM_get_vert_data_layer(&ccgdm->dm, CD_ORIGINDEX);
edgeOrigIndex = DM_get_edge_data_layer(&ccgdm->dm, CD_ORIGINDEX);
@@ -3902,13 +4136,12 @@ static CCGDerivedMesh *getCCGDerivedMesh(CCGSubSurf *ss,
#ifdef USE_DYNSIZE
int loopidx[numVerts], vertidx[numVerts];
#endif
-
w = get_ss_weights(&wtable, gridCuts, numVerts);
ccgdm->faceMap[index].startVert = vertNum;
ccgdm->faceMap[index].startEdge = edgeNum;
ccgdm->faceMap[index].startFace = faceNum;
-
+
faceFlags->flag = mpoly ? mpoly[origIndex].flag : 0;
faceFlags->mat_nr = mpoly ? mpoly[origIndex].mat_nr : 0;
faceFlags++;
@@ -3932,7 +4165,6 @@ static CCGDerivedMesh *getCCGDerivedMesh(CCGSubSurf *ss,
CCGVert *v = ccgSubSurf_getFaceVert(f, s);
vertidx[s] = GET_INT_FROM_POINTER(ccgSubSurf_getVertVertHandle(v));
}
-
/*I think this is for interpolating the center vert?*/
w2 = w; // + numVerts*(g2_wid-1) * (g2_wid-1); //numVerts*((g2_wid-1) * g2_wid+g2_wid-1);
@@ -4003,7 +4235,7 @@ static CCGDerivedMesh *getCCGDerivedMesh(CCGSubSurf *ss,
CustomData_interp(&dm->loopData, &ccgdm->dm.loopData,
loopidx, w2, NULL, numVerts, loopindex2);
loopindex2++;
-
+
w2 = w + s * numVerts * g2_wid * g2_wid + ((y) * g2_wid + (x + 1)) * numVerts;
CustomData_interp(&dm->loopData, &ccgdm->dm.loopData,
loopidx, w2, NULL, numVerts, loopindex2);
@@ -4016,7 +4248,7 @@ static CCGDerivedMesh *getCCGDerivedMesh(CCGSubSurf *ss,
ccg_loops_to_corners(&ccgdm->dm.faceData, &ccgdm->dm.loopData,
&ccgdm->dm.polyData, loopindex2 - 4, faceNum, faceNum,
numTex, numCol, hasPCol, hasOrigSpace);
-
+
/*set original index data*/
if (faceOrigIndex) {
/* reference the index in 'polyOrigIndex' */
@@ -4123,26 +4355,149 @@ static CCGDerivedMesh *getCCGDerivedMesh(CCGSubSurf *ss,
vertNum++;
}
- ccgdm->dm.numVertData = vertNum;
- ccgdm->dm.numEdgeData = edgeNum;
- ccgdm->dm.numTessFaceData = faceNum;
- ccgdm->dm.numLoopData = loopindex2;
- ccgdm->dm.numPolyData = faceNum;
-
- /* All tessellated CD layers were updated! */
- ccgdm->dm.dirty &= ~DM_DIRTY_TESS_CDLAYERS;
-
#ifndef USE_DYNSIZE
BLI_array_free(vertidx);
BLI_array_free(loopidx);
#endif
free_ss_weights(&wtable);
+ BLI_assert(vertNum == ccgSubSurf_getNumFinalVerts(ss));
+ BLI_assert(edgeNum == ccgSubSurf_getNumFinalEdges(ss));
+ BLI_assert(loopindex2 == ccgSubSurf_getNumFinalFaces(ss) * 4);
+ BLI_assert(faceNum == ccgSubSurf_getNumFinalFaces(ss));
+
+}
+
+/* Fill in only geometry arrays needed for the GPU tessellation. */
+static void set_ccgdm_gpu_geometry(CCGDerivedMesh *ccgdm,
+ CCGSubSurf *ss,
+ DerivedMesh *dm)
+{
+ const int totface = ccgSubSurf_getNumFaces(ss);
+ MPoly *mpoly = CustomData_get_layer(&dm->polyData, CD_MPOLY);
+ int index;
+ DMFlagMat *faceFlags = ccgdm->faceFlags;
+
+ for (index = 0; index < totface; index++) {
+ faceFlags->flag = mpoly ? mpoly[index].flag : 0;
+ faceFlags->mat_nr = mpoly ? mpoly[index].mat_nr : 0;
+ faceFlags++;
+ }
+
+ /* TODO(sergey): Fill in edge flags. */
+}
+
+static CCGDerivedMesh *getCCGDerivedMesh(CCGSubSurf *ss,
+ int drawInteriorEdges,
+ int useSubsurfUv,
+ DerivedMesh *dm,
+ bool use_gpu_backend)
+{
+ const int totedge = ccgSubSurf_getNumEdges(ss);
+ const int totface = ccgSubSurf_getNumFaces(ss);
+ CCGDerivedMesh *ccgdm = MEM_callocN(sizeof(*ccgdm), "ccgdm");
+ int numTex, numCol;
+ int hasPCol, hasOrigSpace;
+
+ if (use_gpu_backend == false) {
+ DM_from_template(&ccgdm->dm, dm, DM_TYPE_CCGDM,
+ ccgSubSurf_getNumFinalVerts(ss),
+ ccgSubSurf_getNumFinalEdges(ss),
+ ccgSubSurf_getNumFinalFaces(ss),
+ ccgSubSurf_getNumFinalFaces(ss) * 4,
+ ccgSubSurf_getNumFinalFaces(ss));
+
+ numTex = CustomData_number_of_layers(&ccgdm->dm.loopData,
+ CD_MLOOPUV);
+ numCol = CustomData_number_of_layers(&ccgdm->dm.loopData,
+ CD_MLOOPCOL);
+ hasPCol = CustomData_has_layer(&ccgdm->dm.loopData,
+ CD_PREVIEW_MLOOPCOL);
+ hasOrigSpace = CustomData_has_layer(&ccgdm->dm.loopData,
+ CD_ORIGSPACE_MLOOP);
+
+ if (
+ (numTex && CustomData_number_of_layers(&ccgdm->dm.faceData,
+ CD_MTFACE) != numTex) ||
+ (numCol && CustomData_number_of_layers(&ccgdm->dm.faceData,
+ CD_MCOL) != numCol) ||
+ (hasPCol && !CustomData_has_layer(&ccgdm->dm.faceData,
+ CD_PREVIEW_MCOL)) ||
+ (hasOrigSpace && !CustomData_has_layer(&ccgdm->dm.faceData,
+ CD_ORIGSPACE)) )
+ {
+ CustomData_from_bmeshpoly(&ccgdm->dm.faceData,
+ &ccgdm->dm.polyData,
+ &ccgdm->dm.loopData,
+ ccgSubSurf_getNumFinalFaces(ss));
+ }
+
+ CustomData_free_layer_active(&ccgdm->dm.polyData, CD_NORMAL,
+ ccgdm->dm.numPolyData);
+
+ ccgdm->reverseFaceMap =
+ MEM_callocN(sizeof(int) * ccgSubSurf_getNumFinalFaces(ss),
+ "reverseFaceMap");
+
+ create_ccgdm_maps(ccgdm, ss);
+ }
+ else {
+ DM_from_template(&ccgdm->dm, dm, DM_TYPE_CCGDM,
+ 0, 0, 0, 0, dm->getNumPolys(dm));
+ CustomData_copy_data(&dm->polyData,
+ &ccgdm->dm.polyData,
+ 0, 0, dm->getNumPolys(dm));
+ }
+
+ set_default_ccgdm_callbacks(ccgdm);
+
+ ccgdm->ss = ss;
+ ccgdm->drawInteriorEdges = drawInteriorEdges;
+ ccgdm->useSubsurfUv = useSubsurfUv;
+ ccgdm->useGpuBackend = use_gpu_backend;
+
+ /* CDDM hack. */
+ ccgdm->edgeFlags = MEM_callocN(sizeof(short) * totedge, "edgeFlags");
+ ccgdm->faceFlags = MEM_callocN(sizeof(DMFlagMat) * totface, "faceFlags");
+
+ if (use_gpu_backend == false) {
+ set_ccgdm_all_geometry(ccgdm, ss, dm, useSubsurfUv != 0);
+ }
+ else {
+ set_ccgdm_gpu_geometry(ccgdm, ss, dm);
+ }
+
+ ccgdm->dm.numVertData = ccgSubSurf_getNumFinalVerts(ss);
+ ccgdm->dm.numEdgeData = ccgSubSurf_getNumFinalEdges(ss);
+ ccgdm->dm.numTessFaceData = ccgSubSurf_getNumFinalFaces(ss);
+ ccgdm->dm.numLoopData = ccgdm->dm.numTessFaceData * 4;
+ ccgdm->dm.numPolyData = ccgdm->dm.numTessFaceData;
+
+ /* All tessellated CD layers were updated! */
+ ccgdm->dm.dirty &= ~DM_DIRTY_TESS_CDLAYERS;
+
return ccgdm;
}
/***/
+static bool subsurf_use_gpu_backend(SubsurfFlags flags)
+{
+#ifdef WITH_OPENSUBDIV
+ /* Use GPU backend if it's the last modifier in the stack
+ * and the user chose to use any of the OSD compute devices,
+ * but also check whether the GPU has all needed features.
+ */
+ return
+ (flags & SUBSURF_USE_GPU_BACKEND) != 0 &&
+ (U.opensubdiv_compute_type != USER_OPENSUBDIV_COMPUTE_NONE) &&
+ (openSubdiv_supportGPUDisplay());
+#else
+ (void)flags;
+ return false;
+#endif
+}
+
struct DerivedMesh *subsurf_make_derived_from_derived(
struct DerivedMesh *dm,
struct SubsurfModifierData *smd,
@@ -4154,18 +4509,28 @@ struct DerivedMesh *subsurf_make_derived_from_derived(
int useSubsurfUv = smd->flags & eSubsurfModifierFlag_SubsurfUv;
int drawInteriorEdges = !(smd->flags & eSubsurfModifierFlag_ControlEdges);
CCGDerivedMesh *result;
+ bool use_gpu_backend = subsurf_use_gpu_backend(flags);
/* note: editmode calculation can only run once per
* modifier stack evaluation (uses freed cache) [#36299] */
if (flags & SUBSURF_FOR_EDIT_MODE) {
int levels = (smd->modifier.scene) ? get_render_subsurf_level(&smd->modifier.scene->r, smd->levels, false) : smd->levels;
+ /* TODO(sergey): Same as emCache below. */
+ if ((flags & SUBSURF_IN_EDIT_MODE) && smd->mCache) {
+ ccgSubSurf_free(smd->mCache);
+ smd->mCache = NULL;
+ }
+
smd->emCache = _getSubSurf(smd->emCache, levels, 3, useSimple | useAging | CCG_CALC_NORMALS);
- ss_sync_from_derivedmesh(smd->emCache, dm, vertCos, useSimple);
+#ifdef WITH_OPENSUBDIV
+ ccgSubSurf_setSkipGrids(smd->emCache, use_gpu_backend);
+#endif
+ ss_sync_from_derivedmesh(smd->emCache, dm, vertCos, useSimple);
result = getCCGDerivedMesh(smd->emCache,
drawInteriorEdges,
- useSubsurfUv, dm);
+ useSubsurfUv, dm, use_gpu_backend);
}
else if (flags & SUBSURF_USE_RENDER_PARAMS) {
/* Do not use cache in render mode. */
@@ -4180,7 +4545,7 @@ struct DerivedMesh *subsurf_make_derived_from_derived(
ss_sync_from_derivedmesh(ss, dm, vertCos, useSimple);
result = getCCGDerivedMesh(ss,
- drawInteriorEdges, useSubsurfUv, dm);
+ drawInteriorEdges, useSubsurfUv, dm, false);
result->freeSS = 1;
}
@@ -4212,23 +4577,41 @@ struct DerivedMesh *subsurf_make_derived_from_derived(
result = getCCGDerivedMesh(smd->mCache,
drawInteriorEdges,
- useSubsurfUv, dm);
+ useSubsurfUv, dm, false);
}
else {
CCGFlags ccg_flags = useSimple | CCG_USE_ARENA | CCG_CALC_NORMALS;
-
+ CCGSubSurf *prevSS = NULL;
+
if (smd->mCache && (flags & SUBSURF_IS_FINAL_CALC)) {
+#ifdef WITH_OPENSUBDIV
+ /* With OpenSubdiv enabled we always try to re-use the previous
+ * subsurf structure in order to save computation time, since
+ * re-creation is rather a complicated business.
+ *
+ * TODO(sergey): There was a good reason why the final calculation
+ * used to free the entire cached subsurf structure. That reason
+ * still needs to be investigated to be sure we don't have
+ * regressions here.
+ */
+ prevSS = smd->mCache;
+#else
ccgSubSurf_free(smd->mCache);
smd->mCache = NULL;
+#endif
}
+
if (flags & SUBSURF_ALLOC_PAINT_MASK)
ccg_flags |= CCG_ALLOC_MASK;
- ss = _getSubSurf(NULL, levels, 3, ccg_flags);
+ ss = _getSubSurf(prevSS, levels, 3, ccg_flags);
+#ifdef WITH_OPENSUBDIV
+ ccgSubSurf_setSkipGrids(ss, use_gpu_backend);
+#endif
ss_sync_from_derivedmesh(ss, dm, vertCos, useSimple);
- result = getCCGDerivedMesh(ss, drawInteriorEdges, useSubsurfUv, dm);
+ result = getCCGDerivedMesh(ss, drawInteriorEdges, useSubsurfUv, dm, use_gpu_backend);
if (flags & SUBSURF_IS_FINAL_CALC)
smd->mCache = ss;