git.blender.org/blender.git
author     Sergey Sharybin <sergey.vfx@gmail.com>  2015-07-20 17:08:06 +0300
committer  Sergey Sharybin <sergey.vfx@gmail.com>  2015-07-20 23:29:26 +0300
commit     3d364896725db8336d785ba6cf977b62c0f2c0ce (patch)
tree       b189cdc4956b557d19b0c2ea976c6f48affa1ed2 /source/blender/gpu
parent     2466c4f8cebd3977f29524d79050feff44b40fff (diff)
OpenSubdiv: Commit of OpenSubdiv integration into Blender
This commit contains all the remaining parts needed for the initial integration of OpenSubdiv into Blender's subdivision surface code. It includes both GPU and CPU backends, which work in the following way:

- When the SubSurf modifier is the last one in the modifier stack, the GPU pipeline of OpenSubdiv is used, making viewport performance as fast as possible. This also requires a graphics card with GLSL 1.5 support; if that requirement is not met, no GPU pipeline is used at all.

- If SubSurf is not the last modifier, or if the DerivedMesh is being evaluated for rendering, the CPU limit evaluation API from OpenSubdiv is used. This only replaces the legacy evaluation code from CCGSubSurf_legacy, but keeps the CCG structures exactly the same as they have been for ages.

This integration is fully covered with ifdefs and is not enabled by default, because there are several TODOs to be solved first:

- Face-varying data interpolation is not cleanly implemented for the GPU in OpenSubdiv 3.0, and it is not implemented for the limit evaluation API at all. This basically means we'll have a really hard time supporting UVs.

- Limit evaluation only works with adaptively subdivided meshes so far, which basically means all the points of the CCG are pushed to the limit. This gives a different result from the old code.

- There are some serious optimizations possible in the topology refiner creation, which would speed up initial OpenSubdiv mesh creation.

- There are some hardcoded assumptions in the GPU and DerivedMesh areas which could be generalized. That's something where Antony and Campbell can help, making the code structured in a way which is reusable by all planned viewport projects.

- There are also some workarounds in the dependency graph to make sure OpenGL buffers are only freed from the main thread.

Those who want to experiment with this code should grab the dev branch (NOT master) from https://github.com/Nazg-Gul/OpenSubdiv/tree/dev -- it has some patches applied which we're working on getting into upstream.
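For orientation, the backend selection described above boils down to a check along these lines. This is a minimal sketch for illustration only; the helper name and the final capability check are hypothetical stand-ins, not code added by this commit.

/* Sketch: choose between OpenSubdiv's GPU pipeline and its CPU limit
 * evaluation, following the rules described in the commit message. */
static bool subsurf_use_gpu_backend_example(const Object *ob,
                                            const ModifierData *subsurf_md,
                                            const bool for_render)
{
#ifdef WITH_OPENSUBDIV
	/* Rendering always evaluates the DerivedMesh on the CPU. */
	if (for_render) {
		return false;
	}
	/* The GPU pipeline is only used when SubSurf is the last modifier
	 * in the stack, so its result can be drawn straight from the GPU. */
	if ((const void *)subsurf_md != ob->modifiers.last) {
		return false;
	}
	/* Requires a GLSL-capable graphics card; stand-in for the real
	 * GLSL 1.5 check, otherwise no GPU pipeline is used at all. */
	return GPU_glsl_support();
#else
	(void)ob; (void)subsurf_md; (void)for_render;
	return false;
#endif
}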
Diffstat (limited to 'source/blender/gpu')
-rw-r--r--  source/blender/gpu/CMakeLists.txt                    |   4
-rw-r--r--  source/blender/gpu/GPU_draw.h                        |   5
-rw-r--r--  source/blender/gpu/GPU_material.h                    |  10
-rw-r--r--  source/blender/gpu/SConscript                        |   4
-rw-r--r--  source/blender/gpu/intern/gpu_codegen.c              | 178
-rw-r--r--  source/blender/gpu/intern/gpu_codegen.h              |   3
-rw-r--r--  source/blender/gpu/intern/gpu_draw.c                 |  73
-rw-r--r--  source/blender/gpu/intern/gpu_extensions.c           | 100
-rw-r--r--  source/blender/gpu/intern/gpu_material.c             |  89
-rw-r--r--  source/blender/gpu/intern/gpu_simple_shader.c        |   3
-rw-r--r--  source/blender/gpu/shaders/gpu_shader_material.glsl  |   5
-rw-r--r--  source/blender/gpu/shaders/gpu_shader_vertex.glsl    |  21
12 files changed, 452 insertions, 43 deletions
diff --git a/source/blender/gpu/CMakeLists.txt b/source/blender/gpu/CMakeLists.txt
index 23a2b77d1e7..328623f884f 100644
--- a/source/blender/gpu/CMakeLists.txt
+++ b/source/blender/gpu/CMakeLists.txt
@@ -92,6 +92,7 @@ set(SRC
intern/gpu_private.h
)
+data_to_c_simple(shaders/gpu_shader_geometry.glsl SRC)
data_to_c_simple(shaders/gpu_program_smoke_frag.glsl SRC)
data_to_c_simple(shaders/gpu_program_smoke_color_frag.glsl SRC)
data_to_c_simple(shaders/gpu_shader_material.glsl SRC)
@@ -127,6 +128,9 @@ if(WITH_IMAGE_DDS)
add_definitions(-DWITH_DDS)
endif()
+if(WITH_OPENSUBDIV)
+ add_definitions(-DWITH_OPENSUBDIV)
+endif()
blender_add_lib(bf_gpu "${SRC}" "${INC}" "${INC_SYS}")
diff --git a/source/blender/gpu/GPU_draw.h b/source/blender/gpu/GPU_draw.h
index 26db4058d34..0992f8e9d21 100644
--- a/source/blender/gpu/GPU_draw.h
+++ b/source/blender/gpu/GPU_draw.h
@@ -148,6 +148,11 @@ void GPU_create_smoke(struct SmokeModifierData *smd, int highres);
/* Delayed free of OpenGL buffers by main thread */
void GPU_free_unused_buffers(void);
+#ifdef WITH_OPENSUBDIV
+struct DerivedMesh;
+void GPU_draw_update_fvar_offset(struct DerivedMesh *dm);
+#endif
+
#ifdef __cplusplus
}
#endif
diff --git a/source/blender/gpu/GPU_material.h b/source/blender/gpu/GPU_material.h
index 5995366c095..dd08ed83e5a 100644
--- a/source/blender/gpu/GPU_material.h
+++ b/source/blender/gpu/GPU_material.h
@@ -206,8 +206,8 @@ GPUBlendMode GPU_material_alpha_blend(GPUMaterial *material, float obcol[4]);
/* High level functions to create and use GPU materials */
GPUMaterial *GPU_material_world(struct Scene *scene, struct World *wo);
-GPUMaterial *GPU_material_from_blender(struct Scene *scene, struct Material *ma);
-GPUMaterial *GPU_material_matcap(struct Scene *scene, struct Material *ma);
+GPUMaterial *GPU_material_from_blender(struct Scene *scene, struct Material *ma, bool use_opensubdiv);
+GPUMaterial *GPU_material_matcap(struct Scene *scene, struct Material *ma, bool use_opensubdiv);
void GPU_material_free(struct ListBase *gpumaterial);
void GPU_materials_free(void);
@@ -322,6 +322,12 @@ typedef struct GPUParticleInfo
float angular_velocity[3];
} GPUParticleInfo;
+#ifdef WITH_OPENSUBDIV
+struct DerivedMesh;
+void GPU_material_update_fvar_offset(GPUMaterial *gpu_material,
+ struct DerivedMesh *dm);
+#endif
+
#ifdef __cplusplus
}
#endif
diff --git a/source/blender/gpu/SConscript b/source/blender/gpu/SConscript
index ff5fb42c021..880a6d14e26 100644
--- a/source/blender/gpu/SConscript
+++ b/source/blender/gpu/SConscript
@@ -60,9 +60,13 @@ if env['WITH_BF_SMOKE']:
if env['WITH_BF_DDS']:
defs.append('WITH_DDS')
+if env['WITH_BF_OPENSUBDIV']:
+ defs.append('WITH_OPENSUBDIV')
+
# generated data files
import os
sources.extend((
+ os.path.join(env['DATA_SOURCES'], "gpu_shader_geometry.glsl.c"),
os.path.join(env['DATA_SOURCES'], "gpu_program_smoke_frag.glsl.c"),
os.path.join(env['DATA_SOURCES'], "gpu_program_smoke_color_frag.glsl.c"),
os.path.join(env['DATA_SOURCES'], "gpu_shader_simple_frag.glsl.c"),
diff --git a/source/blender/gpu/intern/gpu_codegen.c b/source/blender/gpu/intern/gpu_codegen.c
index 335342c7123..68b9e3845f7 100644
--- a/source/blender/gpu/intern/gpu_codegen.c
+++ b/source/blender/gpu/intern/gpu_codegen.c
@@ -56,7 +56,7 @@
extern char datatoc_gpu_shader_material_glsl[];
extern char datatoc_gpu_shader_vertex_glsl[];
extern char datatoc_gpu_shader_vertex_world_glsl[];
-
+extern char datatoc_gpu_shader_geometry_glsl[];
static char *glsl_material_library = NULL;
@@ -531,8 +531,19 @@ static int codegen_print_uniforms_functions(DynStr *ds, ListBase *nodes)
}
}
else if (input->source == GPU_SOURCE_ATTRIB && input->attribfirst) {
+#ifdef WITH_OPENSUBDIV
+ bool skip_opensubdiv = input->attribtype == CD_TANGENT;
+ if (skip_opensubdiv) {
+ BLI_dynstr_appendf(ds, "#ifndef USE_OPENSUBDIV\n");
+ }
+#endif
BLI_dynstr_appendf(ds, "varying %s var%d;\n",
GPU_DATATYPE_STR[input->type], input->attribid);
+#ifdef WITH_OPENSUBDIV
+ if (skip_opensubdiv) {
+ BLI_dynstr_appendf(ds, "#endif\n");
+ }
+#endif
}
}
}
@@ -633,6 +644,12 @@ static char *code_generate_fragment(ListBase *nodes, GPUOutput *output)
char *code;
int builtins;
+#ifdef WITH_OPENSUBDIV
+ GPUNode *node;
+ GPUInput *input;
+#endif
+
+
#if 0
BLI_dynstr_append(ds, FUNCTION_PROTOTYPES);
#endif
@@ -650,7 +667,35 @@ static char *code_generate_fragment(ListBase *nodes, GPUOutput *output)
if (builtins & GPU_VIEW_NORMAL)
BLI_dynstr_append(ds, "\tvec3 facingnormal = (gl_FrontFacing)? varnormal: -varnormal;\n");
-
+
+ /* Calculate tangent space. */
+#ifdef WITH_OPENSUBDIV
+ {
+ bool has_tangent = false;
+ for (node = nodes->first; node; node = node->next) {
+ for (input = node->inputs.first; input; input = input->next) {
+ if (input->source == GPU_SOURCE_ATTRIB && input->attribfirst) {
+ if (input->attribtype == CD_TANGENT) {
+ BLI_dynstr_appendf(ds, "#ifdef USE_OPENSUBDIV\n");
+ BLI_dynstr_appendf(ds, "\t%s var%d;\n",
+ GPU_DATATYPE_STR[input->type],
+ input->attribid);
+ if (has_tangent == false) {
+ BLI_dynstr_appendf(ds, "\tvec3 Q1 = dFdx(inpt.v.position.xyz);\n");
+ BLI_dynstr_appendf(ds, "\tvec3 Q2 = dFdy(inpt.v.position.xyz);\n");
+ BLI_dynstr_appendf(ds, "\tvec2 st1 = dFdx(inpt.v.uv);\n");
+ BLI_dynstr_appendf(ds, "\tvec2 st2 = dFdy(inpt.v.uv);\n");
+ BLI_dynstr_appendf(ds, "\tvec3 T = normalize(Q1 * st2.t - Q2 * st1.t);\n");
+ }
+ BLI_dynstr_appendf(ds, "\tvar%d = vec4(T, 1.0);\n", input->attribid);
+ BLI_dynstr_appendf(ds, "#endif\n");
+ }
+ }
+ }
+ }
+ }
+#endif
+
codegen_declare_tmps(ds, nodes);
codegen_call_functions(ds, nodes, output);
@@ -678,10 +723,21 @@ static char *code_generate_vertex(ListBase *nodes, const GPUMatType type)
for (node = nodes->first; node; node = node->next) {
for (input = node->inputs.first; input; input = input->next) {
if (input->source == GPU_SOURCE_ATTRIB && input->attribfirst) {
+#ifdef WITH_OPENSUBDIV
+ bool skip_opensubdiv = ELEM(input->attribtype, CD_MTFACE, CD_TANGENT);
+ if (skip_opensubdiv) {
+ BLI_dynstr_appendf(ds, "#ifndef USE_OPENSUBDIV\n");
+ }
+#endif
BLI_dynstr_appendf(ds, "attribute %s att%d;\n",
GPU_DATATYPE_STR[input->type], input->attribid);
BLI_dynstr_appendf(ds, "varying %s var%d;\n",
GPU_DATATYPE_STR[input->type], input->attribid);
+#ifdef WITH_OPENSUBDIV
+ if (skip_opensubdiv) {
+ BLI_dynstr_appendf(ds, "#endif\n");
+ }
+#endif
}
}
}
@@ -706,11 +762,29 @@ static char *code_generate_vertex(ListBase *nodes, const GPUMatType type)
for (input = node->inputs.first; input; input = input->next)
if (input->source == GPU_SOURCE_ATTRIB && input->attribfirst) {
if (input->attribtype == CD_TANGENT) { /* silly exception */
+#ifdef WITH_OPENSUBDIV
+ BLI_dynstr_appendf(ds, "#ifndef USE_OPENSUBDIV\n");
+#endif
BLI_dynstr_appendf(ds, "\tvar%d.xyz = normalize(gl_NormalMatrix * att%d.xyz);\n", input->attribid, input->attribid);
BLI_dynstr_appendf(ds, "\tvar%d.w = att%d.w;\n", input->attribid, input->attribid);
+#ifdef WITH_OPENSUBDIV
+ BLI_dynstr_appendf(ds, "#endif\n");
+#endif
}
- else
+ else {
+#ifdef WITH_OPENSUBDIV
+ bool is_mtface = input->attribtype == CD_MTFACE;
+ if (is_mtface) {
+ BLI_dynstr_appendf(ds, "#ifndef USE_OPENSUBDIV\n");
+ }
+#endif
BLI_dynstr_appendf(ds, "\tvar%d = att%d;\n", input->attribid, input->attribid);
+#ifdef WITH_OPENSUBDIV
+ if (is_mtface) {
+ BLI_dynstr_appendf(ds, "#endif\n");
+ }
+#endif
+ }
}
/* unfortunately special handling is needed here because we abuse gl_Color/gl_SecondaryColor flat shading */
else if (input->source == GPU_SOURCE_OPENGL_BUILTIN) {
@@ -738,6 +812,61 @@ static char *code_generate_vertex(ListBase *nodes, const GPUMatType type)
return code;
}
+static char *code_generate_geometry(ListBase *nodes, bool use_opensubdiv)
+{
+#ifdef WITH_OPENSUBDIV
+ if (use_opensubdiv) {
+ DynStr *ds = BLI_dynstr_new();
+ GPUNode *node;
+ GPUInput *input;
+ char *code;
+
+ /* Generate varying declarations. */
+ for (node = nodes->first; node; node = node->next) {
+ for (input = node->inputs.first; input; input = input->next) {
+ if (input->source == GPU_SOURCE_ATTRIB && input->attribfirst) {
+ if (input->attribtype == CD_MTFACE) {
+ BLI_dynstr_appendf(ds, "varying %s var%d;\n",
+ GPU_DATATYPE_STR[input->type],
+ input->attribid);
+ BLI_dynstr_appendf(ds, "uniform int fvar%d_offset;\n",
+ input->attribid);
+ }
+ }
+ }
+ }
+
+ BLI_dynstr_append(ds, datatoc_gpu_shader_geometry_glsl);
+
+ /* Generate varying assignments. */
+ for (node = nodes->first; node; node = node->next) {
+ for (input = node->inputs.first; input; input = input->next) {
+ if (input->source == GPU_SOURCE_ATTRIB && input->attribfirst) {
+ if (input->attribtype == CD_MTFACE) {
+ BLI_dynstr_appendf(ds,
+ "\tINTERP_FACE_VARYING_2(var%d, "
+ "fvar%d_offset, st);\n",
+ input->attribid,
+ input->attribid);
+ }
+ }
+ }
+ }
+
+ BLI_dynstr_append(ds, "}\n\n");
+ code = BLI_dynstr_get_cstring(ds);
+ BLI_dynstr_free(ds);
+
+ //if (G.debug & G_DEBUG) printf("%s\n", code);
+
+ return code;
+ }
+#else
+ UNUSED_VARS(nodes, use_opensubdiv);
+#endif
+ return NULL;
+}
+
void GPU_code_generate_glsl_lib(void)
{
DynStr *ds;
@@ -786,8 +915,28 @@ static void gpu_nodes_extract_dynamic_inputs(GPUPass *pass, ListBase *nodes)
/* attributes don't need to be bound, they already have
* an id that the drawing functions will use */
- if (input->source == GPU_SOURCE_ATTRIB ||
- input->source == GPU_SOURCE_BUILTIN ||
+ if (input->source == GPU_SOURCE_ATTRIB) {
+#ifdef WITH_OPENSUBDIV
+ /* We do need mtface attributes for later, so we can
+ * update the face-varying variables' offset in the texture
+ * buffer for proper sampling from the shader.
+ *
+ * We don't do anything with the attribute itself; we
+ * only use it to learn which uniform name is to be
+ * updated.
+ *
+ * TODO(sergey): We can add an extra uniform input
+ * for the offset, which will be purely internal and
+ * would avoid having such exceptions.
+ */
+ if (input->attribtype != CD_MTFACE) {
+ continue;
+ }
+#else
+ continue;
+#endif
+ }
+ if (input->source == GPU_SOURCE_BUILTIN ||
input->source == GPU_SOURCE_OPENGL_BUILTIN)
{
continue;
@@ -811,6 +960,14 @@ static void gpu_nodes_extract_dynamic_inputs(GPUPass *pass, ListBase *nodes)
if (extract)
input->shaderloc = GPU_shader_get_uniform(shader, input->shadername);
+#ifdef WITH_OPENSUBDIV
+ if (input->source == GPU_SOURCE_ATTRIB &&
+ input->attribtype == CD_MTFACE)
+ {
+ extract = 1;
+ }
+#endif
+
/* extract nodes */
if (extract) {
BLI_remlink(&node->inputs, input);
@@ -1432,11 +1589,11 @@ static void gpu_nodes_prune(ListBase *nodes, GPUNodeLink *outlink)
GPUPass *GPU_generate_pass(ListBase *nodes, GPUNodeLink *outlink,
GPUVertexAttribs *attribs, int *builtins,
- const GPUMatType type, const char *UNUSED(name))
+ const GPUMatType type, const char *UNUSED(name), const bool use_opensubdiv)
{
GPUShader *shader;
GPUPass *pass;
- char *vertexcode, *fragmentcode;
+ char *vertexcode, *geometrycode, *fragmentcode;
#if 0
if (!FUNCTION_LIB) {
@@ -1454,7 +1611,8 @@ GPUPass *GPU_generate_pass(ListBase *nodes, GPUNodeLink *outlink,
/* generate code and compile with opengl */
fragmentcode = code_generate_fragment(nodes, outlink->output);
vertexcode = code_generate_vertex(nodes, type);
- shader = GPU_shader_create(vertexcode, fragmentcode, NULL, glsl_material_library, NULL, 0, 0, 0);
+ geometrycode = code_generate_geometry(nodes, use_opensubdiv);
+ shader = GPU_shader_create(vertexcode, fragmentcode, geometrycode, glsl_material_library, NULL, 0, 0, 0);
/* failed? */
if (!shader) {
@@ -1474,6 +1632,7 @@ GPUPass *GPU_generate_pass(ListBase *nodes, GPUNodeLink *outlink,
pass->output = outlink->output;
pass->shader = shader;
pass->fragmentcode = fragmentcode;
+ pass->geometrycode = geometrycode;
pass->vertexcode = vertexcode;
pass->libcode = glsl_material_library;
@@ -1490,8 +1649,9 @@ void GPU_pass_free(GPUPass *pass)
gpu_inputs_free(&pass->inputs);
if (pass->fragmentcode)
MEM_freeN(pass->fragmentcode);
+ if (pass->geometrycode)
+ MEM_freeN(pass->geometrycode);
if (pass->vertexcode)
MEM_freeN(pass->vertexcode);
MEM_freeN(pass);
}
-
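To make the new code_generate_geometry() easier to follow, here is roughly what it would emit for a single CD_MTFACE attribute with attribid 0, shown as a C string constant in the style used elsewhere in this file. This is illustrative only: the vec2 type is an assumption, and the INTERP_FACE_VARYING_2 macro is defined in gpu_shader_geometry.glsl, which is not part of this diff.

/* Illustrative only: approximate output of code_generate_geometry() for one
 * CD_MTFACE attribute (attribid == 0).  The pasted-in body of
 * gpu_shader_geometry.glsl defines INTERP_FACE_VARYING_2 and opens the
 * function that the trailing "}" below closes. */
static const char example_generated_geometry_code[] =
	"varying vec2 var0;\n"          /* per-attribute varying declaration */
	"uniform int fvar0_offset;\n"   /* offset into the face-varying buffer */
	/* ... contents of datatoc_gpu_shader_geometry_glsl appended here ... */
	"\tINTERP_FACE_VARYING_2(var0, fvar0_offset, st);\n"
	"}\n\n";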
diff --git a/source/blender/gpu/intern/gpu_codegen.h b/source/blender/gpu/intern/gpu_codegen.h
index c6ed2e3f837..5aa187014ba 100644
--- a/source/blender/gpu/intern/gpu_codegen.h
+++ b/source/blender/gpu/intern/gpu_codegen.h
@@ -161,6 +161,7 @@ struct GPUPass {
struct GPUOutput *output;
struct GPUShader *shader;
char *fragmentcode;
+ char *geometrycode;
char *vertexcode;
const char *libcode;
};
@@ -170,7 +171,7 @@ typedef struct GPUPass GPUPass;
GPUPass *GPU_generate_pass(ListBase *nodes, struct GPUNodeLink *outlink,
struct GPUVertexAttribs *attribs, int *builtin,
- const GPUMatType type, const char *name);
+ const GPUMatType type, const char *name, const bool use_opensubdiv);
struct GPUShader *GPU_pass_shader(GPUPass *pass);
diff --git a/source/blender/gpu/intern/gpu_draw.c b/source/blender/gpu/intern/gpu_draw.c
index 04441fc1b20..af8b2a0f806 100644
--- a/source/blender/gpu/intern/gpu_draw.c
+++ b/source/blender/gpu/intern/gpu_draw.c
@@ -71,6 +71,7 @@
#include "BKE_node.h"
#include "BKE_object.h"
#include "BKE_scene.h"
+#include "BKE_subsurf.h"
#include "BKE_DerivedMesh.h"
#include "GPU_buffers.h"
@@ -82,6 +83,13 @@
#include "smoke_API.h"
+#ifdef WITH_OPENSUBDIV
+# include "DNA_mesh_types.h"
+# include "BKE_editmesh.h"
+
+# include "gpu_codegen.h"
+#endif
+
extern Material defmaterial; /* from material.c */
/* Text Rendering */
@@ -1357,7 +1365,7 @@ void GPU_free_images_old(void)
{
Image *ima;
static int lasttime = 0;
- int ctime = PIL_check_seconds_timer_i();
+ int ctime = (int)PIL_check_seconds_timer();
/*
* Run garbage collector once for every collecting period of time
@@ -1433,6 +1441,7 @@ static struct GPUMaterialState {
int lastmatnr, lastretval;
GPUBlendMode lastalphablend;
+ bool is_opensubdiv;
} GMS = {NULL};
/* fixed function material, alpha handed by caller */
@@ -1519,6 +1528,27 @@ void GPU_begin_object_materials(View3D *v3d, RegionView3D *rv3d, Scene *scene, O
const bool gamma = BKE_scene_check_color_management_enabled(scene);
const bool new_shading_nodes = BKE_scene_use_new_shading_nodes(scene);
const bool use_matcap = (v3d->flag2 & V3D_SHOW_SOLID_MATCAP) != 0; /* assumes v3d->defmaterial->preview is set */
+ bool use_opensubdiv = false;
+
+#ifdef WITH_OPENSUBDIV
+ {
+ DerivedMesh *derivedFinal = NULL;
+ Mesh *me = ob->data;
+ BMEditMesh *em = me->edit_btmesh;
+
+ if (em != NULL) {
+ derivedFinal = em->derivedFinal;
+ }
+ else {
+ derivedFinal = ob->derivedFinal;
+ }
+
+ if (derivedFinal != NULL && derivedFinal->type == DM_TYPE_CCGDM) {
+ CCGDerivedMesh *ccgdm = (CCGDerivedMesh *) derivedFinal;
+ use_opensubdiv = ccgdm->useGpuBackend;
+ }
+ }
+#endif
#ifdef WITH_GAMEENGINE
if (rv3d->rflag & RV3D_IS_GAME_ENGINE) {
@@ -1541,6 +1571,7 @@ void GPU_begin_object_materials(View3D *v3d, RegionView3D *rv3d, Scene *scene, O
GMS.gob = ob;
GMS.gscene = scene;
+ GMS.is_opensubdiv = use_opensubdiv;
GMS.totmat = use_matcap ? 1 : ob->totcol + 1; /* materials start from 1, default material is 0 */
GMS.glay = (v3d->localvd)? v3d->localvd->lay: v3d->lay; /* keep lamps visible in local view */
GMS.gscenelock = (v3d->scenelock != 0);
@@ -1572,7 +1603,7 @@ void GPU_begin_object_materials(View3D *v3d, RegionView3D *rv3d, Scene *scene, O
/* viewport material, setup in space_view3d, defaults to matcap using ma->preview now */
if (use_matcap) {
GMS.gmatbuf[0] = v3d->defmaterial;
- GPU_material_matcap(scene, v3d->defmaterial);
+ GPU_material_matcap(scene, v3d->defmaterial, use_opensubdiv);
/* do material 1 too, for displists! */
memcpy(&GMS.matbuf[1], &GMS.matbuf[0], sizeof(GPUMaterialFixed));
@@ -1590,7 +1621,7 @@ void GPU_begin_object_materials(View3D *v3d, RegionView3D *rv3d, Scene *scene, O
if (glsl) {
GMS.gmatbuf[0] = &defmaterial;
- GPU_material_from_blender(GMS.gscene, &defmaterial);
+ GPU_material_from_blender(GMS.gscene, &defmaterial, GMS.is_opensubdiv);
}
GMS.alphablend[0] = GPU_BLEND_SOLID;
@@ -1604,7 +1635,7 @@ void GPU_begin_object_materials(View3D *v3d, RegionView3D *rv3d, Scene *scene, O
if (ma == NULL) ma = &defmaterial;
/* create glsl material if requested */
- gpumat = glsl? GPU_material_from_blender(GMS.gscene, ma): NULL;
+ gpumat = glsl? GPU_material_from_blender(GMS.gscene, ma, GMS.is_opensubdiv): NULL;
if (gpumat) {
/* do glsl only if creating it succeed, else fallback */
@@ -1708,7 +1739,7 @@ int GPU_enable_material(int nr, void *attribs)
/* unbind glsl material */
if (GMS.gboundmat) {
if (GMS.is_alpha_pass) glDepthMask(0);
- GPU_material_unbind(GPU_material_from_blender(GMS.gscene, GMS.gboundmat));
+ GPU_material_unbind(GPU_material_from_blender(GMS.gscene, GMS.gboundmat, GMS.is_opensubdiv));
GMS.gboundmat = NULL;
}
@@ -1735,7 +1766,7 @@ int GPU_enable_material(int nr, void *attribs)
float auto_bump_scale;
- gpumat = GPU_material_from_blender(GMS.gscene, mat);
+ gpumat = GPU_material_from_blender(GMS.gscene, mat, GMS.is_opensubdiv);
GPU_material_vertex_attributes(gpumat, gattribs);
if (GMS.dob)
@@ -1802,7 +1833,7 @@ void GPU_disable_material(void)
glDisable(GL_CULL_FACE);
if (GMS.is_alpha_pass) glDepthMask(0);
- GPU_material_unbind(GPU_material_from_blender(GMS.gscene, GMS.gboundmat));
+ GPU_material_unbind(GPU_material_from_blender(GMS.gscene, GMS.gboundmat, GMS.is_opensubdiv));
GMS.gboundmat = NULL;
}
@@ -2108,3 +2139,31 @@ void GPU_state_init(void)
gpu_multisample(false);
}
+#ifdef WITH_OPENSUBDIV
+/* Update the face-varying variables' offset, which might
+ * differ from mesh to mesh sharing the same material.
+ */
+void GPU_draw_update_fvar_offset(DerivedMesh *dm)
+{
+ int i;
+
+ /* Sanity check to be sure we only do this for OpenSubdiv draw. */
+ BLI_assert(dm->type == DM_TYPE_CCGDM);
+ BLI_assert(GMS.is_opensubdiv);
+
+ for (i = 0; i < GMS.totmat; ++i) {
+ Material *material = GMS.gmatbuf[i];
+ GPUMaterial *gpu_material;
+
+ if (material == NULL) {
+ continue;
+ }
+
+ gpu_material = GPU_material_from_blender(GMS.gscene,
+ material,
+ GMS.is_opensubdiv);
+
+ GPU_material_update_fvar_offset(gpu_material, dm);
+ }
+}
+#endif
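A hedged sketch of how GPU_draw_update_fvar_offset() is meant to be used: the CCG DerivedMesh drawing code would refresh the per-material offsets right before issuing the OpenSubdiv GPU draw. The function below is a hypothetical stand-in, not the actual ccgDM draw callback.

#ifdef WITH_OPENSUBDIV
/* Hypothetical caller: refresh face-varying offsets for the bound GLSL
 * materials, then hand the mesh over to OpenSubdiv's GPU drawing. */
static void example_draw_ccgdm_with_fvar(DerivedMesh *dm)
{
	BLI_assert(dm->type == DM_TYPE_CCGDM);
	GPU_draw_update_fvar_offset(dm);
	/* ... issue the actual OpenSubdiv GPU draw of dm here ... */
}
#endif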
diff --git a/source/blender/gpu/intern/gpu_extensions.c b/source/blender/gpu/intern/gpu_extensions.c
index b757aff4bdb..c71b827f463 100644
--- a/source/blender/gpu/intern/gpu_extensions.c
+++ b/source/blender/gpu/intern/gpu_extensions.c
@@ -61,8 +61,9 @@
# include "BLI_winstuff.h"
#endif
-#define MAX_DEFINE_LENGTH 72
-#define MAX_EXT_DEFINE_LENGTH 280
+/* TODO(sergey): Find better default values for these constants. */
+#define MAX_DEFINE_LENGTH 1024
+#define MAX_EXT_DEFINE_LENGTH 1024
/* Extensions support */
@@ -1528,8 +1529,14 @@ static void shader_print_errors(const char *task, const char *log, const char **
fprintf(stderr, "%s\n", log);
}
-static const char *gpu_shader_version(void)
+static const char *gpu_shader_version(bool use_opensubdiv)
{
+#ifdef WITH_OPENSUBDIV
+ if (use_opensubdiv) {
+ return "#version 150";
+ }
+#endif
+
/* turn on glsl 1.30 for bicubic bump mapping and ATI clipping support */
if (GLEW_VERSION_3_0 &&
(GPU_bicubic_bump_support() || GPU_type_matches(GPU_DEVICE_ATI, GPU_OS_ANY, GPU_DRIVER_ANY)))
@@ -1543,9 +1550,15 @@ static const char *gpu_shader_version(void)
static void gpu_shader_standard_extensions(char defines[MAX_EXT_DEFINE_LENGTH])
{
+#ifdef WITH_OPENSUBDIV
+ strcat(defines, "#extension GL_ARB_texture_query_lod: enable\n"
+ "#extension GL_ARB_gpu_shader5 : enable\n"
+ "#extension GL_ARB_explicit_attrib_location : require\n");
+#else
/* need this extension for high quality bump mapping */
if (GPU_bicubic_bump_support())
strcat(defines, "#extension GL_ARB_texture_query_lod: enable\n");
+#endif
if (GPU_geometry_shader_support())
strcat(defines, "#extension GL_EXT_geometry_shader4: enable\n");
@@ -1556,7 +1569,8 @@ static void gpu_shader_standard_extensions(char defines[MAX_EXT_DEFINE_LENGTH])
}
}
-static void gpu_shader_standard_defines(char defines[MAX_DEFINE_LENGTH])
+static void gpu_shader_standard_defines(bool use_opensubdiv,
+ char defines[MAX_DEFINE_LENGTH])
{
/* some useful defines to detect GPU type */
if (GPU_type_matches(GPU_DEVICE_ATI, GPU_OS_ANY, GPU_DRIVER_ANY)) {
@@ -1571,6 +1585,28 @@ static void gpu_shader_standard_defines(char defines[MAX_DEFINE_LENGTH])
if (GPU_bicubic_bump_support())
strcat(defines, "#define BUMP_BICUBIC\n");
+
+#ifdef WITH_OPENSUBDIV
+ /* TODO(sergey): Check whether we are actually compiling the
+ * shader for an OpenSubdiv mesh.
+ */
+ if (use_opensubdiv) {
+ strcat(defines, "#define USE_OPENSUBDIV\n");
+
+ /* TODO(sergey): not strictly speaking a define, but this is
+ * a global typedef which we don't have a better place to
+ * define yet.
+ */
+ strcat(defines, "struct VertexData {\n"
+ " vec4 position;\n"
+ " vec3 normal;\n"
+ " vec2 uv;"
+ "};\n");
+ }
+#else
+ UNUSED_VARS(use_opensubdiv);
+#endif
+
return;
}
@@ -1640,6 +1676,15 @@ void GPU_program_parameter_4f(GPUProgram *program, unsigned int location, float
GPUShader *GPU_shader_create(const char *vertexcode, const char *fragcode, const char *geocode, const char *libcode, const char *defines, int input, int output, int number)
{
+#ifdef WITH_OPENSUBDIV
+ /* TODO(sergey): used to add #version 150 to the geometry shader.
+ * Could safely be renamed to "use_geometry_code" since it's very
+ * likely that any geometry code will want to use GLSL 1.5.
+ */
+ bool use_opensubdiv = geocode != NULL;
+#else
+ bool use_opensubdiv = false;
+#endif
GLint status;
GLcharARB log[5000];
GLsizei length = 0;
@@ -1671,7 +1716,7 @@ GPUShader *GPU_shader_create(const char *vertexcode, const char *fragcode, const
return NULL;
}
- gpu_shader_standard_defines(standard_defines);
+ gpu_shader_standard_defines(use_opensubdiv, standard_defines);
gpu_shader_standard_extensions(standard_extensions);
if (vertexcode) {
@@ -1679,7 +1724,7 @@ GPUShader *GPU_shader_create(const char *vertexcode, const char *fragcode, const
/* custom limit, may be too small, beware */
int num_source = 0;
- source[num_source++] = gpu_shader_version();
+ source[num_source++] = gpu_shader_version(use_opensubdiv);
source[num_source++] = standard_extensions;
source[num_source++] = standard_defines;
@@ -1702,13 +1747,25 @@ GPUShader *GPU_shader_create(const char *vertexcode, const char *fragcode, const
}
if (fragcode) {
- const char *source[6];
+ const char *source[7];
int num_source = 0;
- source[num_source++] = gpu_shader_version();
+ source[num_source++] = gpu_shader_version(use_opensubdiv);
source[num_source++] = standard_extensions;
source[num_source++] = standard_defines;
+#ifdef WITH_OPENSUBDIV
+ /* TODO(sergey): Move to fragment shader source code generation. */
+ if (use_opensubdiv) {
+ source[num_source++] =
+ "#ifdef USE_OPENSUBDIV\n"
+ "in block {\n"
+ " VertexData v;\n"
+ "} inpt;\n"
+ "#endif\n";
+ }
+#endif
+
if (defines) source[num_source++] = defines;
if (libcode) source[num_source++] = libcode;
source[num_source++] = fragcode;
@@ -1732,7 +1789,7 @@ GPUShader *GPU_shader_create(const char *vertexcode, const char *fragcode, const
const char *source[6];
int num_source = 0;
- source[num_source++] = gpu_shader_version();
+ source[num_source++] = gpu_shader_version(use_opensubdiv);
source[num_source++] = standard_extensions;
source[num_source++] = standard_defines;
@@ -1753,7 +1810,9 @@ GPUShader *GPU_shader_create(const char *vertexcode, const char *fragcode, const
return NULL;
}
- GPU_shader_geometry_stage_primitive_io(shader, input, output, number);
+ if (!use_opensubdiv) {
+ GPU_shader_geometry_stage_primitive_io(shader, input, output, number);
+ }
}
@@ -1762,6 +1821,18 @@ GPUShader *GPU_shader_create(const char *vertexcode, const char *fragcode, const
glAttachObjectARB(shader->object, lib->lib);
#endif
+#ifdef WITH_OPENSUBDIV
+ if (use_opensubdiv) {
+ glBindAttribLocation(shader->object, 0, "position");
+ glBindAttribLocation(shader->object, 1, "normal");
+ GPU_shader_geometry_stage_primitive_io(shader,
+ GL_LINES_ADJACENCY_EXT,
+ GL_TRIANGLE_STRIP,
+ 4);
+
+ }
+#endif
+
glLinkProgramARB(shader->object);
glGetObjectParameterivARB(shader->object, GL_OBJECT_LINK_STATUS_ARB, &status);
if (!status) {
@@ -1775,6 +1846,15 @@ GPUShader *GPU_shader_create(const char *vertexcode, const char *fragcode, const
return NULL;
}
+#ifdef WITH_OPENSUBDIV
+ /* TODO(sergey): Find a better place for this. */
+ {
+ glProgramUniform1i(shader->object,
+ glGetUniformLocation(shader->object, "FVarDataBuffer"),
+ 31); /* GL_TEXTURE31 */
+ }
+#endif
+
return shader;
}
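The glProgramUniform1i() call above pins the FVarDataBuffer sampler to texture unit 31; the counterpart on the drawing side is binding OpenSubdiv's face-varying texture buffer to that same unit. A minimal sketch, assuming a plain GL texture-buffer object; this is not the binding code from this commit.

/* Sketch: bind the face-varying texture buffer to the unit the shader
 * samples from (GL_TEXTURE31, matching the hardcoded uniform above). */
static void example_bind_fvar_buffer(GLuint fvar_texture)
{
	glActiveTexture(GL_TEXTURE31);
	glBindTexture(GL_TEXTURE_BUFFER, fvar_texture);
	glActiveTexture(GL_TEXTURE0);  /* restore the default unit */
}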
diff --git a/source/blender/gpu/intern/gpu_material.c b/source/blender/gpu/intern/gpu_material.c
index 5db516daa26..5b647232934 100644
--- a/source/blender/gpu/intern/gpu_material.c
+++ b/source/blender/gpu/intern/gpu_material.c
@@ -119,6 +119,8 @@ struct GPUMaterial {
ListBase lamps;
bool bound;
+
+ bool is_opensubdiv;
};
struct GPULamp {
@@ -223,7 +225,8 @@ static int GPU_material_construct_end(GPUMaterial *material, const char *passnam
outlink = material->outlink;
material->pass = GPU_generate_pass(&material->nodes, outlink,
- &material->attribs, &material->builtins, material->type, passname);
+ &material->attribs, &material->builtins, material->type,
+ passname, material->is_opensubdiv);
if (!material->pass)
return 0;
@@ -1673,21 +1676,27 @@ static GPUNodeLink *gpu_material_preview_matcap(GPUMaterial *mat, Material *ma)
}
/* new solid draw mode with glsl matcaps */
-GPUMaterial *GPU_material_matcap(Scene *scene, Material *ma)
+GPUMaterial *GPU_material_matcap(Scene *scene, Material *ma, bool use_opensubdiv)
{
GPUMaterial *mat;
GPUNodeLink *outlink;
LinkData *link;
- for (link = ma->gpumaterial.first; link; link = link->next)
- if (((GPUMaterial*)link->data)->scene == scene)
- return link->data;
+ for (link = ma->gpumaterial.first; link; link = link->next) {
+ GPUMaterial *current_material = (GPUMaterial*)link->data;
+ if (current_material->scene == scene &&
+ current_material->is_opensubdiv == use_opensubdiv)
+ {
+ return current_material;
+ }
+ }
/* allocate material */
mat = GPU_material_construct_begin(ma);
mat->scene = scene;
mat->type = GPU_MATERIAL_TYPE_MESH;
-
+ mat->is_opensubdiv = use_opensubdiv;
+
if (ma->preview && ma->preview->rect[0]) {
outlink = gpu_material_preview_matcap(mat, ma);
}
@@ -1749,20 +1758,26 @@ GPUMaterial *GPU_material_world(struct Scene *scene, struct World *wo)
}
-GPUMaterial *GPU_material_from_blender(Scene *scene, Material *ma)
+GPUMaterial *GPU_material_from_blender(Scene *scene, Material *ma, bool use_opensubdiv)
{
GPUMaterial *mat;
GPUNodeLink *outlink;
LinkData *link;
- for (link = ma->gpumaterial.first; link; link = link->next)
- if (((GPUMaterial*)link->data)->scene == scene)
- return link->data;
+ for (link = ma->gpumaterial.first; link; link = link->next) {
+ GPUMaterial *current_material = (GPUMaterial*)link->data;
+ if (current_material->scene == scene &&
+ current_material->is_opensubdiv == use_opensubdiv)
+ {
+ return current_material;
+ }
+ }
/* allocate material */
mat = GPU_material_construct_begin(ma);
mat->scene = scene;
mat->type = GPU_MATERIAL_TYPE_MESH;
+ mat->is_opensubdiv = use_opensubdiv;
/* render pipeline option */
if (ma->mode & MA_TRANSP)
@@ -2250,7 +2265,8 @@ GPUShaderExport *GPU_shader_export(struct Scene *scene, struct Material *ma)
if (!GPU_glsl_support())
return NULL;
- mat = GPU_material_from_blender(scene, ma);
+ /* TODO(sergey): How to determine whether we need OSD or not here? */
+ mat = GPU_material_from_blender(scene, ma, false);
pass = (mat)? mat->pass: NULL;
if (pass && pass->fragmentcode && pass->vertexcode) {
@@ -2421,3 +2437,54 @@ void GPU_free_shader_export(GPUShaderExport *shader)
MEM_freeN(shader);
}
+#ifdef WITH_OPENSUBDIV
+void GPU_material_update_fvar_offset(GPUMaterial *gpu_material,
+ DerivedMesh *dm)
+{
+ GPUPass *pass = gpu_material->pass;
+ GPUShader *shader = (pass != NULL ? pass->shader : NULL);
+ ListBase *inputs = (pass != NULL ? &pass->inputs : NULL);
+ GPUInput *input;
+
+ if (shader == NULL) {
+ return;
+ }
+
+ GPU_shader_bind(shader);
+
+ for (input = inputs->first;
+ input != NULL;
+ input = input->next)
+ {
+ if (input->source == GPU_SOURCE_ATTRIB &&
+ input->attribtype == CD_MTFACE)
+ {
+ char name[64];
+ /* TODO(sergey): This will only work while names are
+ * consistent; we'll need to solve this properly in the future.
+ */
+ int layer_index;
+ int location;
+
+ if (input->attribname[0] != '\0') {
+ layer_index = CustomData_get_named_layer(&dm->loopData,
+ CD_MLOOPUV,
+ input->attribname);
+ }
+ else {
+ layer_index = CustomData_get_active_layer(&dm->loopData,
+ CD_MLOOPUV);
+ }
+
+ BLI_snprintf(name, sizeof(name),
+ "fvar%d_offset",
+ input->attribid);
+ location = GPU_shader_get_uniform(shader, name);
+ /* Multiply by 2 because we're offsetting U and V variables. */
+ GPU_shader_uniform_int(shader, location, layer_index * 2);
+ }
+ }
+
+ GPU_shader_unbind();
+}
+#endif
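As a small worked example of the offset computation in GPU_material_update_fvar_offset(): each UV layer contributes two floats (U and V) to the face-varying buffer, so layer 0 starts at offset 0, layer 1 at offset 2, and so on. A sketch; the helper name is made up for illustration.

/* Sketch: face-varying offsets are measured in floats, two per UV layer. */
static int example_fvar_offset_for_layer(const int layer_index)
{
	const int floats_per_uv_layer = 2;  /* U and V */
	return layer_index * floats_per_uv_layer;  /* layer 1 -> offset 2 */
}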
diff --git a/source/blender/gpu/intern/gpu_simple_shader.c b/source/blender/gpu/intern/gpu_simple_shader.c
index b439a37f3c3..89d3c0f59df 100644
--- a/source/blender/gpu/intern/gpu_simple_shader.c
+++ b/source/blender/gpu/intern/gpu_simple_shader.c
@@ -152,7 +152,8 @@ static GPUShader *gpu_simple_shader(int options)
datatoc_gpu_shader_simple_vert_glsl,
datatoc_gpu_shader_simple_frag_glsl,
NULL,
- NULL, defines, 0, 0, 0);
+ NULL,
+ defines, 0, 0, 0);
if (shader) {
/* set texture map to first texture unit */
diff --git a/source/blender/gpu/shaders/gpu_shader_material.glsl b/source/blender/gpu/shaders/gpu_shader_material.glsl
index ee413c1e4de..12f55a955e3 100644
--- a/source/blender/gpu/shaders/gpu_shader_material.glsl
+++ b/source/blender/gpu/shaders/gpu_shader_material.glsl
@@ -2593,6 +2593,7 @@ void material_preview_matcap(vec4 color, sampler2D ima, vec4 N, vec4 mask, out v
vec3 normal;
vec2 tex;
+#ifndef USE_OPENSUBDIV
/* remap to 0.0 - 1.0 range. This is done because OpenGL 2.0 clamps colors
* between shader stages and we want the full range of the normal */
normal = vec3(2.0, 2.0, 2.0) * vec3(N.x, N.y, N.z) - vec3(1.0, 1.0, 1.0);
@@ -2600,6 +2601,10 @@ void material_preview_matcap(vec4 color, sampler2D ima, vec4 N, vec4 mask, out v
normal.z = 0.0;
}
normal = normalize(normal);
+#else
+ normal = inpt.v.normal;
+ mask = vec4(1.0, 1.0, 1.0, 1.0);
+#endif
tex.x = 0.5 + 0.49 * normal.x;
tex.y = 0.5 + 0.49 * normal.y;
diff --git a/source/blender/gpu/shaders/gpu_shader_vertex.glsl b/source/blender/gpu/shaders/gpu_shader_vertex.glsl
index b5d8dcc0f35..7e332706695 100644
--- a/source/blender/gpu/shaders/gpu_shader_vertex.glsl
+++ b/source/blender/gpu/shaders/gpu_shader_vertex.glsl
@@ -1,3 +1,11 @@
+#ifdef USE_OPENSUBDIV
+in vec3 normal;
+in vec4 position;
+
+out block {
+ VertexData v;
+} outpt;
+#endif
varying vec3 varposition;
varying vec3 varnormal;
@@ -8,10 +16,15 @@ varying float gl_ClipDistance[6];
void main()
{
- vec4 co = gl_ModelViewMatrix * gl_Vertex;
+#ifndef USE_OPENSUBDIV
+ vec4 position = gl_Vertex;
+ vec3 normal = gl_Normal;
+#endif
+
+ vec4 co = gl_ModelViewMatrix * position;
varposition = co.xyz;
- varnormal = normalize(gl_NormalMatrix * gl_Normal);
+ varnormal = normalize(gl_NormalMatrix * normal);
gl_Position = gl_ProjectionMatrix * co;
#ifdef CLIP_WORKAROUND
@@ -24,3 +37,7 @@ void main()
gl_ClipVertex = co;
#endif
+#ifdef USE_OPENSUBDIV
+ outpt.v.position = co;
+ outpt.v.normal = varnormal;
+#endif
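The outpt block written above has to be matched by the geometry shader added elsewhere in this commit (gpu_shader_geometry.glsl, not shown in this diff). Roughly, its interface would look like the following, shown here as a C string in the datatoc style; the array size of 4 matches the GL_LINES_ADJACENCY input configured in gpu_extensions.c and is an assumption of this sketch.

/* Illustrative only: the geometry-shader side of the interface blocks used
 * by the vertex shader above (outpt) and the fragment shader (inpt). */
static const char example_geometry_shader_interface[] =
	"in block {\n"
	"	VertexData v;\n"
	"} inpt[4];\n"    /* one entry per vertex of the lines_adjacency patch */
	"\n"
	"out block {\n"
	"	VertexData v;\n"
	"} outpt;\n";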