git.blender.org/blender.git
author    Sergey Sharybin <sergey.vfx@gmail.com>    2015-07-20 17:08:06 +0300
committer Sergey Sharybin <sergey.vfx@gmail.com>    2015-07-20 23:29:26 +0300
commit    3d364896725db8336d785ba6cf977b62c0f2c0ce (patch)
tree      b189cdc4956b557d19b0c2ea976c6f48affa1ed2 /source/blender/gpu/intern/gpu_material.c
parent    2466c4f8cebd3977f29524d79050feff44b40fff (diff)
OpenSubdiv: Commit of OpenSubdiv integration into Blender
This commit contains all the remaining parts needed for the initial integration of OpenSubdiv into Blender's subdivision surface code. It includes both GPU and CPU backends, which work in the following way:

- When the SubSurf modifier is the last one in the modifier stack, the GPU pipeline of OpenSubdiv is used, making viewport performance as fast as possible. This also requires a graphics card with GLSL 1.5 support; if that requirement is not met, the GPU pipeline is not used at all.
- If SubSurf is not the last modifier, or if the DerivedMesh is being evaluated for rendering, the CPU limit evaluation API from OpenSubdiv is used. This only replaces the legacy evaluation code from CCGSubSurf_legacy, but keeps the CCG structures exactly the same as they have been for ages now.

This integration is fully guarded by ifdefs and not enabled by default, because several TODOs need to be solved first:

- Face-varying data interpolation is not cleanly implemented for the GPU in OpenSubdiv 3.0, and it is not implemented for the limit evaluation API at all. This basically means we'll have a really hard time supporting UVs.
- Limit evaluation only works with adaptively subdivided meshes so far, which basically means all the points of the CCG are pushed to the limit. This gives a different result from the old code.
- There are some serious optimizations possible in the topology refiner creation, which would speed up initial OpenSubdiv mesh creation.
- There are some hardcoded assumptions in the GPU and DerivedMesh areas which could be generalized. That's something where Antony and Campbell can help, making the code structured in a way that is reusable by all planned viewport projects.
- There are also some workarounds in the dependency graph to make sure OpenGL buffers are only freed from the main thread.

Those who want to experiment with this code should grab the dev branch (NOT master) from https://github.com/Nazg-Gul/OpenSubdiv/tree/dev; it contains some patches which we're working on getting into upstream.
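As a hedged illustration of the backend selection described above (this is not code from the commit; the helpers subsurf_is_last_modifier() and gpu_supports_glsl_150() are hypothetical, and only the GPU-vs-CPU split, the render-time CPU fallback and the GLSL 1.5 requirement come from the message):

static bool subsurf_can_use_gpu_backend(const Object *ob, bool for_render)
{
	if (for_render) {
		/* Render-time DerivedMesh evaluation always goes through the
		 * CPU limit evaluation API. */
		return false;
	}
	if (!subsurf_is_last_modifier(ob)) {
		/* Hypothetical stack check: only a trailing SubSurf modifier can
		 * feed the OpenSubdiv GPU pipeline directly. */
		return false;
	}
	/* Hypothetical capability check: without GLSL 1.5 support the GPU
	 * pipeline is not used at all. */
	return gpu_supports_glsl_150();
}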
Diffstat (limited to 'source/blender/gpu/intern/gpu_material.c')
-rw-r--r--  source/blender/gpu/intern/gpu_material.c  |  89
1 file changed, 78 insertions(+), 11 deletions(-)
diff --git a/source/blender/gpu/intern/gpu_material.c b/source/blender/gpu/intern/gpu_material.c
index 5db516daa26..5b647232934 100644
--- a/source/blender/gpu/intern/gpu_material.c
+++ b/source/blender/gpu/intern/gpu_material.c
@@ -119,6 +119,8 @@ struct GPUMaterial {
ListBase lamps;
bool bound;
+
+ bool is_opensubdiv;
};
struct GPULamp {
@@ -223,7 +225,8 @@ static int GPU_material_construct_end(GPUMaterial *material, const char *passnam
outlink = material->outlink;
material->pass = GPU_generate_pass(&material->nodes, outlink,
- &material->attribs, &material->builtins, material->type, passname);
+ &material->attribs, &material->builtins, material->type,
+ passname, material->is_opensubdiv);
if (!material->pass)
return 0;
@@ -1673,21 +1676,27 @@ static GPUNodeLink *gpu_material_preview_matcap(GPUMaterial *mat, Material *ma)
}
/* new solid draw mode with glsl matcaps */
-GPUMaterial *GPU_material_matcap(Scene *scene, Material *ma)
+GPUMaterial *GPU_material_matcap(Scene *scene, Material *ma, bool use_opensubdiv)
{
GPUMaterial *mat;
GPUNodeLink *outlink;
LinkData *link;
- for (link = ma->gpumaterial.first; link; link = link->next)
- if (((GPUMaterial*)link->data)->scene == scene)
- return link->data;
+ for (link = ma->gpumaterial.first; link; link = link->next) {
+ GPUMaterial *current_material = (GPUMaterial*)link->data;
+ if (current_material->scene == scene &&
+ current_material->is_opensubdiv == use_opensubdiv)
+ {
+ return current_material;
+ }
+ }
/* allocate material */
mat = GPU_material_construct_begin(ma);
mat->scene = scene;
mat->type = GPU_MATERIAL_TYPE_MESH;
-
+ mat->is_opensubdiv = use_opensubdiv;
+
if (ma->preview && ma->preview->rect[0]) {
outlink = gpu_material_preview_matcap(mat, ma);
}
@@ -1749,20 +1758,26 @@ GPUMaterial *GPU_material_world(struct Scene *scene, struct World *wo)
}
-GPUMaterial *GPU_material_from_blender(Scene *scene, Material *ma)
+GPUMaterial *GPU_material_from_blender(Scene *scene, Material *ma, bool use_opensubdiv)
{
GPUMaterial *mat;
GPUNodeLink *outlink;
LinkData *link;
- for (link = ma->gpumaterial.first; link; link = link->next)
- if (((GPUMaterial*)link->data)->scene == scene)
- return link->data;
+ for (link = ma->gpumaterial.first; link; link = link->next) {
+ GPUMaterial *current_material = (GPUMaterial*)link->data;
+ if (current_material->scene == scene &&
+ current_material->is_opensubdiv == use_opensubdiv)
+ {
+ return current_material;
+ }
+ }
/* allocate material */
mat = GPU_material_construct_begin(ma);
mat->scene = scene;
mat->type = GPU_MATERIAL_TYPE_MESH;
+ mat->is_opensubdiv = use_opensubdiv;
/* render pipeline option */
if (ma->mode & MA_TRANSP)
@@ -2250,7 +2265,8 @@ GPUShaderExport *GPU_shader_export(struct Scene *scene, struct Material *ma)
if (!GPU_glsl_support())
return NULL;
- mat = GPU_material_from_blender(scene, ma);
+ /* TODO(sergey): How to determine whether we need OSD or not here? */
+ mat = GPU_material_from_blender(scene, ma, false);
pass = (mat)? mat->pass: NULL;
if (pass && pass->fragmentcode && pass->vertexcode) {
@@ -2421,3 +2437,54 @@ void GPU_free_shader_export(GPUShaderExport *shader)
MEM_freeN(shader);
}
+#ifdef WITH_OPENSUBDIV
+void GPU_material_update_fvar_offset(GPUMaterial *gpu_material,
+                                     DerivedMesh *dm)
+{
+	GPUPass *pass = gpu_material->pass;
+	GPUShader *shader = (pass != NULL ? pass->shader : NULL);
+	ListBase *inputs = (pass != NULL ? &pass->inputs : NULL);
+	GPUInput *input;
+
+	if (shader == NULL) {
+		return;
+	}
+
+	GPU_shader_bind(shader);
+
+	for (input = inputs->first;
+	     input != NULL;
+	     input = input->next)
+	{
+		if (input->source == GPU_SOURCE_ATTRIB &&
+		    input->attribtype == CD_MTFACE)
+		{
+			char name[64];
+			/* TODO(sergey): This will only work for as long as names are
+			 * consistent; we'll need to solve this somehow in the future.
+			 */
+			int layer_index;
+			int location;
+
+			if (input->attribname[0] != '\0') {
+				layer_index = CustomData_get_named_layer(&dm->loopData,
+				                                         CD_MLOOPUV,
+				                                         input->attribname);
+			}
+			else {
+				layer_index = CustomData_get_active_layer(&dm->loopData,
+				                                          CD_MLOOPUV);
+			}
+
+			BLI_snprintf(name, sizeof(name),
+			             "fvar%d_offset",
+			             input->attribid);
+			location = GPU_shader_get_uniform(shader, name);
+			/* Multiply by 2 because we're offsetting the U and V variables. */
+			GPU_shader_uniform_int(shader, location, layer_index * 2);
+		}
+	}
+
+	GPU_shader_unbind();
+}
+#endif
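For callers of the updated API, a minimal hedged sketch follows. The function draw_opensubdiv_material() and the decision to pass true are illustrative assumptions; only GPU_material_from_blender() with its new third argument and GPU_material_update_fvar_offset() come from the diff above:

#ifdef WITH_OPENSUBDIV
static void draw_opensubdiv_material(Scene *scene, Material *ma, DerivedMesh *dm)
{
	/* Request the OpenSubdiv variant of the material; the per-material cache
	 * now keys on both the scene and the is_opensubdiv flag, so this does not
	 * clash with the regular (non-OSD) GPUMaterial. */
	GPUMaterial *gpu_material = GPU_material_from_blender(scene, ma, true);

	if (gpu_material != NULL) {
		/* Refresh the fvar<N>_offset uniforms so every CD_MTFACE attribute
		 * samples the right face-varying UV channel of this DerivedMesh. */
		GPU_material_update_fvar_offset(gpu_material, dm);

		/* ... bind the material and issue the OpenSubdiv draw calls here ... */
	}
}
#endif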