git.blender.org/blender.git
author      Clément Foucault <foucault.clem@gmail.com>    2017-09-15 21:08:31 +0300
committer   Clément Foucault <foucault.clem@gmail.com>    2017-09-15 21:09:09 +0300
commit      f565d8c4ae9a2ddf0520a15e2407744aecff7cba (patch)
tree        0f454d1f19110205f0600f5401eb63bbe6eacaaf /source/blender/draw/intern
parent      208d6f28c8125dafd60bdf391a51145d6d7f46ec (diff)
Eevee: Fix T52738: Probes are black.
This fixes the crappy binding logic. Note that the current method still does a lot of useless binding; we should somehow order the textures so that reused textures are already bound most of the time.
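
What the patch does, in short: instead of handing out texture/UBO slots from a decrementing counter and rebinding everything for every shading group, it keeps per-slot bookkeeping so a texture that is already bound to a unit keeps that unit, and only the per-drawcall "in use" flags are cleared between shading groups. Below is a minimal standalone sketch of that idea, not the Blender API: MAX_SLOTS, bind_texture_slot(), release_slots() and the integer texture ids are made up for illustration, and the real code detects an existing binding with GPU_texture_bound_number() instead of scanning an array.

#include <stdio.h>
#include <stdbool.h>
#include <string.h>

#define MAX_SLOTS 8                    /* stand-in for GPU_max_textures() */

static int  bound_tex[MAX_SLOTS];      /* texture id occupying each slot, -1 = empty */
static bool slot_in_use[MAX_SLOTS];    /* slots claimed by the current shading group */

/* Reset per-drawcall usage but keep the cached bindings so they can be reused. */
static void release_slots(void)
{
	memset(slot_in_use, 0, sizeof(slot_in_use));
}

/* Return the slot a texture ends up in, reusing an existing binding when possible. */
static int bind_texture_slot(int tex_id)
{
	/* Already bound by a previous drawcall? Just mark its slot as in use. */
	for (int i = 0; i < MAX_SLOTS; i++) {
		if (bound_tex[i] == tex_id) {
			slot_in_use[i] = true;
			return i;
		}
	}
	/* Otherwise take the first slot this drawcall has not claimed yet. */
	for (int i = 0; i < MAX_SLOTS; i++) {
		if (!slot_in_use[i]) {
			bound_tex[i] = tex_id;     /* the real code calls GPU_texture_bind(tex, i) here */
			slot_in_use[i] = true;
			return i;
		}
	}
	printf("Not enough texture slots!\n");
	return -1;
}

int main(void)
{
	memset(bound_tex, -1, sizeof(bound_tex));   /* all slots start empty */

	release_slots();
	printf("tex 42 -> slot %d\n", bind_texture_slot(42));  /* takes slot 0 */
	printf("tex 7  -> slot %d\n", bind_texture_slot(7));   /* takes slot 1 */

	release_slots();                                        /* next shading group */
	printf("tex 42 -> slot %d\n", bind_texture_slot(42));  /* reused: still slot 0, no rebind */
	return 0;
}

The remaining "useless binding" mentioned above presumably comes from the second loop: a slot that is free for the current group may still hold a texture a later group wanted to reuse, hence the suggestion to order textures so shared ones keep stable slots.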
Diffstat (limited to 'source/blender/draw/intern')
-rw-r--r--   source/blender/draw/intern/draw_manager.c   89
1 file changed, 61 insertions(+), 28 deletions(-)
diff --git a/source/blender/draw/intern/draw_manager.c b/source/blender/draw/intern/draw_manager.c
index 3072eb50429..724d63c2921 100644
--- a/source/blender/draw/intern/draw_manager.c
+++ b/source/blender/draw/intern/draw_manager.c
@@ -364,8 +364,8 @@ static struct DRWResourceState {
GPUTexture **bound_texs;
GPUUniformBuffer **bound_ubos;
- int bind_tex_inc;
- int bind_ubo_inc;
+ bool *bound_tex_slots;
+ bool *bound_ubo_slots;
} RST = {NULL};
static struct DRWMatrixOveride {
@@ -1833,35 +1833,55 @@ static void draw_geometry(DRWShadingGroup *shgroup, Gwn_Batch *geom, const float
draw_geometry_execute(shgroup, geom);
}
-static void draw_bind_texture(GPUTexture *tex)
+static void bind_texture(GPUTexture *tex)
{
- if (RST.bound_texs[RST.bind_tex_inc] != tex) {
- if (RST.bind_tex_inc >= 0) {
- if (RST.bound_texs[RST.bind_tex_inc] != NULL) {
- GPU_texture_unbind(RST.bound_texs[RST.bind_tex_inc]);
+ int bind_num = GPU_texture_bound_number(tex);
+ if (bind_num == -1) {
+ for (int i = 0; i < GPU_max_textures(); ++i) {
+ if (RST.bound_tex_slots[i] == false) {
+ GPU_texture_bind(tex, i);
+ RST.bound_texs[i] = tex;
+ RST.bound_tex_slots[i] = true;
+ return;
}
- RST.bound_texs[RST.bind_tex_inc] = tex;
- GPU_texture_bind(tex, RST.bind_tex_inc);
- }
- else {
- printf("Not enough texture slots! Reduce number of textures used by your shader.\n");
}
+
+ printf("Not enough texture slots! Reduce number of textures used by your shader.\n");
}
- RST.bind_tex_inc--;
+ RST.bound_tex_slots[bind_num] = true;
}
-static void draw_bind_ubo(GPUUniformBuffer *ubo)
+static void bind_ubo(GPUUniformBuffer *ubo)
{
- if (RST.bound_ubos[RST.bind_ubo_inc] != ubo) {
- if (RST.bind_ubo_inc >= 0) {
- RST.bound_ubos[RST.bind_ubo_inc] = ubo;
- GPU_uniformbuffer_bind(ubo, RST.bind_ubo_inc);
- }
- else {
- printf("Not enough ubo slots!\n");
+ int bind_num = GPU_uniformbuffer_bindpoint(ubo);
+ if (bind_num == -1) {
+ for (int i = 0; i < GPU_max_ubo_binds(); ++i) {
+ if (RST.bound_ubo_slots[i] == false) {
+ GPU_uniformbuffer_bind(ubo, i);
+ RST.bound_ubos[i] = ubo;
+ RST.bound_ubo_slots[i] = true;
+ return;
+ }
}
+
+ /* This does not depend on user input.
+ * It is our responsibility to make sure there are enough slots. */
+ BLI_assert(0 && "Not enough ubo slots! This should not happen!\n");
+
+ /* printf so user can report bad behaviour */
+ printf("Not enough ubo slots! This should not happen!\n");
}
- RST.bind_ubo_inc--;
+ RST.bound_ubo_slots[bind_num] = true;
+}
+
+static void release_texture_slots(void)
+{
+ memset(RST.bound_tex_slots, 0x0, sizeof(bool) * GPU_max_textures());
+}
+
+static void release_ubo_slots(void)
+{
+ memset(RST.bound_ubo_slots, 0x0, sizeof(bool) * GPU_max_ubo_binds());
}
static void draw_shgroup(DRWShadingGroup *shgroup, DRWState pass_state)
@@ -1869,8 +1889,6 @@ static void draw_shgroup(DRWShadingGroup *shgroup, DRWState pass_state)
BLI_assert(shgroup->shader);
BLI_assert(shgroup->interface);
- RST.bind_tex_inc = GPU_max_textures() - 1; /* Reset texture counter. */
- RST.bind_ubo_inc = GPU_max_ubo_binds() - 1; /* Reset UBO counter. */
DRWInterface *interface = shgroup->interface;
GPUTexture *tex;
GPUUniformBuffer *ubo;
@@ -1889,8 +1907,12 @@ static void draw_shgroup(DRWShadingGroup *shgroup, DRWState pass_state)
shgroup_dynamic_batch_from_calls(shgroup);
}
+ release_texture_slots();
+ release_ubo_slots();
+
DRW_state_set((pass_state & shgroup->state_extra_disable) | shgroup->state_extra);
+
/* Binding Uniform */
/* Don't check anything, Interface should already contain as few uniforms as possible */
for (DRWUniform *uni = interface->uniforms.first; uni; uni = uni->next) {
@@ -1919,7 +1941,7 @@ static void draw_shgroup(DRWShadingGroup *shgroup, DRWState pass_state)
case DRW_UNIFORM_TEXTURE:
tex = (GPUTexture *)uni->value;
BLI_assert(tex);
- draw_bind_texture(tex);
+ bind_texture(tex);
GPU_shader_uniform_texture(shgroup->shader, uni->location, tex);
break;
case DRW_UNIFORM_BUFFER:
@@ -1928,12 +1950,12 @@ static void draw_shgroup(DRWShadingGroup *shgroup, DRWState pass_state)
}
tex = *((GPUTexture **)uni->value);
BLI_assert(tex);
- draw_bind_texture(tex);
+ bind_texture(tex);
GPU_shader_uniform_texture(shgroup->shader, uni->location, tex);
break;
case DRW_UNIFORM_BLOCK:
ubo = (GPUUniformBuffer *)uni->value;
- draw_bind_ubo(ubo);
+ bind_ubo(ubo);
GPU_shader_uniform_buffer(shgroup->shader, uni->location, ubo);
break;
}
@@ -2054,7 +2076,10 @@ static void DRW_draw_pass_ex(DRWPass *pass, DRWShadingGroup *start_group, DRWSha
/* Clear Bound Ubos */
for (int i = 0; i < GPU_max_ubo_binds(); i++) {
- RST.bound_ubos[i] = NULL;
+ if (RST.bound_ubos[i] != NULL) {
+ GPU_uniformbuffer_unbind(RST.bound_ubos[i]);
+ RST.bound_ubos[i] = NULL;
+ }
}
if (DST.shader) {
@@ -2527,11 +2552,17 @@ static void DRW_viewport_var_init(void)
if (RST.bound_texs == NULL) {
RST.bound_texs = MEM_callocN(sizeof(GPUTexture *) * GPU_max_textures(), "Bound GPUTexture refs");
}
+ if (RST.bound_tex_slots == NULL) {
+ RST.bound_tex_slots = MEM_callocN(sizeof(bool) * GPU_max_textures(), "Bound Texture Slots");
+ }
/* Alloc array of ubos reference. */
if (RST.bound_ubos == NULL) {
RST.bound_ubos = MEM_callocN(sizeof(GPUUniformBuffer *) * GPU_max_ubo_binds(), "Bound GPUUniformBuffer refs");
}
+ if (RST.bound_ubo_slots == NULL) {
+ RST.bound_ubo_slots = MEM_callocN(sizeof(bool) * GPU_max_ubo_binds(), "Bound UBO Slots");
+ }
}
void DRW_viewport_matrix_get(float mat[4][4], DRWViewportMatrixType type)
@@ -3671,6 +3702,8 @@ void DRW_engines_free(void)
MEM_SAFE_FREE(RST.bound_texs);
MEM_SAFE_FREE(RST.bound_ubos);
+ MEM_SAFE_FREE(RST.bound_tex_slots);
+ MEM_SAFE_FREE(RST.bound_ubo_slots);
#ifdef WITH_CLAY_ENGINE
BLI_remlink(&R_engines, &DRW_engine_viewport_clay_type);