git.blender.org/blender.git
-rw-r--r--  source/blender/draw/intern/draw_manager_shader.c | 193
-rw-r--r--  source/blender/gpu/CMakeLists.txt                |   1
-rw-r--r--  source/blender/gpu/GPU_material.h                |   3
-rw-r--r--  source/blender/gpu/intern/gpu_material.c         |  24
4 files changed, 85 insertions(+), 136 deletions(-)
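
In short: the patch removes the DRWDeferredShader wrapper (and its separate "conclude" queue) and stores GPUMaterial pointers directly in the job queue as generic LinkData nodes, using the material status (GPU_MAT_QUEUED vs. GPU_MAT_CREATED) to track queue membership. Material lifetime across threads is handled by a new atomic reference count on GPUMaterial, exposed as GPU_material_acquire()/GPU_material_release(), so DRW_deferred_shader_remove() no longer needs a mutex to wait out an in-flight compilation; a non-deferred request for a queued material simply polls with PIL_sleep_ms() until the job thread finishes it.
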
diff --git a/source/blender/draw/intern/draw_manager_shader.c b/source/blender/draw/intern/draw_manager_shader.c
index 9f8a68f81f6..facd9eecf8e 100644
--- a/source/blender/draw/intern/draw_manager_shader.c
+++ b/source/blender/draw/intern/draw_manager_shader.c
@@ -9,6 +9,8 @@
#include "DNA_object_types.h"
#include "DNA_world_types.h"
+#include "PIL_time.h"
+
#include "BLI_dynstr.h"
#include "BLI_listbase.h"
#include "BLI_string_utils.h"
@@ -48,48 +50,22 @@ extern char datatoc_common_fullscreen_vert_glsl[];
*
* \{ */
-typedef struct DRWDeferredShader {
- struct DRWDeferredShader *prev, *next;
-
- GPUMaterial *mat;
-} DRWDeferredShader;
-
typedef struct DRWShaderCompiler {
- ListBase queue; /* DRWDeferredShader */
- ListBase queue_conclude; /* DRWDeferredShader */
+ ListBase queue; /* GPUMaterial */
SpinLock list_lock;
- DRWDeferredShader *mat_compiling;
- ThreadMutex compilation_lock;
-
void *gl_context;
GPUContext *gpu_context;
bool own_context;
-
- int shaders_done; /* To compute progress. */
} DRWShaderCompiler;
-static void drw_deferred_shader_free(DRWDeferredShader *dsh)
-{
- /* Make sure it is not queued before freeing. */
- MEM_freeN(dsh);
-}
-
-static void drw_deferred_shader_queue_free(ListBase *queue)
-{
- DRWDeferredShader *dsh;
- while ((dsh = BLI_pophead(queue))) {
- drw_deferred_shader_free(dsh);
- }
-}
-
static void drw_deferred_shader_compilation_exec(
void *custom_data,
/* Cannot be const, this function implements wm_jobs_start_callback.
* NOLINTNEXTLINE: readability-non-const-parameter. */
short *stop,
- short *do_update,
- float *progress)
+ short *UNUSED(do_update),
+ float *UNUSED(progress))
{
GPU_render_begin();
DRWShaderCompiler *comp = (DRWShaderCompiler *)custom_data;
@@ -109,50 +85,36 @@ static void drw_deferred_shader_compilation_exec(
GPU_context_active_set(gpu_context);
while (true) {
- BLI_spin_lock(&comp->list_lock);
-
if (*stop != 0) {
/* We don't want user to be able to cancel the compilation
* but wm can kill the task if we are closing blender. */
- BLI_spin_unlock(&comp->list_lock);
break;
}
+ BLI_spin_lock(&comp->list_lock);
/* Pop tail because it will be less likely to lock the main thread
* if all GPUMaterials are to be freed (see DRW_deferred_shader_remove()). */
- comp->mat_compiling = BLI_poptail(&comp->queue);
- if (comp->mat_compiling == NULL) {
- /* No more Shader to compile. */
- BLI_spin_unlock(&comp->list_lock);
- break;
+ LinkData *link = (LinkData *)BLI_poptail(&comp->queue);
+ GPUMaterial *mat = link ? (GPUMaterial *)link->data : NULL;
+ if (mat) {
+ /* Avoid another thread freeing the material mid-compilation. */
+ GPU_material_acquire(mat);
}
-
- comp->shaders_done++;
- int total = BLI_listbase_count(&comp->queue) + comp->shaders_done;
-
- BLI_mutex_lock(&comp->compilation_lock);
BLI_spin_unlock(&comp->list_lock);
- /* Do the compilation. */
- GPU_material_compile(comp->mat_compiling->mat);
-
- *progress = (float)comp->shaders_done / (float)total;
- *do_update = true;
+ if (mat) {
+ /* Do the compilation. */
+ GPU_material_compile(mat);
+ GPU_material_release(mat);
+ MEM_freeN(link);
+ }
+ else {
+ break;
+ }
if (GPU_type_matches_ex(GPU_DEVICE_ANY, GPU_OS_ANY, GPU_DRIVER_ANY, GPU_BACKEND_OPENGL)) {
GPU_flush();
}
- BLI_mutex_unlock(&comp->compilation_lock);
-
- BLI_spin_lock(&comp->list_lock);
- if (GPU_material_status(comp->mat_compiling->mat) == GPU_MAT_QUEUED) {
- BLI_addtail(&comp->queue_conclude, comp->mat_compiling);
- }
- else {
- drw_deferred_shader_free(comp->mat_compiling);
- }
- comp->mat_compiling = NULL;
- BLI_spin_unlock(&comp->list_lock);
}
GPU_context_active_set(NULL);
@@ -167,21 +129,9 @@ static void drw_deferred_shader_compilation_free(void *custom_data)
{
DRWShaderCompiler *comp = (DRWShaderCompiler *)custom_data;
- drw_deferred_shader_queue_free(&comp->queue);
-
- if (!BLI_listbase_is_empty(&comp->queue_conclude)) {
- /* Compile the shaders in the context they will be deleted. */
- DRW_opengl_context_enable_ex(false);
- DRWDeferredShader *mat_conclude;
- while ((mat_conclude = BLI_poptail(&comp->queue_conclude))) {
- GPU_material_compile(mat_conclude->mat);
- drw_deferred_shader_free(mat_conclude);
- }
- DRW_opengl_context_disable_ex(true);
- }
-
- BLI_spin_end(&comp->list_lock);
- BLI_mutex_end(&comp->compilation_lock);
+ BLI_spin_lock(&comp->list_lock);
+ BLI_freelistN(&comp->queue);
+ BLI_spin_unlock(&comp->list_lock);
if (comp->own_context) {
/* Only destroy if the job owns the context. */
@@ -198,40 +148,48 @@ static void drw_deferred_shader_compilation_free(void *custom_data)
static void drw_deferred_shader_add(GPUMaterial *mat, bool deferred)
{
+ if (ELEM(GPU_material_status(mat), GPU_MAT_SUCCESS, GPU_MAT_FAILED)) {
+ return;
+ }
/* Do not defer the compilation if we are rendering for image.
* deferred rendering is only possible when `evil_C` is available */
- if (DST.draw_ctx.evil_C == NULL || DRW_state_is_image_render() || !USE_DEFERRED_COMPILATION ||
- !deferred) {
- /* Double checking that this GPUMaterial is not going to be
- * compiled by another thread. */
+ if (DST.draw_ctx.evil_C == NULL || DRW_state_is_image_render() || !USE_DEFERRED_COMPILATION) {
+ deferred = false;
+ }
+
+ if (!deferred) {
DRW_deferred_shader_remove(mat);
- GPU_material_compile(mat);
+ /* The material could already be compiling; wait for the compilation to finish. */
+ while (GPU_material_status(mat) == GPU_MAT_QUEUED) {
+ PIL_sleep_ms(20);
+ }
+ if (GPU_material_status(mat) == GPU_MAT_CREATED) {
+ GPU_material_compile(mat);
+ }
return;
}
- const bool use_main_context = GPU_use_main_context_workaround();
- const bool job_own_context = !use_main_context;
- DRWDeferredShader *dsh = MEM_callocN(sizeof(DRWDeferredShader), "Deferred Shader");
+ /* Don't add material to the queue twice. */
+ if (GPU_material_status(mat) == GPU_MAT_QUEUED) {
+ return;
+ }
- dsh->mat = mat;
+ const bool use_main_context = GPU_use_main_context_workaround();
+ const bool job_own_context = !use_main_context;
BLI_assert(DST.draw_ctx.evil_C);
wmWindowManager *wm = CTX_wm_manager(DST.draw_ctx.evil_C);
wmWindow *win = CTX_wm_window(DST.draw_ctx.evil_C);
- /* Use original scene ID since this is what the jobs template tests for. */
- Scene *scene = (Scene *)DEG_get_original_id(&DST.draw_ctx.scene->id);
-
/* Get the running job or a new one if none is running. Can only have one job per type & owner.
*/
wmJob *wm_job = WM_jobs_get(
- wm, win, scene, "Shaders Compilation", WM_JOB_PROGRESS, WM_JOB_TYPE_SHADER_COMPILATION);
+ wm, win, wm, "Shaders Compilation", 0, WM_JOB_TYPE_SHADER_COMPILATION);
DRWShaderCompiler *old_comp = (DRWShaderCompiler *)WM_jobs_customdata_get(wm_job);
DRWShaderCompiler *comp = MEM_callocN(sizeof(DRWShaderCompiler), "DRWShaderCompiler");
BLI_spin_init(&comp->list_lock);
- BLI_mutex_init(&comp->compilation_lock);
if (old_comp) {
BLI_spin_lock(&old_comp->list_lock);
@@ -246,7 +204,9 @@ static void drw_deferred_shader_add(GPUMaterial *mat, bool deferred)
}
}
- BLI_addtail(&comp->queue, dsh);
+ GPU_material_status_set(mat, GPU_MAT_QUEUED);
+ LinkData *node = BLI_genericNodeN(mat);
+ BLI_addtail(&comp->queue, node);
/* Create only one context. */
if (comp->gl_context == NULL) {
@@ -277,38 +237,26 @@ static void drw_deferred_shader_add(GPUMaterial *mat, bool deferred)
void DRW_deferred_shader_remove(GPUMaterial *mat)
{
- Scene *scene = GPU_material_scene(mat);
-
for (wmWindowManager *wm = G_MAIN->wm.first; wm; wm = wm->id.next) {
- if (WM_jobs_test(wm, scene, WM_JOB_TYPE_SHADER_COMPILATION) == false) {
+ if (WM_jobs_test(wm, wm, WM_JOB_TYPE_SHADER_COMPILATION) == false) {
/* No job running, do not create a new one by calling WM_jobs_get. */
continue;
}
LISTBASE_FOREACH (wmWindow *, win, &wm->windows) {
wmJob *wm_job = WM_jobs_get(
- wm, win, scene, "Shaders Compilation", WM_JOB_PROGRESS, WM_JOB_TYPE_SHADER_COMPILATION);
+ wm, win, wm, "Shaders Compilation", 0, WM_JOB_TYPE_SHADER_COMPILATION);
DRWShaderCompiler *comp = (DRWShaderCompiler *)WM_jobs_customdata_get(wm_job);
if (comp != NULL) {
BLI_spin_lock(&comp->list_lock);
- DRWDeferredShader *dsh;
- dsh = (DRWDeferredShader *)BLI_findptr(
- &comp->queue, mat, offsetof(DRWDeferredShader, mat));
- if (dsh) {
- BLI_remlink(&comp->queue, dsh);
- }
-
- /* Wait for compilation to finish */
- if ((comp->mat_compiling != NULL) && (comp->mat_compiling->mat == mat)) {
- BLI_mutex_lock(&comp->compilation_lock);
- BLI_mutex_unlock(&comp->compilation_lock);
+ LinkData *link = (LinkData *)BLI_findptr(&comp->queue, mat, offsetof(LinkData, data));
+ if (link) {
+ BLI_remlink(&comp->queue, link);
+ GPU_material_status_set(link->data, GPU_MAT_CREATED);
}
-
BLI_spin_unlock(&comp->list_lock);
- if (dsh) {
- drw_deferred_shader_free(dsh);
- }
+ MEM_SAFE_FREE(link); /* `link` may be NULL if the material was not queued. */
}
}
}
@@ -436,20 +384,12 @@ GPUMaterial *DRW_shader_from_world(World *wo,
false,
callback,
thunk);
- if (!DRW_state_is_image_render() && deferred && GPU_material_status(mat) == GPU_MAT_QUEUED) {
- /* Shader has been already queued. */
- return mat;
- }
-
- if (GPU_material_status(mat) == GPU_MAT_CREATED) {
- GPU_material_status_set(mat, GPU_MAT_QUEUED);
- drw_deferred_shader_add(mat, deferred);
+ if (DRW_state_is_image_render()) {
+ /* Do not defer when doing an image render. */
+ deferred = false;
}
- if (!deferred && GPU_material_status(mat) == GPU_MAT_QUEUED) {
- /* Force compilation for shaders already queued. */
- drw_deferred_shader_add(mat, false);
- }
+ drw_deferred_shader_add(mat, deferred);
return mat;
}
@@ -478,20 +418,7 @@ GPUMaterial *DRW_shader_from_material(Material *ma,
deferred = false;
}
- if (deferred && GPU_material_status(mat) == GPU_MAT_QUEUED) {
- /* Shader has been already queued. */
- return mat;
- }
-
- if (GPU_material_status(mat) == GPU_MAT_CREATED) {
- GPU_material_status_set(mat, GPU_MAT_QUEUED);
- drw_deferred_shader_add(mat, deferred);
- }
-
- if (!deferred && GPU_material_status(mat) == GPU_MAT_QUEUED) {
- /* Force compilation for shaders already queued. */
- drw_deferred_shader_add(mat, false);
- }
+ drw_deferred_shader_add(mat, deferred);
return mat;
}
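
The core of the new scheme is the worker loop above: pop from the tail under the spinlock, take a reference before unlocking, compile without any lock held, then release. Below is a minimal, self-contained sketch of that pattern; all names (Item, Link, Queue, compile_worker) are hypothetical stand-ins, with a pthread mutex in place of BLI_spin_lock and a C11 atomic counter in place of Blender's atomic_ops.

#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

typedef struct Item {
  atomic_uint refcount; /* Starts at 1: the owner's reference. */
  int id;
} Item;

typedef struct Link {
  struct Link *prev, *next;
  Item *data; /* Mirrors Blender's LinkData.data. */
} Link;

typedef struct Queue {
  pthread_mutex_t lock; /* Stands in for DRWShaderCompiler.list_lock. */
  Link *first, *last;
} Queue;

static void queue_addtail(Queue *q, Link *link)
{
  link->prev = q->last;
  link->next = NULL;
  if (q->last) { q->last->next = link; } else { q->first = link; }
  q->last = link;
}

static Link *queue_poptail(Queue *q)
{
  Link *link = q->last;
  if (link) {
    q->last = link->prev;
    if (q->last) { q->last->next = NULL; } else { q->first = NULL; }
  }
  return link;
}

static void item_release(Item *item)
{
  /* Decrement; the last reference out performs the free, as
   * gpu_material_free_single() does in the gpu_material.c hunk below. */
  if (atomic_fetch_sub(&item->refcount, 1) == 1) {
    free(item);
  }
}

static void *compile_worker(void *arg)
{
  Queue *q = (Queue *)arg;
  while (true) {
    pthread_mutex_lock(&q->lock);
    Link *link = queue_poptail(q);
    Item *item = link ? link->data : NULL;
    if (item) {
      /* Keep the item alive across the unlocked section,
       * like GPU_material_acquire(). */
      atomic_fetch_add(&item->refcount, 1);
    }
    pthread_mutex_unlock(&q->lock);

    if (!item) {
      break; /* Queue drained: the job is done. */
    }
    printf("compiling item %d\n", item->id); /* The GPU_material_compile() step. */
    item_release(item);                      /* Like GPU_material_release(). */
    free(link);
  }
  return NULL;
}

int main(void)
{
  Queue q = {.lock = PTHREAD_MUTEX_INITIALIZER};
  Item *items[4];
  for (int i = 0; i < 4; i++) {
    items[i] = calloc(1, sizeof(Item));
    atomic_init(&items[i]->refcount, 1);
    items[i]->id = i;
    Link *link = calloc(1, sizeof(Link));
    link->data = items[i];
    queue_addtail(&q, link);
  }
  pthread_t thread;
  pthread_create(&thread, NULL, compile_worker, &q);
  pthread_join(thread, NULL);
  for (int i = 0; i < 4; i++) {
    item_release(items[i]); /* Owner's release; the items are freed here. */
  }
  return 0;
}
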
diff --git a/source/blender/gpu/CMakeLists.txt b/source/blender/gpu/CMakeLists.txt
index 49cfad9d89f..042b79565ef 100644
--- a/source/blender/gpu/CMakeLists.txt
+++ b/source/blender/gpu/CMakeLists.txt
@@ -29,6 +29,7 @@ set(INC
../nodes
../nodes/intern
+ ../../../intern/atomic
../../../intern/clog
../../../intern/ghost
../../../intern/glew-mx
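
The new include path makes intern/atomic's atomic_ops.h visible to the GPU module; gpu_material.c includes it below for the new reference count.
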
diff --git a/source/blender/gpu/GPU_material.h b/source/blender/gpu/GPU_material.h
index 58bbe11b4d6..fff90e6f8ff 100644
--- a/source/blender/gpu/GPU_material.h
+++ b/source/blender/gpu/GPU_material.h
@@ -215,6 +215,9 @@ GPUMaterial *GPU_material_from_nodetree(struct Scene *scene,
void GPU_material_compile(GPUMaterial *mat);
void GPU_material_free(struct ListBase *gpumaterial);
+void GPU_material_acquire(GPUMaterial *mat);
+void GPU_material_release(GPUMaterial *mat);
+
void GPU_materials_free(struct Main *bmain);
struct Scene *GPU_material_scene(GPUMaterial *material);
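
The contract for the new pair, as implemented in gpu_material.c below: GPU_material_acquire() atomically increments the material's reference count, and GPU_material_release() decrements it via gpu_material_free_single(), freeing at zero. A material is created with a refcount of 1 (the owner's reference), so an owner's GPU_material_free() and a worker's GPU_material_release() can run in either order; whichever drops the last reference performs the actual free.
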
diff --git a/source/blender/gpu/intern/gpu_material.c b/source/blender/gpu/intern/gpu_material.c
index 3e9cf31e705..e8fb5fb2c45 100644
--- a/source/blender/gpu/intern/gpu_material.c
+++ b/source/blender/gpu/intern/gpu_material.c
@@ -40,6 +40,8 @@
#include "gpu_codegen.h"
#include "gpu_node_graph.h"
+#include "atomic_ops.h"
+
/* Structs */
#define MAX_COLOR_BAND 128
@@ -88,6 +90,8 @@ struct GPUMaterial {
int sss_samples;
bool sss_dirty;
+ uint32_t refcount;
+
#ifndef NDEBUG
char name[64];
#endif
@@ -142,8 +146,10 @@ static void gpu_material_ramp_texture_build(GPUMaterial *mat)
static void gpu_material_free_single(GPUMaterial *material)
{
- /* Cancel / wait any pending lazy compilation. */
- DRW_deferred_shader_remove(material);
+ bool do_free = atomic_sub_and_fetch_uint32(&material->refcount, 1) == 0;
+ if (!do_free) {
+ return;
+ }
gpu_node_graph_free(&material->graph);
@@ -168,6 +174,7 @@ void GPU_material_free(ListBase *gpumaterial)
{
LISTBASE_FOREACH (LinkData *, link, gpumaterial) {
GPUMaterial *material = link->data;
+ DRW_deferred_shader_remove(material);
gpu_material_free_single(material);
MEM_freeN(material);
}
@@ -660,6 +667,7 @@ GPUMaterial *GPU_material_from_nodetree(Scene *scene,
mat->is_volume_shader = is_volume_shader;
mat->graph.used_libraries = BLI_gset_new(
BLI_ghashutil_ptrhash, BLI_ghashutil_ptrcmp, "GPUNodeGraph.used_libraries");
+ mat->refcount = 1;
#ifndef NDEBUG
BLI_snprintf(mat->name, sizeof(mat->name), "%s", name);
#else
@@ -709,11 +717,21 @@ GPUMaterial *GPU_material_from_nodetree(Scene *scene,
return mat;
}
+void GPU_material_acquire(GPUMaterial *mat)
+{
+ atomic_add_and_fetch_uint32(&mat->refcount, 1);
+}
+
+void GPU_material_release(GPUMaterial *mat)
+{
+ gpu_material_free_single(mat);
+}
+
void GPU_material_compile(GPUMaterial *mat)
{
bool success;
- BLI_assert(mat->status == GPU_MAT_QUEUED);
+ BLI_assert(ELEM(mat->status, GPU_MAT_QUEUED, GPU_MAT_CREATED));
BLI_assert(mat->pass);
/* NOTE: The shader may have already been compiled here since we are
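
A note on the return-value convention used above: Blender's atomic_sub_and_fetch_uint32() returns the value *after* the decrement (hence the `== 0` test in gpu_material_free_single()), whereas C11's atomic_fetch_sub() returns the value *before* it. A compilable equivalent of the do_free test, with hypothetical names:

#include <stdatomic.h>
#include <stdbool.h>

static bool refcount_drop_and_test(atomic_uint *refcount)
{
  /* atomic_fetch_sub() yields the old value, so "old == 1" is the same
   * condition as "new == 0" in the hunk above. */
  return atomic_fetch_sub_explicit(refcount, 1, memory_order_acq_rel) == 1;
}

int main(void)
{
  atomic_uint rc;
  atomic_init(&rc, 2);
  bool first = refcount_drop_and_test(&rc);  /* 2 -> 1: false, no free. */
  bool second = refcount_drop_and_test(&rc); /* 1 -> 0: true, would free. */
  return (!first && second) ? 0 : 1;
}
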