Welcome to mirror list, hosted at ThFree Co, Russian Federation.

git.blender.org/blender.git - Unnamed repository; edit this file 'description' to name the repository.
summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorClément Foucault <foucault.clem@gmail.com>2022-09-27 01:00:21 +0300
committerClément Foucault <foucault.clem@gmail.com>2022-09-27 01:00:28 +0300
commitcd1caa5853e4a4e87a13029b49976c61c780a697 (patch)
tree6dc9f826abc32bf35177b7bf8c1dbdc5e9e59457 /source/blender/draw/intern/draw_manager_shader.c
parent66a863e30197990e20a1e9a45923af6331935b21 (diff)
GPU: Revert part of D16017 that was accidentally committed
This code slipped through the final review step, most likely due to a faulty merge. Fixes T101372 (Regression: World shader setup crashes Blender in rendered view). Regression introduced by rB697b447c2069bbbbaa9929aab0ea1f66ef8bf4d0.
Diffstat (limited to 'source/blender/draw/intern/draw_manager_shader.c')
-rw-r--r--source/blender/draw/intern/draw_manager_shader.c190
1 files changed, 33 insertions, 157 deletions
diff --git a/source/blender/draw/intern/draw_manager_shader.c b/source/blender/draw/intern/draw_manager_shader.c
index f452cd47cb7..04a9f3fdd2d 100644
--- a/source/blender/draw/intern/draw_manager_shader.c
+++ b/source/blender/draw/intern/draw_manager_shader.c
@@ -55,9 +55,6 @@ typedef struct DRWShaderCompiler {
ListBase queue; /* GPUMaterial */
SpinLock list_lock;
- /** Optimization queue. */
- ListBase optimize_queue; /* GPUMaterial */
-
void *gl_context;
GPUContext *gpu_context;
bool own_context;
@@ -113,29 +110,8 @@ static void drw_deferred_shader_compilation_exec(
MEM_freeN(link);
}
else {
- /* Check for Material Optimization job once there are no more
- * shaders to compile. */
- BLI_spin_lock(&comp->list_lock);
- /* Pop tail because it will be less likely to lock the main thread
- * if all GPUMaterials are to be freed (see DRW_deferred_shader_remove()). */
- link = (LinkData *)BLI_poptail(&comp->optimize_queue);
- GPUMaterial *optimize_mat = link ? (GPUMaterial *)link->data : NULL;
- if (optimize_mat) {
- /* Avoid another thread freeing the material during optimization. */
- GPU_material_acquire(optimize_mat);
- }
- BLI_spin_unlock(&comp->list_lock);
-
- if (optimize_mat) {
- /* Compile optimized material shader. */
- GPU_material_optimize(optimize_mat);
- GPU_material_release(optimize_mat);
- MEM_freeN(link);
- }
- else {
- /* No more materials to optimize, or shaders to compile. */
- break;
- }
+ /* No more materials to optimize, or shaders to compile. */
+ break;
}
if (GPU_type_matches_ex(GPU_DEVICE_ANY, GPU_OS_ANY, GPU_DRIVER_ANY, GPU_BACKEND_OPENGL)) {
@@ -157,7 +133,6 @@ static void drw_deferred_shader_compilation_free(void *custom_data)
BLI_spin_lock(&comp->list_lock);
BLI_freelistN(&comp->queue);
- BLI_freelistN(&comp->optimize_queue);
BLI_spin_unlock(&comp->list_lock);
if (comp->own_context) {
@@ -173,13 +148,34 @@ static void drw_deferred_shader_compilation_free(void *custom_data)
MEM_freeN(comp);
}
-/**
- * Append either shader compilation or optimization job to deferred queue and
- * ensure shader compilation worker is active.
- * We keep two separate queue's to ensure core compilations always complete before optimization.
- */
-static void drw_deferred_queue_append(GPUMaterial *mat, bool is_optimization_job)
+static void drw_deferred_shader_add(GPUMaterial *mat, bool deferred)
{
+ if (ELEM(GPU_material_status(mat), GPU_MAT_SUCCESS, GPU_MAT_FAILED)) {
+ return;
+ }
+ /* Do not defer the compilation if we are rendering for image.
+ * deferred rendering is only possible when `evil_C` is available */
+ if (DST.draw_ctx.evil_C == NULL || DRW_state_is_image_render() || !USE_DEFERRED_COMPILATION) {
+ deferred = false;
+ }
+
+ if (!deferred) {
+ DRW_deferred_shader_remove(mat);
+ /* Shaders could already be compiling. Have to wait for compilation to finish. */
+ while (GPU_material_status(mat) == GPU_MAT_QUEUED) {
+ PIL_sleep_ms(20);
+ }
+ if (GPU_material_status(mat) == GPU_MAT_CREATED) {
+ GPU_material_compile(mat);
+ }
+ return;
+ }
+
+ /* Don't add material to the queue twice. */
+ if (GPU_material_status(mat) == GPU_MAT_QUEUED) {
+ return;
+ }
+
const bool use_main_context = GPU_use_main_context_workaround();
const bool job_own_context = !use_main_context;
@@ -200,7 +196,6 @@ static void drw_deferred_queue_append(GPUMaterial *mat, bool is_optimization_job
if (old_comp) {
BLI_spin_lock(&old_comp->list_lock);
BLI_movelisttolist(&comp->queue, &old_comp->queue);
- BLI_movelisttolist(&comp->optimize_queue, &old_comp->optimize_queue);
BLI_spin_unlock(&old_comp->list_lock);
/* Do not recreate context, just pass ownership. */
if (old_comp->gl_context) {
@@ -211,18 +206,9 @@ static void drw_deferred_queue_append(GPUMaterial *mat, bool is_optimization_job
}
}
- /* Add to either compilation or optimization queue. */
- if (is_optimization_job) {
- BLI_assert(GPU_material_optimization_status(mat) != GPU_MAT_OPTIMIZATION_QUEUED);
- GPU_material_optimization_status_set(mat, GPU_MAT_OPTIMIZATION_QUEUED);
- LinkData *node = BLI_genericNodeN(mat);
- BLI_addtail(&comp->optimize_queue, node);
- }
- else {
- GPU_material_status_set(mat, GPU_MAT_QUEUED);
- LinkData *node = BLI_genericNodeN(mat);
- BLI_addtail(&comp->queue, node);
- }
+ GPU_material_status_set(mat, GPU_MAT_QUEUED);
+ LinkData *node = BLI_genericNodeN(mat);
+ BLI_addtail(&comp->queue, node);
/* Create only one context. */
if (comp->gl_context == NULL) {
@@ -251,39 +237,6 @@ static void drw_deferred_queue_append(GPUMaterial *mat, bool is_optimization_job
WM_jobs_start(wm, wm_job);
}
-static void drw_deferred_shader_add(GPUMaterial *mat, bool deferred)
-{
- if (ELEM(GPU_material_status(mat), GPU_MAT_SUCCESS, GPU_MAT_FAILED)) {
- return;
- }
-
- /* Do not defer the compilation if we are rendering for image.
- * deferred rendering is only possible when `evil_C` is available */
- if (DST.draw_ctx.evil_C == NULL || DRW_state_is_image_render() || !USE_DEFERRED_COMPILATION) {
- deferred = false;
- }
-
- if (!deferred) {
- DRW_deferred_shader_remove(mat);
- /* Shaders could already be compiling. Have to wait for compilation to finish. */
- while (GPU_material_status(mat) == GPU_MAT_QUEUED) {
- PIL_sleep_ms(20);
- }
- if (GPU_material_status(mat) == GPU_MAT_CREATED) {
- GPU_material_compile(mat);
- }
- return;
- }
-
- /* Don't add material to the queue twice. */
- if (GPU_material_status(mat) == GPU_MAT_QUEUED) {
- return;
- }
-
- /* Add deferred shader compilation to queue. */
- drw_deferred_queue_append(mat, false);
-}
-
void DRW_deferred_shader_remove(GPUMaterial *mat)
{
LISTBASE_FOREACH (wmWindowManager *, wm, &G_MAIN->wm) {
@@ -299,42 +252,9 @@ void DRW_deferred_shader_remove(GPUMaterial *mat)
BLI_remlink(&comp->queue, link);
GPU_material_status_set(link->data, GPU_MAT_CREATED);
}
-
- MEM_SAFE_FREE(link);
-
- /* Search for optimization job in queue. */
- LinkData *opti_link = (LinkData *)BLI_findptr(
- &comp->optimize_queue, mat, offsetof(LinkData, data));
- if (opti_link) {
- BLI_remlink(&comp->optimize_queue, opti_link);
- GPU_material_optimization_status_set(opti_link->data, GPU_MAT_OPTIMIZATION_READY);
- }
BLI_spin_unlock(&comp->list_lock);
- MEM_SAFE_FREE(opti_link);
- }
- }
- }
-}
-
-void DRW_deferred_shader_optimize_remove(GPUMaterial *mat)
-{
- LISTBASE_FOREACH (wmWindowManager *, wm, &G_MAIN->wm) {
- LISTBASE_FOREACH (wmWindow *, win, &wm->windows) {
- DRWShaderCompiler *comp = (DRWShaderCompiler *)WM_jobs_customdata_from_type(
- wm, wm, WM_JOB_TYPE_SHADER_COMPILATION);
- if (comp != NULL) {
- BLI_spin_lock(&comp->list_lock);
- /* Search for optimization job in queue. */
- LinkData *opti_link = (LinkData *)BLI_findptr(
- &comp->optimize_queue, mat, offsetof(LinkData, data));
- if (opti_link) {
- BLI_remlink(&comp->optimize_queue, opti_link);
- GPU_material_optimization_status_set(opti_link->data, GPU_MAT_OPTIMIZATION_READY);
- }
- BLI_spin_unlock(&comp->list_lock);
-
- MEM_SAFE_FREE(opti_link);
+ MEM_SAFE_FREE(link);
}
}
}
@@ -468,7 +388,6 @@ GPUMaterial *DRW_shader_from_world(World *wo,
}
drw_deferred_shader_add(mat, deferred);
- DRW_shader_queue_optimize_material(mat);
return mat;
}
@@ -498,52 +417,9 @@ GPUMaterial *DRW_shader_from_material(Material *ma,
}
drw_deferred_shader_add(mat, deferred);
- DRW_shader_queue_optimize_material(mat);
return mat;
}
-void DRW_shader_queue_optimize_material(GPUMaterial *mat)
-{
- /* Do not perform deferred optimization if performing render.
- * De-queue any queued optimization jobs. */
- if (DRW_state_is_image_render()) {
- if (GPU_material_optimization_status(mat) == GPU_MAT_OPTIMIZATION_QUEUED) {
- /* Remove from pending optimization job queue. */
- DRW_deferred_shader_optimize_remove(mat);
- /* If optimization job had already started, wait for it to complete. */
- while (GPU_material_optimization_status(mat) == GPU_MAT_OPTIMIZATION_QUEUED) {
- PIL_sleep_ms(20);
- }
- }
- return;
- }
-
- /* We do not need to perform optimization on the material if it is already compiled or in the
- * optimization queue. If optimization is not required, the status will be flagged as
- * `GPU_MAT_OPTIMIZATION_SKIP`.
- * We can also skip cases which have already been queued up. */
- if (ELEM(GPU_material_optimization_status(mat),
- GPU_MAT_OPTIMIZATION_SKIP,
- GPU_MAT_OPTIMIZATION_SUCCESS,
- GPU_MAT_OPTIMIZATION_QUEUED)) {
- return;
- }
-
- /* Only queue optimization once the original shader has been successfully compiled. */
- if (GPU_material_status(mat) != GPU_MAT_SUCCESS) {
- return;
- }
-
- /* Defer optimization until sufficient time has passed beyond creation. This avoids excessive
- * recompilation for shaders which are being actively modified. */
- if (!GPU_material_optimization_ready(mat)) {
- return;
- }
-
- /* Add deferred shader compilation to queue. */
- drw_deferred_queue_append(mat, true);
-}
-
void DRW_shader_free(GPUShader *shader)
{
GPU_shader_free(shader);