author     Thomas Dinges <dingto>                      2022-09-22 18:27:51 +0300
committer  Clément Foucault <foucault.clem@gmail.com>  2022-09-22 18:32:43 +0300
commit     697b447c2069bbbbaa9929aab0ea1f66ef8bf4d0 (patch)
tree       f5c78b102b5c1478fb1dbd262b23508f5f072e33 /source/blender/draw
parent     bb63b98d1ff5acfd24dff9b5e72175f82f5bca26 (diff)
Metal: MTLContext implementation and immediate mode rendering support.
MTLContext provides functionality for command encoding, binding management and graphics device management.

MTLImmediate provides simple draw enablement with dynamically encoded data. These draws utilise temporary scratch buffer memory to provide minimal bandwidth overhead during workload submission.

This patch also contains empty placeholders for MTLBatch and MTLDrawList to enable testing of first pixels on-screen without failure.

The Metal API also requires access to the GHOST_Context to ensure the same pre-initialized Metal GPU device is used by the viewport. Given the explicit nature of Metal, explicit control is also needed over presentation, to ensure correct work scheduling and rendering pipeline state.

Authored by Apple: Michael Parkin-White

Ref T96261

(The diff is based on 043f59cb3b5835ba1a0bbf6f1cbad080b527f7f6)

Reviewed By: fclem

Differential Revision: https://developer.blender.org/D15953
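
Editor's note: the GHOST-context requirement above is visible in the call sites changed below, where GPU_context_create() gains a second argument carrying the system (GHOST/GL) context. A minimal usage sketch, inferred only from the call sites in this diff (the header declaration itself is not part of this patch):

/* Sketch: the GPU context is now created against an existing system
 * context, so a backend such as Metal can reuse the pre-initialized GPU
 * device. Inferred from the call sites in this patch. */
void *gl_context = WM_opengl_context_create();
GPUContext *gpu_context = GPU_context_create(NULL, gl_context);
/* ... issue draw calls ... */
GPU_context_active_set(NULL);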
Diffstat (limited to 'source/blender/draw')

-rw-r--r--  source/blender/draw/DRW_engine.h                        |   1
-rw-r--r--  source/blender/draw/engines/eevee/eevee_lightcache.c    |   2
-rw-r--r--  source/blender/draw/engines/eevee_next/eevee_shader.cc  |   2
-rw-r--r--  source/blender/draw/intern/DRW_render.h                 |   1
-rw-r--r--  source/blender/draw/intern/draw_manager.c               |   2
-rw-r--r--  source/blender/draw/intern/draw_manager_shader.c        | 194

6 files changed, 167 insertions, 35 deletions
diff --git a/source/blender/draw/DRW_engine.h b/source/blender/draw/DRW_engine.h
index dec7a22aadb..04e3bddfb6c 100644
--- a/source/blender/draw/DRW_engine.h
+++ b/source/blender/draw/DRW_engine.h
@@ -201,6 +201,7 @@ void DRW_gpu_render_context_enable(void *re_gpu_context);
 void DRW_gpu_render_context_disable(void *re_gpu_context);
 
 void DRW_deferred_shader_remove(struct GPUMaterial *mat);
+void DRW_deferred_shader_optimize_remove(struct GPUMaterial *mat);
 
 /**
  * Get DrawData from the given ID-block. In order for this to work, we assume that
diff --git a/source/blender/draw/engines/eevee/eevee_lightcache.c b/source/blender/draw/engines/eevee/eevee_lightcache.c
index 614ea0b0892..0fd87ef43f0 100644
--- a/source/blender/draw/engines/eevee/eevee_lightcache.c
+++ b/source/blender/draw/engines/eevee/eevee_lightcache.c
@@ -597,7 +597,7 @@ static void eevee_lightbake_context_enable(EEVEE_LightBake *lbake)
   if (lbake->gl_context) {
     DRW_opengl_render_context_enable(lbake->gl_context);
     if (lbake->gpu_context == NULL) {
-      lbake->gpu_context = GPU_context_create(NULL);
+      lbake->gpu_context = GPU_context_create(NULL, lbake->gl_context);
     }
     DRW_gpu_render_context_enable(lbake->gpu_context);
   }
diff --git a/source/blender/draw/engines/eevee_next/eevee_shader.cc b/source/blender/draw/engines/eevee_next/eevee_shader.cc
index 64b1d4891a9..05ff06e7435 100644
--- a/source/blender/draw/engines/eevee_next/eevee_shader.cc
+++ b/source/blender/draw/engines/eevee_next/eevee_shader.cc
@@ -471,6 +471,8 @@ GPUMaterial *ShaderModule::material_shader_get(const char *name,
                                                this);
   GPU_material_status_set(gpumat, GPU_MAT_QUEUED);
   GPU_material_compile(gpumat);
+  /* Queue deferred material optimization. */
+  DRW_shader_queue_optimize_material(gpumat);
   return gpumat;
 }
 
diff --git a/source/blender/draw/intern/DRW_render.h b/source/blender/draw/intern/DRW_render.h
index 7b80ffd2b88..4bdef577e44 100644
--- a/source/blender/draw/intern/DRW_render.h
+++ b/source/blender/draw/intern/DRW_render.h
@@ -251,6 +251,7 @@ struct GPUMaterial *DRW_shader_from_material(struct Material *ma,
                                              bool deferred,
                                              GPUCodegenCallbackFn callback,
                                              void *thunk);
+void DRW_shader_queue_optimize_material(struct GPUMaterial *mat);
 void DRW_shader_free(struct GPUShader *shader);
 #define DRW_SHADER_FREE_SAFE(shader) \
   do { \
diff --git a/source/blender/draw/intern/draw_manager.c b/source/blender/draw/intern/draw_manager.c
index e1bee89db60..eab79652762 100644
--- a/source/blender/draw/intern/draw_manager.c
+++ b/source/blender/draw/intern/draw_manager.c
@@ -3139,7 +3139,7 @@ void DRW_opengl_context_create(void)
   DST.gl_context = WM_opengl_context_create();
   WM_opengl_context_activate(DST.gl_context);
   /* Be sure to create gpu_context too. */
-  DST.gpu_context = GPU_context_create(NULL);
+  DST.gpu_context = GPU_context_create(0, DST.gl_context);
   /* So we activate the window's one afterwards. */
   wm_window_reset_drawable();
 }
diff --git a/source/blender/draw/intern/draw_manager_shader.c b/source/blender/draw/intern/draw_manager_shader.c
index 4bc3898c5e7..6f8df54ead3 100644
--- a/source/blender/draw/intern/draw_manager_shader.c
+++ b/source/blender/draw/intern/draw_manager_shader.c
@@ -51,9 +51,13 @@ extern char datatoc_common_fullscreen_vert_glsl[];
  * \{ */
 
 typedef struct DRWShaderCompiler {
+  /** Default compilation queue. */
   ListBase queue; /* GPUMaterial */
   SpinLock list_lock;
 
+  /** Optimization queue. */
+  ListBase optimize_queue; /* GPUMaterial */
+
   void *gl_context;
   GPUContext *gpu_context;
   bool own_context;
@@ -109,7 +113,29 @@ static void drw_deferred_shader_compilation_exec(
       MEM_freeN(link);
     }
     else {
-      break;
+      /* Check for Material Optimization job once there are no more
+       * shaders to compile. */
+      BLI_spin_lock(&comp->list_lock);
+      /* Pop tail because it will be less likely to lock the main thread
+       * if all GPUMaterials are to be freed (see DRW_deferred_shader_remove()). */
+      LinkData *link = (LinkData *)BLI_poptail(&comp->optimize_queue);
+      GPUMaterial *optimize_mat = link ? (GPUMaterial *)link->data : NULL;
+      if (optimize_mat) {
+        /* Avoid another thread freeing the material during optimization. */
+        GPU_material_acquire(optimize_mat);
+      }
+      BLI_spin_unlock(&comp->list_lock);
+
+      if (optimize_mat) {
+        /* Compile optimized material shader. */
+        GPU_material_optimize(optimize_mat);
+        GPU_material_release(optimize_mat);
+        MEM_freeN(link);
+      }
+      else {
+        /* No more materials to optimize, or shaders to compile. */
+        break;
+      }
     }
 
     if (GPU_type_matches_ex(GPU_DEVICE_ANY, GPU_OS_ANY, GPU_DRIVER_ANY, GPU_BACKEND_OPENGL)) {
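
Editor's note: this hunk sits inside the compilation worker's main loop, but only the `else` branch appears in the diff. A condensed reconstruction of the surrounding loop, assuming the compile branch mirrors the optimization branch shown above (a sketch, not the verbatim source):

/* Assumed shape of the worker loop around the hunk above. `comp` is the
 * DRWShaderCompiler job data; types and helpers as used in this file. */
static void example_worker_loop(DRWShaderCompiler *comp)
{
  while (true) {
    BLI_spin_lock(&comp->list_lock);
    /* The default compilation queue always drains first. */
    LinkData *link = (LinkData *)BLI_poptail(&comp->queue);
    GPUMaterial *mat = link ? (GPUMaterial *)link->data : NULL;
    if (mat) {
      GPU_material_acquire(mat);
    }
    BLI_spin_unlock(&comp->list_lock);

    if (mat) {
      GPU_material_compile(mat);
      GPU_material_release(mat);
      MEM_freeN(link);
    }
    else {
      /* Compilation queue empty: service one optimization job or exit.
       * This is exactly the `else` branch added by the hunk above. */
      break; /* Simplified; the real branch pops comp->optimize_queue. */
    }
  }
}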
@@ -131,6 +157,7 @@ static void drw_deferred_shader_compilation_free(void *custom_data)
 
   BLI_spin_lock(&comp->list_lock);
   BLI_freelistN(&comp->queue);
+  BLI_freelistN(&comp->optimize_queue);
   BLI_spin_unlock(&comp->list_lock);
 
   if (comp->own_context) {
@@ -146,34 +173,13 @@ static void drw_deferred_shader_compilation_free(void *custom_data)
   MEM_freeN(comp);
 }
 
-static void drw_deferred_shader_add(GPUMaterial *mat, bool deferred)
+/**
+ * Append either shader compilation or optimization job to deferred queue and
+ * ensure shader compilation worker is active.
+ * We keep two separate queues to ensure core compilations always complete before optimization.
+ */
+static void drw_deferred_queue_append(GPUMaterial *mat, bool is_optimization_job)
 {
-  if (ELEM(GPU_material_status(mat), GPU_MAT_SUCCESS, GPU_MAT_FAILED)) {
-    return;
-  }
-
-  /* Do not defer the compilation if we are rendering for image.
-   * deferred rendering is only possible when `evil_C` is available */
-  if (DST.draw_ctx.evil_C == NULL || DRW_state_is_image_render() || !USE_DEFERRED_COMPILATION) {
-    deferred = false;
-  }
-
-  if (!deferred) {
-    DRW_deferred_shader_remove(mat);
-    /* Shaders could already be compiling. Have to wait for compilation to finish. */
-    while (GPU_material_status(mat) == GPU_MAT_QUEUED) {
-      PIL_sleep_ms(20);
-    }
-    if (GPU_material_status(mat) == GPU_MAT_CREATED) {
-      GPU_material_compile(mat);
-    }
-    return;
-  }
-
-  /* Don't add material to the queue twice. */
-  if (GPU_material_status(mat) == GPU_MAT_QUEUED) {
-    return;
-  }
-
   const bool use_main_context = GPU_use_main_context_workaround();
   const bool job_own_context = !use_main_context;
 
@@ -194,6 +200,7 @@ static void drw_deferred_shader_add(GPUMaterial *mat, bool deferred)
   if (old_comp) {
     BLI_spin_lock(&old_comp->list_lock);
     BLI_movelisttolist(&comp->queue, &old_comp->queue);
+    BLI_movelisttolist(&comp->optimize_queue, &old_comp->optimize_queue);
     BLI_spin_unlock(&old_comp->list_lock);
     /* Do not recreate context, just pass ownership. */
     if (old_comp->gl_context) {
@@ -204,9 +211,18 @@ static void drw_deferred_shader_add(GPUMaterial *mat, bool deferred)
     }
   }
 
-  GPU_material_status_set(mat, GPU_MAT_QUEUED);
-  LinkData *node = BLI_genericNodeN(mat);
-  BLI_addtail(&comp->queue, node);
+  /* Add to either compilation or optimization queue. */
+  if (is_optimization_job) {
+    BLI_assert(GPU_material_optimization_status(mat) != GPU_MAT_OPTIMIZATION_QUEUED);
+    GPU_material_optimization_status_set(mat, GPU_MAT_OPTIMIZATION_QUEUED);
+    LinkData *node = BLI_genericNodeN(mat);
+    BLI_addtail(&comp->optimize_queue, node);
+  }
+  else {
+    GPU_material_status_set(mat, GPU_MAT_QUEUED);
+    LinkData *node = BLI_genericNodeN(mat);
+    BLI_addtail(&comp->queue, node);
+  }
 
   /* Create only one context. */
   if (comp->gl_context == NULL) {
@@ -216,7 +232,7 @@ static void drw_deferred_shader_add(GPUMaterial *mat, bool deferred)
   }
   else {
     comp->gl_context = WM_opengl_context_create();
-    comp->gpu_context = GPU_context_create(NULL);
+    comp->gpu_context = GPU_context_create(NULL, comp->gl_context);
     GPU_context_active_set(NULL);
 
     WM_opengl_context_activate(DST.gl_context);
@@ -235,6 +251,39 @@ static void drw_deferred_shader_add(GPUMaterial *mat, bool deferred)
 
   WM_jobs_start(wm, wm_job);
 }
 
+static void drw_deferred_shader_add(GPUMaterial *mat, bool deferred)
+{
+  if (ELEM(GPU_material_status(mat), GPU_MAT_SUCCESS, GPU_MAT_FAILED)) {
+    return;
+  }
+
+  /* Do not defer the compilation if we are rendering for image.
+   * deferred rendering is only possible when `evil_C` is available */
+  if (DST.draw_ctx.evil_C == NULL || DRW_state_is_image_render() || !USE_DEFERRED_COMPILATION) {
+    deferred = false;
+  }
+
+  if (!deferred) {
+    DRW_deferred_shader_remove(mat);
+    /* Shaders could already be compiling. Have to wait for compilation to finish. */
+    while (GPU_material_status(mat) == GPU_MAT_QUEUED) {
+      PIL_sleep_ms(20);
+    }
+    if (GPU_material_status(mat) == GPU_MAT_CREATED) {
+      GPU_material_compile(mat);
+    }
+    return;
+  }
+
+  /* Don't add material to the queue twice. */
+  if (GPU_material_status(mat) == GPU_MAT_QUEUED) {
+    return;
+  }
+
+  /* Add deferred shader compilation to queue. */
+  drw_deferred_queue_append(mat, false);
+}
+
 void DRW_deferred_shader_remove(GPUMaterial *mat)
 {
   LISTBASE_FOREACH (wmWindowManager *, wm, &G_MAIN->wm) {
@@ -243,14 +292,49 @@ void DRW_deferred_shader_remove(GPUMaterial *mat)
           wm, wm, WM_JOB_TYPE_SHADER_COMPILATION);
       if (comp != NULL) {
         BLI_spin_lock(&comp->list_lock);
+
+        /* Search for compilation job in queue. */
         LinkData *link = (LinkData *)BLI_findptr(&comp->queue, mat, offsetof(LinkData, data));
         if (link) {
           BLI_remlink(&comp->queue, link);
           GPU_material_status_set(link->data, GPU_MAT_CREATED);
         }
-        BLI_spin_unlock(&comp->list_lock);
 
         MEM_SAFE_FREE(link);
+
+        /* Search for optimization job in queue. */
+        LinkData *opti_link = (LinkData *)BLI_findptr(
+            &comp->optimize_queue, mat, offsetof(LinkData, data));
+        if (opti_link) {
+          BLI_remlink(&comp->optimize_queue, opti_link);
+          GPU_material_optimization_status_set(opti_link->data, GPU_MAT_OPTIMIZATION_READY);
+        }
+        BLI_spin_unlock(&comp->list_lock);
+
+        MEM_SAFE_FREE(opti_link);
+      }
+    }
+  }
+}
+
+void DRW_deferred_shader_optimize_remove(GPUMaterial *mat)
+{
+  LISTBASE_FOREACH (wmWindowManager *, wm, &G_MAIN->wm) {
+    LISTBASE_FOREACH (wmWindow *, win, &wm->windows) {
+      DRWShaderCompiler *comp = (DRWShaderCompiler *)WM_jobs_customdata_from_type(
+          wm, wm, WM_JOB_TYPE_SHADER_COMPILATION);
+      if (comp != NULL) {
+        BLI_spin_lock(&comp->list_lock);
+        /* Search for optimization job in queue. */
+        LinkData *opti_link = (LinkData *)BLI_findptr(
+            &comp->optimize_queue, mat, offsetof(LinkData, data));
+        if (opti_link) {
+          BLI_remlink(&comp->optimize_queue, opti_link);
+          GPU_material_optimization_status_set(opti_link->data, GPU_MAT_OPTIMIZATION_READY);
+        }
+        BLI_spin_unlock(&comp->list_lock);
+
+        MEM_SAFE_FREE(opti_link);
       }
     }
   }
 }
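
Editor's note: both removal functions reset the status to GPU_MAT_OPTIMIZATION_READY so the material can be re-queued later. A hypothetical teardown sketch using only the entry points from this patch (the actual free happens elsewhere; this is illustrative only):

/* Sketch: purge both deferred queues before a GPUMaterial may be freed,
 * so the worker thread cannot pick up a dangling pointer. */
static void example_material_teardown(GPUMaterial *mat)
{
  DRW_deferred_shader_remove(mat);          /* De-queue pending compilation. */
  DRW_deferred_shader_optimize_remove(mat); /* De-queue pending optimization. */
  /* ... material is freed by its owner afterwards (illustrative). */
}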
@@ -384,6 +468,7 @@ GPUMaterial *DRW_shader_from_world(World *wo,
   }
 
   drw_deferred_shader_add(mat, deferred);
+  DRW_shader_queue_optimize_material(mat);
   return mat;
 }
@@ -413,9 +498,52 @@ GPUMaterial *DRW_shader_from_material(Material *ma,
   }
 
   drw_deferred_shader_add(mat, deferred);
+  DRW_shader_queue_optimize_material(mat);
   return mat;
 }
 
+void DRW_shader_queue_optimize_material(GPUMaterial *mat)
+{
+  /* Do not perform deferred optimization if performing render.
+   * De-queue any queued optimization jobs. */
+  if (DRW_state_is_image_render()) {
+    if (GPU_material_optimization_status(mat) == GPU_MAT_OPTIMIZATION_QUEUED) {
+      /* Remove from pending optimization job queue. */
+      DRW_deferred_shader_optimize_remove(mat);
+      /* If optimization job had already started, wait for it to complete. */
+      while (GPU_material_optimization_status(mat) == GPU_MAT_OPTIMIZATION_QUEUED) {
+        PIL_sleep_ms(20);
+      }
+    }
+    return;
+  }
+
+  /* We do not need to perform optimization on the material if it is already compiled or in the
+   * optimization queue. If optimization is not required, the status will be flagged as
+   * `GPU_MAT_OPTIMIZATION_SKIP`.
+   * We can also skip cases which have already been queued up. */
+  if (ELEM(GPU_material_optimization_status(mat),
+           GPU_MAT_OPTIMIZATION_SKIP,
+           GPU_MAT_OPTIMIZATION_SUCCESS,
+           GPU_MAT_OPTIMIZATION_QUEUED)) {
+    return;
+  }
+
+  /* Only queue optimization once the original shader has been successfully compiled. */
+  if (GPU_material_status(mat) != GPU_MAT_SUCCESS) {
+    return;
+  }
+
+  /* Defer optimization until sufficient time has passed beyond creation. This avoids excessive
+   * recompilation for shaders which are being actively modified. */
+  if (!GPU_material_optimization_ready(mat)) {
+    return;
+  }
+
+  /* Add deferred shader optimization job to queue. */
+  drw_deferred_queue_append(mat, true);
+}
+
 void DRW_shader_free(GPUShader *shader)
 {
   GPU_shader_free(shader);
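
Editor's note: because DRW_shader_queue_optimize_material() early-outs on SKIP/SUCCESS/QUEUED status, on not-yet-compiled base shaders, and on recently modified materials, callers can invoke it opportunistically. A hypothetical caller sketch; the state transitions in the comment are assumptions drawn from the code above:

/* Optimization status flow implied by this patch (assumed):
 *   READY  --queue-->  QUEUED  --worker-->  SUCCESS
 *   QUEUED --remove--> READY   (material can be re-queued later)
 *   SKIP: optimization not required; never queued.
 * Safe to call on every material update thanks to the guards above. */
static void example_on_material_changed(GPUMaterial *mat)
{
  DRW_shader_queue_optimize_material(mat);
}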