
git.blender.org/blender.git
author     Campbell Barton <ideasman42@gmail.com>  2019-04-17 07:17:24 +0300
committer  Campbell Barton <ideasman42@gmail.com>  2019-04-17 07:21:24 +0300
commit     e12c08e8d170b7ca40f204a5b0423c23a9fbc2c1 (patch)
tree       8cf3453d12edb177a218ef8009357518ec6cab6a /source/blender/draw/intern/draw_manager_shader.c
parent     b3dabc200a4b0399ec6b81f2ff2730d07b44fcaa (diff)
ClangFormat: apply to source, most of intern
Apply clang format as proposed in T53211. For details on usage and instructions for migrating branches without conflicts, see: https://wiki.blender.org/wiki/Tools/ClangFormat
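The linked wiki page is the authoritative reference for running the formatter; as a rough illustration only (hypothetical commands, assuming clang-format is on PATH and the repository root carries the project's .clang-format file), the workflow looks like:

    # Reformat a single file in place, picking up the repository's .clang-format:
    clang-format -i source/blender/draw/intern/draw_manager_shader.c

    # When carrying a branch across this commit, git-clang-format (shipped with
    # LLVM) can reformat only the lines your own commits touch:
    git clang-format e12c08e8d170b7ca40f204a5b0423c23a9fbc2c1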
Diffstat (limited to 'source/blender/draw/intern/draw_manager_shader.c')
 -rw-r--r--  source/blender/draw/intern/draw_manager_shader.c  |  544
 1 file changed, 300 insertions(+), 244 deletions(-)
diff --git a/source/blender/draw/intern/draw_manager_shader.c b/source/blender/draw/intern/draw_manager_shader.c
index 1fc6b61b87a..9cb3c1bf226 100644
--- a/source/blender/draw/intern/draw_manager_shader.c
+++ b/source/blender/draw/intern/draw_manager_shader.c
@@ -57,339 +57,395 @@ extern char datatoc_common_fullscreen_vert_glsl[];
 * \{ */

 typedef struct DRWDeferredShader {
-	struct DRWDeferredShader *prev, *next;
+  struct DRWDeferredShader *prev, *next;

-	GPUMaterial *mat;
+  GPUMaterial *mat;
 } DRWDeferredShader;

 typedef struct DRWShaderCompiler {
-	ListBase queue; /* DRWDeferredShader */
-	SpinLock list_lock;
+  ListBase queue; /* DRWDeferredShader */
+  SpinLock list_lock;

-	DRWDeferredShader *mat_compiling;
-	ThreadMutex compilation_lock;
+  DRWDeferredShader *mat_compiling;
+  ThreadMutex compilation_lock;

-	void *gl_context;
-	bool own_context;
+  void *gl_context;
+  bool own_context;

-	int shaders_done; /* To compute progress. */
+  int shaders_done; /* To compute progress. */
 } DRWShaderCompiler;

 static void drw_deferred_shader_free(DRWDeferredShader *dsh)
 {
-	/* Make sure it is not queued before freeing. */
-	MEM_freeN(dsh);
+  /* Make sure it is not queued before freeing. */
+  MEM_freeN(dsh);
 }

 static void drw_deferred_shader_queue_free(ListBase *queue)
 {
-	DRWDeferredShader *dsh;
-	while ((dsh = BLI_pophead(queue))) {
-		drw_deferred_shader_free(dsh);
-	}
+  DRWDeferredShader *dsh;
+  while ((dsh = BLI_pophead(queue))) {
+    drw_deferred_shader_free(dsh);
+  }
 }
-static void drw_deferred_shader_compilation_exec(void *custom_data, short *stop, short *do_update, float *progress)
+static void drw_deferred_shader_compilation_exec(void *custom_data,
+                                                 short *stop,
+                                                 short *do_update,
+                                                 float *progress)
 {
-	DRWShaderCompiler *comp = (DRWShaderCompiler *)custom_data;
-	void *gl_context = comp->gl_context;
+  DRWShaderCompiler *comp = (DRWShaderCompiler *)custom_data;
+  void *gl_context = comp->gl_context;

-	WM_opengl_context_activate(gl_context);
+  WM_opengl_context_activate(gl_context);

-	while (true) {
-		BLI_spin_lock(&comp->list_lock);
+  while (true) {
+    BLI_spin_lock(&comp->list_lock);

-		if (*stop != 0) {
-			/* We don't want user to be able to cancel the compilation
-			 * but wm can kill the task if we are closing blender. */
-			BLI_spin_unlock(&comp->list_lock);
-			break;
-		}
+    if (*stop != 0) {
+      /* We don't want user to be able to cancel the compilation
+       * but wm can kill the task if we are closing blender. */
+      BLI_spin_unlock(&comp->list_lock);
+      break;
+    }

-		/* Pop tail because it will be less likely to lock the main thread
-		 * if all GPUMaterials are to be freed (see DRW_deferred_shader_remove()). */
-		comp->mat_compiling = BLI_poptail(&comp->queue);
-		if (comp->mat_compiling == NULL) {
-			/* No more Shader to compile. */
-			BLI_spin_unlock(&comp->list_lock);
-			break;
-		}
+    /* Pop tail because it will be less likely to lock the main thread
+     * if all GPUMaterials are to be freed (see DRW_deferred_shader_remove()). */
+    comp->mat_compiling = BLI_poptail(&comp->queue);
+    if (comp->mat_compiling == NULL) {
+      /* No more Shader to compile. */
+      BLI_spin_unlock(&comp->list_lock);
+      break;
+    }

-		comp->shaders_done++;
-		int total = BLI_listbase_count(&comp->queue) + comp->shaders_done;
+    comp->shaders_done++;
+    int total = BLI_listbase_count(&comp->queue) + comp->shaders_done;

-		BLI_mutex_lock(&comp->compilation_lock);
-		BLI_spin_unlock(&comp->list_lock);
+    BLI_mutex_lock(&comp->compilation_lock);
+    BLI_spin_unlock(&comp->list_lock);

-		/* Do the compilation. */
-		GPU_material_compile(comp->mat_compiling->mat);
+    /* Do the compilation. */
+    GPU_material_compile(comp->mat_compiling->mat);

-		*progress = (float)comp->shaders_done / (float)total;
-		*do_update = true;
+    *progress = (float)comp->shaders_done / (float)total;
+    *do_update = true;

-		GPU_flush();
-		BLI_mutex_unlock(&comp->compilation_lock);
+    GPU_flush();
+    BLI_mutex_unlock(&comp->compilation_lock);

-		BLI_spin_lock(&comp->list_lock);
-		drw_deferred_shader_free(comp->mat_compiling);
-		comp->mat_compiling = NULL;
-		BLI_spin_unlock(&comp->list_lock);
-	}
+    BLI_spin_lock(&comp->list_lock);
+    drw_deferred_shader_free(comp->mat_compiling);
+    comp->mat_compiling = NULL;
+    BLI_spin_unlock(&comp->list_lock);
+  }

-	WM_opengl_context_release(gl_context);
+  WM_opengl_context_release(gl_context);
 }
 static void drw_deferred_shader_compilation_free(void *custom_data)
 {
-	DRWShaderCompiler *comp = (DRWShaderCompiler *)custom_data;
+  DRWShaderCompiler *comp = (DRWShaderCompiler *)custom_data;

-	drw_deferred_shader_queue_free(&comp->queue);
+  drw_deferred_shader_queue_free(&comp->queue);

-	BLI_spin_end(&comp->list_lock);
-	BLI_mutex_end(&comp->compilation_lock);
+  BLI_spin_end(&comp->list_lock);
+  BLI_mutex_end(&comp->compilation_lock);

-	if (comp->own_context) {
-		/* Only destroy if the job owns the context. */
-		WM_opengl_context_dispose(comp->gl_context);
-	}
+  if (comp->own_context) {
+    /* Only destroy if the job owns the context. */
+    WM_opengl_context_dispose(comp->gl_context);
+  }

-	MEM_freeN(comp);
+  MEM_freeN(comp);
 }
 static void drw_deferred_shader_add(GPUMaterial *mat, bool deferred)
 {
-	/* Do not deferre the compilation if we are rendering for image. */
-	if (DRW_state_is_image_render() || !USE_DEFERRED_COMPILATION || !deferred) {
-		/* Double checking that this GPUMaterial is not going to be
-		 * compiled by another thread. */
-		DRW_deferred_shader_remove(mat);
-		GPU_material_compile(mat);
-		return;
-	}
-
-	DRWDeferredShader *dsh = MEM_callocN(sizeof(DRWDeferredShader), "Deferred Shader");
-
-	dsh->mat = mat;
-
-	BLI_assert(DST.draw_ctx.evil_C);
-	wmWindowManager *wm = CTX_wm_manager(DST.draw_ctx.evil_C);
-	wmWindow *win = CTX_wm_window(DST.draw_ctx.evil_C);
-
-	/* Use original scene ID since this is what the jobs template tests for. */
-	Scene *scene = (Scene *)DEG_get_original_id(&DST.draw_ctx.scene->id);
-
-	/* Get the running job or a new one if none is running. Can only have one job per type & owner. */
-	wmJob *wm_job = WM_jobs_get(wm, win, scene, "Shaders Compilation",
-	                            WM_JOB_PROGRESS | WM_JOB_SUSPEND, WM_JOB_TYPE_SHADER_COMPILATION);
-
-	DRWShaderCompiler *old_comp = (DRWShaderCompiler *)WM_jobs_customdata_get(wm_job);
-
-	DRWShaderCompiler *comp = MEM_callocN(sizeof(DRWShaderCompiler), "DRWShaderCompiler");
-	BLI_spin_init(&comp->list_lock);
-	BLI_mutex_init(&comp->compilation_lock);
-
-	if (old_comp) {
-		BLI_spin_lock(&old_comp->list_lock);
-		BLI_movelisttolist(&comp->queue, &old_comp->queue);
-		BLI_spin_unlock(&old_comp->list_lock);
-		/* Do not recreate context, just pass ownership. */
-		if (old_comp->gl_context) {
-			comp->gl_context = old_comp->gl_context;
-			old_comp->own_context = false;
-			comp->own_context = true;
-		}
-	}
-
-	BLI_addtail(&comp->queue, dsh);
-
-	/* Create only one context. */
-	if (comp->gl_context == NULL) {
-		comp->gl_context = WM_opengl_context_create();
-		WM_opengl_context_activate(DST.gl_context);
-		comp->own_context = true;
-	}
-
-	WM_jobs_customdata_set(wm_job, comp, drw_deferred_shader_compilation_free);
-	WM_jobs_timer(wm_job, 0.1, NC_MATERIAL | ND_SHADING_DRAW, 0);
-	WM_jobs_callbacks(wm_job, drw_deferred_shader_compilation_exec, NULL, NULL, NULL);
-	WM_jobs_start(wm, wm_job);
+  /* Do not deferre the compilation if we are rendering for image. */
+  if (DRW_state_is_image_render() || !USE_DEFERRED_COMPILATION || !deferred) {
+    /* Double checking that this GPUMaterial is not going to be
+     * compiled by another thread. */
+    DRW_deferred_shader_remove(mat);
+    GPU_material_compile(mat);
+    return;
+  }
+
+  DRWDeferredShader *dsh = MEM_callocN(sizeof(DRWDeferredShader), "Deferred Shader");
+
+  dsh->mat = mat;
+
+  BLI_assert(DST.draw_ctx.evil_C);
+  wmWindowManager *wm = CTX_wm_manager(DST.draw_ctx.evil_C);
+  wmWindow *win = CTX_wm_window(DST.draw_ctx.evil_C);
+
+  /* Use original scene ID since this is what the jobs template tests for. */
+  Scene *scene = (Scene *)DEG_get_original_id(&DST.draw_ctx.scene->id);
+
+  /* Get the running job or a new one if none is running. Can only have one job per type & owner. */
+  wmJob *wm_job = WM_jobs_get(wm,
+                              win,
+                              scene,
+                              "Shaders Compilation",
+                              WM_JOB_PROGRESS | WM_JOB_SUSPEND,
+                              WM_JOB_TYPE_SHADER_COMPILATION);
+
+  DRWShaderCompiler *old_comp = (DRWShaderCompiler *)WM_jobs_customdata_get(wm_job);
+
+  DRWShaderCompiler *comp = MEM_callocN(sizeof(DRWShaderCompiler), "DRWShaderCompiler");
+  BLI_spin_init(&comp->list_lock);
+  BLI_mutex_init(&comp->compilation_lock);
+
+  if (old_comp) {
+    BLI_spin_lock(&old_comp->list_lock);
+    BLI_movelisttolist(&comp->queue, &old_comp->queue);
+    BLI_spin_unlock(&old_comp->list_lock);
+    /* Do not recreate context, just pass ownership. */
+    if (old_comp->gl_context) {
+      comp->gl_context = old_comp->gl_context;
+      old_comp->own_context = false;
+      comp->own_context = true;
+    }
+  }
+
+  BLI_addtail(&comp->queue, dsh);
+
+  /* Create only one context. */
+  if (comp->gl_context == NULL) {
+    comp->gl_context = WM_opengl_context_create();
+    WM_opengl_context_activate(DST.gl_context);
+    comp->own_context = true;
+  }
+
+  WM_jobs_customdata_set(wm_job, comp, drw_deferred_shader_compilation_free);
+  WM_jobs_timer(wm_job, 0.1, NC_MATERIAL | ND_SHADING_DRAW, 0);
+  WM_jobs_callbacks(wm_job, drw_deferred_shader_compilation_exec, NULL, NULL, NULL);
+  WM_jobs_start(wm, wm_job);
 }
 void DRW_deferred_shader_remove(GPUMaterial *mat)
 {
-	Scene *scene = GPU_material_scene(mat);
-
-	for (wmWindowManager *wm = G_MAIN->wm.first; wm; wm = wm->id.next) {
-		if (WM_jobs_test(wm, scene, WM_JOB_TYPE_SHADER_COMPILATION) == false) {
-			/* No job running, do not create a new one by calling WM_jobs_get. */
-			continue;
-		}
-		for (wmWindow *win = wm->windows.first; win; win = win->next) {
-			wmJob *wm_job = WM_jobs_get(wm, win, scene, "Shaders Compilation",
-			                            WM_JOB_PROGRESS | WM_JOB_SUSPEND, WM_JOB_TYPE_SHADER_COMPILATION);
-
-			DRWShaderCompiler *comp = (DRWShaderCompiler *)WM_jobs_customdata_get(wm_job);
-			if (comp != NULL) {
-				BLI_spin_lock(&comp->list_lock);
-				DRWDeferredShader *dsh;
-				dsh = (DRWDeferredShader *)BLI_findptr(&comp->queue, mat, offsetof(DRWDeferredShader, mat));
-				if (dsh) {
-					BLI_remlink(&comp->queue, dsh);
-				}
-
-				/* Wait for compilation to finish */
-				if ((comp->mat_compiling != NULL) && (comp->mat_compiling->mat == mat)) {
-					BLI_mutex_lock(&comp->compilation_lock);
-					BLI_mutex_unlock(&comp->compilation_lock);
-				}
-
-				BLI_spin_unlock(&comp->list_lock);
-
-				if (dsh) {
-					drw_deferred_shader_free(dsh);
-				}
-			}
-		}
-	}
+  Scene *scene = GPU_material_scene(mat);
+
+  for (wmWindowManager *wm = G_MAIN->wm.first; wm; wm = wm->id.next) {
+    if (WM_jobs_test(wm, scene, WM_JOB_TYPE_SHADER_COMPILATION) == false) {
+      /* No job running, do not create a new one by calling WM_jobs_get. */
+      continue;
+    }
+    for (wmWindow *win = wm->windows.first; win; win = win->next) {
+      wmJob *wm_job = WM_jobs_get(wm,
+                                  win,
+                                  scene,
+                                  "Shaders Compilation",
+                                  WM_JOB_PROGRESS | WM_JOB_SUSPEND,
+                                  WM_JOB_TYPE_SHADER_COMPILATION);
+
+      DRWShaderCompiler *comp = (DRWShaderCompiler *)WM_jobs_customdata_get(wm_job);
+      if (comp != NULL) {
+        BLI_spin_lock(&comp->list_lock);
+        DRWDeferredShader *dsh;
+        dsh = (DRWDeferredShader *)BLI_findptr(
+            &comp->queue, mat, offsetof(DRWDeferredShader, mat));
+        if (dsh) {
+          BLI_remlink(&comp->queue, dsh);
+        }
+
+        /* Wait for compilation to finish */
+        if ((comp->mat_compiling != NULL) && (comp->mat_compiling->mat == mat)) {
+          BLI_mutex_lock(&comp->compilation_lock);
+          BLI_mutex_unlock(&comp->compilation_lock);
+        }
+
+        BLI_spin_unlock(&comp->list_lock);
+
+        if (dsh) {
+          drw_deferred_shader_free(dsh);
+        }
+      }
+    }
+  }
 }
 /** \} */

 /* -------------------------------------------------------------------- */

-GPUShader *DRW_shader_create(const char *vert, const char *geom, const char *frag, const char *defines)
+GPUShader *DRW_shader_create(const char *vert,
+                             const char *geom,
+                             const char *frag,
+                             const char *defines)
 {
-	return GPU_shader_create(vert, frag, geom, NULL, defines, __func__);
+  return GPU_shader_create(vert, frag, geom, NULL, defines, __func__);
 }
 GPUShader *DRW_shader_create_with_lib(
-        const char *vert, const char *geom, const char *frag, const char *lib, const char *defines)
+    const char *vert, const char *geom, const char *frag, const char *lib, const char *defines)
 {
-	GPUShader *sh;
-	char *vert_with_lib = NULL;
-	char *frag_with_lib = NULL;
-	char *geom_with_lib = NULL;
-
-	vert_with_lib = BLI_string_joinN(lib, vert);
-	frag_with_lib = BLI_string_joinN(lib, frag);
-	if (geom) {
-		geom_with_lib = BLI_string_joinN(lib, geom);
-	}
-
-	sh = GPU_shader_create(vert_with_lib, frag_with_lib, geom_with_lib, NULL, defines, __func__);
-
-	MEM_freeN(vert_with_lib);
-	MEM_freeN(frag_with_lib);
-	if (geom) {
-		MEM_freeN(geom_with_lib);
-	}
-
-	return sh;
+  GPUShader *sh;
+  char *vert_with_lib = NULL;
+  char *frag_with_lib = NULL;
+  char *geom_with_lib = NULL;
+
+  vert_with_lib = BLI_string_joinN(lib, vert);
+  frag_with_lib = BLI_string_joinN(lib, frag);
+  if (geom) {
+    geom_with_lib = BLI_string_joinN(lib, geom);
+  }
+
+  sh = GPU_shader_create(vert_with_lib, frag_with_lib, geom_with_lib, NULL, defines, __func__);
+
+  MEM_freeN(vert_with_lib);
+  MEM_freeN(frag_with_lib);
+  if (geom) {
+    MEM_freeN(geom_with_lib);
+  }
+
+  return sh;
 }
-GPUShader *DRW_shader_create_with_transform_feedback(
-        const char *vert, const char *geom, const char *defines,
-        const eGPUShaderTFBType prim_type, const char **varying_names, const int varying_count)
+GPUShader *DRW_shader_create_with_transform_feedback(const char *vert,
+                                                     const char *geom,
+                                                     const char *defines,
+                                                     const eGPUShaderTFBType prim_type,
+                                                     const char **varying_names,
+                                                     const int varying_count)
 {
-	return GPU_shader_create_ex(vert,
-	                            datatoc_gpu_shader_depth_only_frag_glsl,
-	                            geom, NULL, defines,
-	                            prim_type, varying_names, varying_count, __func__);
+  return GPU_shader_create_ex(vert,
+                              datatoc_gpu_shader_depth_only_frag_glsl,
+                              geom,
+                              NULL,
+                              defines,
+                              prim_type,
+                              varying_names,
+                              varying_count,
+                              __func__);
 }
 GPUShader *DRW_shader_create_2d(const char *frag, const char *defines)
 {
-	return GPU_shader_create(datatoc_gpu_shader_2D_vert_glsl, frag, NULL, NULL, defines, __func__);
+  return GPU_shader_create(datatoc_gpu_shader_2D_vert_glsl, frag, NULL, NULL, defines, __func__);
 }

 GPUShader *DRW_shader_create_3d(const char *frag, const char *defines)
 {
-	return GPU_shader_create(datatoc_gpu_shader_3D_vert_glsl, frag, NULL, NULL, defines, __func__);
+  return GPU_shader_create(datatoc_gpu_shader_3D_vert_glsl, frag, NULL, NULL, defines, __func__);
 }

 GPUShader *DRW_shader_create_fullscreen(const char *frag, const char *defines)
 {
-	return GPU_shader_create(datatoc_common_fullscreen_vert_glsl, frag, NULL, NULL, defines, __func__);
+  return GPU_shader_create(
+      datatoc_common_fullscreen_vert_glsl, frag, NULL, NULL, defines, __func__);
 }

 GPUShader *DRW_shader_create_3d_depth_only(eGPUShaderConfig sh_cfg)
 {
-	return GPU_shader_get_builtin_shader_with_config(GPU_SHADER_3D_DEPTH_ONLY, sh_cfg);
+  return GPU_shader_get_builtin_shader_with_config(GPU_SHADER_3D_DEPTH_ONLY, sh_cfg);
 }
-GPUMaterial *DRW_shader_find_from_world(World *wo, const void *engine_type, int options, bool deferred)
+GPUMaterial *DRW_shader_find_from_world(World *wo,
+                                        const void *engine_type,
+                                        int options,
+                                        bool deferred)
 {
-	GPUMaterial *mat = GPU_material_from_nodetree_find(&wo->gpumaterial, engine_type, options);
-	if (DRW_state_is_image_render() || !deferred) {
-		if (mat != NULL && GPU_material_status(mat) == GPU_MAT_QUEUED) {
-			/* XXX Hack : we return NULL so that the engine will call DRW_shader_create_from_XXX
-			 * with the shader code and we will resume the compilation from there. */
-			return NULL;
-		}
-	}
-	return mat;
+  GPUMaterial *mat = GPU_material_from_nodetree_find(&wo->gpumaterial, engine_type, options);
+  if (DRW_state_is_image_render() || !deferred) {
+    if (mat != NULL && GPU_material_status(mat) == GPU_MAT_QUEUED) {
+      /* XXX Hack : we return NULL so that the engine will call DRW_shader_create_from_XXX
+       * with the shader code and we will resume the compilation from there. */
+      return NULL;
+    }
+  }
+  return mat;
 }

-GPUMaterial *DRW_shader_find_from_material(Material *ma, const void *engine_type, int options, bool deferred)
+GPUMaterial *DRW_shader_find_from_material(Material *ma,
+                                           const void *engine_type,
+                                           int options,
+                                           bool deferred)
 {
-	GPUMaterial *mat = GPU_material_from_nodetree_find(&ma->gpumaterial, engine_type, options);
-	if (DRW_state_is_image_render() || !deferred) {
-		if (mat != NULL && GPU_material_status(mat) == GPU_MAT_QUEUED) {
-			/* XXX Hack : we return NULL so that the engine will call DRW_shader_create_from_XXX
-			 * with the shader code and we will resume the compilation from there. */
-			return NULL;
-		}
-	}
-	return mat;
+  GPUMaterial *mat = GPU_material_from_nodetree_find(&ma->gpumaterial, engine_type, options);
+  if (DRW_state_is_image_render() || !deferred) {
+    if (mat != NULL && GPU_material_status(mat) == GPU_MAT_QUEUED) {
+      /* XXX Hack : we return NULL so that the engine will call DRW_shader_create_from_XXX
+       * with the shader code and we will resume the compilation from there. */
+      return NULL;
+    }
+  }
+  return mat;
 }
-GPUMaterial *DRW_shader_create_from_world(
-        struct Scene *scene, World *wo, const void *engine_type, int options,
-        const char *vert, const char *geom, const char *frag_lib, const char *defines, bool deferred)
+GPUMaterial *DRW_shader_create_from_world(struct Scene *scene,
+                                          World *wo,
+                                          const void *engine_type,
+                                          int options,
+                                          const char *vert,
+                                          const char *geom,
+                                          const char *frag_lib,
+                                          const char *defines,
+                                          bool deferred)
 {
-	GPUMaterial *mat = NULL;
-	if (DRW_state_is_image_render()) {
-		mat = GPU_material_from_nodetree_find(&wo->gpumaterial, engine_type, options);
-	}
-
-	if (mat == NULL) {
-		scene = (Scene *)DEG_get_original_id(&DST.draw_ctx.scene->id);
-		mat = GPU_material_from_nodetree(
-		        scene, wo->nodetree, &wo->gpumaterial, engine_type, options,
-		        vert, geom, frag_lib, defines, wo->id.name);
-	}
-
-	if (GPU_material_status(mat) == GPU_MAT_QUEUED) {
-		drw_deferred_shader_add(mat, deferred);
-	}
-
-	return mat;
+  GPUMaterial *mat = NULL;
+  if (DRW_state_is_image_render()) {
+    mat = GPU_material_from_nodetree_find(&wo->gpumaterial, engine_type, options);
+  }
+
+  if (mat == NULL) {
+    scene = (Scene *)DEG_get_original_id(&DST.draw_ctx.scene->id);
+    mat = GPU_material_from_nodetree(scene,
+                                     wo->nodetree,
+                                     &wo->gpumaterial,
+                                     engine_type,
+                                     options,
+                                     vert,
+                                     geom,
+                                     frag_lib,
+                                     defines,
+                                     wo->id.name);
+  }
+
+  if (GPU_material_status(mat) == GPU_MAT_QUEUED) {
+    drw_deferred_shader_add(mat, deferred);
+  }
+
+  return mat;
 }
-GPUMaterial *DRW_shader_create_from_material(
-        struct Scene *scene, Material *ma, const void *engine_type, int options,
-        const char *vert, const char *geom, const char *frag_lib, const char *defines, bool deferred)
+GPUMaterial *DRW_shader_create_from_material(struct Scene *scene,
+                                             Material *ma,
+                                             const void *engine_type,
+                                             int options,
+                                             const char *vert,
+                                             const char *geom,
+                                             const char *frag_lib,
+                                             const char *defines,
+                                             bool deferred)
 {
-	GPUMaterial *mat = NULL;
-	if (DRW_state_is_image_render()) {
-		mat = GPU_material_from_nodetree_find(&ma->gpumaterial, engine_type, options);
-	}
-
-	if (mat == NULL) {
-		scene = (Scene *)DEG_get_original_id(&DST.draw_ctx.scene->id);
-		mat = GPU_material_from_nodetree(
-		        scene, ma->nodetree, &ma->gpumaterial, engine_type, options,
-		        vert, geom, frag_lib, defines, ma->id.name);
-	}
-
-	if (GPU_material_status(mat) == GPU_MAT_QUEUED) {
-		drw_deferred_shader_add(mat, deferred);
-	}
-
-	return mat;
+  GPUMaterial *mat = NULL;
+  if (DRW_state_is_image_render()) {
+    mat = GPU_material_from_nodetree_find(&ma->gpumaterial, engine_type, options);
+  }
+
+  if (mat == NULL) {
+    scene = (Scene *)DEG_get_original_id(&DST.draw_ctx.scene->id);
+    mat = GPU_material_from_nodetree(scene,
+                                     ma->nodetree,
+                                     &ma->gpumaterial,
+                                     engine_type,
+                                     options,
+                                     vert,
+                                     geom,
+                                     frag_lib,
+                                     defines,
+                                     ma->id.name);
+  }
+
+  if (GPU_material_status(mat) == GPU_MAT_QUEUED) {
+    drw_deferred_shader_add(mat, deferred);
+  }
+
+  return mat;
 }
 void DRW_shader_free(GPUShader *shader)
 {
-	GPU_shader_free(shader);
+  GPU_shader_free(shader);
 }