diff options
author | Clément Foucault <foucault.clem@gmail.com> | 2018-02-28 03:16:23 +0300 |
---|---|---|
committer | Clément Foucault <foucault.clem@gmail.com> | 2018-02-28 03:29:26 +0300 |
commit | 0df21e2504eeba504c201b4842415885ad5e8c91 (patch) | |
tree | ac838ce311ab94fffbad8fb6e7129a06eb255158 | |
parent | d5a55b69181a69d481b1e504f39926ec595dd6b8 (diff) |
DRW: Refactor & Split draw_manager.c into multiple files.
Refactor include:
- Removal of DRWInterface. (was useless)
- Split DRWCallHeader into a new struct DRWCallState that will be reused in the future.
- Use BLI_link_utils for APPEND/PREPEND.
- Creation of the new DRWManager struct type. This will enable us to create more than one manager in the future.
- Removal of some dead code.
-rw-r--r-- | source/blender/draw/CMakeLists.txt | 6 | ||||
-rw-r--r-- | source/blender/draw/intern/DRW_render.h | 2 | ||||
-rw-r--r-- | source/blender/draw/intern/draw_manager.c | 2613 | ||||
-rw-r--r-- | source/blender/draw/intern/draw_manager.h | 346 | ||||
-rw-r--r-- | source/blender/draw/intern/draw_manager_data.c | 867 | ||||
-rw-r--r-- | source/blender/draw/intern/draw_manager_exec.c | 747 | ||||
-rw-r--r-- | source/blender/draw/intern/draw_manager_framebuffer.c | 189 | ||||
-rw-r--r-- | source/blender/draw/intern/draw_manager_profiling.c | 125 | ||||
-rw-r--r-- | source/blender/draw/intern/draw_manager_shader.c | 90 | ||||
-rw-r--r-- | source/blender/draw/intern/draw_manager_texture.c | 213 |
10 files changed, 2662 insertions, 2536 deletions
diff --git a/source/blender/draw/CMakeLists.txt b/source/blender/draw/CMakeLists.txt index 51b3db695c2..ba2aa0448fd 100644 --- a/source/blender/draw/CMakeLists.txt +++ b/source/blender/draw/CMakeLists.txt @@ -64,7 +64,12 @@ set(SRC intern/draw_common.c intern/draw_instance_data.c intern/draw_manager.c + intern/draw_manager_data.c + intern/draw_manager_exec.c + intern/draw_manager_framebuffer.c + intern/draw_manager_shader.c intern/draw_manager_text.c + intern/draw_manager_texture.c intern/draw_manager_profiling.c intern/draw_view.c modes/edit_armature_mode.c @@ -108,6 +113,7 @@ set(SRC intern/draw_cache_impl.h intern/draw_common.h intern/draw_instance_data.h + intern/draw_manager.h intern/draw_manager_text.h intern/draw_manager_profiling.h intern/draw_view.h diff --git a/source/blender/draw/intern/DRW_render.h b/source/blender/draw/intern/DRW_render.h index 203fb80c0a0..2a119651ffd 100644 --- a/source/blender/draw/intern/DRW_render.h +++ b/source/blender/draw/intern/DRW_render.h @@ -288,7 +288,7 @@ typedef enum { DRW_STATE_CULL_BACK = (1 << 6), DRW_STATE_CULL_FRONT = (1 << 7), DRW_STATE_WIRE = (1 << 8), - DRW_STATE_WIRE_LARGE = (1 << 9), +// DRW_STATE_WIRE_LARGE = (1 << 9), /* Removed from ogl in 3.0 */ DRW_STATE_POINT = (1 << 10), DRW_STATE_STIPPLE_2 = (1 << 11), DRW_STATE_STIPPLE_3 = (1 << 12), diff --git a/source/blender/draw/intern/draw_manager.c b/source/blender/draw/intern/draw_manager.c index baa76518270..7e0536c0d25 100644 --- a/source/blender/draw/intern/draw_manager.c +++ b/source/blender/draw/intern/draw_manager.c @@ -29,46 +29,26 @@ #include "BLI_mempool.h" #include "BLI_rect.h" #include "BLI_string.h" -#include "BLI_string_utils.h" #include "BLI_threads.h" -#include "BIF_glutil.h" - -#include "BKE_curve.h" #include "BKE_global.h" #include "BKE_mesh.h" #include "BKE_object.h" -#include "BKE_pbvh.h" -#include "BKE_paint.h" #include "BKE_workspace.h" -#include "BLT_translation.h" -#include "BLF_api.h" - -#include "DRW_engine.h" -#include "DRW_render.h" - 
+#include "draw_manager.h" #include "DNA_camera_types.h" -#include "DNA_curve_types.h" -#include "DNA_view3d_types.h" -#include "DNA_screen_types.h" #include "DNA_mesh_types.h" #include "DNA_meshdata_types.h" -#include "DNA_meta_types.h" #include "ED_space_api.h" #include "ED_screen.h" +#include "ED_view3d.h" -#include "intern/gpu_codegen.h" -#include "GPU_batch.h" #include "GPU_draw.h" #include "GPU_extensions.h" #include "GPU_framebuffer.h" #include "GPU_immediate.h" -#include "GPU_lamp.h" -#include "GPU_material.h" -#include "GPU_shader.h" -#include "GPU_texture.h" #include "GPU_uniformbuffer.h" #include "GPU_viewport.h" #include "GPU_matrix.h" @@ -82,7 +62,6 @@ #include "UI_resources.h" #include "WM_api.h" -#include "WM_types.h" #include "wm_window.h" #include "draw_manager_text.h" @@ -91,8 +70,6 @@ /* only for callbacks */ #include "draw_cache_impl.h" -#include "draw_instance_data.h" - #include "draw_mode_engines.h" #include "engines/clay/clay_engine.h" #include "engines/eevee/eevee_engine.h" @@ -104,2066 +81,17 @@ #include "DEG_depsgraph.h" #include "DEG_depsgraph_query.h" -/* -------------------------------------------------------------------- */ -/** \name Local Features - * \{ */ - -#define USE_PROFILE - -#ifdef USE_PROFILE -# include "PIL_time.h" - -# define PROFILE_TIMER_FALLOFF 0.1 - -# define PROFILE_START(time_start) \ - double time_start = PIL_check_seconds_timer(); - -# define PROFILE_END_ACCUM(time_accum, time_start) { \ - time_accum += (PIL_check_seconds_timer() - time_start) * 1e3; \ -} ((void)0) - -/* exp average */ -# define PROFILE_END_UPDATE(time_update, time_start) { \ - double _time_delta = (PIL_check_seconds_timer() - time_start) * 1e3; \ - time_update = (time_update * (1.0 - PROFILE_TIMER_FALLOFF)) + \ - (_time_delta * PROFILE_TIMER_FALLOFF); \ -} ((void)0) - -#else /* USE_PROFILE */ - -# define PROFILE_START(time_start) ((void)0) -# define PROFILE_END_ACCUM(time_accum, time_start) ((void)0) -# define PROFILE_END_UPDATE(time_update, 
time_start) ((void)0) - -#endif /* USE_PROFILE */ - - -/* Use draw manager to call GPU_select, see: DRW_draw_select_loop */ -#define USE_GPU_SELECT - #ifdef USE_GPU_SELECT -# include "ED_view3d.h" -# include "ED_armature.h" # include "GPU_select.h" #endif -/** \} */ - - -#define MAX_ATTRIB_NAME 32 -#define MAX_ATTRIB_COUNT 6 /* Can be adjusted for more */ -#define MAX_PASS_NAME 32 -#define MAX_CLIP_PLANES 6 /* GL_MAX_CLIP_PLANES is at least 6 */ - -extern char datatoc_gpu_shader_2D_vert_glsl[]; -extern char datatoc_gpu_shader_3D_vert_glsl[]; -extern char datatoc_gpu_shader_fullscreen_vert_glsl[]; - -/* Prototypes. */ -static void drw_engines_enable_external(void); - -/* Structures */ -typedef enum { - DRW_UNIFORM_BOOL, - DRW_UNIFORM_SHORT_TO_INT, - DRW_UNIFORM_SHORT_TO_FLOAT, - DRW_UNIFORM_INT, - DRW_UNIFORM_FLOAT, - DRW_UNIFORM_TEXTURE, - DRW_UNIFORM_BUFFER, - DRW_UNIFORM_BLOCK -} DRWUniformType; - -#define MAX_UNIFORM_DATA_SIZE 16 - -struct DRWUniform { - struct DRWUniform *next; - const void *value; - int location; - char type; /* DRWUniformType */ - char length; /* cannot be more than 16 */ - char arraysize; /* cannot be more than 16 too */ -}; - -struct DRWInterface { - DRWUniform *uniforms; /* DRWUniform, single-linked list */ - /* Dynamic batch */ -#ifdef USE_GPU_SELECT - struct DRWInstanceData *inst_selectid; - /* Override for single object instances. 
*/ - int override_selectid; -#endif - Gwn_VertBuf *instance_vbo; - unsigned int instance_count; -#ifndef NDEBUG - char attribs_count; -#endif - /* matrices locations */ - int model; - int modelinverse; - int modelview; - int modelviewinverse; - int modelviewprojection; - int normalview; - int normalworld; - int orcotexfac; - int eye; - /* Matrices needed */ - uint16_t matflag; -}; - -struct DRWPass { - /* Single linked list with last member to append */ - DRWShadingGroup *shgroups; - DRWShadingGroup *shgroups_last; - - DRWState state; - char name[MAX_PASS_NAME]; -}; - -typedef struct DRWCallHeader { - void *prev; -#ifdef USE_GPU_SELECT - int select_id; -#endif - unsigned char type, state; - uint16_t matflag; - /* Culling: Using Bounding Sphere for now for faster culling. - * Not ideal for planes. */ - struct { - float loc[3], rad; /* Bypassed if radius is < 0.0. */ - } bsphere; - /* Matrices */ - float model[4][4]; - float modelinverse[4][4]; - float modelview[4][4]; - float modelviewinverse[4][4]; - float modelviewprojection[4][4]; - float normalview[3][3]; - float normalworld[3][3]; /* Not view dependant */ - float orcotexfac[2][3]; /* Not view dependant */ - float eyevec[3]; -} DRWCallHeader; - -typedef struct DRWCall { - DRWCallHeader head; - - Gwn_Batch *geometry; -} DRWCall; - -typedef struct DRWCallGenerate { - DRWCallHeader head; - - DRWCallGenerateFn *geometry_fn; - void *user_data; -} DRWCallGenerate; - -/* Used by DRWCall.flag */ -enum { - DRW_CALL_SINGLE, /* A single batch */ - DRW_CALL_GENERATE, /* Uses a callback to draw with any number of batches. 
*/ -}; - -/* Used by DRWCall.state */ -enum { - DRW_CALL_CULLED = (1 << 0), - DRW_CALL_NEGSCALE = (1 << 1), -}; - -/* Used by DRWCall.flag */ -enum { - DRW_CALL_MODELINVERSE = (1 << 0), - DRW_CALL_MODELVIEW = (1 << 1), - DRW_CALL_MODELVIEWINVERSE = (1 << 2), - DRW_CALL_MODELVIEWPROJECTION = (1 << 3), - DRW_CALL_NORMALVIEW = (1 << 4), - DRW_CALL_NORMALWORLD = (1 << 5), - DRW_CALL_ORCOTEXFAC = (1 << 6), - DRW_CALL_EYEVEC = (1 << 7), - /* 8 bit flag! */ -}; - -struct DRWShadingGroup { - struct DRWShadingGroup *next; -#ifdef USE_GPU_SELECT - /* backlink to pass we're in */ - DRWPass *pass_parent; -#endif - GPUShader *shader; /* Shader to bind */ - DRWInterface interface; /* Uniforms pointers */ - DRWState state_extra; /* State changes for this batch only (or'd with the pass's state) */ - DRWState state_extra_disable; /* State changes for this batch only (and'd with the pass's state) */ - unsigned int stencil_mask; /* Stencil mask to use for stencil test / write operations */ - int type; - - /* Watch this! Can be nasty for debugging. */ - union { - struct { /* DRW_SHG_NORMAL */ - void *calls; /* DRWCall or DRWCallDynamic depending of type */ - void *calls_first; /* To be able to traverse the list in the order of addition */ - }; - struct { /* DRW_SHG_***_BATCH */ - Gwn_Batch *batch_geom; /* Result of call batching */ - }; - struct { /* DRW_SHG_INSTANCE[_EXTERNAL] */ - Gwn_Batch *instance_geom; /* Geometry to instance */ - Gwn_Batch *instancing_geom;/* Instances attributes */ - float instance_orcofac[2][3]; /* TODO find a better place. */ - }; - }; -}; - -/* Used by DRWShadingGroup.type */ -enum { - DRW_SHG_NORMAL, - DRW_SHG_POINT_BATCH, - DRW_SHG_LINE_BATCH, - DRW_SHG_TRIANGLE_BATCH, - DRW_SHG_INSTANCE, - DRW_SHG_INSTANCE_EXTERNAL, -}; - /** Render State: No persistent data between draw calls. 
*/ -static struct DRWGlobalState { - /* Cache generation */ - ViewportMemoryPool *vmempool; - DRWUniform *last_uniform; - DRWCall *last_call; - DRWCallGenerate *last_callgenerate; - DRWShadingGroup *last_shgroup; - DRWInstanceDataList *idatalist; - DRWInstanceData *common_instance_data[MAX_INSTANCE_DATA_SIZE]; - - /* Rendering state */ - GPUShader *shader; - - /* Managed by `DRW_state_set`, `DRW_state_reset` */ - DRWState state; - unsigned int stencil_mask; - - /* Per viewport */ - GPUViewport *viewport; - struct GPUFrameBuffer *default_framebuffer; - float size[2]; - float screenvecs[2][3]; - float pixsize; - - GLenum backface, frontface; - - /* Clip planes */ - int num_clip_planes; - float clip_planes_eq[MAX_CLIP_PLANES][4]; - - struct { - unsigned int is_select : 1; - unsigned int is_depth : 1; - unsigned int is_image_render : 1; - unsigned int is_scene_render : 1; - unsigned int draw_background : 1; - } options; - - /* Current rendering context */ - DRWContextState draw_ctx; - - /* Convenience pointer to text_store owned by the viewport */ - struct DRWTextStore **text_store_p; - - ListBase enabled_engines; /* RenderEngineType */ - - bool buffer_finish_called; /* Avoid bad usage of DRW_render_instance_buffer_finish */ - - /* Profiling */ - double cache_time; -} DST = {NULL}; - -/** GPU Resource State: Memory storage between drawing. */ -static struct DRWResourceState { - GPUTexture **bound_texs; - - bool *bound_tex_slots; - - int bind_tex_inc; - int bind_ubo_inc; -} RST = {NULL}; - -static struct DRWMatrixOveride { - float original_mat[6][4][4]; - float mat[6][4][4]; - bool override[6]; -} viewport_matrices = {{{{0}}}}; - -/* TODO View Ubo */ -static float viewcamtexcofac[4] = {0}; +DRWManager DST = {NULL}; ListBase DRW_engines = {NULL, NULL}; -/* Unique ghost context used by the draw manager. */ -static void *g_ogl_context = NULL; -static Gwn_Context *g_gwn_context = NULL; - -/* Mutex to lock the drw manager and avoid concurent context usage. 
*/ -static ThreadMutex g_ogl_context_mutex = BLI_MUTEX_INITIALIZER; - -#ifdef USE_GPU_SELECT -static unsigned int g_DRW_select_id = (unsigned int)-1; - -void DRW_select_load_id(unsigned int id) -{ - BLI_assert(G.f & G_PICKSEL); - g_DRW_select_id = id; -} -#endif - - -/* -------------------------------------------------------------------- */ - -/** \name Textures (DRW_texture) - * \{ */ - -static void drw_texture_get_format( - DRWTextureFormat format, - GPUTextureFormat *r_data_type, int *r_channels) -{ - switch (format) { - case DRW_TEX_RGBA_8: *r_data_type = GPU_RGBA8; break; - case DRW_TEX_RGBA_16: *r_data_type = GPU_RGBA16F; break; - case DRW_TEX_RGB_16: *r_data_type = GPU_RGB16F; break; - case DRW_TEX_RGB_11_11_10: *r_data_type = GPU_R11F_G11F_B10F; break; - case DRW_TEX_RG_8: *r_data_type = GPU_RG8; break; - case DRW_TEX_RG_16: *r_data_type = GPU_RG16F; break; - case DRW_TEX_RG_16I: *r_data_type = GPU_RG16I; break; - case DRW_TEX_RG_32: *r_data_type = GPU_RG32F; break; - case DRW_TEX_R_8: *r_data_type = GPU_R8; break; - case DRW_TEX_R_16: *r_data_type = GPU_R16F; break; - case DRW_TEX_R_32: *r_data_type = GPU_R32F; break; -#if 0 - case DRW_TEX_RGBA_32: *r_data_type = GPU_RGBA32F; break; - case DRW_TEX_RGB_8: *r_data_type = GPU_RGB8; break; - case DRW_TEX_RGB_32: *r_data_type = GPU_RGB32F; break; -#endif - case DRW_TEX_DEPTH_16: *r_data_type = GPU_DEPTH_COMPONENT16; break; - case DRW_TEX_DEPTH_24: *r_data_type = GPU_DEPTH_COMPONENT24; break; - case DRW_TEX_DEPTH_24_STENCIL_8: *r_data_type = GPU_DEPTH24_STENCIL8; break; - case DRW_TEX_DEPTH_32: *r_data_type = GPU_DEPTH_COMPONENT32F; break; - default : - /* file type not supported you must uncomment it from above */ - BLI_assert(false); - break; - } - - switch (format) { - case DRW_TEX_RGBA_8: - case DRW_TEX_RGBA_16: - case DRW_TEX_RGBA_32: - *r_channels = 4; - break; - case DRW_TEX_RGB_8: - case DRW_TEX_RGB_16: - case DRW_TEX_RGB_32: - case DRW_TEX_RGB_11_11_10: - *r_channels = 3; - break; - case DRW_TEX_RG_8: - 
case DRW_TEX_RG_16: - case DRW_TEX_RG_16I: - case DRW_TEX_RG_32: - *r_channels = 2; - break; - default: - *r_channels = 1; - break; - } -} - -static void drw_texture_set_parameters(GPUTexture *tex, DRWTextureFlag flags) -{ - GPU_texture_bind(tex, 0); - if (flags & DRW_TEX_MIPMAP) { - GPU_texture_mipmap_mode(tex, true, flags & DRW_TEX_FILTER); - DRW_texture_generate_mipmaps(tex); - } - else { - GPU_texture_filter_mode(tex, flags & DRW_TEX_FILTER); - } - GPU_texture_wrap_mode(tex, flags & DRW_TEX_WRAP); - GPU_texture_compare_mode(tex, flags & DRW_TEX_COMPARE); - GPU_texture_unbind(tex); -} - -GPUTexture *DRW_texture_create_1D(int w, DRWTextureFormat format, DRWTextureFlag flags, const float *fpixels) -{ - GPUTexture *tex; - GPUTextureFormat data_type; - int channels; - - drw_texture_get_format(format, &data_type, &channels); - tex = GPU_texture_create_1D_custom(w, channels, data_type, fpixels, NULL); - drw_texture_set_parameters(tex, flags); - - return tex; -} - -GPUTexture *DRW_texture_create_2D(int w, int h, DRWTextureFormat format, DRWTextureFlag flags, const float *fpixels) -{ - GPUTexture *tex; - GPUTextureFormat data_type; - int channels; - - drw_texture_get_format(format, &data_type, &channels); - tex = GPU_texture_create_2D_custom(w, h, channels, data_type, fpixels, NULL); - drw_texture_set_parameters(tex, flags); - - return tex; -} - -GPUTexture *DRW_texture_create_2D_array( - int w, int h, int d, DRWTextureFormat format, DRWTextureFlag flags, const float *fpixels) -{ - GPUTexture *tex; - GPUTextureFormat data_type; - int channels; - - drw_texture_get_format(format, &data_type, &channels); - tex = GPU_texture_create_2D_array_custom(w, h, d, channels, data_type, fpixels, NULL); - drw_texture_set_parameters(tex, flags); - - return tex; -} - -GPUTexture *DRW_texture_create_3D( - int w, int h, int d, DRWTextureFormat format, DRWTextureFlag flags, const float *fpixels) -{ - GPUTexture *tex; - GPUTextureFormat data_type; - int channels; - - 
drw_texture_get_format(format, &data_type, &channels); - tex = GPU_texture_create_3D_custom(w, h, d, channels, data_type, fpixels, NULL); - drw_texture_set_parameters(tex, flags); - - return tex; -} - -GPUTexture *DRW_texture_create_cube(int w, DRWTextureFormat format, DRWTextureFlag flags, const float *fpixels) -{ - GPUTexture *tex; - GPUTextureFormat data_type; - int channels; - - drw_texture_get_format(format, &data_type, &channels); - tex = GPU_texture_create_cube_custom(w, channels, data_type, fpixels, NULL); - drw_texture_set_parameters(tex, flags); - - return tex; -} - -void DRW_texture_generate_mipmaps(GPUTexture *tex) -{ - GPU_texture_bind(tex, 0); - GPU_texture_generate_mipmap(tex); - GPU_texture_unbind(tex); -} - -void DRW_texture_update(GPUTexture *tex, const float *pixels) -{ - GPU_texture_update(tex, pixels); -} - -void DRW_texture_free(GPUTexture *tex) -{ - GPU_texture_free(tex); -} - -/** \} */ - - -/* -------------------------------------------------------------------- */ - -/** \name Uniform Buffer Object (DRW_uniformbuffer) - * \{ */ - -GPUUniformBuffer *DRW_uniformbuffer_create(int size, const void *data) -{ - return GPU_uniformbuffer_create(size, data, NULL); -} - -void DRW_uniformbuffer_update(GPUUniformBuffer *ubo, const void *data) -{ - GPU_uniformbuffer_update(ubo, data); -} - -void DRW_uniformbuffer_free(GPUUniformBuffer *ubo) -{ - GPU_uniformbuffer_free(ubo); -} - -/** \} */ - - -/* -------------------------------------------------------------------- */ - -/** \name Shaders (DRW_shader) - * \{ */ - -GPUShader *DRW_shader_create(const char *vert, const char *geom, const char *frag, const char *defines) -{ - return GPU_shader_create(vert, frag, geom, NULL, defines); -} - -GPUShader *DRW_shader_create_with_lib( - const char *vert, const char *geom, const char *frag, const char *lib, const char *defines) -{ - GPUShader *sh; - char *vert_with_lib = NULL; - char *frag_with_lib = NULL; - char *geom_with_lib = NULL; - - vert_with_lib = 
BLI_string_joinN(lib, vert); - frag_with_lib = BLI_string_joinN(lib, frag); - - if (geom) { - geom_with_lib = BLI_string_joinN(lib, geom); - } - - sh = GPU_shader_create(vert_with_lib, frag_with_lib, geom_with_lib, NULL, defines); - - MEM_freeN(vert_with_lib); - MEM_freeN(frag_with_lib); - if (geom) { - MEM_freeN(geom_with_lib); - } - - return sh; -} - -GPUShader *DRW_shader_create_2D(const char *frag, const char *defines) -{ - return GPU_shader_create(datatoc_gpu_shader_2D_vert_glsl, frag, NULL, NULL, defines); -} - -GPUShader *DRW_shader_create_3D(const char *frag, const char *defines) -{ - return GPU_shader_create(datatoc_gpu_shader_3D_vert_glsl, frag, NULL, NULL, defines); -} - -GPUShader *DRW_shader_create_fullscreen(const char *frag, const char *defines) -{ - return GPU_shader_create(datatoc_gpu_shader_fullscreen_vert_glsl, frag, NULL, NULL, defines); -} - -GPUShader *DRW_shader_create_3D_depth_only(void) -{ - return GPU_shader_get_builtin_shader(GPU_SHADER_3D_DEPTH_ONLY); -} - -void DRW_shader_free(GPUShader *shader) -{ - GPU_shader_free(shader); -} - -/** \} */ - - -/* -------------------------------------------------------------------- */ - -/** \name Interface (DRW_interface) - * \{ */ - -static void drw_interface_builtin_uniform( - DRWShadingGroup *shgroup, int builtin, const void *value, int length, int arraysize) -{ - int loc = GPU_shader_get_builtin_uniform(shgroup->shader, builtin); - - if (loc == -1) - return; - - DRWUniform *uni = BLI_mempool_alloc(DST.vmempool->uniforms); - uni->location = loc; - uni->type = DRW_UNIFORM_FLOAT; - uni->value = value; - uni->length = length; - uni->arraysize = arraysize; - - /* Prepend */ - uni->next = shgroup->interface.uniforms; - shgroup->interface.uniforms = uni; -} - -static void drw_interface_init(DRWShadingGroup *shgroup, GPUShader *shader) -{ - DRWInterface *interface = &shgroup->interface; - interface->model = GPU_shader_get_builtin_uniform(shader, GWN_UNIFORM_MODEL); - interface->modelinverse = 
GPU_shader_get_builtin_uniform(shader, GWN_UNIFORM_MODEL_INV); - interface->modelview = GPU_shader_get_builtin_uniform(shader, GWN_UNIFORM_MODELVIEW); - interface->modelviewinverse = GPU_shader_get_builtin_uniform(shader, GWN_UNIFORM_MODELVIEW_INV); - interface->modelviewprojection = GPU_shader_get_builtin_uniform(shader, GWN_UNIFORM_MVP); - interface->normalview = GPU_shader_get_builtin_uniform(shader, GWN_UNIFORM_NORMAL); - interface->normalworld = GPU_shader_get_builtin_uniform(shader, GWN_UNIFORM_WORLDNORMAL); - interface->orcotexfac = GPU_shader_get_builtin_uniform(shader, GWN_UNIFORM_ORCO); - interface->eye = GPU_shader_get_builtin_uniform(shader, GWN_UNIFORM_EYE); - - interface->matflag = 0; - if (interface->modelinverse > -1) - interface->matflag |= DRW_CALL_MODELINVERSE; - if (interface->modelview > -1) - interface->matflag |= DRW_CALL_MODELVIEW; - if (interface->modelviewinverse > -1) - interface->matflag |= DRW_CALL_MODELVIEWINVERSE; - if (interface->modelviewprojection > -1) - interface->matflag |= DRW_CALL_MODELVIEWPROJECTION; - if (interface->normalview > -1) - interface->matflag |= DRW_CALL_NORMALVIEW; - if (interface->normalworld > -1) - interface->matflag |= DRW_CALL_NORMALWORLD; - if (interface->orcotexfac > -1) - interface->matflag |= DRW_CALL_ORCOTEXFAC; - if (interface->eye > -1) - interface->matflag |= DRW_CALL_EYEVEC; - - interface->instance_count = 0; -#ifndef NDEBUG - interface->attribs_count = 0; -#endif - interface->uniforms = NULL; -#ifdef USE_GPU_SELECT - interface->inst_selectid = NULL; - interface->override_selectid = -1; -#endif - - /* TODO : They should be grouped inside a UBO updated once per redraw. 
*/ - drw_interface_builtin_uniform(shgroup, GWN_UNIFORM_VIEW, viewport_matrices.mat[DRW_MAT_VIEW], 16, 1); - drw_interface_builtin_uniform(shgroup, GWN_UNIFORM_VIEW_INV, viewport_matrices.mat[DRW_MAT_VIEWINV], 16, 1); - drw_interface_builtin_uniform(shgroup, GWN_UNIFORM_VIEWPROJECTION, viewport_matrices.mat[DRW_MAT_PERS], 16, 1); - drw_interface_builtin_uniform(shgroup, GWN_UNIFORM_VIEWPROJECTION_INV, viewport_matrices.mat[DRW_MAT_PERSINV], 16, 1); - drw_interface_builtin_uniform(shgroup, GWN_UNIFORM_PROJECTION, viewport_matrices.mat[DRW_MAT_WIN], 16, 1); - drw_interface_builtin_uniform(shgroup, GWN_UNIFORM_PROJECTION_INV, viewport_matrices.mat[DRW_MAT_WININV], 16, 1); - drw_interface_builtin_uniform(shgroup, GWN_UNIFORM_CAMERATEXCO, viewcamtexcofac, 3, 2); - drw_interface_builtin_uniform(shgroup, GWN_UNIFORM_CLIPPLANES, DST.clip_planes_eq, 4, 1); /* TO REMOVE */ -} - -static void drw_interface_instance_init( - DRWShadingGroup *shgroup, GPUShader *shader, Gwn_Batch *batch, Gwn_VertFormat *format) -{ - DRWInterface *interface = &shgroup->interface; - drw_interface_init(shgroup, shader); - -#ifndef NDEBUG - interface->attribs_count = (format != NULL) ? format->attrib_ct : 0; -#endif - BLI_assert(shgroup->type == DRW_SHG_INSTANCE); - BLI_assert(shgroup->instance_geom != NULL); - - if (format != NULL) { - DRW_instancing_buffer_request(DST.idatalist, format, batch, shgroup, - &shgroup->instancing_geom, &interface->instance_vbo); - } -} - -static void drw_interface_batching_init( - DRWShadingGroup *shgroup, GPUShader *shader, Gwn_VertFormat *format) -{ - DRWInterface *interface = &shgroup->interface; - drw_interface_init(shgroup, shader); - -#ifndef NDEBUG - interface->attribs_count = (format != NULL) ? 
format->attrib_ct : 0; -#endif - BLI_assert(format != NULL); - - Gwn_PrimType type; - switch (shgroup->type) { - case DRW_SHG_POINT_BATCH: type = GWN_PRIM_POINTS; break; - case DRW_SHG_LINE_BATCH: type = GWN_PRIM_LINES; break; - case DRW_SHG_TRIANGLE_BATCH: type = GWN_PRIM_TRIS; break; - default: - BLI_assert(0); - } - - DRW_batching_buffer_request(DST.idatalist, format, type, shgroup, - &shgroup->batch_geom, &interface->instance_vbo); -} - -static void drw_interface_uniform(DRWShadingGroup *shgroup, const char *name, - DRWUniformType type, const void *value, int length, int arraysize) -{ - int location; - if (type == DRW_UNIFORM_BLOCK) { - location = GPU_shader_get_uniform_block(shgroup->shader, name); - } - else { - location = GPU_shader_get_uniform(shgroup->shader, name); - } - - if (location == -1) { - if (G.debug & G_DEBUG) - fprintf(stderr, "Uniform '%s' not found!\n", name); - /* Nice to enable eventually, for now eevee uses uniforms that might not exist. */ - // BLI_assert(0); - return; - } - - DRWUniform *uni = BLI_mempool_alloc(DST.vmempool->uniforms); - - BLI_assert(arraysize > 0 && arraysize <= 16); - BLI_assert(arraysize * length <= MAX_UNIFORM_DATA_SIZE); - - uni->location = location; - uni->type = type; - uni->value = value; - uni->length = length; - uni->arraysize = arraysize; - - /* Prepend */ - uni->next = shgroup->interface.uniforms; - shgroup->interface.uniforms = uni; -} - -Gwn_VertFormat *DRW_shgroup_instance_format_array(const DRWInstanceAttribFormat attribs[], int arraysize) -{ - Gwn_VertFormat *format = MEM_callocN(sizeof(Gwn_VertFormat), "Gwn_VertFormat"); - - for (int i = 0; i < arraysize; ++i) { - GWN_vertformat_attr_add(format, attribs[i].name, - (attribs[i].type == DRW_ATTRIB_INT) ? GWN_COMP_I32 : GWN_COMP_F32, - attribs[i].components, - (attribs[i].type == DRW_ATTRIB_INT) ? 
GWN_FETCH_INT : GWN_FETCH_FLOAT); - } - return format; -} - -/** \} */ - - -/* -------------------------------------------------------------------- */ - -/** \name Shading Group (DRW_shgroup) - * \{ */ - -static DRWShadingGroup *drw_shgroup_create_ex(struct GPUShader *shader, DRWPass *pass) -{ - DRWShadingGroup *shgroup = BLI_mempool_alloc(DST.vmempool->shgroups); - - /* Append */ - if (pass->shgroups != NULL) { - pass->shgroups_last->next = shgroup; - } - else { - pass->shgroups = shgroup; - } - pass->shgroups_last = shgroup; - shgroup->next = NULL; - shgroup->type = DRW_SHG_NORMAL; - shgroup->shader = shader; - shgroup->state_extra = 0; - shgroup->state_extra_disable = ~0x0; - shgroup->stencil_mask = 0; -#if 0 /* All the same in the union! */ - shgroup->batch_geom = NULL; - - shgroup->instancing_geom = NULL; - shgroup->instance_geom = NULL; -#endif - shgroup->calls = NULL; - shgroup->calls_first = NULL; - -#ifdef USE_GPU_SELECT - shgroup->pass_parent = pass; -#endif - - return shgroup; -} - -static DRWShadingGroup *drw_shgroup_material_create_ex(GPUPass *gpupass, DRWPass *pass) -{ - if (!gpupass) { - /* Shader compilation error */ - return NULL; - } - - DRWShadingGroup *grp = drw_shgroup_create_ex(GPU_pass_shader(gpupass), pass); - return grp; -} - -static DRWShadingGroup *drw_shgroup_material_inputs( - DRWShadingGroup *grp, struct GPUMaterial *material, GPUPass *gpupass) -{ - /* TODO : Ideally we should not convert. But since the whole codegen - * is relying on GPUPass we keep it as is for now. 
*/ - - /* Converting dynamic GPUInput to DRWUniform */ - ListBase *inputs = &gpupass->inputs; - - for (GPUInput *input = inputs->first; input; input = input->next) { - /* Textures */ - if (input->ima) { - double time = 0.0; /* TODO make time variable */ - GPUTexture *tex = GPU_texture_from_blender( - input->ima, input->iuser, input->textarget, input->image_isdata, time, 1); - - if (input->bindtex) { - DRW_shgroup_uniform_texture(grp, input->shadername, tex); - } - } - /* Color Ramps */ - else if (input->tex) { - DRW_shgroup_uniform_texture(grp, input->shadername, input->tex); - } - /* Floats */ - else { - switch (input->type) { - case GPU_FLOAT: - case GPU_VEC2: - case GPU_VEC3: - case GPU_VEC4: - /* Should already be in the material ubo. */ - break; - case GPU_MAT3: - DRW_shgroup_uniform_mat3(grp, input->shadername, (float *)input->dynamicvec); - break; - case GPU_MAT4: - DRW_shgroup_uniform_mat4(grp, input->shadername, (float *)input->dynamicvec); - break; - default: - break; - } - } - } - - GPUUniformBuffer *ubo = GPU_material_get_uniform_buffer(material); - if (ubo != NULL) { - DRW_shgroup_uniform_block(grp, GPU_UBO_BLOCK_NAME, ubo); - } - - return grp; -} - -DRWShadingGroup *DRW_shgroup_material_create( - struct GPUMaterial *material, DRWPass *pass) -{ - GPUPass *gpupass = GPU_material_get_pass(material); - DRWShadingGroup *shgroup = drw_shgroup_material_create_ex(gpupass, pass); - - if (shgroup) { - drw_interface_init(shgroup, GPU_pass_shader(gpupass)); - drw_shgroup_material_inputs(shgroup, material, gpupass); - } - - return shgroup; -} - -static void drw_call_calc_orco(ID *ob_data, float (*r_orcofacs)[3]) -{ - float *texcoloc = NULL; - float *texcosize = NULL; - if (ob_data != NULL) { - switch (GS(ob_data->name)) { - case ID_ME: - BKE_mesh_texspace_get_reference((Mesh *)ob_data, NULL, &texcoloc, NULL, &texcosize); - break; - case ID_CU: - { - Curve *cu = (Curve *)ob_data; - if (cu->bb == NULL || (cu->bb->flag & BOUNDBOX_DIRTY)) { - 
BKE_curve_texspace_calc(cu); - } - texcoloc = cu->loc; - texcosize = cu->size; - break; - } - case ID_MB: - { - MetaBall *mb = (MetaBall *)ob_data; - texcoloc = mb->loc; - texcosize = mb->size; - break; - } - default: - break; - } - } - - if ((texcoloc != NULL) && (texcosize != NULL)) { - mul_v3_v3fl(r_orcofacs[1], texcosize, 2.0f); - invert_v3(r_orcofacs[1]); - sub_v3_v3v3(r_orcofacs[0], texcoloc, texcosize); - negate_v3(r_orcofacs[0]); - mul_v3_v3(r_orcofacs[0], r_orcofacs[1]); /* result in a nice MADD in the shader */ - } - else { - copy_v3_fl(r_orcofacs[0], 0.0f); - copy_v3_fl(r_orcofacs[1], 1.0f); - } -} - -DRWShadingGroup *DRW_shgroup_material_instance_create( - struct GPUMaterial *material, DRWPass *pass, Gwn_Batch *geom, Object *ob, Gwn_VertFormat *format) -{ - GPUPass *gpupass = GPU_material_get_pass(material); - DRWShadingGroup *shgroup = drw_shgroup_material_create_ex(gpupass, pass); - - if (shgroup) { - shgroup->type = DRW_SHG_INSTANCE; - shgroup->instance_geom = geom; - drw_call_calc_orco(ob->data, shgroup->instance_orcofac); - drw_interface_instance_init(shgroup, GPU_pass_shader(gpupass), geom, format); - drw_shgroup_material_inputs(shgroup, material, gpupass); - } - - return shgroup; -} - -DRWShadingGroup *DRW_shgroup_material_empty_tri_batch_create( - struct GPUMaterial *material, DRWPass *pass, int tri_count) -{ -#ifdef USE_GPU_SELECT - BLI_assert((G.f & G_PICKSEL) == 0); -#endif - GPUPass *gpupass = GPU_material_get_pass(material); - DRWShadingGroup *shgroup = drw_shgroup_material_create_ex(gpupass, pass); - - if (shgroup) { - /* Calling drw_interface_init will cause it to call GWN_draw_primitive(). 
*/ - drw_interface_init(shgroup, GPU_pass_shader(gpupass)); - shgroup->type = DRW_SHG_TRIANGLE_BATCH; - shgroup->interface.instance_count = tri_count * 3; - drw_shgroup_material_inputs(shgroup, material, gpupass); - } - - return shgroup; -} - -DRWShadingGroup *DRW_shgroup_create(struct GPUShader *shader, DRWPass *pass) -{ - DRWShadingGroup *shgroup = drw_shgroup_create_ex(shader, pass); - drw_interface_init(shgroup, shader); - return shgroup; -} - -DRWShadingGroup *DRW_shgroup_instance_create( - struct GPUShader *shader, DRWPass *pass, Gwn_Batch *geom, Gwn_VertFormat *format) -{ - DRWShadingGroup *shgroup = drw_shgroup_create_ex(shader, pass); - shgroup->type = DRW_SHG_INSTANCE; - shgroup->instance_geom = geom; - drw_call_calc_orco(NULL, shgroup->instance_orcofac); - drw_interface_instance_init(shgroup, shader, geom, format); - - return shgroup; -} - -static Gwn_VertFormat *g_pos_format = NULL; - -DRWShadingGroup *DRW_shgroup_point_batch_create(struct GPUShader *shader, DRWPass *pass) -{ - DRW_shgroup_instance_format(g_pos_format, {{"pos", DRW_ATTRIB_FLOAT, 3}}); - - DRWShadingGroup *shgroup = drw_shgroup_create_ex(shader, pass); - shgroup->type = DRW_SHG_POINT_BATCH; - - drw_interface_batching_init(shgroup, shader, g_pos_format); - - return shgroup; -} - -DRWShadingGroup *DRW_shgroup_line_batch_create(struct GPUShader *shader, DRWPass *pass) -{ - DRW_shgroup_instance_format(g_pos_format, {{"pos", DRW_ATTRIB_FLOAT, 3}}); - - DRWShadingGroup *shgroup = drw_shgroup_create_ex(shader, pass); - shgroup->type = DRW_SHG_LINE_BATCH; - - drw_interface_batching_init(shgroup, shader, g_pos_format); - - return shgroup; -} - -/* Very special batch. 
Use this if you position - * your vertices with the vertex shader - * and dont need any VBO attrib */ -DRWShadingGroup *DRW_shgroup_empty_tri_batch_create(struct GPUShader *shader, DRWPass *pass, int tri_count) -{ -#ifdef USE_GPU_SELECT - BLI_assert((G.f & G_PICKSEL) == 0); -#endif - DRWShadingGroup *shgroup = drw_shgroup_create_ex(shader, pass); - - /* Calling drw_interface_init will cause it to call GWN_draw_primitive(). */ - drw_interface_init(shgroup, shader); - - shgroup->type = DRW_SHG_TRIANGLE_BATCH; - shgroup->interface.instance_count = tri_count * 3; - - return shgroup; -} - -void DRW_shgroup_free(struct DRWShadingGroup *UNUSED(shgroup)) -{ - return; -} - -#define CALL_PREPEND(shgroup, call) { \ - if (shgroup->calls == NULL) { \ - shgroup->calls = call; \ - shgroup->calls_first = call; \ - } \ - else { \ - ((DRWCall *)(shgroup->calls))->head.prev = call; \ - shgroup->calls = call; \ - } \ - call->head.prev = NULL; \ -} ((void)0) - -/* Specify an external batch instead of adding each attrib one by one. */ -void DRW_shgroup_instance_batch(DRWShadingGroup *shgroup, struct Gwn_Batch *batch) -{ - BLI_assert(shgroup->type == DRW_SHG_INSTANCE); - BLI_assert(shgroup->interface.instance_count == 0); - /* You cannot use external instancing batch without a dummy format. */ - BLI_assert(shgroup->instancing_geom != NULL); - - shgroup->type = DRW_SHG_INSTANCE_EXTERNAL; - drw_call_calc_orco(NULL, shgroup->instance_orcofac); - /* PERF : This destroys the vaos cache so better check if it's necessary. */ - /* Note: This WILL break if batch->verts[0] is destroyed and reallocated - * at the same adress. Bindings/VAOs would remain obsolete. 
*/ - //if (shgroup->instancing_geom->inst != batch->verts[0]) - GWN_batch_instbuf_set(shgroup->instancing_geom, batch->verts[0], false); - -#ifdef USE_GPU_SELECT - shgroup->interface.override_selectid = g_DRW_select_id; -#endif -} - -static void drw_call_set_matrices(DRWCallHeader *head, float (*obmat)[4], ID *ob_data) -{ - /* Matrices */ - if (obmat != NULL) { - copy_m4_m4(head->model, obmat); - - if (is_negative_m4(head->model)) { - head->matflag |= DRW_CALL_NEGSCALE; - } - } - else { - unit_m4(head->model); - } - - /* Orco factors */ - if ((head->matflag & DRW_CALL_ORCOTEXFAC) != 0) { - drw_call_calc_orco(ob_data, head->orcotexfac); - head->matflag &= ~DRW_CALL_ORCOTEXFAC; - } - - /* TODO Set culling bsphere IF needed by the DRWPass */ - head->bsphere.rad = -1.0f; -} - -void DRW_shgroup_call_add(DRWShadingGroup *shgroup, Gwn_Batch *geom, float (*obmat)[4]) -{ - BLI_assert(geom != NULL); - BLI_assert(shgroup->type == DRW_SHG_NORMAL); - - DRWCall *call = BLI_mempool_alloc(DST.vmempool->calls); - call->head.type = DRW_CALL_SINGLE; - call->head.state = 0; - call->head.matflag = shgroup->interface.matflag; -#ifdef USE_GPU_SELECT - call->head.select_id = g_DRW_select_id; -#endif - call->geometry = geom; - drw_call_set_matrices(&call->head, obmat, NULL); - CALL_PREPEND(shgroup, call); -} - -void DRW_shgroup_call_object_add(DRWShadingGroup *shgroup, Gwn_Batch *geom, Object *ob) -{ - BLI_assert(geom != NULL); - BLI_assert(shgroup->type == DRW_SHG_NORMAL); - - DRWCall *call = BLI_mempool_alloc(DST.vmempool->calls); - call->head.type = DRW_CALL_SINGLE; - call->head.state = 0; - call->head.matflag = shgroup->interface.matflag; -#ifdef USE_GPU_SELECT - call->head.select_id = g_DRW_select_id; -#endif - call->geometry = geom; - drw_call_set_matrices(&call->head, ob->obmat, ob->data); - CALL_PREPEND(shgroup, call); -} - -void DRW_shgroup_call_generate_add( - DRWShadingGroup *shgroup, - DRWCallGenerateFn *geometry_fn, void *user_data, - float (*obmat)[4]) -{ - 
BLI_assert(geometry_fn != NULL); - BLI_assert(shgroup->type == DRW_SHG_NORMAL); - - DRWCallGenerate *call = BLI_mempool_alloc(DST.vmempool->calls_generate); - call->head.type = DRW_CALL_GENERATE; - call->head.state = 0; - call->head.matflag = shgroup->interface.matflag; -#ifdef USE_GPU_SELECT - call->head.select_id = g_DRW_select_id; -#endif - call->geometry_fn = geometry_fn; - call->user_data = user_data; - drw_call_set_matrices(&call->head, obmat, NULL); - CALL_PREPEND(shgroup, call); -} - -static void sculpt_draw_cb( - DRWShadingGroup *shgroup, - void (*draw_fn)(DRWShadingGroup *shgroup, Gwn_Batch *geom), - void *user_data) -{ - Object *ob = user_data; - PBVH *pbvh = ob->sculpt->pbvh; - - if (pbvh) { - BKE_pbvh_draw_cb( - pbvh, NULL, NULL, false, - (void (*)(void *, Gwn_Batch *))draw_fn, shgroup); - } -} - -void DRW_shgroup_call_sculpt_add(DRWShadingGroup *shgroup, Object *ob, float (*obmat)[4]) -{ - DRW_shgroup_call_generate_add(shgroup, sculpt_draw_cb, ob, obmat); -} - -void DRW_shgroup_call_dynamic_add_array(DRWShadingGroup *shgroup, const void *attr[], unsigned int attr_len) -{ - DRWInterface *interface = &shgroup->interface; - -#ifdef USE_GPU_SELECT - if (G.f & G_PICKSEL) { - if (interface->inst_selectid == NULL) { - interface->inst_selectid = DRW_instance_data_request(DST.idatalist, 1, 128); - } - - int *select_id = DRW_instance_data_next(interface->inst_selectid); - *select_id = g_DRW_select_id; - } -#endif - - BLI_assert(attr_len == interface->attribs_count); - UNUSED_VARS_NDEBUG(attr_len); - - for (int i = 0; i < attr_len; ++i) { - if (interface->instance_count == interface->instance_vbo->vertex_ct) { - GWN_vertbuf_data_resize(interface->instance_vbo, interface->instance_count + 32); - } - GWN_vertbuf_attr_set(interface->instance_vbo, i, interface->instance_count, attr[i]); - } - - interface->instance_count += 1; -} - -/* Used for instancing with no attributes */ -void DRW_shgroup_set_instance_count(DRWShadingGroup *shgroup, unsigned int count) -{ - 
DRWInterface *interface = &shgroup->interface; - - BLI_assert(interface->instance_count == 0); - BLI_assert(interface->attribs_count == 0); - -#ifdef USE_GPU_SELECT - if (G.f & G_PICKSEL) { - interface->override_selectid = g_DRW_select_id; - } -#endif - - interface->instance_count = count; -} - -unsigned int DRW_shgroup_get_instance_count(const DRWShadingGroup *shgroup) -{ - return shgroup->interface.instance_count; -} - -/** - * State is added to #Pass.state while drawing. - * Use to temporarily enable draw options. - */ -void DRW_shgroup_state_enable(DRWShadingGroup *shgroup, DRWState state) -{ - shgroup->state_extra |= state; -} - -void DRW_shgroup_state_disable(DRWShadingGroup *shgroup, DRWState state) -{ - shgroup->state_extra_disable &= ~state; -} - -void DRW_shgroup_stencil_mask(DRWShadingGroup *shgroup, unsigned int mask) -{ - BLI_assert(mask <= 255); - shgroup->stencil_mask = mask; -} - -void DRW_shgroup_uniform_texture(DRWShadingGroup *shgroup, const char *name, const GPUTexture *tex) -{ - drw_interface_uniform(shgroup, name, DRW_UNIFORM_TEXTURE, tex, 0, 1); -} - -void DRW_shgroup_uniform_block(DRWShadingGroup *shgroup, const char *name, const GPUUniformBuffer *ubo) -{ - drw_interface_uniform(shgroup, name, DRW_UNIFORM_BLOCK, ubo, 0, 1); -} - -void DRW_shgroup_uniform_buffer(DRWShadingGroup *shgroup, const char *name, GPUTexture **tex) -{ - drw_interface_uniform(shgroup, name, DRW_UNIFORM_BUFFER, tex, 0, 1); -} - -void DRW_shgroup_uniform_bool(DRWShadingGroup *shgroup, const char *name, const int *value, int arraysize) -{ - drw_interface_uniform(shgroup, name, DRW_UNIFORM_BOOL, value, 1, arraysize); -} - -void DRW_shgroup_uniform_float(DRWShadingGroup *shgroup, const char *name, const float *value, int arraysize) -{ - drw_interface_uniform(shgroup, name, DRW_UNIFORM_FLOAT, value, 1, arraysize); -} - -void DRW_shgroup_uniform_vec2(DRWShadingGroup *shgroup, const char *name, const float *value, int arraysize) -{ - drw_interface_uniform(shgroup, name, 
DRW_UNIFORM_FLOAT, value, 2, arraysize); -} - -void DRW_shgroup_uniform_vec3(DRWShadingGroup *shgroup, const char *name, const float *value, int arraysize) -{ - drw_interface_uniform(shgroup, name, DRW_UNIFORM_FLOAT, value, 3, arraysize); -} - -void DRW_shgroup_uniform_vec4(DRWShadingGroup *shgroup, const char *name, const float *value, int arraysize) -{ - drw_interface_uniform(shgroup, name, DRW_UNIFORM_FLOAT, value, 4, arraysize); -} - -void DRW_shgroup_uniform_short_to_int(DRWShadingGroup *shgroup, const char *name, const short *value, int arraysize) -{ - drw_interface_uniform(shgroup, name, DRW_UNIFORM_SHORT_TO_INT, value, 1, arraysize); -} - -void DRW_shgroup_uniform_short_to_float(DRWShadingGroup *shgroup, const char *name, const short *value, int arraysize) -{ - drw_interface_uniform(shgroup, name, DRW_UNIFORM_SHORT_TO_FLOAT, value, 1, arraysize); -} - -void DRW_shgroup_uniform_int(DRWShadingGroup *shgroup, const char *name, const int *value, int arraysize) -{ - drw_interface_uniform(shgroup, name, DRW_UNIFORM_INT, value, 1, arraysize); -} - -void DRW_shgroup_uniform_ivec2(DRWShadingGroup *shgroup, const char *name, const int *value, int arraysize) -{ - drw_interface_uniform(shgroup, name, DRW_UNIFORM_INT, value, 2, arraysize); -} - -void DRW_shgroup_uniform_ivec3(DRWShadingGroup *shgroup, const char *name, const int *value, int arraysize) -{ - drw_interface_uniform(shgroup, name, DRW_UNIFORM_INT, value, 3, arraysize); -} - -void DRW_shgroup_uniform_mat3(DRWShadingGroup *shgroup, const char *name, const float *value) -{ - drw_interface_uniform(shgroup, name, DRW_UNIFORM_FLOAT, value, 9, 1); -} - -void DRW_shgroup_uniform_mat4(DRWShadingGroup *shgroup, const char *name, const float *value) -{ - drw_interface_uniform(shgroup, name, DRW_UNIFORM_FLOAT, value, 16, 1); -} - -/** \} */ - - -/* -------------------------------------------------------------------- */ - -/** \name Passes (DRW_pass) - * \{ */ - -DRWPass *DRW_pass_create(const char *name, DRWState state) 
-{ - DRWPass *pass = BLI_mempool_alloc(DST.vmempool->passes); - pass->state = state; - BLI_strncpy(pass->name, name, MAX_PASS_NAME); - - pass->shgroups = NULL; - pass->shgroups_last = NULL; - - return pass; -} - -void DRW_pass_state_set(DRWPass *pass, DRWState state) -{ - pass->state = state; -} - -void DRW_pass_free(DRWPass *pass) -{ - for (DRWShadingGroup *shgroup = pass->shgroups; shgroup; shgroup = shgroup->next) { - DRW_shgroup_free(shgroup); - } - - pass->shgroups = NULL; - pass->shgroups_last = NULL; -} - -void DRW_pass_foreach_shgroup(DRWPass *pass, void (*callback)(void *userData, DRWShadingGroup *shgrp), void *userData) -{ - for (DRWShadingGroup *shgroup = pass->shgroups; shgroup; shgroup = shgroup->next) { - callback(userData, shgroup); - } -} - -typedef struct ZSortData { - float *axis; - float *origin; -} ZSortData; - -static int pass_shgroup_dist_sort(void *thunk, const void *a, const void *b) -{ - const ZSortData *zsortdata = (ZSortData *)thunk; - const DRWShadingGroup *shgrp_a = (const DRWShadingGroup *)a; - const DRWShadingGroup *shgrp_b = (const DRWShadingGroup *)b; - - const DRWCall *call_a; - const DRWCall *call_b; - - call_a = shgrp_a->calls_first; - call_b = shgrp_b->calls_first; - - if (call_a == NULL) return -1; - if (call_b == NULL) return -1; - - float tmp[3]; - sub_v3_v3v3(tmp, zsortdata->origin, call_a->head.model[3]); - const float a_sq = dot_v3v3(zsortdata->axis, tmp); - sub_v3_v3v3(tmp, zsortdata->origin, call_b->head.model[3]); - const float b_sq = dot_v3v3(zsortdata->axis, tmp); - - if (a_sq < b_sq) return 1; - else if (a_sq > b_sq) return -1; - else { - /* If there is a depth prepass put it before */ - if ((shgrp_a->state_extra & DRW_STATE_WRITE_DEPTH) != 0) { - return -1; - } - else if ((shgrp_b->state_extra & DRW_STATE_WRITE_DEPTH) != 0) { - return 1; - } - else return 0; - } -} - -/* ------------------ Shading group sorting --------------------- */ - -#define SORT_IMPL_LINKTYPE DRWShadingGroup - -#define SORT_IMPL_USE_THUNK 
-#define SORT_IMPL_FUNC shgroup_sort_fn_r -#include "../../blenlib/intern/list_sort_impl.h" -#undef SORT_IMPL_FUNC -#undef SORT_IMPL_USE_THUNK - -#undef SORT_IMPL_LINKTYPE - -/** - * Sort Shading groups by decreasing Z of their first draw call. - * This is usefull for order dependant effect such as transparency. - **/ -void DRW_pass_sort_shgroup_z(DRWPass *pass) -{ - float (*viewinv)[4]; - viewinv = viewport_matrices.mat[DRW_MAT_VIEWINV]; - - ZSortData zsortdata = {viewinv[2], viewinv[3]}; - - if (pass->shgroups && pass->shgroups->next) { - pass->shgroups = shgroup_sort_fn_r(pass->shgroups, pass_shgroup_dist_sort, &zsortdata); - - /* Find the next last */ - DRWShadingGroup *last = pass->shgroups; - while ((last = last->next)) { - /* Do nothing */ - } - pass->shgroups_last = last; - } -} - -/** \} */ - - /* -------------------------------------------------------------------- */ -/** \name Draw (DRW_draw) - * \{ */ - -static void drw_state_set(DRWState state) -{ - if (DST.state == state) { - return; - } - - -#define CHANGED_TO(f) \ - ((DST.state & (f)) ? \ - ((state & (f)) ? 0 : -1) : \ - ((state & (f)) ? 
1 : 0)) - -#define CHANGED_ANY(f) \ - ((DST.state & (f)) != (state & (f))) - -#define CHANGED_ANY_STORE_VAR(f, enabled) \ - ((DST.state & (f)) != (enabled = (state & (f)))) - - /* Depth Write */ - { - int test; - if ((test = CHANGED_TO(DRW_STATE_WRITE_DEPTH))) { - if (test == 1) { - glDepthMask(GL_TRUE); - } - else { - glDepthMask(GL_FALSE); - } - } - } - - /* Color Write */ - { - int test; - if ((test = CHANGED_TO(DRW_STATE_WRITE_COLOR))) { - if (test == 1) { - glColorMask(GL_TRUE, GL_TRUE, GL_TRUE, GL_TRUE); - } - else { - glColorMask(GL_FALSE, GL_FALSE, GL_FALSE, GL_FALSE); - } - } - } - - /* Cull */ - { - DRWState test; - if (CHANGED_ANY_STORE_VAR( - DRW_STATE_CULL_BACK | DRW_STATE_CULL_FRONT, - test)) - { - if (test) { - glEnable(GL_CULL_FACE); - - if ((state & DRW_STATE_CULL_BACK) != 0) { - glCullFace(GL_BACK); - } - else if ((state & DRW_STATE_CULL_FRONT) != 0) { - glCullFace(GL_FRONT); - } - else { - BLI_assert(0); - } - } - else { - glDisable(GL_CULL_FACE); - } - } - } - - /* Depth Test */ - { - DRWState test; - if (CHANGED_ANY_STORE_VAR( - DRW_STATE_DEPTH_LESS | DRW_STATE_DEPTH_EQUAL | DRW_STATE_DEPTH_GREATER | DRW_STATE_DEPTH_ALWAYS, - test)) - { - if (test) { - glEnable(GL_DEPTH_TEST); - - if (state & DRW_STATE_DEPTH_LESS) { - glDepthFunc(GL_LEQUAL); - } - else if (state & DRW_STATE_DEPTH_EQUAL) { - glDepthFunc(GL_EQUAL); - } - else if (state & DRW_STATE_DEPTH_GREATER) { - glDepthFunc(GL_GREATER); - } - else if (state & DRW_STATE_DEPTH_ALWAYS) { - glDepthFunc(GL_ALWAYS); - } - else { - BLI_assert(0); - } - } - else { - glDisable(GL_DEPTH_TEST); - } - } - } - - /* Wire Width */ - { - if (CHANGED_ANY(DRW_STATE_WIRE | DRW_STATE_WIRE_LARGE)) { - if ((state & DRW_STATE_WIRE) != 0) { - glLineWidth(1.0f); - } - else if ((state & DRW_STATE_WIRE_LARGE) != 0) { - glLineWidth(UI_GetThemeValuef(TH_OUTLINE_WIDTH) * 2.0f); - } - else { - /* do nothing */ - } - } - } - - /* Points Size */ - { - int test; - if ((test = CHANGED_TO(DRW_STATE_POINT))) { - if (test == 1) { 
- GPU_enable_program_point_size(); - glPointSize(5.0f); - } - else { - GPU_disable_program_point_size(); - } - } - } - - /* Blending (all buffer) */ - { - int test; - if (CHANGED_ANY_STORE_VAR( - DRW_STATE_BLEND | DRW_STATE_ADDITIVE | DRW_STATE_MULTIPLY | DRW_STATE_TRANSMISSION | - DRW_STATE_ADDITIVE_FULL, - test)) - { - if (test) { - glEnable(GL_BLEND); - - if ((state & DRW_STATE_BLEND) != 0) { - glBlendFuncSeparate(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA, /* RGB */ - GL_ONE, GL_ONE_MINUS_SRC_ALPHA); /* Alpha */ - } - else if ((state & DRW_STATE_MULTIPLY) != 0) { - glBlendFunc(GL_DST_COLOR, GL_ZERO); - } - else if ((state & DRW_STATE_TRANSMISSION) != 0) { - glBlendFunc(GL_ONE, GL_SRC_ALPHA); - } - else if ((state & DRW_STATE_ADDITIVE) != 0) { - /* Do not let alpha accumulate but premult the source RGB by it. */ - glBlendFuncSeparate(GL_SRC_ALPHA, GL_ONE, /* RGB */ - GL_ZERO, GL_ONE); /* Alpha */ - } - else if ((state & DRW_STATE_ADDITIVE_FULL) != 0) { - /* Let alpha accumulate. */ - glBlendFunc(GL_ONE, GL_ONE); - } - else { - BLI_assert(0); - } - } - else { - glDisable(GL_BLEND); - } - } - } - - /* Clip Planes */ - { - int test; - if ((test = CHANGED_TO(DRW_STATE_CLIP_PLANES))) { - if (test == 1) { - for (int i = 0; i < DST.num_clip_planes; ++i) { - glEnable(GL_CLIP_DISTANCE0 + i); - } - } - else { - for (int i = 0; i < MAX_CLIP_PLANES; ++i) { - glDisable(GL_CLIP_DISTANCE0 + i); - } - } - } - } - - /* Line Stipple */ - { - int test; - if (CHANGED_ANY_STORE_VAR( - DRW_STATE_STIPPLE_2 | DRW_STATE_STIPPLE_3 | DRW_STATE_STIPPLE_4, - test)) - { - if (test) { - if ((state & DRW_STATE_STIPPLE_2) != 0) { - setlinestyle(2); - } - else if ((state & DRW_STATE_STIPPLE_3) != 0) { - setlinestyle(3); - } - else if ((state & DRW_STATE_STIPPLE_4) != 0) { - setlinestyle(4); - } - else { - BLI_assert(0); - } - } - else { - setlinestyle(0); - } - } - } - - /* Stencil */ - { - DRWState test; - if (CHANGED_ANY_STORE_VAR( - DRW_STATE_WRITE_STENCIL | - DRW_STATE_STENCIL_EQUAL, - test)) - { 
- if (test) { - glEnable(GL_STENCIL_TEST); - - /* Stencil Write */ - if ((state & DRW_STATE_WRITE_STENCIL) != 0) { - glStencilMask(0xFF); - glStencilOp(GL_KEEP, GL_KEEP, GL_REPLACE); - } - /* Stencil Test */ - else if ((state & DRW_STATE_STENCIL_EQUAL) != 0) { - glStencilMask(0x00); /* disable write */ - DST.stencil_mask = 0; - } - else { - BLI_assert(0); - } - } - else { - /* disable write & test */ - DST.stencil_mask = 0; - glStencilMask(0x00); - glStencilFunc(GL_ALWAYS, 1, 0xFF); - glDisable(GL_STENCIL_TEST); - } - } - } - -#undef CHANGED_TO -#undef CHANGED_ANY -#undef CHANGED_ANY_STORE_VAR - - DST.state = state; -} - -static void drw_stencil_set(unsigned int mask) -{ - if (DST.stencil_mask != mask) { - /* Stencil Write */ - if ((DST.state & DRW_STATE_WRITE_STENCIL) != 0) { - glStencilFunc(GL_ALWAYS, mask, 0xFF); - DST.stencil_mask = mask; - } - /* Stencil Test */ - else if ((DST.state & DRW_STATE_STENCIL_EQUAL) != 0) { - glStencilFunc(GL_EQUAL, mask, 0xFF); - DST.stencil_mask = mask; - } - } -} - -typedef struct DRWBoundTexture { - struct DRWBoundTexture *next, *prev; - GPUTexture *tex; -} DRWBoundTexture; - -static void draw_matrices_model_prepare(DRWCallHeader *ch) -{ - /* OPTI : We can optimize further by sharing this computation for each call using the same object. 
*/ - /* Order matters */ - if (ch->matflag & (DRW_CALL_MODELVIEW | DRW_CALL_MODELVIEWINVERSE | - DRW_CALL_NORMALVIEW | DRW_CALL_EYEVEC)) - { - mul_m4_m4m4(ch->modelview, viewport_matrices.mat[DRW_MAT_VIEW], ch->model); - } - if (ch->matflag & DRW_CALL_MODELVIEWINVERSE) { - invert_m4_m4(ch->modelviewinverse, ch->modelview); - } - if (ch->matflag & DRW_CALL_MODELVIEWPROJECTION) { - mul_m4_m4m4(ch->modelviewprojection, viewport_matrices.mat[DRW_MAT_PERS], ch->model); - } - if (ch->matflag & DRW_CALL_NORMALVIEW) { - copy_m3_m4(ch->normalview, ch->modelview); - invert_m3(ch->normalview); - transpose_m3(ch->normalview); - } - if (ch->matflag & DRW_CALL_EYEVEC) { - /* Used by orthographic wires */ - float tmp[3][3]; - copy_v3_fl3(ch->eyevec, 0.0f, 0.0f, 1.0f); - invert_m3_m3(tmp, ch->normalview); - /* set eye vector, transformed to object coords */ - mul_m3_v3(tmp, ch->eyevec); - } - /* Non view dependant */ - if (ch->matflag & DRW_CALL_MODELINVERSE) { - invert_m4_m4(ch->modelinverse, ch->model); - ch->matflag &= ~DRW_CALL_MODELINVERSE; - } - if (ch->matflag & DRW_CALL_NORMALWORLD) { - copy_m3_m4(ch->normalworld, ch->model); - invert_m3(ch->normalworld); - transpose_m3(ch->normalworld); - ch->matflag &= ~DRW_CALL_NORMALWORLD; - } -} - -static void draw_geometry_prepare(DRWShadingGroup *shgroup, DRWCallHeader *head) -{ - DRWInterface *interface = &shgroup->interface; - - /* step 1 : bind object dependent matrices */ - if (head != NULL) { - /* OPTI/IDEA(clem): Do this preparation in another thread. 
*/ - draw_matrices_model_prepare(head); - GPU_shader_uniform_vector(shgroup->shader, interface->model, 16, 1, (float *)head->model); - GPU_shader_uniform_vector(shgroup->shader, interface->modelinverse, 16, 1, (float *)head->modelinverse); - GPU_shader_uniform_vector(shgroup->shader, interface->modelview, 16, 1, (float *)head->modelview); - GPU_shader_uniform_vector(shgroup->shader, interface->modelviewinverse, 16, 1, (float *)head->modelviewinverse); - GPU_shader_uniform_vector(shgroup->shader, interface->modelviewprojection, 16, 1, (float *)head->modelviewprojection); - GPU_shader_uniform_vector(shgroup->shader, interface->normalview, 9, 1, (float *)head->normalview); - GPU_shader_uniform_vector(shgroup->shader, interface->normalworld, 9, 1, (float *)head->normalworld); - GPU_shader_uniform_vector(shgroup->shader, interface->orcotexfac, 3, 2, (float *)head->orcotexfac); - GPU_shader_uniform_vector(shgroup->shader, interface->eye, 3, 1, (float *)head->eyevec); - } - else { - BLI_assert((interface->normalview == -1) && (interface->normalworld == -1) && (interface->eye == -1)); - /* For instancing and batching. 
*/ - float unitmat[4][4]; - unit_m4(unitmat); - GPU_shader_uniform_vector(shgroup->shader, interface->model, 16, 1, (float *)unitmat); - GPU_shader_uniform_vector(shgroup->shader, interface->modelinverse, 16, 1, (float *)unitmat); - GPU_shader_uniform_vector(shgroup->shader, interface->modelview, 16, 1, (float *)viewport_matrices.mat[DRW_MAT_VIEW]); - GPU_shader_uniform_vector(shgroup->shader, interface->modelviewinverse, 16, 1, (float *)viewport_matrices.mat[DRW_MAT_VIEWINV]); - GPU_shader_uniform_vector(shgroup->shader, interface->modelviewprojection, 16, 1, (float *)viewport_matrices.mat[DRW_MAT_PERS]); - GPU_shader_uniform_vector(shgroup->shader, interface->orcotexfac, 3, 2, (float *)shgroup->instance_orcofac); - } -} - -static void draw_geometry_execute_ex( - DRWShadingGroup *shgroup, Gwn_Batch *geom, unsigned int start, unsigned int count) -{ - /* Special case: empty drawcall, placement is done via shader, don't bind anything. */ - if (geom == NULL) { - BLI_assert(shgroup->type == DRW_SHG_TRIANGLE_BATCH); /* Add other type if needed. */ - /* Shader is already bound. */ - GWN_draw_primitive(GWN_PRIM_TRIS, count); - return; - } - - /* step 2 : bind vertex array & draw */ - GWN_batch_program_set_no_use(geom, GPU_shader_get_program(shgroup->shader), GPU_shader_get_interface(shgroup->shader)); - /* XXX hacking gawain. we don't want to call glUseProgram! 
(huge performance loss) */ - geom->program_in_use = true; - if (ELEM(shgroup->type, DRW_SHG_INSTANCE, DRW_SHG_INSTANCE_EXTERNAL)) { - GWN_batch_draw_range_ex(geom, start, count, true); - } - else { - GWN_batch_draw_range(geom, start, count); - } - geom->program_in_use = false; /* XXX hacking gawain */ -} - -static void draw_geometry_execute(DRWShadingGroup *shgroup, Gwn_Batch *geom) -{ - draw_geometry_execute_ex(shgroup, geom, 0, 0); -} - -static void bind_texture(GPUTexture *tex) -{ - int bind_num = GPU_texture_bound_number(tex); - if (bind_num == -1) { - for (int i = 0; i < GPU_max_textures(); ++i) { - RST.bind_tex_inc = (RST.bind_tex_inc + 1) % GPU_max_textures(); - if (RST.bound_tex_slots[RST.bind_tex_inc] == false) { - if (RST.bound_texs[RST.bind_tex_inc] != NULL) { - GPU_texture_unbind(RST.bound_texs[RST.bind_tex_inc]); - } - GPU_texture_bind(tex, RST.bind_tex_inc); - RST.bound_texs[RST.bind_tex_inc] = tex; - RST.bound_tex_slots[RST.bind_tex_inc] = true; - // printf("Binds Texture %d %p\n", RST.bind_tex_inc, tex); - return; - } - } - - printf("Not enough texture slots! Reduce number of textures used by your shader.\n"); - } - RST.bound_tex_slots[bind_num] = true; -} - -static void bind_ubo(GPUUniformBuffer *ubo) -{ - if (RST.bind_ubo_inc < GPU_max_ubo_binds()) { - GPU_uniformbuffer_bind(ubo, RST.bind_ubo_inc); - RST.bind_ubo_inc++; - } - else { - /* This is not depending on user input. - * It is our responsability to make sure there enough slots. */ - BLI_assert(0 && "Not enough ubo slots! This should not happen!\n"); - - /* printf so user can report bad behaviour */ - printf("Not enough ubo slots! 
This should not happen!\n"); - } -} - -static void release_texture_slots(void) -{ - memset(RST.bound_tex_slots, 0x0, sizeof(bool) * GPU_max_textures()); -} - -static void release_ubo_slots(void) -{ - RST.bind_ubo_inc = 0; -} - -static void draw_shgroup(DRWShadingGroup *shgroup, DRWState pass_state) -{ - BLI_assert(shgroup->shader); - - DRWInterface *interface = &shgroup->interface; - GPUTexture *tex; - GPUUniformBuffer *ubo; - int val; - float fval; - - if (DST.shader != shgroup->shader) { - if (DST.shader) GPU_shader_unbind(); - GPU_shader_bind(shgroup->shader); - DST.shader = shgroup->shader; - } - - release_texture_slots(); - release_ubo_slots(); - - drw_state_set((pass_state & shgroup->state_extra_disable) | shgroup->state_extra); - drw_stencil_set(shgroup->stencil_mask); - - /* Binding Uniform */ - /* Don't check anything, Interface should already contain the least uniform as possible */ - for (DRWUniform *uni = interface->uniforms; uni; uni = uni->next) { - switch (uni->type) { - case DRW_UNIFORM_SHORT_TO_INT: - val = (int)*((short *)uni->value); - GPU_shader_uniform_vector_int( - shgroup->shader, uni->location, uni->length, uni->arraysize, (int *)&val); - break; - case DRW_UNIFORM_SHORT_TO_FLOAT: - fval = (float)*((short *)uni->value); - GPU_shader_uniform_vector( - shgroup->shader, uni->location, uni->length, uni->arraysize, (float *)&fval); - break; - case DRW_UNIFORM_BOOL: - case DRW_UNIFORM_INT: - GPU_shader_uniform_vector_int( - shgroup->shader, uni->location, uni->length, uni->arraysize, (int *)uni->value); - break; - case DRW_UNIFORM_FLOAT: - GPU_shader_uniform_vector( - shgroup->shader, uni->location, uni->length, uni->arraysize, (float *)uni->value); - break; - case DRW_UNIFORM_TEXTURE: - tex = (GPUTexture *)uni->value; - BLI_assert(tex); - bind_texture(tex); - GPU_shader_uniform_texture(shgroup->shader, uni->location, tex); - break; - case DRW_UNIFORM_BUFFER: - if (!DRW_state_is_fbo()) { - break; - } - tex = *((GPUTexture **)uni->value); - 
BLI_assert(tex); - bind_texture(tex); - GPU_shader_uniform_texture(shgroup->shader, uni->location, tex); - break; - case DRW_UNIFORM_BLOCK: - ubo = (GPUUniformBuffer *)uni->value; - bind_ubo(ubo); - GPU_shader_uniform_buffer(shgroup->shader, uni->location, ubo); - break; - } - } - -#ifdef USE_GPU_SELECT - /* use the first item because of selection we only ever add one */ -# define GPU_SELECT_LOAD_IF_PICKSEL(_call) \ - if ((G.f & G_PICKSEL) && (_call)) { \ - GPU_select_load_id((_call)->head.select_id); \ - } ((void)0) - -# define GPU_SELECT_LOAD_IF_PICKSEL_LIST(_shgroup, _start, _count) \ - _start = 0; \ - _count = _shgroup->interface.instance_count; \ - int *select_id = NULL; \ - if (G.f & G_PICKSEL) { \ - if (_shgroup->interface.override_selectid == -1) { \ - select_id = DRW_instance_data_get(_shgroup->interface.inst_selectid); \ - switch (_shgroup->type) { \ - case DRW_SHG_TRIANGLE_BATCH: _count = 3; break; \ - case DRW_SHG_LINE_BATCH: _count = 2; break; \ - default: _count = 1; break; \ - } \ - } \ - else { \ - GPU_select_load_id(_shgroup->interface.override_selectid); \ - } \ - } \ - while (_start < _shgroup->interface.instance_count) { \ - if (select_id) { \ - GPU_select_load_id(select_id[_start]); \ - } - -# define GPU_SELECT_LOAD_IF_PICKSEL_LIST_END(_start, _count) \ - _start += _count; \ - } - -#else -# define GPU_SELECT_LOAD_IF_PICKSEL(call) -# define GPU_SELECT_LOAD_IF_PICKSEL_LIST_END(start, count) -# define GPU_SELECT_LOAD_IF_PICKSEL_LIST(_shgroup, _start, _count) \ - _start = 0; \ - _count = _shgroup->interface.instance_count; - -#endif - - /* Rendering Calls */ - if (!ELEM(shgroup->type, DRW_SHG_NORMAL)) { - /* Replacing multiple calls with only one */ - if (ELEM(shgroup->type, DRW_SHG_INSTANCE, DRW_SHG_INSTANCE_EXTERNAL)) { - if (shgroup->type == DRW_SHG_INSTANCE_EXTERNAL) { - if (shgroup->instancing_geom != NULL) { - unsigned int count, start; - draw_geometry_prepare(shgroup, NULL); - /* This will only load override_selectid */ - 
GPU_SELECT_LOAD_IF_PICKSEL_LIST(shgroup, start, count) - { - draw_geometry_execute(shgroup, shgroup->instancing_geom); - } - GPU_SELECT_LOAD_IF_PICKSEL_LIST_END(start, count) - } - } - else { - if (shgroup->interface.instance_count > 0) { - unsigned int count, start; - Gwn_Batch *geom = (shgroup->instancing_geom) ? shgroup->instancing_geom : shgroup->instance_geom; - draw_geometry_prepare(shgroup, NULL); - GPU_SELECT_LOAD_IF_PICKSEL_LIST(shgroup, start, count) - { - draw_geometry_execute_ex(shgroup, geom, start, count); - } - GPU_SELECT_LOAD_IF_PICKSEL_LIST_END(start, count) - } - } - } - else { /* DRW_SHG_***_BATCH */ - /* Some dynamic batch can have no geom (no call to aggregate) */ - if (shgroup->interface.instance_count > 0) { - unsigned int count, start; - draw_geometry_prepare(shgroup, NULL); - GPU_SELECT_LOAD_IF_PICKSEL_LIST(shgroup, start, count) - { - draw_geometry_execute_ex(shgroup, shgroup->batch_geom, start, count); - } - GPU_SELECT_LOAD_IF_PICKSEL_LIST_END(start, count) - } - } - } - else { - bool prev_neg_scale = false; - for (DRWCall *call = shgroup->calls_first; call; call = call->head.prev) { - if ((call->head.state & DRW_CALL_CULLED) != 0) - continue; - - /* Negative scale objects */ - bool neg_scale = call->head.state & DRW_CALL_NEGSCALE; - if (neg_scale != prev_neg_scale) { - glFrontFace((neg_scale) ? 
DST.backface : DST.frontface); - prev_neg_scale = neg_scale; - } - - GPU_SELECT_LOAD_IF_PICKSEL(call); - - if (call->head.type == DRW_CALL_SINGLE) { - draw_geometry_prepare(shgroup, &call->head); - draw_geometry_execute(shgroup, call->geometry); - } - else { - BLI_assert(call->head.type == DRW_CALL_GENERATE); - DRWCallGenerate *callgen = ((DRWCallGenerate *)call); - draw_geometry_prepare(shgroup, &callgen->head); - callgen->geometry_fn(shgroup, draw_geometry_execute, callgen->user_data); - } - } - /* Reset state */ - glFrontFace(DST.frontface); - } - - /* TODO: remove, (currently causes alpha issue with sculpt, need to investigate) */ - DRW_state_reset(); -} - -static void drw_draw_pass_ex(DRWPass *pass, DRWShadingGroup *start_group, DRWShadingGroup *end_group) -{ - /* Start fresh */ - DST.shader = NULL; - - BLI_assert(DST.buffer_finish_called && "DRW_render_instance_buffer_finish had not been called before drawing"); - - drw_state_set(pass->state); - - DRW_stats_query_start(pass->name); - - for (DRWShadingGroup *shgroup = start_group; shgroup; shgroup = shgroup->next) { - draw_shgroup(shgroup, pass->state); - /* break if upper limit */ - if (shgroup == end_group) { - break; - } - } - - /* Clear Bound textures */ - for (int i = 0; i < GPU_max_textures(); i++) { - if (RST.bound_texs[i] != NULL) { - GPU_texture_unbind(RST.bound_texs[i]); - RST.bound_texs[i] = NULL; - } - } - - if (DST.shader) { - GPU_shader_unbind(); - DST.shader = NULL; - } - - DRW_stats_query_end(); -} - -void DRW_draw_pass(DRWPass *pass) -{ - drw_draw_pass_ex(pass, pass->shgroups, pass->shgroups_last); -} - -/* Draw only a subset of shgroups. 
Used in special situations as grease pencil strokes */ -void DRW_draw_pass_subset(DRWPass *pass, DRWShadingGroup *start_group, DRWShadingGroup *end_group) -{ - drw_draw_pass_ex(pass, start_group, end_group); -} - void DRW_draw_callbacks_pre_scene(void) { RegionView3D *rv3d = DST.draw_ctx.rv3d; @@ -2180,47 +108,6 @@ void DRW_draw_callbacks_post_scene(void) gpuLoadMatrix(rv3d->viewmat); } -/* Reset state to not interfer with other UI drawcall */ -void DRW_state_reset_ex(DRWState state) -{ - DST.state = ~state; - drw_state_set(state); -} - -void DRW_state_reset(void) -{ - /* Reset blending function */ - glBlendFuncSeparate(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA, GL_ONE, GL_ONE_MINUS_SRC_ALPHA); - - DRW_state_reset_ex(DRW_STATE_DEFAULT); -} - -/* NOTE : Make sure to reset after use! */ -void DRW_state_invert_facing(void) -{ - SWAP(GLenum, DST.backface, DST.frontface); - glFrontFace(DST.frontface); -} - -/** - * This only works if DRWPasses have been tagged with DRW_STATE_CLIP_PLANES, - * and if the shaders have support for it (see usage of gl_ClipDistance). - * Be sure to call DRW_state_clip_planes_reset() after you finish drawing. 
- **/ -void DRW_state_clip_planes_add(float plane_eq[4]) -{ - BLI_assert(DST.num_clip_planes < MAX_CLIP_PLANES-1); - copy_v4_v4(DST.clip_planes_eq[DST.num_clip_planes++], plane_eq); -} - -void DRW_state_clip_planes_reset(void) -{ - DST.num_clip_planes = 0; -} - -/** \} */ - - struct DRWTextStore *DRW_text_cache_ensure(void) { BLI_assert(DST.text_store_p); @@ -2304,198 +191,9 @@ int DRW_object_is_mode_shade(const Object *ob) /* -------------------------------------------------------------------- */ -/** \name Framebuffers (DRW_framebuffer) +/** \name Color Management * \{ */ -static GPUTextureFormat convert_tex_format( - int fbo_format, - int *r_channels, bool *r_is_depth) -{ - *r_is_depth = ELEM(fbo_format, DRW_TEX_DEPTH_16, DRW_TEX_DEPTH_24, DRW_TEX_DEPTH_24_STENCIL_8); - - switch (fbo_format) { - case DRW_TEX_R_16: *r_channels = 1; return GPU_R16F; - case DRW_TEX_R_32: *r_channels = 1; return GPU_R32F; - case DRW_TEX_RG_8: *r_channels = 2; return GPU_RG8; - case DRW_TEX_RG_16: *r_channels = 2; return GPU_RG16F; - case DRW_TEX_RG_16I: *r_channels = 2; return GPU_RG16I; - case DRW_TEX_RG_32: *r_channels = 2; return GPU_RG32F; - case DRW_TEX_RGBA_8: *r_channels = 4; return GPU_RGBA8; - case DRW_TEX_RGBA_16: *r_channels = 4; return GPU_RGBA16F; - case DRW_TEX_RGBA_32: *r_channels = 4; return GPU_RGBA32F; - case DRW_TEX_DEPTH_16: *r_channels = 1; return GPU_DEPTH_COMPONENT16; - case DRW_TEX_DEPTH_24: *r_channels = 1; return GPU_DEPTH_COMPONENT24; - case DRW_TEX_DEPTH_24_STENCIL_8: *r_channels = 1; return GPU_DEPTH24_STENCIL8; - case DRW_TEX_DEPTH_32: *r_channels = 1; return GPU_DEPTH_COMPONENT32F; - case DRW_TEX_RGB_11_11_10: *r_channels = 3; return GPU_R11F_G11F_B10F; - default: - BLI_assert(false && "Texture format unsupported as render target!"); - *r_channels = 4; return GPU_RGBA8; - } -} - -struct GPUFrameBuffer *DRW_framebuffer_create(void) -{ - return GPU_framebuffer_create(); -} - -void DRW_framebuffer_init( - struct GPUFrameBuffer **fb, void *engine_type, int 
width, int height, - DRWFboTexture textures[MAX_FBO_TEX], int textures_len) -{ - BLI_assert(textures_len <= MAX_FBO_TEX); - BLI_assert(width > 0 && height > 0); - - bool create_fb = false; - int color_attachment = -1; - - if (!*fb) { - *fb = GPU_framebuffer_create(); - create_fb = true; - } - - for (int i = 0; i < textures_len; ++i) { - int channels; - bool is_depth; - bool create_tex = false; - - DRWFboTexture fbotex = textures[i]; - bool is_temp = (fbotex.flag & DRW_TEX_TEMP) != 0; - - GPUTextureFormat gpu_format = convert_tex_format(fbotex.format, &channels, &is_depth); - - if (!*fbotex.tex || is_temp) { - /* Temp textures need to be queried each frame, others not. */ - if (is_temp) { - *fbotex.tex = GPU_viewport_texture_pool_query( - DST.viewport, engine_type, width, height, channels, gpu_format); - } - else { - *fbotex.tex = GPU_texture_create_2D_custom( - width, height, channels, gpu_format, NULL, NULL); - create_tex = true; - } - } - - if (!is_depth) { - ++color_attachment; - } - - if (create_fb || create_tex) { - drw_texture_set_parameters(*fbotex.tex, fbotex.flag); - GPU_framebuffer_texture_attach(*fb, *fbotex.tex, color_attachment, 0); - } - } - - if (create_fb && (textures_len > 0)) { - if (!GPU_framebuffer_check_valid(*fb, NULL)) { - printf("Error invalid framebuffer\n"); - } - - /* Detach temp textures */ - for (int i = 0; i < textures_len; ++i) { - DRWFboTexture fbotex = textures[i]; - - if ((fbotex.flag & DRW_TEX_TEMP) != 0) { - GPU_framebuffer_texture_detach(*fbotex.tex); - } - } - - if (DST.default_framebuffer != NULL) { - GPU_framebuffer_bind(DST.default_framebuffer); - } - } -} - -void DRW_framebuffer_free(struct GPUFrameBuffer *fb) -{ - GPU_framebuffer_free(fb); -} - -void DRW_framebuffer_bind(struct GPUFrameBuffer *fb) -{ - GPU_framebuffer_bind(fb); -} - -void DRW_framebuffer_clear(bool color, bool depth, bool stencil, float clear_col[4], float clear_depth) -{ - if (color) { - glColorMask(GL_TRUE, GL_TRUE, GL_TRUE, GL_TRUE); - 
glClearColor(clear_col[0], clear_col[1], clear_col[2], clear_col[3]); - } - if (depth) { - glDepthMask(GL_TRUE); - glClearDepth(clear_depth); - } - if (stencil) { - glStencilMask(0xFF); - } - glClear(((color) ? GL_COLOR_BUFFER_BIT : 0) | - ((depth) ? GL_DEPTH_BUFFER_BIT : 0) | - ((stencil) ? GL_STENCIL_BUFFER_BIT : 0)); -} - -void DRW_framebuffer_read_data(int x, int y, int w, int h, int channels, int slot, float *data) -{ - GLenum type; - switch (channels) { - case 1: type = GL_RED; break; - case 2: type = GL_RG; break; - case 3: type = GL_RGB; break; - case 4: type = GL_RGBA; break; - default: - BLI_assert(false && "wrong number of read channels"); - return; - } - glReadBuffer(GL_COLOR_ATTACHMENT0 + slot); - glReadPixels(x, y, w, h, type, GL_FLOAT, data); -} - -void DRW_framebuffer_read_depth(int x, int y, int w, int h, float *data) -{ - GLenum type = GL_DEPTH_COMPONENT; - - glReadBuffer(GL_COLOR_ATTACHMENT0); /* This is OK! */ - glReadPixels(x, y, w, h, type, GL_FLOAT, data); -} - -void DRW_framebuffer_texture_attach(struct GPUFrameBuffer *fb, GPUTexture *tex, int slot, int mip) -{ - GPU_framebuffer_texture_attach(fb, tex, slot, mip); -} - -void DRW_framebuffer_texture_layer_attach(struct GPUFrameBuffer *fb, struct GPUTexture *tex, int slot, int layer, int mip) -{ - GPU_framebuffer_texture_layer_attach(fb, tex, slot, layer, mip); -} - -void DRW_framebuffer_cubeface_attach(struct GPUFrameBuffer *fb, GPUTexture *tex, int slot, int face, int mip) -{ - GPU_framebuffer_texture_cubeface_attach(fb, tex, slot, face, mip); -} - -void DRW_framebuffer_texture_detach(GPUTexture *tex) -{ - GPU_framebuffer_texture_detach(tex); -} - -void DRW_framebuffer_blit(struct GPUFrameBuffer *fb_read, struct GPUFrameBuffer *fb_write, bool depth, bool stencil) -{ - GPU_framebuffer_blit(fb_read, 0, fb_write, 0, depth, stencil); -} - -void DRW_framebuffer_recursive_downsample( - struct GPUFrameBuffer *fb, struct GPUTexture *tex, int num_iter, - void (*callback)(void *userData, int level), 
void *userData) -{ - GPU_framebuffer_recursive_downsample(fb, tex, num_iter, callback, userData); -} - -void DRW_framebuffer_viewport_size(struct GPUFrameBuffer *UNUSED(fb_read), int x, int y, int w, int h) -{ - glViewport(x, y, w, h); -} - /* Use color management profile to draw texture to framebuffer */ void DRW_transform_to_display(GPUTexture *tex) { @@ -2564,7 +262,7 @@ void DRW_transform_to_display(GPUTexture *tex) /** \name Viewport (DRW_viewport) * \{ */ -static void *DRW_viewport_engine_data_ensure(void *engine_type) +void *drw_viewport_engine_data_ensure(void *engine_type) { void *data = GPU_viewport_engine_data_get(DST.viewport, engine_type); @@ -2706,19 +404,19 @@ static void drw_viewport_var_init(void) /* Refresh DST.pixelsize */ DST.pixsize = rv3d->pixsize; - copy_m4_m4(viewport_matrices.original_mat[DRW_MAT_PERS], rv3d->persmat); - copy_m4_m4(viewport_matrices.original_mat[DRW_MAT_PERSINV], rv3d->persinv); - copy_m4_m4(viewport_matrices.original_mat[DRW_MAT_VIEW], rv3d->viewmat); - copy_m4_m4(viewport_matrices.original_mat[DRW_MAT_VIEWINV], rv3d->viewinv); - copy_m4_m4(viewport_matrices.original_mat[DRW_MAT_WIN], rv3d->winmat); - invert_m4_m4(viewport_matrices.original_mat[DRW_MAT_WININV], rv3d->winmat); + copy_m4_m4(DST.original_mat[DRW_MAT_PERS], rv3d->persmat); + copy_m4_m4(DST.original_mat[DRW_MAT_PERSINV], rv3d->persinv); + copy_m4_m4(DST.original_mat[DRW_MAT_VIEW], rv3d->viewmat); + copy_m4_m4(DST.original_mat[DRW_MAT_VIEWINV], rv3d->viewinv); + copy_m4_m4(DST.original_mat[DRW_MAT_WIN], rv3d->winmat); + invert_m4_m4(DST.original_mat[DRW_MAT_WININV], rv3d->winmat); - memcpy(viewport_matrices.mat, viewport_matrices.original_mat, sizeof(viewport_matrices.mat)); + memcpy(DST.view_data.mat, DST.original_mat, sizeof(DST.original_mat)); - copy_v4_v4(viewcamtexcofac, rv3d->viewcamtexcofac); + copy_v4_v4(DST.view_data.viewcamtexcofac, rv3d->viewcamtexcofac); } else { - copy_v4_fl4(viewcamtexcofac, 1.0f, 1.0f, 0.0f, 0.0f); + 
copy_v4_fl4(DST.view_data.viewcamtexcofac, 1.0f, 1.0f, 0.0f, 0.0f); } /* Reset facing */ @@ -2731,35 +429,35 @@ static void drw_viewport_var_init(void) } /* Alloc array of texture reference. */ - if (RST.bound_texs == NULL) { - RST.bound_texs = MEM_callocN(sizeof(GPUTexture *) * GPU_max_textures(), "Bound GPUTexture refs"); + if (DST.RST.bound_texs == NULL) { + DST.RST.bound_texs = MEM_callocN(sizeof(GPUTexture *) * GPU_max_textures(), "Bound GPUTexture refs"); } - if (RST.bound_tex_slots == NULL) { - RST.bound_tex_slots = MEM_callocN(sizeof(bool) * GPU_max_textures(), "Bound Texture Slots"); + if (DST.RST.bound_tex_slots == NULL) { + DST.RST.bound_tex_slots = MEM_callocN(sizeof(bool) * GPU_max_textures(), "Bound Texture Slots"); } - memset(viewport_matrices.override, 0x0, sizeof(viewport_matrices.override)); + DST.override_mat = 0; memset(DST.common_instance_data, 0x0, sizeof(DST.common_instance_data)); } void DRW_viewport_matrix_get(float mat[4][4], DRWViewportMatrixType type) { BLI_assert(type >= DRW_MAT_PERS && type <= DRW_MAT_WININV); - BLI_assert(viewport_matrices.override[type] || DST.draw_ctx.rv3d != NULL); /* Can't use this in render mode. */ + BLI_assert(((DST.override_mat & (1 << type)) != 0)|| DST.draw_ctx.rv3d != NULL); /* Can't use this in render mode. 
*/ - copy_m4_m4(mat, viewport_matrices.mat[type]); + copy_m4_m4(mat, DST.view_data.mat[type]); } void DRW_viewport_matrix_override_set(float mat[4][4], DRWViewportMatrixType type) { - copy_m4_m4(viewport_matrices.mat[type], mat); - viewport_matrices.override[type] = true; + copy_m4_m4(DST.view_data.mat[type], mat); + DST.override_mat |= (1 << type); } void DRW_viewport_matrix_override_unset(DRWViewportMatrixType type) { - copy_m4_m4(viewport_matrices.mat[type], viewport_matrices.original_mat[type]); - viewport_matrices.override[type] = false; + copy_m4_m4(DST.view_data.mat[type], DST.original_mat[type]); + DST.override_mat &= ~(1 << type); } bool DRW_viewport_is_persp_get(void) @@ -2769,7 +467,7 @@ bool DRW_viewport_is_persp_get(void) return rv3d->is_persp; } else { - return viewport_matrices.mat[DRW_MAT_WIN][3][3] == 0.0f; + return DST.view_data.mat[DRW_MAT_WIN][3][3] == 0.0f; } BLI_assert(0); return false; @@ -2884,24 +582,6 @@ ObjectEngineData *DRW_object_engine_data_ensure( return oed; } -/* XXX There is definitly some overlap between this and DRW_object_engine_data_ensure. - * We should get rid of one of the two. 
*/ -LampEngineData *DRW_lamp_engine_data_ensure(Object *ob, RenderEngineType *engine_type) -{ - BLI_assert(ob->type == OB_LAMP); - - Scene *scene = DST.draw_ctx.scene; - - /* TODO Dupliobjects */ - /* TODO Should be per scenelayer */ - return GPU_lamp_engine_data_get(scene, ob, NULL, engine_type); -} - -void DRW_lamp_engine_data_free(LampEngineData *led) -{ - GPU_lamp_engine_data_free(led); -} - /** \} */ @@ -2914,7 +594,7 @@ static void drw_engines_init(void) { for (LinkData *link = DST.enabled_engines.first; link; link = link->next) { DrawEngineType *engine = link->data; - ViewportEngineData *data = DRW_viewport_engine_data_ensure(engine); + ViewportEngineData *data = drw_viewport_engine_data_ensure(engine); PROFILE_START(stime); if (engine->engine_init) { @@ -2929,7 +609,7 @@ static void drw_engines_cache_init(void) { for (LinkData *link = DST.enabled_engines.first; link; link = link->next) { DrawEngineType *engine = link->data; - ViewportEngineData *data = DRW_viewport_engine_data_ensure(engine); + ViewportEngineData *data = drw_viewport_engine_data_ensure(engine); if (data->text_draw_cache) { DRW_text_cache_destroy(data->text_draw_cache); @@ -2949,7 +629,7 @@ static void drw_engines_cache_populate(Object *ob) { for (LinkData *link = DST.enabled_engines.first; link; link = link->next) { DrawEngineType *engine = link->data; - ViewportEngineData *data = DRW_viewport_engine_data_ensure(engine); + ViewportEngineData *data = drw_viewport_engine_data_ensure(engine); if (engine->id_update) { engine->id_update(data, &ob->id); @@ -2965,7 +645,7 @@ static void drw_engines_cache_finish(void) { for (LinkData *link = DST.enabled_engines.first; link; link = link->next) { DrawEngineType *engine = link->data; - ViewportEngineData *data = DRW_viewport_engine_data_ensure(engine); + ViewportEngineData *data = drw_viewport_engine_data_ensure(engine); if (engine->cache_finish) { engine->cache_finish(data); @@ -2977,7 +657,7 @@ static void drw_engines_draw_background(void) { for 
(LinkData *link = DST.enabled_engines.first; link; link = link->next) { DrawEngineType *engine = link->data; - ViewportEngineData *data = DRW_viewport_engine_data_ensure(engine); + ViewportEngineData *data = drw_viewport_engine_data_ensure(engine); if (engine->draw_background) { PROFILE_START(stime); @@ -3001,7 +681,7 @@ static void drw_engines_draw_scene(void) { for (LinkData *link = DST.enabled_engines.first; link; link = link->next) { DrawEngineType *engine = link->data; - ViewportEngineData *data = DRW_viewport_engine_data_ensure(engine); + ViewportEngineData *data = drw_viewport_engine_data_ensure(engine); PROFILE_START(stime); if (engine->draw_scene) { @@ -3018,7 +698,7 @@ static void drw_engines_draw_text(void) { for (LinkData *link = DST.enabled_engines.first; link; link = link->next) { DrawEngineType *engine = link->data; - ViewportEngineData *data = DRW_viewport_engine_data_ensure(engine); + ViewportEngineData *data = drw_viewport_engine_data_ensure(engine); PROFILE_START(stime); if (data->text_draw_cache) { @@ -3039,7 +719,7 @@ int DRW_draw_region_engine_info_offset(void) int lines = 0; for (LinkData *link = DST.enabled_engines.first; link; link = link->next) { DrawEngineType *engine = link->data; - ViewportEngineData *data = DRW_viewport_engine_data_ensure(engine); + ViewportEngineData *data = drw_viewport_engine_data_ensure(engine); /* Count the number of lines. */ if (data->info[0] != '\0') { @@ -3074,7 +754,7 @@ void DRW_draw_region_engine_info(void) for (LinkData *link = DST.enabled_engines.first; link; link = link->next) { DrawEngineType *engine = link->data; - ViewportEngineData *data = DRW_viewport_engine_data_ensure(engine); + ViewportEngineData *data = drw_viewport_engine_data_ensure(engine); if (data->info[0] != '\0') { char *chr_current = data->info; @@ -3118,6 +798,14 @@ static void use_drw_engine(DrawEngineType *engine) BLI_addtail(&DST.enabled_engines, ld); } +/** + * Use for external render engines. 
+ */ +static void drw_engines_enable_external(void) +{ + use_drw_engine(DRW_engine_viewport_external_type.draw_engine); +} + /* TODO revisit this when proper layering is implemented */ /* Gather all draw engines needed and store them in DST.enabled_engines * That also define the rendering order of engines */ @@ -3197,14 +885,6 @@ static void drw_engines_enable_basic(void) use_drw_engine(DRW_engine_viewport_basic_type.draw_engine); } -/** - * Use for external render engines. - */ -static void drw_engines_enable_external(void) -{ - use_drw_engine(DRW_engine_viewport_external_type.draw_engine); -} - static void drw_engines_enable(ViewLayer *view_layer, RenderEngineType *engine_type) { Object *obact = OBACT(view_layer); @@ -3236,127 +916,6 @@ static unsigned int DRW_engines_get_hash(void) return hash; } -static void draw_stat(rcti *rect, int u, int v, const char *txt, const int size) -{ - BLF_draw_default_ascii(rect->xmin + (1 + u * 5) * U.widget_unit, - rect->ymax - (3 + v) * U.widget_unit, 0.0f, - txt, size); -} - -/* CPU stats */ -static void drw_debug_cpu_stats(void) -{ - int u, v; - double init_tot_time = 0.0, background_tot_time = 0.0, render_tot_time = 0.0, tot_time = 0.0; - /* local coordinate visible rect inside region, to accomodate overlapping ui */ - rcti rect; - struct ARegion *ar = DST.draw_ctx.ar; - ED_region_visible_rect(ar, &rect); - - UI_FontThemeColor(BLF_default(), TH_TEXT_HI); - - /* row by row */ - v = 0; u = 0; - /* Label row */ - char col_label[32]; - sprintf(col_label, "Engine"); - draw_stat(&rect, u++, v, col_label, sizeof(col_label)); - sprintf(col_label, "Init"); - draw_stat(&rect, u++, v, col_label, sizeof(col_label)); - sprintf(col_label, "Background"); - draw_stat(&rect, u++, v, col_label, sizeof(col_label)); - sprintf(col_label, "Render"); - draw_stat(&rect, u++, v, col_label, sizeof(col_label)); - sprintf(col_label, "Total (w/o cache)"); - draw_stat(&rect, u++, v, col_label, sizeof(col_label)); - v++; - - /* Engines rows */ - char 
time_to_txt[16]; - for (LinkData *link = DST.enabled_engines.first; link; link = link->next) { - u = 0; - DrawEngineType *engine = link->data; - ViewportEngineData *data = DRW_viewport_engine_data_ensure(engine); - - draw_stat(&rect, u++, v, engine->idname, sizeof(engine->idname)); - - init_tot_time += data->init_time; - sprintf(time_to_txt, "%.2fms", data->init_time); - draw_stat(&rect, u++, v, time_to_txt, sizeof(time_to_txt)); - - background_tot_time += data->background_time; - sprintf(time_to_txt, "%.2fms", data->background_time); - draw_stat(&rect, u++, v, time_to_txt, sizeof(time_to_txt)); - - render_tot_time += data->render_time; - sprintf(time_to_txt, "%.2fms", data->render_time); - draw_stat(&rect, u++, v, time_to_txt, sizeof(time_to_txt)); - - tot_time += data->init_time + data->background_time + data->render_time; - sprintf(time_to_txt, "%.2fms", data->init_time + data->background_time + data->render_time); - draw_stat(&rect, u++, v, time_to_txt, sizeof(time_to_txt)); - v++; - } - - /* Totals row */ - u = 0; - sprintf(col_label, "Sub Total"); - draw_stat(&rect, u++, v, col_label, sizeof(col_label)); - sprintf(time_to_txt, "%.2fms", init_tot_time); - draw_stat(&rect, u++, v, time_to_txt, sizeof(time_to_txt)); - sprintf(time_to_txt, "%.2fms", background_tot_time); - draw_stat(&rect, u++, v, time_to_txt, sizeof(time_to_txt)); - sprintf(time_to_txt, "%.2fms", render_tot_time); - draw_stat(&rect, u++, v, time_to_txt, sizeof(time_to_txt)); - sprintf(time_to_txt, "%.2fms", tot_time); - draw_stat(&rect, u++, v, time_to_txt, sizeof(time_to_txt)); - v += 2; - - u = 0; - sprintf(col_label, "Cache Time"); - draw_stat(&rect, u++, v, col_label, sizeof(col_label)); - sprintf(time_to_txt, "%.2fms", DST.cache_time); - draw_stat(&rect, u++, v, time_to_txt, sizeof(time_to_txt)); -} - -/* Display GPU time for each passes */ -static void drw_debug_gpu_stats(void) -{ - /* local coordinate visible rect inside region, to accomodate overlapping ui */ - rcti rect; - struct 
ARegion *ar = DST.draw_ctx.ar; - ED_region_visible_rect(ar, &rect); - - UI_FontThemeColor(BLF_default(), TH_TEXT_HI); - - int v = BLI_listbase_count(&DST.enabled_engines) + 5; - - char stat_string[32]; - - /* Memory Stats */ - unsigned int tex_mem = GPU_texture_memory_usage_get(); - unsigned int vbo_mem = GWN_vertbuf_get_memory_usage(); - - sprintf(stat_string, "GPU Memory"); - draw_stat(&rect, 0, v, stat_string, sizeof(stat_string)); - sprintf(stat_string, "%.2fMB", (double)(tex_mem + vbo_mem) / 1000000.0); - draw_stat(&rect, 1, v++, stat_string, sizeof(stat_string)); - sprintf(stat_string, " |--> Textures"); - draw_stat(&rect, 0, v, stat_string, sizeof(stat_string)); - sprintf(stat_string, "%.2fMB", (double)tex_mem / 1000000.0); - draw_stat(&rect, 1, v++, stat_string, sizeof(stat_string)); - sprintf(stat_string, " |--> Meshes"); - draw_stat(&rect, 0, v, stat_string, sizeof(stat_string)); - sprintf(stat_string, "%.2fMB", (double)vbo_mem / 1000000.0); - draw_stat(&rect, 1, v++, stat_string, sizeof(stat_string)); - - /* Pre offset for stats_draw */ - rect.ymax -= (3 + ++v) * U.widget_unit; - - /* Rendering Stats */ - DRW_stats_draw(&rect); -} - /* -------------------------------------------------------------------- */ /** \name View Update @@ -3379,10 +938,10 @@ void DRW_notify_view_update(const DRWUpdateContext *update_ctx) /* XXX Really nasty locking. But else this could * be executed by the material previews thread * while rendering a viewport. */ - BLI_mutex_lock(&g_ogl_context_mutex); + BLI_mutex_lock(&DST.ogl_context_mutex); /* Reset before using it. 
*/ - memset(&DST, 0x0, sizeof(DST)); + memset(&DST, 0x0, offsetof(DRWManager, ogl_context)); DST.viewport = rv3d->viewport; DST.draw_ctx = (DRWContextState){ @@ -3394,7 +953,7 @@ void DRW_notify_view_update(const DRWUpdateContext *update_ctx) for (LinkData *link = DST.enabled_engines.first; link; link = link->next) { DrawEngineType *draw_engine = link->data; - ViewportEngineData *data = DRW_viewport_engine_data_ensure(draw_engine); + ViewportEngineData *data = drw_viewport_engine_data_ensure(draw_engine); if (draw_engine->view_update) { draw_engine->view_update(data); @@ -3405,7 +964,7 @@ void DRW_notify_view_update(const DRWUpdateContext *update_ctx) drw_engines_disable(); - BLI_mutex_unlock(&g_ogl_context_mutex); + BLI_mutex_unlock(&DST.ogl_context_mutex); } /** \} */ @@ -3432,7 +991,7 @@ void DRW_notify_id_update(const DRWUpdateContext *update_ctx, ID *id) return; } /* Reset before using it. */ - memset(&DST, 0x0, sizeof(DST)); + memset(&DST, 0x0, offsetof(DRWManager, ogl_context)); DST.viewport = rv3d->viewport; DST.draw_ctx = (DRWContextState){ ar, rv3d, v3d, scene, view_layer, OBACT(view_layer), engine_type, depsgraph, OB_MODE_OBJECT, NULL, @@ -3440,7 +999,7 @@ void DRW_notify_id_update(const DRWUpdateContext *update_ctx, ID *id) drw_engines_enable(view_layer, engine_type); for (LinkData *link = DST.enabled_engines.first; link; link = link->next) { DrawEngineType *draw_engine = link->data; - ViewportEngineData *data = DRW_viewport_engine_data_ensure(draw_engine); + ViewportEngineData *data = drw_viewport_engine_data_ensure(draw_engine); if (draw_engine->id_update) { draw_engine->id_update(data, id); } @@ -3468,7 +1027,7 @@ void DRW_draw_view(const bContext *C) View3D *v3d = CTX_wm_view3d(C); /* Reset before using it. 
*/ - memset(&DST, 0x0, sizeof(DST)); + memset(&DST, 0x0, offsetof(DRWManager, ogl_context)); DRW_draw_render_loop_ex(eval_ctx.depsgraph, engine_type, ar, v3d, eval_ctx.object_mode, C); } @@ -3590,8 +1149,9 @@ void DRW_draw_render_loop_ex( } if (G.debug_value > 20) { - drw_debug_cpu_stats(); - drw_debug_gpu_stats(); + rcti rect; /* local coordinate visible rect inside region, to accomodate overlapping ui */ + ED_region_visible_rect(DST.draw_ctx.ar, &rect); + DRW_stats_draw(&rect); } DRW_state_reset(); @@ -3601,7 +1161,7 @@ void DRW_draw_render_loop_ex( #ifdef DEBUG /* Avoid accidental reuse. */ - memset(&DST, 0xFF, sizeof(DST)); + memset(&DST, 0xFF, offsetof(DRWManager, ogl_context)); #endif } @@ -3610,7 +1170,7 @@ void DRW_draw_render_loop( ARegion *ar, View3D *v3d, const eObjectMode object_mode) { /* Reset before using it. */ - memset(&DST, 0x0, sizeof(DST)); + memset(&DST, 0x0, offsetof(DRWManager, ogl_context)); Scene *scene = DEG_get_evaluated_scene(depsgraph); RenderEngineType *engine_type = RE_engines_find(scene->view_render.engine_id); @@ -3640,7 +1200,7 @@ void DRW_draw_render_loop_offscreen( } /* Reset before using it. */ - memset(&DST, 0x0, sizeof(DST)); + memset(&DST, 0x0, offsetof(DRWManager, ogl_context)); DST.options.is_image_render = true; DST.options.draw_background = draw_background; DRW_draw_render_loop_ex(depsgraph, engine_type, ar, v3d, object_mode, NULL); @@ -3676,7 +1236,7 @@ void DRW_render_to_image(RenderEngine *engine, struct Depsgraph *depsgraph) * multiple threads. */ /* Reset before using it. 
*/ - memset(&DST, 0x0, sizeof(DST)); + memset(&DST, 0x0, offsetof(DRWManager, ogl_context)); DST.options.is_image_render = true; DST.options.is_scene_render = true; DST.options.draw_background = scene->r.alphamode == R_ADDSKY; @@ -3692,7 +1252,7 @@ void DRW_render_to_image(RenderEngine *engine, struct Depsgraph *depsgraph) drw_viewport_var_init(); - ViewportEngineData *data = DRW_viewport_engine_data_ensure(draw_engine_type); + ViewportEngineData *data = drw_viewport_engine_data_ensure(draw_engine_type); /* set default viewport */ gpuPushAttrib(GPU_ENABLE_BIT | GPU_VIEWPORT_BIT); @@ -3753,7 +1313,7 @@ void DRW_render_to_image(RenderEngine *engine, struct Depsgraph *depsgraph) #ifdef DEBUG /* Avoid accidental reuse. */ - memset(&DST, 0xFF, sizeof(DST)); + memset(&DST, 0xFF, offsetof(DRWManager, ogl_context)); #endif } @@ -3825,7 +1385,7 @@ void DRW_draw_select_loop( RegionView3D *rv3d = ar->regiondata; /* Reset before using it. */ - memset(&DST, 0x0, sizeof(DST)); + memset(&DST, 0x0, offsetof(DRWManager, ogl_context)); /* backup (_never_ use rv3d->viewport) */ void *backup_viewport = rv3d->viewport; @@ -3925,7 +1485,7 @@ void DRW_draw_select_loop( #ifdef DEBUG /* Avoid accidental reuse. */ - memset(&DST, 0xFF, sizeof(DST)); + memset(&DST, 0xFF, offsetof(DRWManager, ogl_context)); #endif GPU_framebuffer_restore(); @@ -3997,7 +1557,7 @@ void DRW_draw_depth_loop( rv3d->viewport = NULL; /* Reset before using it. */ - memset(&DST, 0x0, sizeof(DST)); + memset(&DST, 0x0, offsetof(DRWManager, ogl_context)); gpuPushAttrib(GPU_ENABLE_BIT | GPU_VIEWPORT_BIT); glDisable(GL_SCISSOR_TEST); @@ -4067,7 +1627,7 @@ void DRW_draw_depth_loop( #ifdef DEBUG /* Avoid accidental reuse. */ - memset(&DST, 0xFF, sizeof(DST)); + memset(&DST, 0xFF, offsetof(DRWManager, ogl_context)); #endif /* TODO: Reading depth for operators should be done here. 
*/ @@ -4295,6 +1855,7 @@ void DRW_engines_register(void) } } +extern struct Gwn_VertFormat *g_pos_format; /* draw_shgroup.c */ extern struct GPUUniformBuffer *globals_ubo; /* draw_common.c */ extern struct GPUTexture *globals_ramp; /* draw_common.c */ void DRW_engines_free(void) @@ -4318,16 +1879,12 @@ void DRW_engines_free(void) } } - if (globals_ubo) - GPU_uniformbuffer_free(globals_ubo); - - if (globals_ramp) - GPU_texture_free(globals_ramp); - + DRW_UBO_FREE_SAFE(globals_ubo); + DRW_TEXTURE_FREE_SAFE(globals_ramp); MEM_SAFE_FREE(g_pos_format); - MEM_SAFE_FREE(RST.bound_texs); - MEM_SAFE_FREE(RST.bound_tex_slots); + MEM_SAFE_FREE(DST.RST.bound_texs); + MEM_SAFE_FREE(DST.RST.bound_tex_slots); DRW_opengl_context_disable(); @@ -4343,16 +1900,16 @@ void DRW_engines_free(void) void DRW_opengl_context_create(void) { - BLI_assert(g_ogl_context == NULL); /* Ensure it's called once */ + BLI_assert(DST.ogl_context == NULL); /* Ensure it's called once */ BLI_assert(BLI_thread_is_main()); - BLI_mutex_init(&g_ogl_context_mutex); + BLI_mutex_init(&DST.ogl_context_mutex); immDeactivate(); /* This changes the active context. */ - g_ogl_context = WM_opengl_context_create(); + DST.ogl_context = WM_opengl_context_create(); /* Be sure to create gawain.context too. 
*/ - g_gwn_context = GWN_context_create(); + DST.gwn_context = GWN_context_create(); immActivate(); /* Set default Blender OpenGL state */ GPU_state_init(); @@ -4363,27 +1920,27 @@ void DRW_opengl_context_create(void) void DRW_opengl_context_destroy(void) { BLI_assert(BLI_thread_is_main()); - if (g_ogl_context != NULL) { - WM_opengl_context_activate(g_ogl_context); - GWN_context_active_set(g_gwn_context); - GWN_context_discard(g_gwn_context); - WM_opengl_context_dispose(g_ogl_context); - BLI_mutex_end(&g_ogl_context_mutex); + if (DST.ogl_context != NULL) { + WM_opengl_context_activate(DST.ogl_context); + GWN_context_active_set(DST.gwn_context); + GWN_context_discard(DST.gwn_context); + WM_opengl_context_dispose(DST.ogl_context); + BLI_mutex_end(&DST.ogl_context_mutex); } } void DRW_opengl_context_enable(void) { - if (g_ogl_context != NULL) { + if (DST.ogl_context != NULL) { /* IMPORTANT: We dont support immediate mode in render mode! * This shall remain in effect until immediate mode supports * multiple threads. 
*/ - BLI_mutex_lock(&g_ogl_context_mutex); + BLI_mutex_lock(&DST.ogl_context_mutex); if (BLI_thread_is_main()) { immDeactivate(); } - WM_opengl_context_activate(g_ogl_context); - GWN_context_active_set(g_gwn_context); + WM_opengl_context_activate(DST.ogl_context); + GWN_context_active_set(DST.gwn_context); if (BLI_thread_is_main()) { immActivate(); } @@ -4392,7 +1949,7 @@ void DRW_opengl_context_enable(void) void DRW_opengl_context_disable(void) { - if (g_ogl_context != NULL) { + if (DST.ogl_context != NULL) { #ifdef __APPLE__ /* Need to flush before disabling draw context, otherwise it does not * always finish drawing and viewport can be empty or partially drawn */ @@ -4403,11 +1960,11 @@ void DRW_opengl_context_disable(void) wm_window_reset_drawable(); } else { - WM_opengl_context_release(g_ogl_context); + WM_opengl_context_release(DST.ogl_context); GWN_context_active_set(NULL); } - BLI_mutex_unlock(&g_ogl_context_mutex); + BLI_mutex_unlock(&DST.ogl_context_mutex); } } diff --git a/source/blender/draw/intern/draw_manager.h b/source/blender/draw/intern/draw_manager.h new file mode 100644 index 00000000000..b6da79fc97a --- /dev/null +++ b/source/blender/draw/intern/draw_manager.h @@ -0,0 +1,346 @@ +/* + * Copyright 2016, Blender Foundation. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software Foundation, + * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. 
+ * + * Contributor(s): Blender Institute + * + */ + +/** \file draw_manager.h + * \ingroup draw + */ + +/* Private functions / structs of the draw manager */ + +#ifndef __DRAW_MANAGER_H__ +#define __DRAW_MANAGER_H__ + +#include "DRW_engine.h" +#include "DRW_render.h" + +#include "BLI_linklist.h" +#include "BLI_threads.h" + +#include "GPU_batch.h" +#include "GPU_framebuffer.h" +#include "GPU_shader.h" +#include "GPU_uniformbuffer.h" +#include "GPU_viewport.h" + +#include "draw_instance_data.h" + +/* Use draw manager to call GPU_select, see: DRW_draw_select_loop */ +#define USE_GPU_SELECT + +/* ------------ Profiling --------------- */ + +#define USE_PROFILE + +#ifdef USE_PROFILE +# include "PIL_time.h" + +# define PROFILE_TIMER_FALLOFF 0.1 + +# define PROFILE_START(time_start) \ + double time_start = PIL_check_seconds_timer(); + +# define PROFILE_END_ACCUM(time_accum, time_start) { \ + time_accum += (PIL_check_seconds_timer() - time_start) * 1e3; \ +} ((void)0) + +/* exp average */ +# define PROFILE_END_UPDATE(time_update, time_start) { \ + double _time_delta = (PIL_check_seconds_timer() - time_start) * 1e3; \ + time_update = (time_update * (1.0 - PROFILE_TIMER_FALLOFF)) + \ + (_time_delta * PROFILE_TIMER_FALLOFF); \ +} ((void)0) + +#else /* USE_PROFILE */ + +# define PROFILE_START(time_start) ((void)0) +# define PROFILE_END_ACCUM(time_accum, time_start) ((void)0) +# define PROFILE_END_UPDATE(time_update, time_start) ((void)0) + +#endif /* USE_PROFILE */ + +/* ------------ Data Structure --------------- */ +/** + * Data structure containing all drawcalls organized by passes and materials. + * DRWPass > DRWShadingGroup > DRWCall > DRWCallState + * > DRWUniform + **/ + +typedef struct DRWCallHeader { + struct DRWCallHeader *next; /* in reality DRWCall or DRWCallGenerate. 
*/ +#ifdef USE_GPU_SELECT + int select_id; +#endif + unsigned char type; +} DRWCallHeader; + +typedef struct DRWCallState { + unsigned char flag; + uint16_t matflag; + /* Culling: Using Bounding Sphere for now for faster culling. + * Not ideal for planes. */ + struct { + float loc[3], rad; /* Bypassed if radius is < 0.0. */ + } bsphere; + /* Matrices */ + float model[4][4]; + float modelinverse[4][4]; + float modelview[4][4]; + float modelviewinverse[4][4]; + float modelviewprojection[4][4]; + float normalview[3][3]; + float normalworld[3][3]; /* Not view dependant */ + float orcotexfac[2][3]; /* Not view dependant */ + float eyevec[3]; +} DRWCallState; + +typedef struct DRWCall { + DRWCallHeader head; + DRWCallState state; /* For now integrated to the struct. */ + + Gwn_Batch *geometry; +} DRWCall; + +typedef struct DRWCallGenerate { + DRWCallHeader head; + DRWCallState state; /* For now integrated to the struct. */ + + DRWCallGenerateFn *geometry_fn; + void *user_data; +} DRWCallGenerate; + +struct DRWUniform { + DRWUniform *next; /* single-linked list */ + const void *value; + int location; + char type; /* DRWUniformType */ + char length; /* cannot be more than 16 */ + char arraysize; /* cannot be more than 16 too */ +}; + +struct DRWShadingGroup { + DRWShadingGroup *next; + + GPUShader *shader; /* Shader to bind */ + DRWUniform *uniforms; /* Uniforms pointers */ + + /* Watch this! Can be nasty for debugging. */ + union { + struct { /* DRW_SHG_NORMAL */ + DRWCallHeader *first, *last; /* Linked list of DRWCall or DRWCallDynamic depending of type */ + } calls; + struct { /* DRW_SHG_***_BATCH */ + struct Gwn_Batch *batch_geom; /* Result of call batching */ + struct Gwn_VertBuf *batch_vbo; + unsigned int primitive_count; + }; + struct { /* DRW_SHG_INSTANCE[_EXTERNAL] */ + struct Gwn_Batch *instance_geom; + struct Gwn_VertBuf *instance_vbo; + unsigned int instance_count; + float instance_orcofac[2][3]; /* TODO find a better place. 
*/ + }; + }; + + DRWState state_extra; /* State changes for this batch only (or'd with the pass's state) */ + DRWState state_extra_disable; /* State changes for this batch only (and'd with the pass's state) */ + unsigned int stencil_mask; /* Stencil mask to use for stencil test / write operations */ + int type; + + /* Builtin matrices locations */ + int model; + int modelinverse; + int modelview; + int modelviewinverse; + int modelviewprojection; + int normalview; + int normalworld; + int orcotexfac; + int eye; + uint16_t matflag; /* Matrices needed, same as DRWCall.flag */ + +#ifndef NDEBUG + char attribs_count; +#endif + +#ifdef USE_GPU_SELECT + DRWInstanceData *inst_selectid; + DRWPass *pass_parent; /* backlink to pass we're in */ + int override_selectid; /* Override for single object instances. */ +#endif +}; + +#define MAX_PASS_NAME 32 + +struct DRWPass { + /* Linked list */ + struct { + DRWShadingGroup *first; + DRWShadingGroup *last; + } shgroups; + + DRWState state; + char name[MAX_PASS_NAME]; +}; + +/* Used by DRWUniform.type */ +typedef enum { + DRW_UNIFORM_BOOL, + DRW_UNIFORM_SHORT_TO_INT, + DRW_UNIFORM_SHORT_TO_FLOAT, + DRW_UNIFORM_INT, + DRW_UNIFORM_FLOAT, + DRW_UNIFORM_TEXTURE, + DRW_UNIFORM_BUFFER, + DRW_UNIFORM_BLOCK +} DRWUniformType; + +/* Used by DRWCall.flag */ +enum { + DRW_CALL_SINGLE, /* A single batch */ + DRW_CALL_GENERATE, /* Uses a callback to draw with any number of batches. 
*/ +}; + +/* Used by DRWCall.state */ +enum { + DRW_CALL_CULLED = (1 << 0), + DRW_CALL_NEGSCALE = (1 << 1), +}; + +/* Used by DRWCall.flag */ +enum { + DRW_CALL_MODELINVERSE = (1 << 0), + DRW_CALL_MODELVIEW = (1 << 1), + DRW_CALL_MODELVIEWINVERSE = (1 << 2), + DRW_CALL_MODELVIEWPROJECTION = (1 << 3), + DRW_CALL_NORMALVIEW = (1 << 4), + DRW_CALL_NORMALWORLD = (1 << 5), + DRW_CALL_ORCOTEXFAC = (1 << 6), + DRW_CALL_EYEVEC = (1 << 7), +}; + +/* Used by DRWShadingGroup.type */ +enum { + DRW_SHG_NORMAL, + DRW_SHG_POINT_BATCH, + DRW_SHG_LINE_BATCH, + DRW_SHG_TRIANGLE_BATCH, + DRW_SHG_INSTANCE, + DRW_SHG_INSTANCE_EXTERNAL, +}; + +/* ------------- DRAW MANAGER ------------ */ + +#define MAX_CLIP_PLANES 6 /* GL_MAX_CLIP_PLANES is at least 6 */ + +typedef struct DRWManager { + /* TODO clean up this struct a bit */ + /* Cache generation */ + ViewportMemoryPool *vmempool; + DRWUniform *last_uniform; + DRWCall *last_call; + DRWCallGenerate *last_callgenerate; + DRWShadingGroup *last_shgroup; + DRWInstanceDataList *idatalist; + DRWInstanceData *common_instance_data[MAX_INSTANCE_DATA_SIZE]; + + /* Rendering state */ + GPUShader *shader; + + /* Managed by `DRW_state_set`, `DRW_state_reset` */ + DRWState state; + unsigned int stencil_mask; + + /* Per viewport */ + GPUViewport *viewport; + struct GPUFrameBuffer *default_framebuffer; + float size[2]; + float screenvecs[2][3]; + float pixsize; + + GLenum backface, frontface; + + struct { + unsigned int is_select : 1; + unsigned int is_depth : 1; + unsigned int is_image_render : 1; + unsigned int is_scene_render : 1; + unsigned int draw_background : 1; + } options; + + /* Current rendering context */ + DRWContextState draw_ctx; + + /* Convenience pointer to text_store owned by the viewport */ + struct DRWTextStore **text_store_p; + + ListBase enabled_engines; /* RenderEngineType */ + + bool buffer_finish_called; /* Avoid bad usage of DRW_render_instance_buffer_finish */ + + /* Profiling */ + double cache_time; + + /* View dependant 
uniforms. */ + float original_mat[6][4][4]; /* Original rv3d matrices. */ + int override_mat; /* Bitflag of which matrices are overriden. */ + int num_clip_planes; /* Number of active clipplanes. */ + + struct { + float mat[6][4][4]; + float viewcamtexcofac[4]; + float clip_planes_eq[MAX_CLIP_PLANES][4]; + } view_data; + +#ifdef USE_GPU_SELECT + unsigned int select_id; +#endif + + /* ---------- Nothing after this point is cleared after use ----------- */ + + /* ogl_context serves as the offset for clearing only + * the top portion of the struct so DO NOT MOVE IT! */ + void *ogl_context; /* Unique ghost context used by the draw manager. */ + Gwn_Context *gwn_context; + ThreadMutex ogl_context_mutex; /* Mutex to lock the drw manager and avoid concurent context usage. */ + + /** GPU Resource State: Memory storage between drawing. */ + struct { + GPUTexture **bound_texs; + bool *bound_tex_slots; + int bind_tex_inc; + int bind_ubo_inc; + } RST; +} DRWManager; + +extern DRWManager DST; /* TODO : get rid of this and allow multithreaded rendering */ + +/* --------------- FUNCTIONS ------------- */ + +void drw_texture_set_parameters(GPUTexture *tex, DRWTextureFlag flags); +void drw_texture_get_format( + DRWTextureFormat format, bool is_framebuffer, + GPUTextureFormat *r_data_type, int *r_channels, bool *r_is_depth); + +void *drw_viewport_engine_data_ensure(void *engine_type); + +void drw_state_set(DRWState state); + +#endif /* __DRAW_MANAGER_H__ */ diff --git a/source/blender/draw/intern/draw_manager_data.c b/source/blender/draw/intern/draw_manager_data.c new file mode 100644 index 00000000000..b35eda57005 --- /dev/null +++ b/source/blender/draw/intern/draw_manager_data.c @@ -0,0 +1,867 @@ +/* + * Copyright 2016, Blender Foundation. 
+ * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software Foundation, + * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. + * + * Contributor(s): Blender Institute + * + */ + +/** \file blender/draw/intern/draw_manager_data.c + * \ingroup draw + */ + +#include "draw_manager.h" + +#include "BKE_curve.h" +#include "BKE_global.h" +#include "BKE_mesh.h" +#include "BKE_paint.h" +#include "BKE_pbvh.h" + +#include "DNA_curve_types.h" +#include "DNA_mesh_types.h" +#include "DNA_meta_types.h" + +#include "BLI_link_utils.h" +#include "BLI_mempool.h" + +#include "intern/gpu_codegen.h" + +struct Gwn_VertFormat *g_pos_format = NULL; + +/* -------------------------------------------------------------------- */ + +/** \name Uniform Buffer Object (DRW_uniformbuffer) + * \{ */ + +GPUUniformBuffer *DRW_uniformbuffer_create(int size, const void *data) +{ + return GPU_uniformbuffer_create(size, data, NULL); +} + +void DRW_uniformbuffer_update(GPUUniformBuffer *ubo, const void *data) +{ + GPU_uniformbuffer_update(ubo, data); +} + +void DRW_uniformbuffer_free(GPUUniformBuffer *ubo) +{ + GPU_uniformbuffer_free(ubo); +} + +/** \} */ + +/* -------------------------------------------------------------------- */ + +/** \name Uniforms (DRW_shgroup_uniform) + * \{ */ + +static void drw_interface_builtin_uniform( + DRWShadingGroup *shgroup, int builtin, const void *value, int length, int 
arraysize) +{ + int loc = GPU_shader_get_builtin_uniform(shgroup->shader, builtin); + + if (loc == -1) + return; + + DRWUniform *uni = BLI_mempool_alloc(DST.vmempool->uniforms); + uni->location = loc; + uni->type = DRW_UNIFORM_FLOAT; + uni->value = value; + uni->length = length; + uni->arraysize = arraysize; + + BLI_LINKS_PREPEND(shgroup->uniforms, uni); +} + +static void drw_interface_uniform(DRWShadingGroup *shgroup, const char *name, + DRWUniformType type, const void *value, int length, int arraysize) +{ + int location; + if (type == DRW_UNIFORM_BLOCK) { + location = GPU_shader_get_uniform_block(shgroup->shader, name); + } + else { + location = GPU_shader_get_uniform(shgroup->shader, name); + } + + if (location == -1) { + if (G.debug & G_DEBUG) + fprintf(stderr, "Uniform '%s' not found!\n", name); + /* Nice to enable eventually, for now eevee uses uniforms that might not exist. */ + // BLI_assert(0); + return; + } + + DRWUniform *uni = BLI_mempool_alloc(DST.vmempool->uniforms); + + BLI_assert(arraysize > 0 && arraysize <= 16); + BLI_assert(length >= 0 && length <= 16); + + uni->location = location; + uni->type = type; + uni->value = value; + uni->length = length; + uni->arraysize = arraysize; + + BLI_LINKS_PREPEND(shgroup->uniforms, uni); +} + +void DRW_shgroup_uniform_texture(DRWShadingGroup *shgroup, const char *name, const GPUTexture *tex) +{ + drw_interface_uniform(shgroup, name, DRW_UNIFORM_TEXTURE, tex, 0, 1); +} + +void DRW_shgroup_uniform_block(DRWShadingGroup *shgroup, const char *name, const GPUUniformBuffer *ubo) +{ + drw_interface_uniform(shgroup, name, DRW_UNIFORM_BLOCK, ubo, 0, 1); +} + +void DRW_shgroup_uniform_buffer(DRWShadingGroup *shgroup, const char *name, GPUTexture **tex) +{ + drw_interface_uniform(shgroup, name, DRW_UNIFORM_BUFFER, tex, 0, 1); +} + +void DRW_shgroup_uniform_bool(DRWShadingGroup *shgroup, const char *name, const int *value, int arraysize) +{ + drw_interface_uniform(shgroup, name, DRW_UNIFORM_BOOL, value, 1, arraysize); +} + 
+void DRW_shgroup_uniform_float(DRWShadingGroup *shgroup, const char *name, const float *value, int arraysize) +{ + drw_interface_uniform(shgroup, name, DRW_UNIFORM_FLOAT, value, 1, arraysize); +} + +void DRW_shgroup_uniform_vec2(DRWShadingGroup *shgroup, const char *name, const float *value, int arraysize) +{ + drw_interface_uniform(shgroup, name, DRW_UNIFORM_FLOAT, value, 2, arraysize); +} + +void DRW_shgroup_uniform_vec3(DRWShadingGroup *shgroup, const char *name, const float *value, int arraysize) +{ + drw_interface_uniform(shgroup, name, DRW_UNIFORM_FLOAT, value, 3, arraysize); +} + +void DRW_shgroup_uniform_vec4(DRWShadingGroup *shgroup, const char *name, const float *value, int arraysize) +{ + drw_interface_uniform(shgroup, name, DRW_UNIFORM_FLOAT, value, 4, arraysize); +} + +void DRW_shgroup_uniform_short_to_int(DRWShadingGroup *shgroup, const char *name, const short *value, int arraysize) +{ + drw_interface_uniform(shgroup, name, DRW_UNIFORM_SHORT_TO_INT, value, 1, arraysize); +} + +void DRW_shgroup_uniform_short_to_float(DRWShadingGroup *shgroup, const char *name, const short *value, int arraysize) +{ + drw_interface_uniform(shgroup, name, DRW_UNIFORM_SHORT_TO_FLOAT, value, 1, arraysize); +} + +void DRW_shgroup_uniform_int(DRWShadingGroup *shgroup, const char *name, const int *value, int arraysize) +{ + drw_interface_uniform(shgroup, name, DRW_UNIFORM_INT, value, 1, arraysize); +} + +void DRW_shgroup_uniform_ivec2(DRWShadingGroup *shgroup, const char *name, const int *value, int arraysize) +{ + drw_interface_uniform(shgroup, name, DRW_UNIFORM_INT, value, 2, arraysize); +} + +void DRW_shgroup_uniform_ivec3(DRWShadingGroup *shgroup, const char *name, const int *value, int arraysize) +{ + drw_interface_uniform(shgroup, name, DRW_UNIFORM_INT, value, 3, arraysize); +} + +void DRW_shgroup_uniform_mat3(DRWShadingGroup *shgroup, const char *name, const float *value) +{ + drw_interface_uniform(shgroup, name, DRW_UNIFORM_FLOAT, value, 9, 1); +} + +void 
DRW_shgroup_uniform_mat4(DRWShadingGroup *shgroup, const char *name, const float *value) +{ + drw_interface_uniform(shgroup, name, DRW_UNIFORM_FLOAT, value, 16, 1); +} + +/** \} */ + +/* -------------------------------------------------------------------- */ + +/** \name Draw Call (DRW_calls) + * \{ */ + +static void drw_call_calc_orco(ID *ob_data, float (*r_orcofacs)[3]) +{ + float *texcoloc = NULL; + float *texcosize = NULL; + if (ob_data != NULL) { + switch (GS(ob_data->name)) { + case ID_ME: + BKE_mesh_texspace_get_reference((Mesh *)ob_data, NULL, &texcoloc, NULL, &texcosize); + break; + case ID_CU: + { + Curve *cu = (Curve *)ob_data; + if (cu->bb == NULL || (cu->bb->flag & BOUNDBOX_DIRTY)) { + BKE_curve_texspace_calc(cu); + } + texcoloc = cu->loc; + texcosize = cu->size; + break; + } + case ID_MB: + { + MetaBall *mb = (MetaBall *)ob_data; + texcoloc = mb->loc; + texcosize = mb->size; + break; + } + default: + break; + } + } + + if ((texcoloc != NULL) && (texcosize != NULL)) { + mul_v3_v3fl(r_orcofacs[1], texcosize, 2.0f); + invert_v3(r_orcofacs[1]); + sub_v3_v3v3(r_orcofacs[0], texcoloc, texcosize); + negate_v3(r_orcofacs[0]); + mul_v3_v3(r_orcofacs[0], r_orcofacs[1]); /* result in a nice MADD in the shader */ + } + else { + copy_v3_fl(r_orcofacs[0], 0.0f); + copy_v3_fl(r_orcofacs[1], 1.0f); + } +} + +static void drw_call_set_matrices(DRWCallState *state, float (*obmat)[4], ID *ob_data) +{ + /* Matrices */ + if (obmat != NULL) { + copy_m4_m4(state->model, obmat); + + if (is_negative_m4(state->model)) { + state->matflag |= DRW_CALL_NEGSCALE; + } + } + else { + unit_m4(state->model); + } + + /* Orco factors */ + if ((state->matflag & DRW_CALL_ORCOTEXFAC) != 0) { + drw_call_calc_orco(ob_data, state->orcotexfac); + state->matflag &= ~DRW_CALL_ORCOTEXFAC; + } + + /* TODO Set culling bsphere IF needed by the DRWPass */ + state->bsphere.rad = -1.0f; +} + +void DRW_shgroup_call_add(DRWShadingGroup *shgroup, Gwn_Batch *geom, float (*obmat)[4]) +{ + BLI_assert(geom != 
NULL); + BLI_assert(shgroup->type == DRW_SHG_NORMAL); + + DRWCall *call = BLI_mempool_alloc(DST.vmempool->calls); + call->head.type = DRW_CALL_SINGLE; + call->state.flag = 0; + call->state.matflag = shgroup->matflag; +#ifdef USE_GPU_SELECT + call->head.select_id = DST.select_id; +#endif + call->geometry = geom; + drw_call_set_matrices(&call->state, obmat, NULL); + BLI_LINKS_APPEND(&shgroup->calls, (DRWCallHeader *)call); +} + +void DRW_shgroup_call_object_add(DRWShadingGroup *shgroup, Gwn_Batch *geom, Object *ob) +{ + BLI_assert(geom != NULL); + BLI_assert(shgroup->type == DRW_SHG_NORMAL); + + DRWCall *call = BLI_mempool_alloc(DST.vmempool->calls); + call->head.type = DRW_CALL_SINGLE; + call->state.flag = 0; + call->state.matflag = shgroup->matflag; +#ifdef USE_GPU_SELECT + call->head.select_id = DST.select_id; +#endif + call->geometry = geom; + drw_call_set_matrices(&call->state, ob->obmat, ob->data); + BLI_LINKS_APPEND(&shgroup->calls, (DRWCallHeader *)call); +} + +void DRW_shgroup_call_generate_add( + DRWShadingGroup *shgroup, + DRWCallGenerateFn *geometry_fn, void *user_data, + float (*obmat)[4]) +{ + BLI_assert(geometry_fn != NULL); + BLI_assert(shgroup->type == DRW_SHG_NORMAL); + + DRWCallGenerate *call = BLI_mempool_alloc(DST.vmempool->calls_generate); + call->head.type = DRW_CALL_GENERATE; + call->state.flag = 0; + call->state.matflag = shgroup->matflag; +#ifdef USE_GPU_SELECT + call->head.select_id = DST.select_id; +#endif + call->geometry_fn = geometry_fn; + call->user_data = user_data; + drw_call_set_matrices(&call->state, obmat, NULL); + BLI_LINKS_APPEND(&shgroup->calls, (DRWCallHeader *)call); +} + +static void sculpt_draw_cb( + DRWShadingGroup *shgroup, + void (*draw_fn)(DRWShadingGroup *shgroup, Gwn_Batch *geom), + void *user_data) +{ + Object *ob = user_data; + PBVH *pbvh = ob->sculpt->pbvh; + + if (pbvh) { + BKE_pbvh_draw_cb( + pbvh, NULL, NULL, false, + (void (*)(void *, Gwn_Batch *))draw_fn, shgroup); + } +} + +void 
DRW_shgroup_call_sculpt_add(DRWShadingGroup *shgroup, Object *ob, float (*obmat)[4]) +{ + DRW_shgroup_call_generate_add(shgroup, sculpt_draw_cb, ob, obmat); +} + +void DRW_shgroup_call_dynamic_add_array(DRWShadingGroup *shgroup, const void *attr[], unsigned int attr_len) +{ +#ifdef USE_GPU_SELECT + if (G.f & G_PICKSEL) { + if (shgroup->inst_selectid == NULL) { + shgroup->inst_selectid = DRW_instance_data_request(DST.idatalist, 1, 128); + } + + int *select_id = DRW_instance_data_next(shgroup->inst_selectid); + *select_id = DST.select_id; + } +#endif + + BLI_assert(attr_len == shgroup->attribs_count); + UNUSED_VARS_NDEBUG(attr_len); + + for (int i = 0; i < attr_len; ++i) { + if (shgroup->instance_count == shgroup->instance_vbo->vertex_ct) { + GWN_vertbuf_data_resize(shgroup->instance_vbo, shgroup->instance_count + 32); + } + GWN_vertbuf_attr_set(shgroup->instance_vbo, i, shgroup->instance_count, attr[i]); + } + + shgroup->instance_count += 1; +} + +/** \} */ + +/* -------------------------------------------------------------------- */ + +/** \name Shading Groups (DRW_shgroup) + * \{ */ + +static void drw_interface_init(DRWShadingGroup *shgroup, GPUShader *shader) +{ + shgroup->instance_geom = NULL; + shgroup->instance_vbo = NULL; + shgroup->instance_count = 0; + shgroup->uniforms = NULL; +#ifdef USE_GPU_SELECT + shgroup->inst_selectid = NULL; + shgroup->override_selectid = -1; +#endif +#ifndef NDEBUG + shgroup->attribs_count = 0; +#endif + + /* TODO : They should be grouped inside a UBO updated once per redraw. 
*/ + drw_interface_builtin_uniform(shgroup, GWN_UNIFORM_VIEW, DST.view_data.mat[DRW_MAT_VIEW], 16, 1); + drw_interface_builtin_uniform(shgroup, GWN_UNIFORM_VIEW_INV, DST.view_data.mat[DRW_MAT_VIEWINV], 16, 1); + drw_interface_builtin_uniform(shgroup, GWN_UNIFORM_VIEWPROJECTION, DST.view_data.mat[DRW_MAT_PERS], 16, 1); + drw_interface_builtin_uniform(shgroup, GWN_UNIFORM_VIEWPROJECTION_INV, DST.view_data.mat[DRW_MAT_PERSINV], 16, 1); + drw_interface_builtin_uniform(shgroup, GWN_UNIFORM_PROJECTION, DST.view_data.mat[DRW_MAT_WIN], 16, 1); + drw_interface_builtin_uniform(shgroup, GWN_UNIFORM_PROJECTION_INV, DST.view_data.mat[DRW_MAT_WININV], 16, 1); + drw_interface_builtin_uniform(shgroup, GWN_UNIFORM_CAMERATEXCO, DST.view_data.viewcamtexcofac, 3, 2); + drw_interface_builtin_uniform(shgroup, GWN_UNIFORM_CLIPPLANES, DST.view_data.clip_planes_eq, 4, DST.num_clip_planes); /* TO REMOVE */ + + shgroup->model = GPU_shader_get_builtin_uniform(shader, GWN_UNIFORM_MODEL); + shgroup->modelinverse = GPU_shader_get_builtin_uniform(shader, GWN_UNIFORM_MODEL_INV); + shgroup->modelview = GPU_shader_get_builtin_uniform(shader, GWN_UNIFORM_MODELVIEW); + shgroup->modelviewinverse = GPU_shader_get_builtin_uniform(shader, GWN_UNIFORM_MODELVIEW_INV); + shgroup->modelviewprojection = GPU_shader_get_builtin_uniform(shader, GWN_UNIFORM_MVP); + shgroup->normalview = GPU_shader_get_builtin_uniform(shader, GWN_UNIFORM_NORMAL); + shgroup->normalworld = GPU_shader_get_builtin_uniform(shader, GWN_UNIFORM_WORLDNORMAL); + shgroup->orcotexfac = GPU_shader_get_builtin_uniform(shader, GWN_UNIFORM_ORCO); + shgroup->eye = GPU_shader_get_builtin_uniform(shader, GWN_UNIFORM_EYE); + + shgroup->matflag = 0; + if (shgroup->modelinverse > -1) + shgroup->matflag |= DRW_CALL_MODELINVERSE; + if (shgroup->modelview > -1) + shgroup->matflag |= DRW_CALL_MODELVIEW; + if (shgroup->modelviewinverse > -1) + shgroup->matflag |= DRW_CALL_MODELVIEWINVERSE; + if (shgroup->modelviewprojection > -1) + shgroup->matflag |= 
DRW_CALL_MODELVIEWPROJECTION; + if (shgroup->normalview > -1) + shgroup->matflag |= DRW_CALL_NORMALVIEW; + if (shgroup->normalworld > -1) + shgroup->matflag |= DRW_CALL_NORMALWORLD; + if (shgroup->orcotexfac > -1) + shgroup->matflag |= DRW_CALL_ORCOTEXFAC; + if (shgroup->eye > -1) + shgroup->matflag |= DRW_CALL_EYEVEC; +} + +static void drw_interface_instance_init( + DRWShadingGroup *shgroup, GPUShader *shader, Gwn_Batch *batch, Gwn_VertFormat *format) +{ + BLI_assert(shgroup->type == DRW_SHG_INSTANCE); + BLI_assert(batch != NULL); + + drw_interface_init(shgroup, shader); + + shgroup->instance_geom = batch; +#ifndef NDEBUG + shgroup->attribs_count = (format != NULL) ? format->attrib_ct : 0; +#endif + + if (format != NULL) { + DRW_instancing_buffer_request(DST.idatalist, format, batch, shgroup, + &shgroup->instance_geom, &shgroup->instance_vbo); + } +} + +static void drw_interface_batching_init( + DRWShadingGroup *shgroup, GPUShader *shader, Gwn_VertFormat *format) +{ + drw_interface_init(shgroup, shader); + +#ifndef NDEBUG + shgroup->attribs_count = (format != NULL) ? format->attrib_ct : 0; +#endif + BLI_assert(format != NULL); + + Gwn_PrimType type; + switch (shgroup->type) { + case DRW_SHG_POINT_BATCH: type = GWN_PRIM_POINTS; break; + case DRW_SHG_LINE_BATCH: type = GWN_PRIM_LINES; break; + case DRW_SHG_TRIANGLE_BATCH: type = GWN_PRIM_TRIS; break; + default: + BLI_assert(0); + } + + DRW_batching_buffer_request(DST.idatalist, format, type, shgroup, + &shgroup->batch_geom, &shgroup->batch_vbo); +} + +static DRWShadingGroup *drw_shgroup_create_ex(struct GPUShader *shader, DRWPass *pass) +{ + DRWShadingGroup *shgroup = BLI_mempool_alloc(DST.vmempool->shgroups); + + BLI_LINKS_APPEND(&pass->shgroups, shgroup); + + shgroup->type = DRW_SHG_NORMAL; + shgroup->shader = shader; + shgroup->state_extra = 0; + shgroup->state_extra_disable = ~0x0; + shgroup->stencil_mask = 0; + shgroup->calls.first = NULL; + shgroup->calls.last = NULL; +#if 0 /* All the same in the union! 
*/ + shgroup->batch_geom = NULL; + shgroup->batch_vbo = NULL; + + shgroup->instance_geom = NULL; + shgroup->instance_vbo = NULL; +#endif + +#ifdef USE_GPU_SELECT + shgroup->pass_parent = pass; +#endif + + return shgroup; +} + +static DRWShadingGroup *drw_shgroup_material_create_ex(GPUPass *gpupass, DRWPass *pass) +{ + if (!gpupass) { + /* Shader compilation error */ + return NULL; + } + + DRWShadingGroup *grp = drw_shgroup_create_ex(GPU_pass_shader(gpupass), pass); + return grp; +} + +static DRWShadingGroup *drw_shgroup_material_inputs( + DRWShadingGroup *grp, struct GPUMaterial *material, GPUPass *gpupass) +{ + /* TODO : Ideally we should not convert. But since the whole codegen + * is relying on GPUPass we keep it as is for now. */ + + /* Converting dynamic GPUInput to DRWUniform */ + ListBase *inputs = &gpupass->inputs; + + for (GPUInput *input = inputs->first; input; input = input->next) { + /* Textures */ + if (input->ima) { + double time = 0.0; /* TODO make time variable */ + GPUTexture *tex = GPU_texture_from_blender( + input->ima, input->iuser, input->textarget, input->image_isdata, time, 1); + + if (input->bindtex) { + DRW_shgroup_uniform_texture(grp, input->shadername, tex); + } + } + /* Color Ramps */ + else if (input->tex) { + DRW_shgroup_uniform_texture(grp, input->shadername, input->tex); + } + /* Floats */ + else { + switch (input->type) { + case GPU_FLOAT: + case GPU_VEC2: + case GPU_VEC3: + case GPU_VEC4: + /* Should already be in the material ubo. 
*/ + break; + case GPU_MAT3: + DRW_shgroup_uniform_mat3(grp, input->shadername, (float *)input->dynamicvec); + break; + case GPU_MAT4: + DRW_shgroup_uniform_mat4(grp, input->shadername, (float *)input->dynamicvec); + break; + default: + break; + } + } + } + + GPUUniformBuffer *ubo = GPU_material_get_uniform_buffer(material); + if (ubo != NULL) { + DRW_shgroup_uniform_block(grp, GPU_UBO_BLOCK_NAME, ubo); + } + + return grp; +} + +Gwn_VertFormat *DRW_shgroup_instance_format_array(const DRWInstanceAttribFormat attribs[], int arraysize) +{ + Gwn_VertFormat *format = MEM_callocN(sizeof(Gwn_VertFormat), "Gwn_VertFormat"); + + for (int i = 0; i < arraysize; ++i) { + GWN_vertformat_attr_add(format, attribs[i].name, + (attribs[i].type == DRW_ATTRIB_INT) ? GWN_COMP_I32 : GWN_COMP_F32, + attribs[i].components, + (attribs[i].type == DRW_ATTRIB_INT) ? GWN_FETCH_INT : GWN_FETCH_FLOAT); + } + return format; +} + +DRWShadingGroup *DRW_shgroup_material_create( + struct GPUMaterial *material, DRWPass *pass) +{ + GPUPass *gpupass = GPU_material_get_pass(material); + DRWShadingGroup *shgroup = drw_shgroup_material_create_ex(gpupass, pass); + + if (shgroup) { + drw_interface_init(shgroup, GPU_pass_shader(gpupass)); + drw_shgroup_material_inputs(shgroup, material, gpupass); + } + + return shgroup; +} + +DRWShadingGroup *DRW_shgroup_material_instance_create( + struct GPUMaterial *material, DRWPass *pass, Gwn_Batch *geom, Object *ob, Gwn_VertFormat *format) +{ + GPUPass *gpupass = GPU_material_get_pass(material); + DRWShadingGroup *shgroup = drw_shgroup_material_create_ex(gpupass, pass); + + if (shgroup) { + shgroup->type = DRW_SHG_INSTANCE; + shgroup->instance_geom = geom; + drw_call_calc_orco(ob->data, shgroup->instance_orcofac); + drw_interface_instance_init(shgroup, GPU_pass_shader(gpupass), geom, format); + drw_shgroup_material_inputs(shgroup, material, gpupass); + } + + return shgroup; +} + +DRWShadingGroup *DRW_shgroup_material_empty_tri_batch_create( + struct GPUMaterial 
*material, DRWPass *pass, int tri_count) +{ +#ifdef USE_GPU_SELECT + BLI_assert((G.f & G_PICKSEL) == 0); +#endif + GPUPass *gpupass = GPU_material_get_pass(material); + DRWShadingGroup *shgroup = drw_shgroup_material_create_ex(gpupass, pass); + + if (shgroup) { + /* Calling drw_interface_init will cause it to call GWN_draw_primitive(). */ + drw_interface_init(shgroup, GPU_pass_shader(gpupass)); + shgroup->type = DRW_SHG_TRIANGLE_BATCH; + shgroup->instance_count = tri_count * 3; + drw_shgroup_material_inputs(shgroup, material, gpupass); + } + + return shgroup; +} + +DRWShadingGroup *DRW_shgroup_create(struct GPUShader *shader, DRWPass *pass) +{ + DRWShadingGroup *shgroup = drw_shgroup_create_ex(shader, pass); + drw_interface_init(shgroup, shader); + return shgroup; +} + +DRWShadingGroup *DRW_shgroup_instance_create( + struct GPUShader *shader, DRWPass *pass, Gwn_Batch *geom, Gwn_VertFormat *format) +{ + DRWShadingGroup *shgroup = drw_shgroup_create_ex(shader, pass); + shgroup->type = DRW_SHG_INSTANCE; + shgroup->instance_geom = geom; + drw_call_calc_orco(NULL, shgroup->instance_orcofac); + drw_interface_instance_init(shgroup, shader, geom, format); + + return shgroup; +} + +DRWShadingGroup *DRW_shgroup_point_batch_create(struct GPUShader *shader, DRWPass *pass) +{ + DRW_shgroup_instance_format(g_pos_format, {{"pos", DRW_ATTRIB_FLOAT, 3}}); + + DRWShadingGroup *shgroup = drw_shgroup_create_ex(shader, pass); + shgroup->type = DRW_SHG_POINT_BATCH; + + drw_interface_batching_init(shgroup, shader, g_pos_format); + + return shgroup; +} + +DRWShadingGroup *DRW_shgroup_line_batch_create(struct GPUShader *shader, DRWPass *pass) +{ + DRW_shgroup_instance_format(g_pos_format, {{"pos", DRW_ATTRIB_FLOAT, 3}}); + + DRWShadingGroup *shgroup = drw_shgroup_create_ex(shader, pass); + shgroup->type = DRW_SHG_LINE_BATCH; + + drw_interface_batching_init(shgroup, shader, g_pos_format); + + return shgroup; +} + +/* Very special batch. 
Use this if you position + * your vertices with the vertex shader + * and dont need any VBO attrib */ +DRWShadingGroup *DRW_shgroup_empty_tri_batch_create(struct GPUShader *shader, DRWPass *pass, int tri_count) +{ +#ifdef USE_GPU_SELECT + BLI_assert((G.f & G_PICKSEL) == 0); +#endif + DRWShadingGroup *shgroup = drw_shgroup_create_ex(shader, pass); + + /* Calling drw_interface_init will cause it to call GWN_draw_primitive(). */ + drw_interface_init(shgroup, shader); + + shgroup->type = DRW_SHG_TRIANGLE_BATCH; + shgroup->instance_count = tri_count * 3; + + return shgroup; +} + +/* Specify an external batch instead of adding each attrib one by one. */ +void DRW_shgroup_instance_batch(DRWShadingGroup *shgroup, struct Gwn_Batch *batch) +{ + BLI_assert(shgroup->type == DRW_SHG_INSTANCE); + BLI_assert(shgroup->instance_count == 0); + /* You cannot use external instancing batch without a dummy format. */ + BLI_assert(shgroup->attribs_count != 0); + + shgroup->type = DRW_SHG_INSTANCE_EXTERNAL; + drw_call_calc_orco(NULL, shgroup->instance_orcofac); + /* PERF : This destroys the vaos cache so better check if it's necessary. */ + /* Note: This WILL break if batch->verts[0] is destroyed and reallocated + * at the same adress. Bindings/VAOs would remain obsolete. 
*/ + //if (shgroup->instancing_geom->inst != batch->verts[0]) + GWN_batch_instbuf_set(shgroup->instance_geom, batch->verts[0], false); + +#ifdef USE_GPU_SELECT + shgroup->override_selectid = DST.select_id; +#endif +} + +/* Used for instancing with no attributes */ +void DRW_shgroup_set_instance_count(DRWShadingGroup *shgroup, unsigned int count) +{ + BLI_assert(shgroup->type == DRW_SHG_INSTANCE); + BLI_assert(shgroup->instance_count == 0); + BLI_assert(shgroup->attribs_count == 0); + +#ifdef USE_GPU_SELECT + if (G.f & G_PICKSEL) { + shgroup->override_selectid = DST.select_id; + } +#endif + + shgroup->instance_count = count; +} + +unsigned int DRW_shgroup_get_instance_count(const DRWShadingGroup *shgroup) +{ + return shgroup->instance_count; +} + +/** + * State is added to #Pass.state while drawing. + * Use to temporarily enable draw options. + */ +void DRW_shgroup_state_enable(DRWShadingGroup *shgroup, DRWState state) +{ + shgroup->state_extra |= state; +} + +void DRW_shgroup_state_disable(DRWShadingGroup *shgroup, DRWState state) +{ + shgroup->state_extra_disable &= ~state; +} + +void DRW_shgroup_stencil_mask(DRWShadingGroup *shgroup, unsigned int mask) +{ + BLI_assert(mask <= 255); + shgroup->stencil_mask = mask; +} + +/** \} */ + +/* -------------------------------------------------------------------- */ + +/** \name Passes (DRW_pass) + * \{ */ + +DRWPass *DRW_pass_create(const char *name, DRWState state) +{ + DRWPass *pass = BLI_mempool_alloc(DST.vmempool->passes); + pass->state = state; + if (G.debug_value > 20) { + BLI_strncpy(pass->name, name, MAX_PASS_NAME); + } + + pass->shgroups.first = NULL; + pass->shgroups.last = NULL; + + return pass; +} + +void DRW_pass_state_set(DRWPass *pass, DRWState state) +{ + pass->state = state; +} + +void DRW_pass_free(DRWPass *pass) +{ + pass->shgroups.first = NULL; + pass->shgroups.last = NULL; +} + +void DRW_pass_foreach_shgroup(DRWPass *pass, void (*callback)(void *userData, DRWShadingGroup *shgrp), void *userData) +{ + 
for (DRWShadingGroup *shgroup = pass->shgroups.first; shgroup; shgroup = shgroup->next) { + callback(userData, shgroup); + } +} + +typedef struct ZSortData { + float *axis; + float *origin; +} ZSortData; + +static int pass_shgroup_dist_sort(void *thunk, const void *a, const void *b) +{ + const ZSortData *zsortdata = (ZSortData *)thunk; + const DRWShadingGroup *shgrp_a = (const DRWShadingGroup *)a; + const DRWShadingGroup *shgrp_b = (const DRWShadingGroup *)b; + + const DRWCall *call_a = (DRWCall *)shgrp_a->calls.first; + const DRWCall *call_b = (DRWCall *)shgrp_b->calls.first; + + if (call_a == NULL) return -1; + if (call_b == NULL) return -1; + + float tmp[3]; + sub_v3_v3v3(tmp, zsortdata->origin, call_a->state.model[3]); + const float a_sq = dot_v3v3(zsortdata->axis, tmp); + sub_v3_v3v3(tmp, zsortdata->origin, call_b->state.model[3]); + const float b_sq = dot_v3v3(zsortdata->axis, tmp); + + if (a_sq < b_sq) return 1; + else if (a_sq > b_sq) return -1; + else { + /* If there is a depth prepass put it before */ + if ((shgrp_a->state_extra & DRW_STATE_WRITE_DEPTH) != 0) { + return -1; + } + else if ((shgrp_b->state_extra & DRW_STATE_WRITE_DEPTH) != 0) { + return 1; + } + else return 0; + } +} + +/* ------------------ Shading group sorting --------------------- */ + +#define SORT_IMPL_LINKTYPE DRWShadingGroup + +#define SORT_IMPL_USE_THUNK +#define SORT_IMPL_FUNC shgroup_sort_fn_r +#include "../../blenlib/intern/list_sort_impl.h" +#undef SORT_IMPL_FUNC +#undef SORT_IMPL_USE_THUNK + +#undef SORT_IMPL_LINKTYPE + +/** + * Sort Shading groups by decreasing Z of their first draw call. + * This is usefull for order dependant effect such as transparency. 
+ **/ +void DRW_pass_sort_shgroup_z(DRWPass *pass) +{ + float (*viewinv)[4]; + viewinv = DST.view_data.mat[DRW_MAT_VIEWINV]; + + ZSortData zsortdata = {viewinv[2], viewinv[3]}; + + if (pass->shgroups.first && pass->shgroups.first->next) { + pass->shgroups.first = shgroup_sort_fn_r(pass->shgroups.first, pass_shgroup_dist_sort, &zsortdata); + + /* Find the next last */ + DRWShadingGroup *last = pass->shgroups.first; + while ((last = last->next)) { + /* Do nothing */ + } + pass->shgroups.last = last; + } +} + +/** \} */ diff --git a/source/blender/draw/intern/draw_manager_exec.c b/source/blender/draw/intern/draw_manager_exec.c new file mode 100644 index 00000000000..80c10d18bb0 --- /dev/null +++ b/source/blender/draw/intern/draw_manager_exec.c @@ -0,0 +1,747 @@ +/* + * Copyright 2016, Blender Foundation. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software Foundation, + * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. 
+ * + * Contributor(s): Blender Institute + * + */ + +/** \file blender/draw/intern/draw_manager_exec.c + * \ingroup draw + */ + +#include "draw_manager.h" + +#include "BIF_glutil.h" + +#include "BKE_global.h" + +#include "GPU_draw.h" +#include "GPU_extensions.h" + +#ifdef USE_GPU_SELECT +# include "ED_view3d.h" +# include "ED_armature.h" +# include "GPU_select.h" +#endif + +#ifdef USE_GPU_SELECT +void DRW_select_load_id(unsigned int id) +{ + BLI_assert(G.f & G_PICKSEL); + DST.select_id = id; +} +#endif + +/* -------------------------------------------------------------------- */ + +/** \name Draw State (DRW_state) + * \{ */ + +void drw_state_set(DRWState state) +{ + if (DST.state == state) { + return; + } + +#define CHANGED_TO(f) \ + ((DST.state & (f)) ? \ + ((state & (f)) ? 0 : -1) : \ + ((state & (f)) ? 1 : 0)) + +#define CHANGED_ANY(f) \ + ((DST.state & (f)) != (state & (f))) + +#define CHANGED_ANY_STORE_VAR(f, enabled) \ + ((DST.state & (f)) != (enabled = (state & (f)))) + + /* Depth Write */ + { + int test; + if ((test = CHANGED_TO(DRW_STATE_WRITE_DEPTH))) { + if (test == 1) { + glDepthMask(GL_TRUE); + } + else { + glDepthMask(GL_FALSE); + } + } + } + + /* Color Write */ + { + int test; + if ((test = CHANGED_TO(DRW_STATE_WRITE_COLOR))) { + if (test == 1) { + glColorMask(GL_TRUE, GL_TRUE, GL_TRUE, GL_TRUE); + } + else { + glColorMask(GL_FALSE, GL_FALSE, GL_FALSE, GL_FALSE); + } + } + } + + /* Cull */ + { + DRWState test; + if (CHANGED_ANY_STORE_VAR( + DRW_STATE_CULL_BACK | DRW_STATE_CULL_FRONT, + test)) + { + if (test) { + glEnable(GL_CULL_FACE); + + if ((state & DRW_STATE_CULL_BACK) != 0) { + glCullFace(GL_BACK); + } + else if ((state & DRW_STATE_CULL_FRONT) != 0) { + glCullFace(GL_FRONT); + } + else { + BLI_assert(0); + } + } + else { + glDisable(GL_CULL_FACE); + } + } + } + + /* Depth Test */ + { + DRWState test; + if (CHANGED_ANY_STORE_VAR( + DRW_STATE_DEPTH_LESS | DRW_STATE_DEPTH_EQUAL | DRW_STATE_DEPTH_GREATER | DRW_STATE_DEPTH_ALWAYS, + test)) + { + if 
(test) { + glEnable(GL_DEPTH_TEST); + + if (state & DRW_STATE_DEPTH_LESS) { + glDepthFunc(GL_LEQUAL); + } + else if (state & DRW_STATE_DEPTH_EQUAL) { + glDepthFunc(GL_EQUAL); + } + else if (state & DRW_STATE_DEPTH_GREATER) { + glDepthFunc(GL_GREATER); + } + else if (state & DRW_STATE_DEPTH_ALWAYS) { + glDepthFunc(GL_ALWAYS); + } + else { + BLI_assert(0); + } + } + else { + glDisable(GL_DEPTH_TEST); + } + } + } + + /* Wire Width */ + { + if (CHANGED_ANY(DRW_STATE_WIRE)) { + if ((state & DRW_STATE_WIRE) != 0) { + glLineWidth(1.0f); + } + else { + /* do nothing */ + } + } + } + + /* Points Size */ + { + int test; + if ((test = CHANGED_TO(DRW_STATE_POINT))) { + if (test == 1) { + GPU_enable_program_point_size(); + glPointSize(5.0f); + } + else { + GPU_disable_program_point_size(); + } + } + } + + /* Blending (all buffer) */ + { + int test; + if (CHANGED_ANY_STORE_VAR( + DRW_STATE_BLEND | DRW_STATE_ADDITIVE | DRW_STATE_MULTIPLY | DRW_STATE_TRANSMISSION | + DRW_STATE_ADDITIVE_FULL, + test)) + { + if (test) { + glEnable(GL_BLEND); + + if ((state & DRW_STATE_BLEND) != 0) { + glBlendFuncSeparate(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA, /* RGB */ + GL_ONE, GL_ONE_MINUS_SRC_ALPHA); /* Alpha */ + } + else if ((state & DRW_STATE_MULTIPLY) != 0) { + glBlendFunc(GL_DST_COLOR, GL_ZERO); + } + else if ((state & DRW_STATE_TRANSMISSION) != 0) { + glBlendFunc(GL_ONE, GL_SRC_ALPHA); + } + else if ((state & DRW_STATE_ADDITIVE) != 0) { + /* Do not let alpha accumulate but premult the source RGB by it. */ + glBlendFuncSeparate(GL_SRC_ALPHA, GL_ONE, /* RGB */ + GL_ZERO, GL_ONE); /* Alpha */ + } + else if ((state & DRW_STATE_ADDITIVE_FULL) != 0) { + /* Let alpha accumulate. 
*/ + glBlendFunc(GL_ONE, GL_ONE); + } + else { + BLI_assert(0); + } + } + else { + glDisable(GL_BLEND); + } + } + } + + /* Clip Planes */ + { + int test; + if ((test = CHANGED_TO(DRW_STATE_CLIP_PLANES))) { + if (test == 1) { + for (int i = 0; i < DST.num_clip_planes; ++i) { + glEnable(GL_CLIP_DISTANCE0 + i); + } + } + else { + for (int i = 0; i < MAX_CLIP_PLANES; ++i) { + glDisable(GL_CLIP_DISTANCE0 + i); + } + } + } + } + + /* Line Stipple */ + { + int test; + if (CHANGED_ANY_STORE_VAR( + DRW_STATE_STIPPLE_2 | DRW_STATE_STIPPLE_3 | DRW_STATE_STIPPLE_4, + test)) + { + if (test) { + if ((state & DRW_STATE_STIPPLE_2) != 0) { + setlinestyle(2); + } + else if ((state & DRW_STATE_STIPPLE_3) != 0) { + setlinestyle(3); + } + else if ((state & DRW_STATE_STIPPLE_4) != 0) { + setlinestyle(4); + } + else { + BLI_assert(0); + } + } + else { + setlinestyle(0); + } + } + } + + /* Stencil */ + { + DRWState test; + if (CHANGED_ANY_STORE_VAR( + DRW_STATE_WRITE_STENCIL | + DRW_STATE_STENCIL_EQUAL, + test)) + { + if (test) { + glEnable(GL_STENCIL_TEST); + + /* Stencil Write */ + if ((state & DRW_STATE_WRITE_STENCIL) != 0) { + glStencilMask(0xFF); + glStencilOp(GL_KEEP, GL_KEEP, GL_REPLACE); + } + /* Stencil Test */ + else if ((state & DRW_STATE_STENCIL_EQUAL) != 0) { + glStencilMask(0x00); /* disable write */ + DST.stencil_mask = 0; + } + else { + BLI_assert(0); + } + } + else { + /* disable write & test */ + DST.stencil_mask = 0; + glStencilMask(0x00); + glStencilFunc(GL_ALWAYS, 1, 0xFF); + glDisable(GL_STENCIL_TEST); + } + } + } + +#undef CHANGED_TO +#undef CHANGED_ANY +#undef CHANGED_ANY_STORE_VAR + + DST.state = state; +} + +static void drw_stencil_set(unsigned int mask) +{ + if (DST.stencil_mask != mask) { + /* Stencil Write */ + if ((DST.state & DRW_STATE_WRITE_STENCIL) != 0) { + glStencilFunc(GL_ALWAYS, mask, 0xFF); + DST.stencil_mask = mask; + } + /* Stencil Test */ + else if ((DST.state & DRW_STATE_STENCIL_EQUAL) != 0) { + glStencilFunc(GL_EQUAL, mask, 0xFF); + 
DST.stencil_mask = mask; + } + } +} + +/* Reset state to not interfer with other UI drawcall */ +void DRW_state_reset_ex(DRWState state) +{ + DST.state = ~state; + drw_state_set(state); +} + +void DRW_state_reset(void) +{ + /* Reset blending function */ + glBlendFuncSeparate(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA, GL_ONE, GL_ONE_MINUS_SRC_ALPHA); + + DRW_state_reset_ex(DRW_STATE_DEFAULT); +} + +/* NOTE : Make sure to reset after use! */ +void DRW_state_invert_facing(void) +{ + SWAP(GLenum, DST.backface, DST.frontface); + glFrontFace(DST.frontface); +} + +/** + * This only works if DRWPasses have been tagged with DRW_STATE_CLIP_PLANES, + * and if the shaders have support for it (see usage of gl_ClipDistance). + * Be sure to call DRW_state_clip_planes_reset() after you finish drawing. + **/ +void DRW_state_clip_planes_add(float plane_eq[4]) +{ + BLI_assert(DST.num_clip_planes < MAX_CLIP_PLANES-1); + copy_v4_v4(DST.view_data.clip_planes_eq[DST.num_clip_planes++], plane_eq); +} + +void DRW_state_clip_planes_reset(void) +{ + DST.num_clip_planes = 0; +} + +/** \} */ + +/* -------------------------------------------------------------------- */ + +/** \name Draw (DRW_draw) + * \{ */ + +static void draw_matrices_model_prepare(DRWCallState *st) +{ + /* OPTI : We can optimize further by sharing this computation for each call using the same object. 
*/ + /* Order matters */ + if (st->matflag & (DRW_CALL_MODELVIEW | DRW_CALL_MODELVIEWINVERSE | + DRW_CALL_NORMALVIEW | DRW_CALL_EYEVEC)) + { + mul_m4_m4m4(st->modelview, DST.view_data.mat[DRW_MAT_VIEW], st->model); + } + if (st->matflag & DRW_CALL_MODELVIEWINVERSE) { + invert_m4_m4(st->modelviewinverse, st->modelview); + } + if (st->matflag & DRW_CALL_MODELVIEWPROJECTION) { + mul_m4_m4m4(st->modelviewprojection, DST.view_data.mat[DRW_MAT_PERS], st->model); + } + if (st->matflag & DRW_CALL_NORMALVIEW) { + copy_m3_m4(st->normalview, st->modelview); + invert_m3(st->normalview); + transpose_m3(st->normalview); + } + if (st->matflag & DRW_CALL_EYEVEC) { + /* Used by orthographic wires */ + float tmp[3][3]; + copy_v3_fl3(st->eyevec, 0.0f, 0.0f, 1.0f); + invert_m3_m3(tmp, st->normalview); + /* set eye vector, transformed to object coords */ + mul_m3_v3(tmp, st->eyevec); + } + /* Non view dependant */ + if (st->matflag & DRW_CALL_MODELINVERSE) { + invert_m4_m4(st->modelinverse, st->model); + st->matflag &= ~DRW_CALL_MODELINVERSE; + } + if (st->matflag & DRW_CALL_NORMALWORLD) { + copy_m3_m4(st->normalworld, st->model); + invert_m3(st->normalworld); + transpose_m3(st->normalworld); + st->matflag &= ~DRW_CALL_NORMALWORLD; + } +} + +static void draw_geometry_prepare(DRWShadingGroup *shgroup, DRWCallState *state) +{ + /* step 1 : bind object dependent matrices */ + if (state != NULL) { + /* OPTI/IDEA(clem): Do this preparation in another thread. 
/**
 * Upload the per-call matrix uniforms to `shgroup->shader` (already bound).
 *
 * \param state: The call's state, or NULL for instancing/batching shgroups,
 *               in which case identity/view matrices are used instead and
 *               view-dependent normal/eye uniforms must not exist.
 */
static void draw_geometry_prepare(DRWShadingGroup *shgroup, DRWCallState *state)
{
	/* step 1 : bind object dependent matrices */
	if (state != NULL) {
		/* OPTI/IDEA(clem): Do this preparation in another thread. */
		draw_matrices_model_prepare(state);
		GPU_shader_uniform_vector(shgroup->shader, shgroup->model, 16, 1, (float *)state->model);
		GPU_shader_uniform_vector(shgroup->shader, shgroup->modelinverse, 16, 1, (float *)state->modelinverse);
		GPU_shader_uniform_vector(shgroup->shader, shgroup->modelview, 16, 1, (float *)state->modelview);
		GPU_shader_uniform_vector(shgroup->shader, shgroup->modelviewinverse, 16, 1, (float *)state->modelviewinverse);
		GPU_shader_uniform_vector(shgroup->shader, shgroup->modelviewprojection, 16, 1, (float *)state->modelviewprojection);
		GPU_shader_uniform_vector(shgroup->shader, shgroup->normalview, 9, 1, (float *)state->normalview);
		GPU_shader_uniform_vector(shgroup->shader, shgroup->normalworld, 9, 1, (float *)state->normalworld);
		GPU_shader_uniform_vector(shgroup->shader, shgroup->orcotexfac, 3, 2, (float *)state->orcotexfac);
		GPU_shader_uniform_vector(shgroup->shader, shgroup->eye, 3, 1, (float *)state->eyevec);
	}
	else {
		/* A NULL state means placement is baked into the instance data, so the
		 * view-space-only uniforms must be absent (location -1). */
		BLI_assert((shgroup->normalview == -1) && (shgroup->normalworld == -1) && (shgroup->eye == -1));
		/* For instancing and batching. */
		float unitmat[4][4];
		unit_m4(unitmat);
		GPU_shader_uniform_vector(shgroup->shader, shgroup->model, 16, 1, (float *)unitmat);
		GPU_shader_uniform_vector(shgroup->shader, shgroup->modelinverse, 16, 1, (float *)unitmat);
		GPU_shader_uniform_vector(shgroup->shader, shgroup->modelview, 16, 1, (float *)DST.view_data.mat[DRW_MAT_VIEW]);
		GPU_shader_uniform_vector(shgroup->shader, shgroup->modelviewinverse, 16, 1, (float *)DST.view_data.mat[DRW_MAT_VIEWINV]);
		GPU_shader_uniform_vector(shgroup->shader, shgroup->modelviewprojection, 16, 1, (float *)DST.view_data.mat[DRW_MAT_PERS]);
		GPU_shader_uniform_vector(shgroup->shader, shgroup->orcotexfac, 3, 2, (float *)shgroup->instance_orcofac);
	}
}
(huge performance loss) */ + geom->program_in_use = true; + if (ELEM(shgroup->type, DRW_SHG_INSTANCE, DRW_SHG_INSTANCE_EXTERNAL)) { + GWN_batch_draw_range_ex(geom, start, count, true); + } + else { + GWN_batch_draw_range(geom, start, count); + } + geom->program_in_use = false; /* XXX hacking gawain */ +} + +static void draw_geometry_execute(DRWShadingGroup *shgroup, Gwn_Batch *geom) +{ + draw_geometry_execute_ex(shgroup, geom, 0, 0); +} + +static void bind_texture(GPUTexture *tex) +{ + int bind_num = GPU_texture_bound_number(tex); + if (bind_num == -1) { + for (int i = 0; i < GPU_max_textures(); ++i) { + DST.RST.bind_tex_inc = (DST.RST.bind_tex_inc + 1) % GPU_max_textures(); + if (DST.RST.bound_tex_slots[DST.RST.bind_tex_inc] == false) { + if (DST.RST.bound_texs[DST.RST.bind_tex_inc] != NULL) { + GPU_texture_unbind(DST.RST.bound_texs[DST.RST.bind_tex_inc]); + } + GPU_texture_bind(tex, DST.RST.bind_tex_inc); + DST.RST.bound_texs[DST.RST.bind_tex_inc] = tex; + DST.RST.bound_tex_slots[DST.RST.bind_tex_inc] = true; + // printf("Binds Texture %d %p\n", DST.RST.bind_tex_inc, tex); + return; + } + } + + printf("Not enough texture slots! Reduce number of textures used by your shader.\n"); + } + DST.RST.bound_tex_slots[bind_num] = true; +} + +static void bind_ubo(GPUUniformBuffer *ubo) +{ + if (DST.RST.bind_ubo_inc < GPU_max_ubo_binds()) { + GPU_uniformbuffer_bind(ubo, DST.RST.bind_ubo_inc); + DST.RST.bind_ubo_inc++; + } + else { + /* This is not depending on user input. + * It is our responsability to make sure there enough slots. */ + BLI_assert(0 && "Not enough ubo slots! This should not happen!\n"); + + /* printf so user can report bad behaviour */ + printf("Not enough ubo slots! 
This should not happen!\n"); + } +} + +static void release_texture_slots(void) +{ + memset(DST.RST.bound_tex_slots, 0x0, sizeof(bool) * GPU_max_textures()); +} + +static void release_ubo_slots(void) +{ + DST.RST.bind_ubo_inc = 0; +} + +static void draw_shgroup(DRWShadingGroup *shgroup, DRWState pass_state) +{ + BLI_assert(shgroup->shader); + + GPUTexture *tex; + GPUUniformBuffer *ubo; + int val; + float fval; + + if (DST.shader != shgroup->shader) { + if (DST.shader) GPU_shader_unbind(); + GPU_shader_bind(shgroup->shader); + DST.shader = shgroup->shader; + } + + release_texture_slots(); + release_ubo_slots(); + + drw_state_set((pass_state & shgroup->state_extra_disable) | shgroup->state_extra); + drw_stencil_set(shgroup->stencil_mask); + + /* Binding Uniform */ + /* Don't check anything, Interface should already contain the least uniform as possible */ + for (DRWUniform *uni = shgroup->uniforms; uni; uni = uni->next) { + switch (uni->type) { + case DRW_UNIFORM_SHORT_TO_INT: + val = (int)*((short *)uni->value); + GPU_shader_uniform_vector_int( + shgroup->shader, uni->location, uni->length, uni->arraysize, (int *)&val); + break; + case DRW_UNIFORM_SHORT_TO_FLOAT: + fval = (float)*((short *)uni->value); + GPU_shader_uniform_vector( + shgroup->shader, uni->location, uni->length, uni->arraysize, (float *)&fval); + break; + case DRW_UNIFORM_BOOL: + case DRW_UNIFORM_INT: + GPU_shader_uniform_vector_int( + shgroup->shader, uni->location, uni->length, uni->arraysize, (int *)uni->value); + break; + case DRW_UNIFORM_FLOAT: + GPU_shader_uniform_vector( + shgroup->shader, uni->location, uni->length, uni->arraysize, (float *)uni->value); + break; + case DRW_UNIFORM_TEXTURE: + tex = (GPUTexture *)uni->value; + BLI_assert(tex); + bind_texture(tex); + GPU_shader_uniform_texture(shgroup->shader, uni->location, tex); + break; + case DRW_UNIFORM_BUFFER: + if (!DRW_state_is_fbo()) { + break; + } + tex = *((GPUTexture **)uni->value); + BLI_assert(tex); + bind_texture(tex); + 
/**
 * Execute one shading group: bind its shader (if changed), apply its GL
 * state on top of the pass state, upload its uniforms, then issue its
 * drawcalls (instanced/batched groups draw once; normal groups iterate
 * their call list, handling negative-scale and GPU-select id loading).
 */
static void draw_shgroup(DRWShadingGroup *shgroup, DRWState pass_state)
{
	BLI_assert(shgroup->shader);

	GPUTexture *tex;
	GPUUniformBuffer *ubo;
	int val;
	float fval;

	/* Avoid redundant shader binds across consecutive shgroups. */
	if (DST.shader != shgroup->shader) {
		if (DST.shader) GPU_shader_unbind();
		GPU_shader_bind(shgroup->shader);
		DST.shader = shgroup->shader;
	}

	release_texture_slots();
	release_ubo_slots();

	/* Pass state masked by the group's disabled bits, plus its extra bits. */
	drw_state_set((pass_state & shgroup->state_extra_disable) | shgroup->state_extra);
	drw_stencil_set(shgroup->stencil_mask);

	/* Binding Uniform */
	/* Don't check anything, Interface should already contain the least uniform as possible */
	for (DRWUniform *uni = shgroup->uniforms; uni; uni = uni->next) {
		switch (uni->type) {
			case DRW_UNIFORM_SHORT_TO_INT:
				val = (int)*((short *)uni->value);
				GPU_shader_uniform_vector_int(
				        shgroup->shader, uni->location, uni->length, uni->arraysize, (int *)&val);
				break;
			case DRW_UNIFORM_SHORT_TO_FLOAT:
				fval = (float)*((short *)uni->value);
				GPU_shader_uniform_vector(
				        shgroup->shader, uni->location, uni->length, uni->arraysize, (float *)&fval);
				break;
			case DRW_UNIFORM_BOOL:
			case DRW_UNIFORM_INT:
				GPU_shader_uniform_vector_int(
				        shgroup->shader, uni->location, uni->length, uni->arraysize, (int *)uni->value);
				break;
			case DRW_UNIFORM_FLOAT:
				GPU_shader_uniform_vector(
				        shgroup->shader, uni->location, uni->length, uni->arraysize, (float *)uni->value);
				break;
			case DRW_UNIFORM_TEXTURE:
				tex = (GPUTexture *)uni->value;
				BLI_assert(tex);
				bind_texture(tex);
				GPU_shader_uniform_texture(shgroup->shader, uni->location, tex);
				break;
			case DRW_UNIFORM_BUFFER:
				/* Buffer uniforms point at a texture owned elsewhere; only
				 * valid while drawing to our own framebuffer. */
				if (!DRW_state_is_fbo()) {
					break;
				}
				tex = *((GPUTexture **)uni->value);
				BLI_assert(tex);
				bind_texture(tex);
				GPU_shader_uniform_texture(shgroup->shader, uni->location, tex);
				break;
			case DRW_UNIFORM_BLOCK:
				ubo = (GPUUniformBuffer *)uni->value;
				bind_ubo(ubo);
				GPU_shader_uniform_buffer(shgroup->shader, uni->location, ubo);
				break;
		}
	}

#ifdef USE_GPU_SELECT
	/* use the first item because of selection we only ever add one */
#  define GPU_SELECT_LOAD_IF_PICKSEL(_call) \
	if ((G.f & G_PICKSEL) && (_call)) { \
		GPU_select_load_id((_call)->head.select_id); \
	} ((void)0)

	/* Opens a loop that draws `_count` elements at a time, loading the
	 * matching select id before each slice. Must be closed with
	 * GPU_SELECT_LOAD_IF_PICKSEL_LIST_END. */
#  define GPU_SELECT_LOAD_IF_PICKSEL_LIST(_shgroup, _start, _count) \
	_start = 0; \
	_count = _shgroup->instance_count; \
	int *select_id = NULL; \
	if (G.f & G_PICKSEL) { \
		if (_shgroup->override_selectid == -1) { \
			select_id = DRW_instance_data_get(_shgroup->inst_selectid); \
			switch (_shgroup->type) { \
				case DRW_SHG_TRIANGLE_BATCH: _count = 3; break; \
				case DRW_SHG_LINE_BATCH: _count = 2; break; \
				default: _count = 1; break; \
			} \
		} \
		else { \
			GPU_select_load_id(_shgroup->override_selectid); \
		} \
	} \
	while (_start < _shgroup->instance_count) { \
		if (select_id) { \
			GPU_select_load_id(select_id[_start]); \
		}

#  define GPU_SELECT_LOAD_IF_PICKSEL_LIST_END(_start, _count) \
		_start += _count; \
	}

#else
#  define GPU_SELECT_LOAD_IF_PICKSEL(call)
#  define GPU_SELECT_LOAD_IF_PICKSEL_LIST_END(start, count)
#  define GPU_SELECT_LOAD_IF_PICKSEL_LIST(_shgroup, _start, _count) \
	_start = 0; \
	_count = _shgroup->interface.instance_count;

#endif

	/* Rendering Calls */
	if (!ELEM(shgroup->type, DRW_SHG_NORMAL)) {
		/* Replacing multiple calls with only one */
		if (ELEM(shgroup->type, DRW_SHG_INSTANCE, DRW_SHG_INSTANCE_EXTERNAL)) {
			if (shgroup->type == DRW_SHG_INSTANCE_EXTERNAL) {
				if (shgroup->instance_geom != NULL) {
					unsigned int count, start;
					draw_geometry_prepare(shgroup, NULL);
					/* This will only load override_selectid */
					GPU_SELECT_LOAD_IF_PICKSEL_LIST(shgroup, start, count)
					{
						draw_geometry_execute(shgroup, shgroup->instance_geom);
					}
					GPU_SELECT_LOAD_IF_PICKSEL_LIST_END(start, count)
				}
			}
			else {
				if (shgroup->instance_count > 0) {
					unsigned int count, start;
					draw_geometry_prepare(shgroup, NULL);
					GPU_SELECT_LOAD_IF_PICKSEL_LIST(shgroup, start, count)
					{
						draw_geometry_execute_ex(shgroup, shgroup->instance_geom, start, count);
					}
					GPU_SELECT_LOAD_IF_PICKSEL_LIST_END(start, count)
				}
			}
		}
		else {  /* DRW_SHG_***_BATCH */
			/* Some dynamic batch can have no geom (no call to aggregate) */
			if (shgroup->instance_count > 0) {
				unsigned int count, start;
				draw_geometry_prepare(shgroup, NULL);
				GPU_SELECT_LOAD_IF_PICKSEL_LIST(shgroup, start, count)
				{
					draw_geometry_execute_ex(shgroup, shgroup->batch_geom, start, count);
				}
				GPU_SELECT_LOAD_IF_PICKSEL_LIST_END(start, count)
			}
		}
	}
	else {
		bool prev_neg_scale = false;
		for (DRWCall *call = (DRWCall *)shgroup->calls.first; call; call = (DRWCall *)call->head.next) {
			if ((call->state.flag & DRW_CALL_CULLED) != 0)
				continue;

			/* Negative scale objects need inverted front-face winding. */
			bool neg_scale = call->state.flag & DRW_CALL_NEGSCALE;
			if (neg_scale != prev_neg_scale) {
				glFrontFace((neg_scale) ? DST.backface : DST.frontface);
				prev_neg_scale = neg_scale;
			}

			GPU_SELECT_LOAD_IF_PICKSEL(call);

			if (call->head.type == DRW_CALL_SINGLE) {
				draw_geometry_prepare(shgroup, &call->state);
				draw_geometry_execute(shgroup, call->geometry);
			}
			else {
				BLI_assert(call->head.type == DRW_CALL_GENERATE);
				DRWCallGenerate *callgen = ((DRWCallGenerate *)call);
				draw_geometry_prepare(shgroup, &callgen->state);
				callgen->geometry_fn(shgroup, draw_geometry_execute, callgen->user_data);
			}
		}
		/* Reset state */
		glFrontFace(DST.frontface);
	}

	/* TODO: remove, (currently causes alpha issue with sculpt, need to investigate) */
	DRW_state_reset();
}

/* Draw the shgroups of `pass` from `start_group` up to and including
 * `end_group`, then unbind any leftover textures and shader. */
static void drw_draw_pass_ex(DRWPass *pass, DRWShadingGroup *start_group, DRWShadingGroup *end_group)
{
	/* Start fresh */
	DST.shader = NULL;

	BLI_assert(DST.buffer_finish_called && "DRW_render_instance_buffer_finish had not been called before drawing");

	drw_state_set(pass->state);

	DRW_stats_query_start(pass->name);

	for (DRWShadingGroup *shgroup = start_group; shgroup; shgroup = shgroup->next) {
		draw_shgroup(shgroup, pass->state);
		/* break if upper limit */
		if (shgroup == end_group) {
			break;
		}
	}

	/* Clear Bound textures */
	for (int i = 0; i < GPU_max_textures(); i++) {
		if (DST.RST.bound_texs[i] != NULL) {
			GPU_texture_unbind(DST.RST.bound_texs[i]);
			DST.RST.bound_texs[i] = NULL;
		}
	}

	if (DST.shader) {
		GPU_shader_unbind();
		DST.shader = NULL;
	}

	DRW_stats_query_end();
}

void DRW_draw_pass(DRWPass *pass)
{
	drw_draw_pass_ex(pass, pass->shgroups.first, pass->shgroups.last);
}

/* Draw only a subset of shgroups. Used in special situations as grease pencil strokes */
void DRW_draw_pass_subset(DRWPass *pass, DRWShadingGroup *start_group, DRWShadingGroup *end_group)
{
	drw_draw_pass_ex(pass, start_group, end_group);
}

/** \} */
/** \file blender/draw/intern/draw_manager_framebuffer.c
 *  \ingroup draw
 *
 *  Thin DRW wrappers around the GPU_framebuffer API.
 */

#include "draw_manager.h"

GPUFrameBuffer *DRW_framebuffer_create(void)
{
	return GPU_framebuffer_create();
}

/**
 * Create (if needed) *fb and its textures, attaching them in order.
 * Non-depth textures get successive color attachment slots; textures
 * flagged DRW_TEX_TEMP are fetched from the viewport texture pool each
 * frame and detached again once the framebuffer is validated.
 */
void DRW_framebuffer_init(
        GPUFrameBuffer **fb, void *engine_type, int width, int height,
        DRWFboTexture textures[MAX_FBO_TEX], int textures_len)
{
	BLI_assert(textures_len <= MAX_FBO_TEX);
	BLI_assert(width > 0 && height > 0);

	bool create_fb = false;
	int color_attachment = -1;

	if (!*fb) {
		*fb = GPU_framebuffer_create();
		create_fb = true;
	}

	for (int i = 0; i < textures_len; ++i) {
		int channels;
		bool is_depth;
		bool create_tex = false;
		GPUTextureFormat gpu_format;

		DRWFboTexture fbotex = textures[i];
		bool is_temp = (fbotex.flag & DRW_TEX_TEMP) != 0;

		drw_texture_get_format(fbotex.format, true, &gpu_format, &channels, &is_depth);

		if (!*fbotex.tex || is_temp) {
			/* Temp textures need to be queried each frame, others not. */
			if (is_temp) {
				*fbotex.tex = GPU_viewport_texture_pool_query(
				        DST.viewport, engine_type, width, height, channels, gpu_format);
			}
			else {
				*fbotex.tex = GPU_texture_create_2D_custom(
				        width, height, channels, gpu_format, NULL, NULL);
				create_tex = true;
			}
		}

		/* Depth textures do not consume a color attachment slot. */
		if (!is_depth) {
			++color_attachment;
		}

		if (create_fb || create_tex) {
			drw_texture_set_parameters(*fbotex.tex, fbotex.flag);
			GPU_framebuffer_texture_attach(*fb, *fbotex.tex, color_attachment, 0);
		}
	}

	if (create_fb && (textures_len > 0)) {
		if (!GPU_framebuffer_check_valid(*fb, NULL)) {
			printf("Error invalid framebuffer\n");
		}

		/* Detach temp textures */
		for (int i = 0; i < textures_len; ++i) {
			DRWFboTexture fbotex = textures[i];

			if ((fbotex.flag & DRW_TEX_TEMP) != 0) {
				GPU_framebuffer_texture_detach(*fbotex.tex);
			}
		}

		if (DST.default_framebuffer != NULL) {
			GPU_framebuffer_bind(DST.default_framebuffer);
		}
	}
}

void DRW_framebuffer_free(GPUFrameBuffer *fb)
{
	GPU_framebuffer_free(fb);
}

void DRW_framebuffer_bind(GPUFrameBuffer *fb)
{
	GPU_framebuffer_bind(fb);
}

/* Clear the requested buffers of the currently bound framebuffer,
 * re-enabling the relevant write masks first so the clear takes effect. */
void DRW_framebuffer_clear(bool color, bool depth, bool stencil, float clear_col[4], float clear_depth)
{
	if (color) {
		glColorMask(GL_TRUE, GL_TRUE, GL_TRUE, GL_TRUE);
		glClearColor(clear_col[0], clear_col[1], clear_col[2], clear_col[3]);
	}
	if (depth) {
		glDepthMask(GL_TRUE);
		glClearDepth(clear_depth);
	}
	if (stencil) {
		glStencilMask(0xFF);
	}
	glClear(((color) ? GL_COLOR_BUFFER_BIT : 0) |
	        ((depth) ? GL_DEPTH_BUFFER_BIT : 0) |
	        ((stencil) ? GL_STENCIL_BUFFER_BIT : 0));
}

/* Read back a rectangle of float pixels from color attachment `slot`. */
void DRW_framebuffer_read_data(int x, int y, int w, int h, int channels, int slot, float *data)
{
	GLenum type;
	switch (channels) {
		case 1: type = GL_RED; break;
		case 2: type = GL_RG; break;
		case 3: type = GL_RGB; break;
		case 4: type = GL_RGBA; break;
		default:
			BLI_assert(false && "wrong number of read channels");
			return;
	}
	glReadBuffer(GL_COLOR_ATTACHMENT0 + slot);
	glReadPixels(x, y, w, h, type, GL_FLOAT, data);
}

/* Read back a rectangle of depth values (GL_DEPTH_COMPONENT as float). */
void DRW_framebuffer_read_depth(int x, int y, int w, int h, float *data)
{
	GLenum type = GL_DEPTH_COMPONENT;

	glReadBuffer(GL_COLOR_ATTACHMENT0); /* This is OK! */
	glReadPixels(x, y, w, h, type, GL_FLOAT, data);
}

void DRW_framebuffer_texture_attach(GPUFrameBuffer *fb, GPUTexture *tex, int slot, int mip)
{
	GPU_framebuffer_texture_attach(fb, tex, slot, mip);
}

void DRW_framebuffer_texture_layer_attach(GPUFrameBuffer *fb, GPUTexture *tex, int slot, int layer, int mip)
{
	GPU_framebuffer_texture_layer_attach(fb, tex, slot, layer, mip);
}

void DRW_framebuffer_cubeface_attach(GPUFrameBuffer *fb, GPUTexture *tex, int slot, int face, int mip)
{
	GPU_framebuffer_texture_cubeface_attach(fb, tex, slot, face, mip);
}

void DRW_framebuffer_texture_detach(GPUTexture *tex)
{
	GPU_framebuffer_texture_detach(tex);
}

void DRW_framebuffer_blit(GPUFrameBuffer *fb_read, GPUFrameBuffer *fb_write, bool depth, bool stencil)
{
	GPU_framebuffer_blit(fb_read, 0, fb_write, 0, depth, stencil);
}

void DRW_framebuffer_recursive_downsample(
        GPUFrameBuffer *fb, GPUTexture *tex, int num_iter,
        void (*callback)(void *userData, int level), void *userData)
{
	GPU_framebuffer_recursive_downsample(fb, tex, num_iter, callback, userData);
}

/* NOTE: fb_read is unused; this only sets the GL viewport rectangle. */
void DRW_framebuffer_viewport_size(GPUFrameBuffer *UNUSED(fb_read), int x, int y, int w, int h)
{
	glViewport(x, y, w, h);
}
index f9fbbac2e2e..8e5f98b5a49 100644 --- a/source/blender/draw/intern/draw_manager_profiling.c +++ b/source/blender/draw/intern/draw_manager_profiling.c @@ -32,7 +32,12 @@ #include "MEM_guardedalloc.h" +#include "draw_manager.h" + #include "GPU_glew.h" +#include "GPU_texture.h" + +#include "UI_resources.h" #include "WM_api.h" #include "WM_types.h" @@ -198,14 +203,120 @@ void DRW_stats_reset(void) } } +static void draw_stat_5row(rcti *rect, int u, int v, const char *txt, const int size) +{ + BLF_draw_default_ascii(rect->xmin + (1 + u * 5) * U.widget_unit, + rect->ymax - (3 + v) * U.widget_unit, 0.0f, + txt, size); +} + +static void draw_stat(rcti *rect, int u, int v, const char *txt, const int size) +{ + BLF_draw_default_ascii(rect->xmin + (1 + u) * U.widget_unit, + rect->ymax - (3 + v) * U.widget_unit, 0.0f, + txt, size); +} + void DRW_stats_draw(rcti *rect) { char stat_string[64]; int lvl_index[MAX_NESTED_TIMER]; - int v = 0; + int v = 0, u = 0; + + double init_tot_time = 0.0, background_tot_time = 0.0, render_tot_time = 0.0, tot_time = 0.0; + + UI_FontThemeColor(BLF_default(), TH_TEXT_HI); + + /* ------------------------------------------ */ + /* ---------------- CPU stats --------------- */ + /* ------------------------------------------ */ + /* Label row */ + char col_label[32]; + sprintf(col_label, "Engine"); + draw_stat_5row(rect, u++, v, col_label, sizeof(col_label)); + sprintf(col_label, "Init"); + draw_stat_5row(rect, u++, v, col_label, sizeof(col_label)); + sprintf(col_label, "Background"); + draw_stat_5row(rect, u++, v, col_label, sizeof(col_label)); + sprintf(col_label, "Render"); + draw_stat_5row(rect, u++, v, col_label, sizeof(col_label)); + sprintf(col_label, "Total (w/o cache)"); + draw_stat_5row(rect, u++, v, col_label, sizeof(col_label)); + v++; + + /* Engines rows */ + char time_to_txt[16]; + for (LinkData *link = DST.enabled_engines.first; link; link = link->next) { + u = 0; + DrawEngineType *engine = link->data; + ViewportEngineData *data = 
drw_viewport_engine_data_ensure(engine); + + draw_stat_5row(rect, u++, v, engine->idname, sizeof(engine->idname)); + + init_tot_time += data->init_time; + sprintf(time_to_txt, "%.2fms", data->init_time); + draw_stat_5row(rect, u++, v, time_to_txt, sizeof(time_to_txt)); + + background_tot_time += data->background_time; + sprintf(time_to_txt, "%.2fms", data->background_time); + draw_stat_5row(rect, u++, v, time_to_txt, sizeof(time_to_txt)); + + render_tot_time += data->render_time; + sprintf(time_to_txt, "%.2fms", data->render_time); + draw_stat_5row(rect, u++, v, time_to_txt, sizeof(time_to_txt)); + + tot_time += data->init_time + data->background_time + data->render_time; + sprintf(time_to_txt, "%.2fms", data->init_time + data->background_time + data->render_time); + draw_stat_5row(rect, u++, v, time_to_txt, sizeof(time_to_txt)); + v++; + } - BLI_snprintf(stat_string, sizeof(stat_string), "GPU Render Stats"); - BLF_draw_default_ascii(rect->xmin + 1 * U.widget_unit, rect->ymax - v++ * U.widget_unit, 0.0f, stat_string, sizeof(stat_string)); + /* Totals row */ + u = 0; + sprintf(col_label, "Sub Total"); + draw_stat_5row(rect, u++, v, col_label, sizeof(col_label)); + sprintf(time_to_txt, "%.2fms", init_tot_time); + draw_stat_5row(rect, u++, v, time_to_txt, sizeof(time_to_txt)); + sprintf(time_to_txt, "%.2fms", background_tot_time); + draw_stat_5row(rect, u++, v, time_to_txt, sizeof(time_to_txt)); + sprintf(time_to_txt, "%.2fms", render_tot_time); + draw_stat_5row(rect, u++, v, time_to_txt, sizeof(time_to_txt)); + sprintf(time_to_txt, "%.2fms", tot_time); + draw_stat_5row(rect, u++, v, time_to_txt, sizeof(time_to_txt)); + v += 2; + + u = 0; + sprintf(col_label, "Cache Time"); + draw_stat_5row(rect, u++, v, col_label, sizeof(col_label)); + sprintf(time_to_txt, "%.2fms", DST.cache_time); + draw_stat_5row(rect, u++, v, time_to_txt, sizeof(time_to_txt)); + v += 2; + + /* ------------------------------------------ */ + /* ---------------- GPU stats --------------- */ + /* 
------------------------------------------ */ + + /* Memory Stats */ + unsigned int tex_mem = GPU_texture_memory_usage_get(); + unsigned int vbo_mem = GWN_vertbuf_get_memory_usage(); + + sprintf(stat_string, "GPU Memory"); + draw_stat(rect, 0, v, stat_string, sizeof(stat_string)); + sprintf(stat_string, "%.2fMB", (double)(tex_mem + vbo_mem) / 1000000.0); + draw_stat_5row(rect, 1, v++, stat_string, sizeof(stat_string)); + sprintf(stat_string, "Textures"); + draw_stat(rect, 1, v, stat_string, sizeof(stat_string)); + sprintf(stat_string, "%.2fMB", (double)tex_mem / 1000000.0); + draw_stat_5row(rect, 1, v++, stat_string, sizeof(stat_string)); + sprintf(stat_string, "Meshes"); + draw_stat(rect, 1, v, stat_string, sizeof(stat_string)); + sprintf(stat_string, "%.2fMB", (double)vbo_mem / 1000000.0); + draw_stat_5row(rect, 1, v++, stat_string, sizeof(stat_string)); + v += 1; + + /* GPU Timings */ + BLI_snprintf(stat_string, sizeof(stat_string), "GPU Render Timings"); + draw_stat(rect, 0, v++, stat_string, sizeof(stat_string)); for (int i = 0; i < DTP.timer_increment; ++i) { double time_ms, time_percent; @@ -232,11 +343,11 @@ void DRW_stats_draw(rcti *rect) time_percent = MIN2(time_percent, 100.0); BLI_snprintf(stat_string, sizeof(stat_string), "%s", timer->name); - BLF_draw_default_ascii(rect->xmin + (1 + timer->lvl) * U.widget_unit, rect->ymax - v * U.widget_unit, 0.0f, stat_string, sizeof(stat_string)); + draw_stat(rect, 0 + timer->lvl, v, stat_string, sizeof(stat_string)); BLI_snprintf(stat_string, sizeof(stat_string), "%.2fms", time_ms); - BLF_draw_default_ascii(rect->xmin + (13 + timer->lvl) * U.widget_unit, rect->ymax - v * U.widget_unit, 0.0f, stat_string, sizeof(stat_string)); + draw_stat(rect, 12 + timer->lvl, v, stat_string, sizeof(stat_string)); BLI_snprintf(stat_string, sizeof(stat_string), "%.0f", time_percent); - BLF_draw_default_ascii(rect->xmin + (17 + timer->lvl) * U.widget_unit, rect->ymax - v * U.widget_unit, 0.0f, stat_string, sizeof(stat_string)); + 
draw_stat(rect, 16 + timer->lvl, v, stat_string, sizeof(stat_string)); v++; } -}
\ No newline at end of file +} diff --git a/source/blender/draw/intern/draw_manager_shader.c b/source/blender/draw/intern/draw_manager_shader.c new file mode 100644 index 00000000000..e9d2ac14e57 --- /dev/null +++ b/source/blender/draw/intern/draw_manager_shader.c @@ -0,0 +1,90 @@ +/* + * Copyright 2016, Blender Foundation. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software Foundation, + * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. 
+ * + * Contributor(s): Blender Institute + * + */ + +/** \file blender/draw/intern/draw_manager_shader.c + * \ingroup draw + */ + +#include "draw_manager.h" + +#include "BLI_string.h" +#include "BLI_string_utils.h" + +#include "GPU_shader.h" + +extern char datatoc_gpu_shader_2D_vert_glsl[]; +extern char datatoc_gpu_shader_3D_vert_glsl[]; +extern char datatoc_gpu_shader_fullscreen_vert_glsl[]; + +GPUShader *DRW_shader_create(const char *vert, const char *geom, const char *frag, const char *defines) +{ + return GPU_shader_create(vert, frag, geom, NULL, defines); +} + +GPUShader *DRW_shader_create_with_lib( + const char *vert, const char *geom, const char *frag, const char *lib, const char *defines) +{ + GPUShader *sh; + char *vert_with_lib = NULL; + char *frag_with_lib = NULL; + char *geom_with_lib = NULL; + + vert_with_lib = BLI_string_joinN(lib, vert); + frag_with_lib = BLI_string_joinN(lib, frag); + if (geom) { + geom_with_lib = BLI_string_joinN(lib, geom); + } + + sh = GPU_shader_create(vert_with_lib, frag_with_lib, geom_with_lib, NULL, defines); + + MEM_freeN(vert_with_lib); + MEM_freeN(frag_with_lib); + if (geom) { + MEM_freeN(geom_with_lib); + } + + return sh; +} + +GPUShader *DRW_shader_create_2D(const char *frag, const char *defines) +{ + return GPU_shader_create(datatoc_gpu_shader_2D_vert_glsl, frag, NULL, NULL, defines); +} + +GPUShader *DRW_shader_create_3D(const char *frag, const char *defines) +{ + return GPU_shader_create(datatoc_gpu_shader_3D_vert_glsl, frag, NULL, NULL, defines); +} + +GPUShader *DRW_shader_create_fullscreen(const char *frag, const char *defines) +{ + return GPU_shader_create(datatoc_gpu_shader_fullscreen_vert_glsl, frag, NULL, NULL, defines); +} + +GPUShader *DRW_shader_create_3D_depth_only(void) +{ + return GPU_shader_get_builtin_shader(GPU_SHADER_3D_DEPTH_ONLY); +} + +void DRW_shader_free(GPUShader *shader) +{ + GPU_shader_free(shader); +} diff --git a/source/blender/draw/intern/draw_manager_texture.c 
b/source/blender/draw/intern/draw_manager_texture.c new file mode 100644 index 00000000000..bbef680e77a --- /dev/null +++ b/source/blender/draw/intern/draw_manager_texture.c @@ -0,0 +1,213 @@ +/* + * Copyright 2016, Blender Foundation. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software Foundation, + * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. + * + * Contributor(s): Blender Institute + * + */ + +/** \file blender/draw/intern/draw_manager_texture.c + * \ingroup draw + */ + +#include "draw_manager.h" + +void drw_texture_get_format( + DRWTextureFormat format, bool is_framebuffer, + GPUTextureFormat *r_data_type, int *r_channels, bool *r_is_depth) +{ + /* Some formats do not work with framebuffers. */ + if (is_framebuffer) { + switch (format) { + /* Only add formats that are COMPATIBLE with FB. + * Generally they are multiple of 16bit. 
*/ + case DRW_TEX_R_16: + case DRW_TEX_R_32: + case DRW_TEX_RG_8: + case DRW_TEX_RG_16: + case DRW_TEX_RG_16I: + case DRW_TEX_RG_32: + case DRW_TEX_RGBA_8: + case DRW_TEX_RGBA_16: + case DRW_TEX_RGBA_32: + case DRW_TEX_DEPTH_16: + case DRW_TEX_DEPTH_24: + case DRW_TEX_DEPTH_24_STENCIL_8: + case DRW_TEX_DEPTH_32: + case DRW_TEX_RGB_11_11_10: + break; + default: + BLI_assert(false && "Texture format unsupported as render target!"); + *r_channels = 4; + *r_data_type = GPU_RGBA8; + *r_is_depth = false; + return; + } + } + + switch (format) { + case DRW_TEX_RGBA_8: *r_data_type = GPU_RGBA8; break; + case DRW_TEX_RGBA_16: *r_data_type = GPU_RGBA16F; break; + case DRW_TEX_RGBA_32: *r_data_type = GPU_RGBA32F; break; + case DRW_TEX_RGB_16: *r_data_type = GPU_RGB16F; break; + case DRW_TEX_RGB_11_11_10: *r_data_type = GPU_R11F_G11F_B10F; break; + case DRW_TEX_RG_8: *r_data_type = GPU_RG8; break; + case DRW_TEX_RG_16: *r_data_type = GPU_RG16F; break; + case DRW_TEX_RG_16I: *r_data_type = GPU_RG16I; break; + case DRW_TEX_RG_32: *r_data_type = GPU_RG32F; break; + case DRW_TEX_R_8: *r_data_type = GPU_R8; break; + case DRW_TEX_R_16: *r_data_type = GPU_R16F; break; + case DRW_TEX_R_32: *r_data_type = GPU_R32F; break; +#if 0 + case DRW_TEX_RGB_8: *r_data_type = GPU_RGB8; break; + case DRW_TEX_RGB_32: *r_data_type = GPU_RGB32F; break; +#endif + case DRW_TEX_DEPTH_16: *r_data_type = GPU_DEPTH_COMPONENT16; break; + case DRW_TEX_DEPTH_24: *r_data_type = GPU_DEPTH_COMPONENT24; break; + case DRW_TEX_DEPTH_24_STENCIL_8: *r_data_type = GPU_DEPTH24_STENCIL8; break; + case DRW_TEX_DEPTH_32: *r_data_type = GPU_DEPTH_COMPONENT32F; break; + default : + /* file type not supported you must uncomment it from above */ + BLI_assert(false); + break; + } + + switch (format) { + case DRW_TEX_RGBA_8: + case DRW_TEX_RGBA_16: + case DRW_TEX_RGBA_32: + *r_channels = 4; + break; + case DRW_TEX_RGB_8: + case DRW_TEX_RGB_16: + case DRW_TEX_RGB_32: + case DRW_TEX_RGB_11_11_10: + *r_channels = 3; + break; + case 
DRW_TEX_RG_8: + case DRW_TEX_RG_16: + case DRW_TEX_RG_16I: + case DRW_TEX_RG_32: + *r_channels = 2; + break; + default: + *r_channels = 1; + break; + } + + if (r_is_depth) { + *r_is_depth = ELEM(format, DRW_TEX_DEPTH_16, DRW_TEX_DEPTH_24, DRW_TEX_DEPTH_24_STENCIL_8); + } +} + +void drw_texture_set_parameters(GPUTexture *tex, DRWTextureFlag flags) +{ + GPU_texture_bind(tex, 0); + if (flags & DRW_TEX_MIPMAP) { + GPU_texture_mipmap_mode(tex, true, flags & DRW_TEX_FILTER); + DRW_texture_generate_mipmaps(tex); + } + else { + GPU_texture_filter_mode(tex, flags & DRW_TEX_FILTER); + } + GPU_texture_wrap_mode(tex, flags & DRW_TEX_WRAP); + GPU_texture_compare_mode(tex, flags & DRW_TEX_COMPARE); + GPU_texture_unbind(tex); +} + +GPUTexture *DRW_texture_create_1D(int w, DRWTextureFormat format, DRWTextureFlag flags, const float *fpixels) +{ + GPUTexture *tex; + GPUTextureFormat data_type; + int channels; + + drw_texture_get_format(format, false, &data_type, &channels, NULL); + tex = GPU_texture_create_1D_custom(w, channels, data_type, fpixels, NULL); + drw_texture_set_parameters(tex, flags); + + return tex; +} + +GPUTexture *DRW_texture_create_2D(int w, int h, DRWTextureFormat format, DRWTextureFlag flags, const float *fpixels) +{ + GPUTexture *tex; + GPUTextureFormat data_type; + int channels; + + drw_texture_get_format(format, false, &data_type, &channels, NULL); + tex = GPU_texture_create_2D_custom(w, h, channels, data_type, fpixels, NULL); + drw_texture_set_parameters(tex, flags); + + return tex; +} + +GPUTexture *DRW_texture_create_2D_array( + int w, int h, int d, DRWTextureFormat format, DRWTextureFlag flags, const float *fpixels) +{ + GPUTexture *tex; + GPUTextureFormat data_type; + int channels; + + drw_texture_get_format(format, false, &data_type, &channels, NULL); + tex = GPU_texture_create_2D_array_custom(w, h, d, channels, data_type, fpixels, NULL); + drw_texture_set_parameters(tex, flags); + + return tex; +} + +GPUTexture *DRW_texture_create_3D( + int w, int h, int 
d, DRWTextureFormat format, DRWTextureFlag flags, const float *fpixels) +{ + GPUTexture *tex; + GPUTextureFormat data_type; + int channels; + + drw_texture_get_format(format, false, &data_type, &channels, NULL); + tex = GPU_texture_create_3D_custom(w, h, d, channels, data_type, fpixels, NULL); + drw_texture_set_parameters(tex, flags); + + return tex; +} + +GPUTexture *DRW_texture_create_cube(int w, DRWTextureFormat format, DRWTextureFlag flags, const float *fpixels) +{ + GPUTexture *tex; + GPUTextureFormat data_type; + int channels; + + drw_texture_get_format(format, false, &data_type, &channels, NULL); + tex = GPU_texture_create_cube_custom(w, channels, data_type, fpixels, NULL); + drw_texture_set_parameters(tex, flags); + + return tex; +} + +void DRW_texture_generate_mipmaps(GPUTexture *tex) +{ + GPU_texture_bind(tex, 0); + GPU_texture_generate_mipmap(tex); + GPU_texture_unbind(tex); +} + +void DRW_texture_update(GPUTexture *tex, const float *pixels) +{ + GPU_texture_update(tex, pixels); +} + +void DRW_texture_free(GPUTexture *tex) +{ + GPU_texture_free(tex); +} |