git.blender.org/blender.git
Diffstat (limited to 'source/blender/draw/intern')
-rw-r--r--  source/blender/draw/intern/DRW_render.h  528
-rw-r--r--  source/blender/draw/intern/draw_armature.c  1460
-rw-r--r--  source/blender/draw/intern/draw_cache.c  2802
-rw-r--r--  source/blender/draw/intern/draw_cache.h  174
-rw-r--r--  source/blender/draw/intern/draw_cache_impl.h  127
-rw-r--r--  source/blender/draw/intern/draw_cache_impl_curve.c  1118
-rw-r--r--  source/blender/draw/intern/draw_cache_impl_displist.c  398
-rw-r--r--  source/blender/draw/intern/draw_cache_impl_lattice.c  587
-rw-r--r--  source/blender/draw/intern/draw_cache_impl_mesh.c  3932
-rw-r--r--  source/blender/draw/intern/draw_cache_impl_metaball.c  144
-rw-r--r--  source/blender/draw/intern/draw_cache_impl_particles.c  544
-rw-r--r--  source/blender/draw/intern/draw_common.c  537
-rw-r--r--  source/blender/draw/intern/draw_common.h  148
-rw-r--r--  source/blender/draw/intern/draw_instance_data.c  469
-rw-r--r--  source/blender/draw/intern/draw_instance_data.h  60
-rw-r--r--  source/blender/draw/intern/draw_manager.c  2087
-rw-r--r--  source/blender/draw/intern/draw_manager.h  358
-rw-r--r--  source/blender/draw/intern/draw_manager_data.c  935
-rw-r--r--  source/blender/draw/intern/draw_manager_exec.c  1170
-rw-r--r--  source/blender/draw/intern/draw_manager_profiling.c  354
-rw-r--r--  source/blender/draw/intern/draw_manager_profiling.h  43
-rw-r--r--  source/blender/draw/intern/draw_manager_shader.c  377
-rw-r--r--  source/blender/draw/intern/draw_manager_text.c  199
-rw-r--r--  source/blender/draw/intern/draw_manager_text.h  54
-rw-r--r--  source/blender/draw/intern/draw_manager_texture.c  237
-rw-r--r--  source/blender/draw/intern/draw_view.c  703
-rw-r--r--  source/blender/draw/intern/draw_view.h  36
27 files changed, 19581 insertions, 0 deletions
diff --git a/source/blender/draw/intern/DRW_render.h b/source/blender/draw/intern/DRW_render.h
new file mode 100644
index 00000000000..60e855108f9
--- /dev/null
+++ b/source/blender/draw/intern/DRW_render.h
@@ -0,0 +1,528 @@
+/*
+ * Copyright 2016, Blender Foundation.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * Contributor(s): Blender Institute
+ *
+ */
+
+/** \file DRW_render.h
+ * \ingroup draw
+ */
+
+/* These are the render functions used by realtime engines to draw with OpenGL */
+
+#ifndef __DRW_RENDER_H__
+#define __DRW_RENDER_H__
+
+#include "BLI_listbase.h"
+#include "BLI_math_matrix.h"
+#include "BLI_math_vector.h"
+#include "BLI_string.h"
+
+#include "BKE_context.h"
+#include "BKE_layer.h"
+#include "BKE_material.h"
+#include "BKE_scene.h"
+
+#include "BLT_translation.h"
+
+#include "DNA_object_types.h"
+#include "DNA_lamp_types.h"
+#include "DNA_material_types.h"
+#include "DNA_scene_types.h"
+
+#include "GPU_framebuffer.h"
+
+#include "draw_common.h"
+#include "draw_cache.h"
+#include "draw_view.h"
+
+#include "draw_manager_profiling.h"
+
+#include "MEM_guardedalloc.h"
+
+#include "RE_engine.h"
+
+#include "DEG_depsgraph.h"
+
+struct rcti;
+struct bContext;
+struct GPUFrameBuffer;
+struct GPUShader;
+struct GPUMaterial;
+struct GPUTexture;
+struct GPUUniformBuffer;
+struct Object;
+struct Gwn_Batch;
+struct DefaultFramebufferList;
+struct DefaultTextureList;
+struct DRWTextStore;
+struct LampEngineData;
+struct RenderEngineType;
+struct ViewportEngineData;
+struct ViewportEngineData_Info;
+
+typedef struct DRWUniform DRWUniform;
+typedef struct DRWInterface DRWInterface;
+typedef struct DRWPass DRWPass;
+typedef struct DRWShadingGroup DRWShadingGroup;
+
+/* TODO Put it somewhere else? */
+typedef struct BoundSphere {
+ float center[3], radius;
+} BoundSphere;
+
+/* declare members as empty (unused) */
+typedef char DRWViewportEmptyList;
+
+#define DRW_VIEWPORT_LIST_SIZE(list) \
+ (sizeof(list) == sizeof(DRWViewportEmptyList) ? 0 : ((sizeof(list)) / sizeof(void *)))
+
+/* Unused members must be either a pass list or 'char *' when not used. */
+#define DRW_VIEWPORT_DATA_SIZE(ty) { \
+ DRW_VIEWPORT_LIST_SIZE(*(((ty *)NULL)->fbl)), \
+ DRW_VIEWPORT_LIST_SIZE(*(((ty *)NULL)->txl)), \
+ DRW_VIEWPORT_LIST_SIZE(*(((ty *)NULL)->psl)), \
+ DRW_VIEWPORT_LIST_SIZE(*(((ty *)NULL)->stl)) \
+}
+
+/* Use of multisample framebuffers. */
+#define MULTISAMPLE_SYNC_ENABLE(dfbl) { \
+ if (dfbl->multisample_fb != NULL) { \
+ DRW_stats_query_start("Multisample Blit"); \
+ GPU_framebuffer_blit(dfbl->default_fb, 0, dfbl->multisample_fb, 0, GPU_COLOR_BIT | GPU_DEPTH_BIT); \
+ GPU_framebuffer_bind(dfbl->multisample_fb); \
+ DRW_stats_query_end(); \
+ } \
+}
+
+#define MULTISAMPLE_SYNC_DISABLE(dfbl) { \
+ if (dfbl->multisample_fb != NULL) { \
+ DRW_stats_query_start("Multisample Resolve"); \
+ GPU_framebuffer_blit(dfbl->multisample_fb, 0, dfbl->default_fb, 0, GPU_COLOR_BIT | GPU_DEPTH_BIT); \
+ GPU_framebuffer_bind(dfbl->default_fb); \
+ DRW_stats_query_end(); \
+ } \
+}
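
For illustration, a minimal sketch of how the two macros pair up around a single pass draw. The helper name is hypothetical; DefaultFramebufferList, DRW_viewport_framebuffer_list_get() and DRW_draw_pass() are declared later in this header.

    /* Hypothetical sketch: draw one pass through the multisample buffers (if any). */
    static void myengine_draw_pass_msaa(DRWPass *pass)
    {
            DefaultFramebufferList *dfbl = DRW_viewport_framebuffer_list_get();
            MULTISAMPLE_SYNC_ENABLE(dfbl);    /* blit default_fb into multisample_fb and bind it */
            DRW_draw_pass(pass);
            MULTISAMPLE_SYNC_DISABLE(dfbl);   /* resolve multisample_fb back into default_fb */
    }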
+
+
+
+typedef struct DrawEngineDataSize {
+ int fbl_len;
+ int txl_len;
+ int psl_len;
+ int stl_len;
+} DrawEngineDataSize;
+
+typedef struct DrawEngineType {
+ struct DrawEngineType *next, *prev;
+
+ char idname[32];
+
+ const DrawEngineDataSize *vedata_size;
+
+ void (*engine_init)(void *vedata);
+ void (*engine_free)(void);
+
+ void (*cache_init)(void *vedata);
+ void (*cache_populate)(void *vedata, struct Object *ob);
+ void (*cache_finish)(void *vedata);
+
+ void (*draw_background)(void *vedata);
+ void (*draw_scene)(void *vedata);
+
+ void (*view_update)(void *vedata);
+ void (*id_update)(void *vedata, struct ID *id);
+
+ void (*render_to_image)(void *vedata, struct RenderEngine *engine, struct RenderLayer *layer, const struct rcti *rect);
+} DrawEngineType;
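
To make the callbacks above concrete, here is a minimal, hypothetical engine declaration. Every MYENGINE_* / myengine_* name is invented for illustration; only the DRW types and the DRW_VIEWPORT_DATA_SIZE() macro come from this header. Unused viewport lists are declared as DRWViewportEmptyList so their size evaluates to zero.

    typedef struct MYENGINE_PassList { struct DRWPass *solid_pass; } MYENGINE_PassList;

    typedef struct MYENGINE_Data {
            void *engine_type;          /* points back to the DrawEngineType */
            DRWViewportEmptyList *fbl;  /* no custom framebuffers */
            DRWViewportEmptyList *txl;  /* no custom textures */
            MYENGINE_PassList *psl;     /* one pass list */
            DRWViewportEmptyList *stl;  /* no storage */
    } MYENGINE_Data;

    static const DrawEngineDataSize myengine_data_size = DRW_VIEWPORT_DATA_SIZE(MYENGINE_Data);

    DrawEngineType draw_engine_myengine_type = {
            NULL, NULL,
            "MyEngine",
            &myengine_data_size,
            NULL,                       /* engine_init */
            NULL,                       /* engine_free */
            NULL, NULL, NULL,           /* cache_init / cache_populate / cache_finish */
            NULL,                       /* draw_background */
            NULL,                       /* draw_scene */
            NULL, NULL,                 /* view_update / id_update */
            NULL,                       /* render_to_image */
    };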
+
+#ifndef __DRW_ENGINE_H__
+/* Buffer and textures used by the viewport by default */
+typedef struct DefaultFramebufferList {
+ struct GPUFrameBuffer *default_fb;
+ struct GPUFrameBuffer *color_only_fb;
+ struct GPUFrameBuffer *depth_only_fb;
+ struct GPUFrameBuffer *multisample_fb;
+} DefaultFramebufferList;
+
+typedef struct DefaultTextureList {
+ struct GPUTexture *color;
+ struct GPUTexture *depth;
+ struct GPUTexture *multisample_color;
+ struct GPUTexture *multisample_depth;
+} DefaultTextureList;
+#endif
+
+/* Textures */
+/* NOTE: naming in this enum is broken.
+ * There should be suffixes for either normalized-int formats or float formats.
+ * Right now every 8-bit texture is normalized int and the others are floating point. */
+typedef enum {
+ DRW_TEX_RGBA_8,
+ DRW_TEX_RGBA_16,
+ DRW_TEX_RGBA_32,
+ DRW_TEX_RGB_11_11_10,
+ DRW_TEX_RGB_8,
+ DRW_TEX_RGB_16,
+ DRW_TEX_RGB_32,
+ DRW_TEX_RG_8,
+ DRW_TEX_RG_16,
+ DRW_TEX_RG_16I,
+ DRW_TEX_RG_32,
+ DRW_TEX_R_8,
+ DRW_TEX_R_16,
+ DRW_TEX_R_16I,
+ DRW_TEX_R_32,
+ DRW_TEX_DEPTH_16,
+ DRW_TEX_DEPTH_24,
+ DRW_TEX_DEPTH_24_STENCIL_8,
+ DRW_TEX_DEPTH_32,
+} DRWTextureFormat;
+
+typedef enum {
+ DRW_TEX_FILTER = (1 << 0),
+ DRW_TEX_WRAP = (1 << 1),
+ DRW_TEX_COMPARE = (1 << 2),
+ DRW_TEX_MIPMAP = (1 << 3),
+} DRWTextureFlag;
+
+/* Textures from DRW_texture_pool_query_* have the options
+ * DRW_TEX_FILTER for color float textures, and no options
+ * for depth textures and integer textures. */
+struct GPUTexture *DRW_texture_pool_query_2D(int w, int h, DRWTextureFormat format, DrawEngineType *engine_type);
+
+struct GPUTexture *DRW_texture_create_1D(
+ int w, DRWTextureFormat format, DRWTextureFlag flags, const float *fpixels);
+struct GPUTexture *DRW_texture_create_2D(
+ int w, int h, DRWTextureFormat format, DRWTextureFlag flags, const float *fpixels);
+struct GPUTexture *DRW_texture_create_2D_array(
+ int w, int h, int d, DRWTextureFormat format, DRWTextureFlag flags, const float *fpixels);
+struct GPUTexture *DRW_texture_create_3D(
+ int w, int h, int d, DRWTextureFormat format, DRWTextureFlag flags, const float *fpixels);
+struct GPUTexture *DRW_texture_create_cube(
+ int w, DRWTextureFormat format, DRWTextureFlag flags, const float *fpixels);
+
+void DRW_texture_ensure_fullscreen_2D(
+ struct GPUTexture **tex, DRWTextureFormat format, DRWTextureFlag flags);
+void DRW_texture_ensure_2D(
+ struct GPUTexture **tex, int w, int h, DRWTextureFormat format, DRWTextureFlag flags);
+
+void DRW_texture_generate_mipmaps(struct GPUTexture *tex);
+void DRW_texture_free(struct GPUTexture *tex);
+#define DRW_TEXTURE_FREE_SAFE(tex) do { \
+ if (tex != NULL) { \
+ DRW_texture_free(tex); \
+ tex = NULL; \
+ } \
+} while (0)
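
A hedged sketch of the two ownership models above: pooled textures are queried each redraw and never freed by the caller, while engine-owned textures are created once and released with the macro. `g_tex` is an invented cache variable and `draw_engine_myengine_type` is the hypothetical engine from the earlier sketch.

    static struct GPUTexture *g_tex = NULL;   /* hypothetical engine-owned texture */

    static void myengine_texture_example(void)
    {
            const float *size = DRW_viewport_size_get();

            /* Pooled: shared between engines, valid for this redraw only. */
            struct GPUTexture *color_tx = DRW_texture_pool_query_2D(
                    (int)size[0], (int)size[1], DRW_TEX_RGBA_16, &draw_engine_myengine_type);
            (void)color_tx;

            /* Owned: created lazily, freed in engine_free() with DRW_TEXTURE_FREE_SAFE(g_tex). */
            if (g_tex == NULL) {
                    g_tex = DRW_texture_create_2D(64, 64, DRW_TEX_RGBA_8, DRW_TEX_FILTER, NULL);
            }
    }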
+
+/* UBOs */
+struct GPUUniformBuffer *DRW_uniformbuffer_create(int size, const void *data);
+void DRW_uniformbuffer_update(struct GPUUniformBuffer *ubo, const void *data);
+void DRW_uniformbuffer_free(struct GPUUniformBuffer *ubo);
+#define DRW_UBO_FREE_SAFE(ubo) do { \
+ if (ubo != NULL) { \
+ DRW_uniformbuffer_free(ubo); \
+ ubo = NULL; \
+ } \
+} while (0)
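
Similarly, a short hypothetical sketch of the UBO lifetime: create once from a CPU-side struct, update per frame, free on shutdown. `MyUniforms` and `g_ubo` are invented names; std140-style padding is the caller's responsibility.

    typedef struct MyUniforms { float color[4]; float factor; float _pad[3]; } MyUniforms;

    static struct GPUUniformBuffer *g_ubo = NULL;   /* hypothetical cache */

    static void myengine_ubo_example(const MyUniforms *data)
    {
            if (g_ubo == NULL) {
                    g_ubo = DRW_uniformbuffer_create(sizeof(*data), data);
            }
            else {
                    DRW_uniformbuffer_update(g_ubo, data);
            }
            /* On shutdown: DRW_UBO_FREE_SAFE(g_ubo); */
    }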
+
+void DRW_transform_to_display(struct GPUTexture *tex);
+
+/* Shaders */
+struct GPUShader *DRW_shader_create(
+ const char *vert, const char *geom, const char *frag, const char *defines);
+struct GPUShader *DRW_shader_create_with_lib(
+ const char *vert, const char *geom, const char *frag, const char *lib, const char *defines);
+struct GPUShader *DRW_shader_create_2D(const char *frag, const char *defines);
+struct GPUShader *DRW_shader_create_3D(const char *frag, const char *defines);
+struct GPUShader *DRW_shader_create_fullscreen(const char *frag, const char *defines);
+struct GPUShader *DRW_shader_create_3D_depth_only(void);
+struct GPUMaterial *DRW_shader_find_from_world(struct World *wo, const void *engine_type, int options);
+struct GPUMaterial *DRW_shader_find_from_material(struct Material *ma, const void *engine_type, int options);
+struct GPUMaterial *DRW_shader_create_from_world(
+ struct Scene *scene, struct World *wo, const void *engine_type, int options,
+ const char *vert, const char *geom, const char *frag_lib, const char *defines);
+struct GPUMaterial *DRW_shader_create_from_material(
+ struct Scene *scene, struct Material *ma, const void *engine_type, int options,
+ const char *vert, const char *geom, const char *frag_lib, const char *defines);
+void DRW_shader_free(struct GPUShader *shader);
+#define DRW_SHADER_FREE_SAFE(shader) do { \
+ if (shader != NULL) { \
+ DRW_shader_free(shader); \
+ shader = NULL; \
+ } \
+} while (0)
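
A sketch of the matching shader lifetime, assuming hypothetical GLSL source strings: compile once (typically in engine_init()), reuse the cached pointer, and release it in engine_free().

    static struct GPUShader *myengine_shader = NULL;   /* hypothetical cache */

    static void myengine_shaders_ensure(const char *vert_src, const char *frag_src)
    {
            if (myengine_shader == NULL) {
                    myengine_shader = DRW_shader_create(vert_src, NULL, frag_src, NULL);
            }
    }

    static void myengine_shaders_free(void)
    {
            DRW_SHADER_FREE_SAFE(myengine_shader);
    }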
+
+/* Batches */
+
+typedef enum {
+ DRW_STATE_WRITE_DEPTH = (1 << 0),
+ DRW_STATE_WRITE_COLOR = (1 << 1),
+ DRW_STATE_DEPTH_LESS = (1 << 2),
+ DRW_STATE_DEPTH_EQUAL = (1 << 3),
+ DRW_STATE_DEPTH_GREATER = (1 << 4),
+ DRW_STATE_DEPTH_ALWAYS = (1 << 5),
+ DRW_STATE_CULL_BACK = (1 << 6),
+ DRW_STATE_CULL_FRONT = (1 << 7),
+ DRW_STATE_WIRE = (1 << 8),
+// DRW_STATE_WIRE_LARGE = (1 << 9), /* Removed from ogl in 3.0 */
+ DRW_STATE_POINT = (1 << 10),
+ DRW_STATE_STIPPLE_2 = (1 << 11),
+ DRW_STATE_STIPPLE_3 = (1 << 12),
+ DRW_STATE_STIPPLE_4 = (1 << 13),
+ DRW_STATE_BLEND = (1 << 14),
+ DRW_STATE_ADDITIVE = (1 << 15),
+ DRW_STATE_MULTIPLY = (1 << 16),
+ DRW_STATE_TRANSMISSION = (1 << 17),
+ DRW_STATE_CLIP_PLANES = (1 << 18),
+ DRW_STATE_ADDITIVE_FULL = (1 << 19), /* Same as DRW_STATE_ADDITIVE but let alpha accumulate without premult. */
+
+ DRW_STATE_WRITE_STENCIL = (1 << 27),
+ DRW_STATE_STENCIL_EQUAL = (1 << 28),
+} DRWState;
+
+#define DRW_STATE_DEFAULT (DRW_STATE_WRITE_DEPTH | DRW_STATE_WRITE_COLOR | DRW_STATE_DEPTH_LESS)
+
+typedef enum {
+ DRW_ATTRIB_INT,
+ DRW_ATTRIB_FLOAT,
+} DRWAttribType;
+
+typedef struct DRWInstanceAttribFormat {
+ char name[32];
+ DRWAttribType type;
+ int components;
+} DRWInstanceAttribFormat;
+
+struct Gwn_VertFormat *DRW_shgroup_instance_format_array(const DRWInstanceAttribFormat attribs[], int arraysize);
+#define DRW_shgroup_instance_format(format, ...) do { \
+ if (format == NULL) { \
+ DRWInstanceAttribFormat drw_format[] = __VA_ARGS__;\
+ format = DRW_shgroup_instance_format_array(drw_format, (sizeof(drw_format) / sizeof(DRWInstanceAttribFormat))); \
+ } \
+} while (0)
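
For illustration, this macro is normally used to build an instancing vertex format lazily, caching the pointer in a file-scope variable; the attribute names only need to match the instancing shader's inputs. `g_instance_format` is an invented cache.

    static struct Gwn_VertFormat *g_instance_format = NULL;   /* hypothetical cache */

    static void myengine_format_ensure(void)
    {
            DRW_shgroup_instance_format(g_instance_format, {
                    {"InstanceModelMatrix", DRW_ATTRIB_FLOAT, 16},
                    {"color",               DRW_ATTRIB_FLOAT, 4}
            });
    }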
+
+DRWShadingGroup *DRW_shgroup_create(struct GPUShader *shader, DRWPass *pass);
+DRWShadingGroup *DRW_shgroup_material_create(struct GPUMaterial *material, DRWPass *pass);
+DRWShadingGroup *DRW_shgroup_material_instance_create(
+ struct GPUMaterial *material, DRWPass *pass, struct Gwn_Batch *geom, struct Object *ob,
+ struct Gwn_VertFormat *format);
+DRWShadingGroup *DRW_shgroup_material_empty_tri_batch_create(struct GPUMaterial *material, DRWPass *pass, int size);
+DRWShadingGroup *DRW_shgroup_instance_create(
+ struct GPUShader *shader, DRWPass *pass, struct Gwn_Batch *geom, struct Gwn_VertFormat *format);
+DRWShadingGroup *DRW_shgroup_point_batch_create(struct GPUShader *shader, DRWPass *pass);
+DRWShadingGroup *DRW_shgroup_line_batch_create(struct GPUShader *shader, DRWPass *pass);
+DRWShadingGroup *DRW_shgroup_empty_tri_batch_create(struct GPUShader *shader, DRWPass *pass, int size);
+
+typedef void (DRWCallGenerateFn)(
+ DRWShadingGroup *shgroup,
+ void (*draw_fn)(DRWShadingGroup *shgroup, struct Gwn_Batch *geom),
+ void *user_data);
+
+void DRW_shgroup_instance_batch(DRWShadingGroup *shgroup, struct Gwn_Batch *batch);
+
+void DRW_shgroup_free(struct DRWShadingGroup *shgroup);
+void DRW_shgroup_call_add(DRWShadingGroup *shgroup, struct Gwn_Batch *geom, float (*obmat)[4]);
+void DRW_shgroup_call_object_add(DRWShadingGroup *shgroup, struct Gwn_Batch *geom, struct Object *ob);
+/* Used for drawing a batch with instancing without instance attribs. */
+void DRW_shgroup_call_instances_add(
+ DRWShadingGroup *shgroup, struct Gwn_Batch *geom, float (*obmat)[4], unsigned int *count);
+void DRW_shgroup_call_object_instances_add(
+ DRWShadingGroup *shgroup, struct Gwn_Batch *geom, struct Object *ob, unsigned int *count);
+void DRW_shgroup_call_sculpt_add(DRWShadingGroup *shgroup, struct Object *ob, float (*obmat)[4]);
+void DRW_shgroup_call_generate_add(
+ DRWShadingGroup *shgroup, DRWCallGenerateFn *geometry_fn, void *user_data, float (*obmat)[4]);
+void DRW_shgroup_call_dynamic_add_array(DRWShadingGroup *shgroup, const void *attr[], unsigned int attr_len);
+#define DRW_shgroup_call_dynamic_add(shgroup, ...) do { \
+ const void *array[] = {__VA_ARGS__}; \
+ DRW_shgroup_call_dynamic_add_array(shgroup, array, (sizeof(array) / sizeof(*array))); \
+} while (0)
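
DRW_shgroup_call_dynamic_add() just packs its arguments into an array for DRW_shgroup_call_dynamic_add_array(); the count and order of the arguments must match the instance format the shading group was built with. A hedged sketch reusing the hypothetical format from above:

    static void myengine_add_instance(struct GPUShader *sh, DRWPass *pass,
                                      struct Gwn_Batch *geom, float mat[4][4], float color[4])
    {
            DRWShadingGroup *grp = DRW_shgroup_instance_create(sh, pass, geom, g_instance_format);
            /* One instance; attributes in format order: "InstanceModelMatrix", then "color". */
            DRW_shgroup_call_dynamic_add(grp, mat, color);
    }

The armature code later in this diff follows the same pattern for bone shapes.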
+
+unsigned int DRW_shgroup_get_instance_count(const DRWShadingGroup *shgroup);
+
+void DRW_shgroup_state_enable(DRWShadingGroup *shgroup, DRWState state);
+void DRW_shgroup_state_disable(DRWShadingGroup *shgroup, DRWState state);
+void DRW_shgroup_stencil_mask(DRWShadingGroup *shgroup, unsigned int mask);
+
+void DRW_shgroup_uniform_texture(DRWShadingGroup *shgroup, const char *name, const struct GPUTexture *tex);
+void DRW_shgroup_uniform_texture_persistent(DRWShadingGroup *shgroup, const char *name, const struct GPUTexture *tex);
+void DRW_shgroup_uniform_block(DRWShadingGroup *shgroup, const char *name, const struct GPUUniformBuffer *ubo);
+void DRW_shgroup_uniform_block_persistent(DRWShadingGroup *shgroup, const char *name, const struct GPUUniformBuffer *ubo);
+void DRW_shgroup_uniform_texture_ref(DRWShadingGroup *shgroup, const char *name, struct GPUTexture **tex);
+void DRW_shgroup_uniform_float(DRWShadingGroup *shgroup, const char *name, const float *value, int arraysize);
+void DRW_shgroup_uniform_vec2(DRWShadingGroup *shgroup, const char *name, const float *value, int arraysize);
+void DRW_shgroup_uniform_vec3(DRWShadingGroup *shgroup, const char *name, const float *value, int arraysize);
+void DRW_shgroup_uniform_vec4(DRWShadingGroup *shgroup, const char *name, const float *value, int arraysize);
+void DRW_shgroup_uniform_short_to_int(DRWShadingGroup *shgroup, const char *name, const short *value, int arraysize);
+void DRW_shgroup_uniform_short_to_float(DRWShadingGroup *shgroup, const char *name, const short *value, int arraysize);
+/* Booleans are expected to be 4 bytes long for OpenGL! */
+void DRW_shgroup_uniform_bool(DRWShadingGroup *shgroup, const char *name, const int *value, int arraysize);
+void DRW_shgroup_uniform_int(DRWShadingGroup *shgroup, const char *name, const int *value, int arraysize);
+void DRW_shgroup_uniform_ivec2(DRWShadingGroup *shgroup, const char *name, const int *value, int arraysize);
+void DRW_shgroup_uniform_ivec3(DRWShadingGroup *shgroup, const char *name, const int *value, int arraysize);
+void DRW_shgroup_uniform_mat3(DRWShadingGroup *shgroup, const char *name, const float *value);
+void DRW_shgroup_uniform_mat4(DRWShadingGroup *shgroup, const char *name, const float *value);
+
+/* Passes */
+DRWPass *DRW_pass_create(const char *name, DRWState state);
+void DRW_pass_state_set(DRWPass *pass, DRWState state);
+void DRW_pass_foreach_shgroup(DRWPass *pass, void (*callback)(void *userData, DRWShadingGroup *shgrp), void *userData);
+void DRW_pass_sort_shgroup_z(DRWPass *pass);
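
Putting passes and shading groups together, a hypothetical cache_init callback might look like the sketch below. `MYENGINE_Data` and `myengine_shader` are the invented names from the earlier sketches; everything else is declared in this header.

    static void myengine_cache_init(void *vedata)
    {
            MYENGINE_Data *data = vedata;
            static float color[4] = {1.0f, 0.5f, 0.0f, 1.0f};

            data->psl->solid_pass = DRW_pass_create(
                    "MyEngine Solid", DRW_STATE_DEFAULT | DRW_STATE_CULL_BACK);

            DRWShadingGroup *grp = DRW_shgroup_create(myengine_shader, data->psl->solid_pass);
            DRW_shgroup_uniform_vec4(grp, "color", color, 1);
    }

A matching draw_scene callback would then simply call DRW_draw_pass(data->psl->solid_pass).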
+
+/* Viewport */
+typedef enum {
+ DRW_MAT_PERS = 0,
+ DRW_MAT_PERSINV,
+ DRW_MAT_VIEW,
+ DRW_MAT_VIEWINV,
+ DRW_MAT_WIN,
+ DRW_MAT_WININV,
+
+ DRW_MAT_COUNT, // Don't use this.
+} DRWViewportMatrixType;
+
+typedef struct DRWMatrixState {
+ float mat[DRW_MAT_COUNT][4][4];
+} DRWMatrixState;
+
+void DRW_viewport_init(const bContext *C);
+void DRW_viewport_matrix_get(float mat[4][4], DRWViewportMatrixType type);
+void DRW_viewport_matrix_get_all(DRWMatrixState *state);
+void DRW_viewport_matrix_override_set(float mat[4][4], DRWViewportMatrixType type);
+void DRW_viewport_matrix_override_set_all(DRWMatrixState *state);
+void DRW_viewport_matrix_override_unset(DRWViewportMatrixType type);
+void DRW_viewport_matrix_override_unset_all(void);
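
A short fragment showing how the view matrices are read back; only the local variable names are invented.

    float persmat[4][4];
    DRWMatrixState state;

    DRW_viewport_matrix_get(persmat, DRW_MAT_PERS);   /* one matrix */
    DRW_viewport_matrix_get_all(&state);              /* every matrix in one struct */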
+
+const float *DRW_viewport_size_get(void);
+const float *DRW_viewport_invert_size_get(void);
+const float *DRW_viewport_screenvecs_get(void);
+const float *DRW_viewport_pixelsize_get(void);
+bool DRW_viewport_is_persp_get(void);
+
+struct DefaultFramebufferList *DRW_viewport_framebuffer_list_get(void);
+struct DefaultTextureList *DRW_viewport_texture_list_get(void);
+
+void DRW_viewport_request_redraw(void);
+
+void DRW_render_to_image(struct RenderEngine *engine, struct Depsgraph *graph);
+void DRW_render_object_iter(
+ void *vedata, struct RenderEngine *engine, struct Depsgraph *graph,
+ void (*callback)(void *vedata, struct Object *ob, struct RenderEngine *engine, struct Depsgraph *graph));
+void DRW_render_instance_buffer_finish(void);
+
+/* ViewLayers */
+void *DRW_view_layer_engine_data_get(DrawEngineType *engine_type);
+void **DRW_view_layer_engine_data_ensure(DrawEngineType *engine_type, void (*callback)(void *storage));
+
+/* Objects */
+ObjectEngineData *DRW_object_engine_data_get(Object *ob, DrawEngineType *engine_type);
+ObjectEngineData *DRW_object_engine_data_ensure(
+ Object *ob,
+ DrawEngineType *engine_type,
+ size_t size,
+ ObjectEngineDataInitCb init_cb,
+ ObjectEngineDataFreeCb free_cb);
+struct LampEngineData *DRW_lamp_engine_data_ensure(Object *ob, struct RenderEngineType *engine_type);
+void DRW_lamp_engine_data_free(struct LampEngineData *led);
+
+/* Settings */
+bool DRW_object_is_renderable(struct Object *ob);
+bool DRW_check_object_visible_within_active_context(struct Object *ob);
+bool DRW_object_is_flat_normal(const struct Object *ob);
+int DRW_object_is_mode_shade(const struct Object *ob);
+
+/* Draw commands */
+void DRW_draw_pass(DRWPass *pass);
+void DRW_draw_pass_subset(DRWPass *pass, DRWShadingGroup *start_group, DRWShadingGroup *end_group);
+
+void DRW_draw_text_cache_queue(struct DRWTextStore *dt);
+
+void DRW_draw_callbacks_pre_scene(void);
+void DRW_draw_callbacks_post_scene(void);
+
+int DRW_draw_region_engine_info_offset(void);
+void DRW_draw_region_engine_info(void);
+
+void DRW_state_reset_ex(DRWState state);
+void DRW_state_reset(void);
+void DRW_state_lock(DRWState state);
+
+void DRW_state_invert_facing(void);
+
+void DRW_state_clip_planes_count_set(unsigned int plane_ct);
+void DRW_state_clip_planes_reset(void);
+
+/* Culling: return true if the object is inside the view frustum. */
+bool DRW_culling_sphere_test(BoundSphere *bsphere);
+bool DRW_culling_box_test(BoundBox *bbox);
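
As a usage sketch, a cache_populate callback could skip geometry whose bounding sphere fails the frustum test; the center and radius below are placeholders.

    BoundSphere bsphere = {{0.0f, 0.0f, 0.0f}, 1.0f};   /* placeholder center/radius */

    if (DRW_culling_sphere_test(&bsphere)) {
            /* Inside the view frustum: submit the draw calls for this object. */
    }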
+
+/* Selection */
+void DRW_select_load_id(unsigned int id);
+
+/* Draw State */
+void DRW_state_dfdy_factors_get(float dfdyfac[2]);
+bool DRW_state_is_fbo(void);
+bool DRW_state_is_select(void);
+bool DRW_state_is_depth(void);
+bool DRW_state_is_image_render(void);
+bool DRW_state_is_scene_render(void);
+bool DRW_state_is_opengl_render(void);
+bool DRW_state_show_text(void);
+bool DRW_state_draw_support(void);
+bool DRW_state_draw_background(void);
+
+enum eDepsObjectIteratorMode DRW_iterator_mode_get(void);
+
+struct DRWTextStore *DRW_state_text_cache_get(void);
+
+/* Avoid too many lookups while drawing */
+typedef struct DRWContextState {
+
+ struct ARegion *ar; /* 'CTX_wm_region(C)' */
+ struct RegionView3D *rv3d; /* 'CTX_wm_region_view3d(C)' */
+ struct View3D *v3d; /* 'CTX_wm_view3d(C)' */
+
+ struct Scene *scene; /* 'CTX_data_scene(C)' */
+ struct ViewLayer *view_layer; /* 'CTX_data_view_layer(C)' */
+
+ /* Use 'object_edit' for edit-mode */
+ struct Object *obact; /* 'OBACT' */
+
+ struct RenderEngineType *engine_type;
+
+ EvaluationContext eval_ctx;
+ struct Depsgraph *depsgraph;
+
+ eObjectMode object_mode;
+
+ * Last resort (some functions take this as an arg so we can't easily avoid it).
+ * May be NULL when used for selection or depth buffer. */
+ const struct bContext *evil_C;
+
+ /* ---- */
+
+ /* Cache: initialized by 'drw_context_state_init'. */
+ struct Object *object_pose;
+ struct Object *object_edit;
+
+} DRWContextState;
+
+const DRWContextState *DRW_context_state_get(void);
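
Finally, an illustrative fragment of how engines read the cached context instead of repeatedly going through the bContext getters; the local names are arbitrary.

    const DRWContextState *draw_ctx = DRW_context_state_get();
    Scene *scene = draw_ctx->scene;
    View3D *v3d = draw_ctx->v3d;
    const bool is_select = DRW_state_is_select();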
+
+#endif /* __DRW_RENDER_H__ */
diff --git a/source/blender/draw/intern/draw_armature.c b/source/blender/draw/intern/draw_armature.c
new file mode 100644
index 00000000000..c14fe70e0c3
--- /dev/null
+++ b/source/blender/draw/intern/draw_armature.c
@@ -0,0 +1,1460 @@
+/*
+ * Copyright 2016, Blender Foundation.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * Contributor(s): Blender Institute
+ *
+ */
+
+/** \file draw_armature.c
+ * \ingroup draw
+ */
+
+#include <stdlib.h>
+#include <string.h>
+#include <math.h>
+
+#include "DNA_anim_types.h"
+#include "DNA_armature_types.h"
+#include "DNA_constraint_types.h"
+#include "DNA_scene_types.h"
+#include "DNA_screen_types.h"
+#include "DNA_view3d_types.h"
+#include "DNA_object_types.h"
+
+#include "DRW_render.h"
+
+#include "BLI_blenlib.h"
+#include "BLI_math.h"
+#include "BLI_dlrbTree.h"
+#include "BLI_utildefines.h"
+
+#include "BKE_animsys.h"
+#include "BKE_action.h"
+#include "BKE_armature.h"
+#include "BKE_global.h"
+#include "BKE_modifier.h"
+#include "BKE_nla.h"
+#include "BKE_curve.h"
+
+#include "BIF_gl.h"
+
+#include "ED_armature.h"
+#include "ED_keyframes_draw.h"
+
+#include "GPU_select.h"
+
+#include "UI_resources.h"
+
+#include "draw_common.h"
+#include "draw_manager_text.h"
+
+#define BONE_VAR(eBone, pchan, var) ((eBone) ? (eBone->var) : (pchan->var))
+#define BONE_FLAG(eBone, pchan) ((eBone) ? (eBone->flag) : (pchan->bone->flag))
+
+/* For now just match 2.7x where possible. */
+// #define USE_SOLID_COLOR
+
+/* Reset for drawing each armature object */
+static struct {
+ /* Current armature object */
+ Object *ob;
+ /* Reset when changing current_armature */
+ DRWShadingGroup *bone_octahedral_solid;
+ DRWShadingGroup *bone_octahedral_wire;
+ DRWShadingGroup *bone_box_solid;
+ DRWShadingGroup *bone_box_wire;
+ DRWShadingGroup *bone_wire_wire;
+ DRWShadingGroup *bone_envelope_solid;
+ DRWShadingGroup *bone_envelope_distance;
+ DRWShadingGroup *bone_envelope_wire;
+ DRWShadingGroup *bone_envelope_head_wire;
+ DRWShadingGroup *bone_point_solid;
+ DRWShadingGroup *bone_point_wire;
+ DRWShadingGroup *bone_axes;
+ DRWShadingGroup *relationship_lines;
+
+ DRWPass *pass_bone_solid;
+ DRWPass *pass_bone_wire;
+ DRWPass *pass_bone_envelope;
+} g_data = {NULL};
+
+/* -------------------------------------------------------------------- */
+
+/** \name Shader Groups (DRW_shgroup)
+ * \{ */
+
+/* Octahedral */
+static void drw_shgroup_bone_octahedral_solid(const float (*bone_mat)[4], const float color[4])
+{
+ if (g_data.bone_octahedral_solid == NULL) {
+ struct Gwn_Batch *geom = DRW_cache_bone_octahedral_get();
+ g_data.bone_octahedral_solid = shgroup_instance_solid(g_data.pass_bone_solid, geom);
+ }
+ float final_bonemat[4][4];
+ mul_m4_m4m4(final_bonemat, g_data.ob->obmat, bone_mat);
+ DRW_shgroup_call_dynamic_add(g_data.bone_octahedral_solid, final_bonemat, color);
+}
+
+static void drw_shgroup_bone_octahedral_wire(const float (*bone_mat)[4], const float color[4])
+{
+ if (g_data.bone_octahedral_wire == NULL) {
+ struct Gwn_Batch *geom = DRW_cache_bone_octahedral_wire_outline_get();
+ g_data.bone_octahedral_wire = shgroup_instance_wire(g_data.pass_bone_wire, geom);
+ }
+ float final_bonemat[4][4];
+ mul_m4_m4m4(final_bonemat, g_data.ob->obmat, bone_mat);
+ DRW_shgroup_call_dynamic_add(g_data.bone_octahedral_wire, final_bonemat, color);
+}
+
+/* Box / B-Bone */
+static void drw_shgroup_bone_box_solid(const float (*bone_mat)[4], const float color[4])
+{
+ if (g_data.bone_box_solid == NULL) {
+ struct Gwn_Batch *geom = DRW_cache_bone_box_get();
+ g_data.bone_box_solid = shgroup_instance_solid(g_data.pass_bone_solid, geom);
+ }
+ float final_bonemat[4][4];
+ mul_m4_m4m4(final_bonemat, g_data.ob->obmat, bone_mat);
+ DRW_shgroup_call_dynamic_add(g_data.bone_box_solid, final_bonemat, color);
+}
+
+static void drw_shgroup_bone_box_wire(const float (*bone_mat)[4], const float color[4])
+{
+ if (g_data.bone_box_wire == NULL) {
+ struct Gwn_Batch *geom = DRW_cache_bone_box_wire_outline_get();
+ g_data.bone_box_wire = shgroup_instance_wire(g_data.pass_bone_wire, geom);
+ }
+ float final_bonemat[4][4];
+ mul_m4_m4m4(final_bonemat, g_data.ob->obmat, bone_mat);
+ DRW_shgroup_call_dynamic_add(g_data.bone_box_wire, final_bonemat, color);
+}
+
+/* Wire */
+static void drw_shgroup_bone_wire_wire(const float (*bone_mat)[4], const float color[4])
+{
+ if (g_data.bone_wire_wire == NULL) {
+ struct Gwn_Batch *geom = DRW_cache_bone_wire_wire_outline_get();
+ g_data.bone_wire_wire = shgroup_instance_wire(g_data.pass_bone_wire, geom);
+ }
+ float final_bonemat[4][4];
+ mul_m4_m4m4(final_bonemat, g_data.ob->obmat, bone_mat);
+ DRW_shgroup_call_dynamic_add(g_data.bone_wire_wire, final_bonemat, color);
+}
+
+/* Envelope */
+static void drw_shgroup_bone_envelope_distance(
+ const float (*bone_mat)[4], const float color[4],
+ const float *radius_head, const float *radius_tail, const float *distance)
+{
+ if (g_data.pass_bone_envelope != NULL) {
+ if (g_data.bone_envelope_distance == NULL) {
+ struct Gwn_Batch *geom = DRW_cache_bone_envelope_distance_outline_get();
+ /* Note: the bone_wire draw pass is not really working; do we need another one here? */
+ g_data.bone_envelope_distance = shgroup_instance_bone_envelope_wire(g_data.pass_bone_envelope, geom);
+ }
+ float final_bonemat[4][4];
+ mul_m4_m4m4(final_bonemat, g_data.ob->obmat, bone_mat);
+ DRW_shgroup_call_dynamic_add(g_data.bone_envelope_distance, final_bonemat, color, radius_head, radius_tail, distance);
+ }
+}
+
+static void drw_shgroup_bone_envelope_solid(
+ const float (*bone_mat)[4], const float color[4],
+ const float *radius_head, const float *radius_tail)
+{
+ if (g_data.bone_envelope_solid == NULL) {
+ struct Gwn_Batch *geom = DRW_cache_bone_envelope_solid_get();
+ g_data.bone_envelope_solid = shgroup_instance_bone_envelope_solid(g_data.pass_bone_solid, geom);
+ }
+ float final_bonemat[4][4];
+ mul_m4_m4m4(final_bonemat, g_data.ob->obmat, bone_mat);
+ DRW_shgroup_call_dynamic_add(g_data.bone_envelope_solid, final_bonemat, color, radius_head, radius_tail);
+}
+
+static void drw_shgroup_bone_envelope_wire(
+ const float (*bone_mat)[4], const float color[4],
+ const float *radius_head, const float *radius_tail, const float *distance)
+{
+ if (g_data.bone_envelope_wire == NULL) {
+ struct Gwn_Batch *geom = DRW_cache_bone_envelope_wire_outline_get();
+ g_data.bone_envelope_wire = shgroup_instance_bone_envelope_wire(g_data.pass_bone_wire, geom);
+ }
+ float final_bonemat[4][4];
+ mul_m4_m4m4(final_bonemat, g_data.ob->obmat, bone_mat);
+ DRW_shgroup_call_dynamic_add(g_data.bone_envelope_wire, final_bonemat, color, radius_head, radius_tail, distance);
+}
+
+static void drw_shgroup_bone_envelope_head_wire(
+ const float (*bone_mat)[4], const float color[4],
+ const float *radius_head, const float *radius_tail, const float *distance)
+{
+ if (g_data.bone_envelope_head_wire == NULL) {
+ struct Gwn_Batch *geom = DRW_cache_bone_envelope_head_wire_outline_get();
+ g_data.bone_envelope_head_wire = shgroup_instance_bone_envelope_wire(g_data.pass_bone_wire, geom);
+ }
+ float final_bonemat[4][4];
+ mul_m4_m4m4(final_bonemat, g_data.ob->obmat, bone_mat);
+ DRW_shgroup_call_dynamic_add(g_data.bone_envelope_head_wire, final_bonemat, color, radius_head, radius_tail, distance);
+}
+
+/* Custom (geometry) */
+
+static void drw_shgroup_bone_custom_solid(const float (*bone_mat)[4], const float color[4], Object *custom)
+{
+ /* grr, not re-using instances! */
+ struct Gwn_Batch *geom = DRW_cache_object_surface_get(custom);
+ if (geom) {
+ DRWShadingGroup *shgrp_geom_solid = shgroup_instance_solid(g_data.pass_bone_solid, geom);
+ float final_bonemat[4][4];
+ mul_m4_m4m4(final_bonemat, g_data.ob->obmat, bone_mat);
+ DRW_shgroup_call_dynamic_add(shgrp_geom_solid, final_bonemat, color);
+ }
+}
+
+static void drw_shgroup_bone_custom_wire(const float (*bone_mat)[4], const float color[4], Object *custom)
+{
+ /* grr, not re-using instances! */
+ struct Gwn_Batch *geom = DRW_cache_object_wire_outline_get(custom);
+ if (geom) {
+ DRWShadingGroup *shgrp_geom_wire = shgroup_instance_wire(g_data.pass_bone_wire, geom);
+ float final_bonemat[4][4];
+ mul_m4_m4m4(final_bonemat, g_data.ob->obmat, bone_mat);
+ DRW_shgroup_call_dynamic_add(shgrp_geom_wire, final_bonemat, color);
+ }
+}
+
+/* Head and tail sphere */
+static void drw_shgroup_bone_point_solid(const float (*bone_mat)[4], const float color[4])
+{
+ if (g_data.bone_point_solid == NULL) {
+ struct Gwn_Batch *geom = DRW_cache_bone_point_get();
+ g_data.bone_point_solid = shgroup_instance_solid(g_data.pass_bone_solid, geom);
+ }
+ float final_bonemat[4][4];
+ mul_m4_m4m4(final_bonemat, g_data.ob->obmat, bone_mat);
+ DRW_shgroup_call_dynamic_add(g_data.bone_point_solid, final_bonemat, color);
+}
+
+static void drw_shgroup_bone_point_wire(const float (*bone_mat)[4], const float color[4])
+{
+ if (g_data.bone_point_wire == NULL) {
+ struct Gwn_Batch *geom = DRW_cache_bone_point_wire_outline_get();
+ g_data.bone_point_wire = shgroup_instance_wire(g_data.pass_bone_wire, geom);
+ }
+ float final_bonemat[4][4];
+ mul_m4_m4m4(final_bonemat, g_data.ob->obmat, bone_mat);
+ DRW_shgroup_call_dynamic_add(g_data.bone_point_wire, final_bonemat, color);
+}
+
+/* Axes */
+static void drw_shgroup_bone_axes(const float (*bone_mat)[4], const float color[4])
+{
+ if (g_data.bone_axes == NULL) {
+ struct Gwn_Batch *geom = DRW_cache_bone_arrows_get();
+ g_data.bone_axes = shgroup_instance_wire(g_data.pass_bone_wire, geom);
+ }
+ float final_bonemat[4][4];
+ mul_m4_m4m4(final_bonemat, g_data.ob->obmat, bone_mat);
+ DRW_shgroup_call_dynamic_add(g_data.bone_axes, final_bonemat, color);
+}
+
+/* Relationship lines */
+static void UNUSED_FUNCTION(drw_shgroup_bone_relationship_lines)(const float head[3], const float tail[3])
+{
+ DRW_shgroup_call_dynamic_add(g_data.relationship_lines, head);
+ DRW_shgroup_call_dynamic_add(g_data.relationship_lines, tail);
+}
+
+/** \} */
+
+
+/* -------------------------------------------------------------------- */
+
+/** \name Drawing Theme Helpers
+ *
+ * Note: this section is a duplicate of code in 'drawarmature.c'.
+ *
+ * \{ */
+
+/* global here is reset before drawing each bone */
+struct {
+ const ThemeWireColor *bcolor;
+} g_color;
+
+/* values of colCode for set_pchan_color */
+enum {
+ PCHAN_COLOR_NORMAL = 0, /* normal drawing */
+ PCHAN_COLOR_SOLID, /* specific case where "solid" color is needed */
+ PCHAN_COLOR_CONSTS, /* "constraint" colors (which may/may-not be suppressed) */
+
+ PCHAN_COLOR_SPHEREBONE_BASE, /* for the 'stick' of sphere (envelope) bones */
+ PCHAN_COLOR_SPHEREBONE_END, /* for the ends of sphere (envelope) bones */
+ PCHAN_COLOR_LINEBONE /* for the middle of line-bones */
+};
+
+/* This function sets the color-set for coloring a certain bone */
+static void set_pchan_colorset(Object *ob, bPoseChannel *pchan)
+{
+ bPose *pose = (ob) ? ob->pose : NULL;
+ bArmature *arm = (ob) ? ob->data : NULL;
+ bActionGroup *grp = NULL;
+ short color_index = 0;
+
+ /* sanity check */
+ if (ELEM(NULL, ob, arm, pose, pchan)) {
+ g_color.bcolor = NULL;
+ return;
+ }
+
+ /* only try to set custom color if enabled for armature */
+ if (arm->flag & ARM_COL_CUSTOM) {
+ /* currently, a bone can only use a custom color set if its group (if it has one)
+ * has been set to use one
+ */
+ if (pchan->agrp_index) {
+ grp = (bActionGroup *)BLI_findlink(&pose->agroups, (pchan->agrp_index - 1));
+ if (grp)
+ color_index = grp->customCol;
+ }
+ }
+
+ /* bcolor is a pointer to the color set to use. If NULL, then the default
+ * color set (based on the theme colors for 3d-view) is used.
+ */
+ if (color_index > 0) {
+ bTheme *btheme = UI_GetTheme();
+ g_color.bcolor = &btheme->tarm[(color_index - 1)];
+ }
+ else if (color_index == -1) {
+ /* use the group's own custom color set (grp is always != NULL here) */
+ g_color.bcolor = &grp->cs;
+ }
+ else {
+ g_color.bcolor = NULL;
+ }
+}
+
+/* This function is for brightening/darkening a given color (like UI_GetThemeColorShade3ubv()) */
+static void cp_shade_color3ub(unsigned char cp[3], const int offset)
+{
+ int r, g, b;
+
+ r = offset + (int) cp[0];
+ CLAMP(r, 0, 255);
+ g = offset + (int) cp[1];
+ CLAMP(g, 0, 255);
+ b = offset + (int) cp[2];
+ CLAMP(b, 0, 255);
+
+ cp[0] = r;
+ cp[1] = g;
+ cp[2] = b;
+}
+
+/* This function sets the gl-color for coloring a certain bone (based on bcolor) */
+static bool set_pchan_color(short colCode, const int boneflag, const short constflag, float r_color[4])
+{
+ float *fcolor = r_color;
+ const ThemeWireColor *bcolor = g_color.bcolor;
+
+ switch (colCode) {
+ case PCHAN_COLOR_NORMAL:
+ {
+ if (bcolor) {
+ unsigned char cp[4] = {255};
+
+ if (boneflag & BONE_DRAW_ACTIVE) {
+ copy_v3_v3_char((char *)cp, bcolor->active);
+ if (!(boneflag & BONE_SELECTED)) {
+ cp_shade_color3ub(cp, -80);
+ }
+ }
+ else if (boneflag & BONE_SELECTED) {
+ copy_v3_v3_char((char *)cp, bcolor->select);
+ }
+ else {
+ /* a bit darker than solid */
+ copy_v3_v3_char((char *)cp, bcolor->solid);
+ cp_shade_color3ub(cp, -50);
+ }
+
+ rgb_uchar_to_float(fcolor, cp);
+ }
+ else {
+ if ((boneflag & BONE_DRAW_ACTIVE) && (boneflag & BONE_SELECTED)) {
+ UI_GetThemeColor4fv(TH_BONE_POSE_ACTIVE, fcolor);
+ }
+ else if (boneflag & BONE_DRAW_ACTIVE) {
+ UI_GetThemeColorBlendShade4fv(TH_WIRE, TH_BONE_POSE, 0.15f, 0, fcolor);
+ }
+ else if (boneflag & BONE_SELECTED) {
+ UI_GetThemeColor4fv(TH_BONE_POSE, fcolor);
+ }
+ else {
+ UI_GetThemeColor4fv(TH_WIRE, fcolor);
+ }
+ }
+
+ return true;
+ }
+ case PCHAN_COLOR_SOLID:
+ {
+ if (bcolor) {
+ rgb_uchar_to_float(fcolor, (unsigned char *)bcolor->solid);
+ }
+ else {
+ UI_GetThemeColor4fv(TH_BONE_SOLID, fcolor);
+ }
+
+ return true;
+ }
+ case PCHAN_COLOR_CONSTS:
+ {
+ if ((bcolor == NULL) || (bcolor->flag & TH_WIRECOLOR_CONSTCOLS)) {
+ unsigned char cp[4];
+ if (constflag & PCHAN_HAS_TARGET) rgba_char_args_set((char *)cp, 255, 150, 0, 80);
+ else if (constflag & PCHAN_HAS_IK) rgba_char_args_set((char *)cp, 255, 255, 0, 80);
+ else if (constflag & PCHAN_HAS_SPLINEIK) rgba_char_args_set((char *)cp, 200, 255, 0, 80);
+ else if (constflag & PCHAN_HAS_CONST) rgba_char_args_set((char *)cp, 0, 255, 120, 80);
+
+ rgba_uchar_to_float(fcolor, cp);
+
+ return true;
+ }
+ return false;
+ }
+ case PCHAN_COLOR_SPHEREBONE_BASE:
+ {
+ if (bcolor) {
+ unsigned char cp[4] = {255};
+
+ if (boneflag & BONE_DRAW_ACTIVE) {
+ copy_v3_v3_char((char *)cp, bcolor->active);
+ }
+ else if (boneflag & BONE_SELECTED) {
+ copy_v3_v3_char((char *)cp, bcolor->select);
+ }
+ else {
+ copy_v3_v3_char((char *)cp, bcolor->solid);
+ }
+
+ rgb_uchar_to_float(fcolor, cp);
+ }
+ else {
+ if (boneflag & BONE_DRAW_ACTIVE) {
+ UI_GetThemeColorShade4fv(TH_BONE_POSE, 40, fcolor);
+ }
+ else if (boneflag & BONE_SELECTED) {
+ UI_GetThemeColor4fv(TH_BONE_POSE, fcolor);
+ }
+ else {
+ UI_GetThemeColor4fv(TH_BONE_SOLID, fcolor);
+ }
+ }
+
+ return true;
+ }
+ case PCHAN_COLOR_SPHEREBONE_END:
+ {
+ if (bcolor) {
+ unsigned char cp[4] = {255};
+
+ if (boneflag & BONE_DRAW_ACTIVE) {
+ copy_v3_v3_char((char *)cp, bcolor->active);
+ cp_shade_color3ub(cp, 10);
+ }
+ else if (boneflag & BONE_SELECTED) {
+ copy_v3_v3_char((char *)cp, bcolor->select);
+ cp_shade_color3ub(cp, -30);
+ }
+ else {
+ copy_v3_v3_char((char *)cp, bcolor->solid);
+ cp_shade_color3ub(cp, -30);
+ }
+
+ rgb_uchar_to_float(fcolor, cp);
+ }
+ else {
+ if (boneflag & BONE_DRAW_ACTIVE) {
+ UI_GetThemeColorShade4fv(TH_BONE_POSE, 10, fcolor);
+ }
+ else if (boneflag & BONE_SELECTED) {
+ UI_GetThemeColorShade4fv(TH_BONE_POSE, -30, fcolor);
+ }
+ else {
+ UI_GetThemeColorShade4fv(TH_BONE_SOLID, -30, fcolor);
+ }
+ }
+ break;
+ }
+ case PCHAN_COLOR_LINEBONE:
+ {
+ /* inner part in background color or constraint */
+ if ((constflag) && ((bcolor == NULL) || (bcolor->flag & TH_WIRECOLOR_CONSTCOLS))) {
+ unsigned char cp[4];
+ if (constflag & PCHAN_HAS_TARGET) rgba_char_args_set((char *)cp, 255, 150, 0, 255);
+ else if (constflag & PCHAN_HAS_IK) rgba_char_args_set((char *)cp, 255, 255, 0, 255);
+ else if (constflag & PCHAN_HAS_SPLINEIK) rgba_char_args_set((char *)cp, 200, 255, 0, 255);
+ else if (constflag & PCHAN_HAS_CONST) rgba_char_args_set((char *)cp, 0, 255, 120, 255);
+ else if (constflag) UI_GetThemeColor4ubv(TH_BONE_POSE, cp); /* PCHAN_HAS_ACTION */
+
+ rgb_uchar_to_float(fcolor, cp);
+ }
+ else {
+ if (bcolor) {
+ const char *cp = bcolor->solid;
+ rgb_uchar_to_float(fcolor, (unsigned char *)cp);
+ fcolor[3] = 204.f / 255.f;
+ }
+ else {
+ UI_GetThemeColorShade4fv(TH_BACK, -30, fcolor);
+ }
+ }
+
+ return true;
+ }
+ }
+
+ return false;
+}
+
+/** \} */
+
+
+/* -------------------------------------------------------------------- */
+
+/** \name Drawing Color Helpers
+ * \{ */
+
+/**
+ * Follow `TH_*` naming except for mixed colors.
+ */
+static struct {
+ float select_color[4];
+ float edge_select_color[4];
+ float bone_select_color[4]; /* tint */
+ float wire_color[4];
+ float wire_edit_color[4];
+ float bone_solid_color[4];
+ float bone_active_unselect_color[4]; /* mix */
+ float bone_pose_color[4];
+ float bone_pose_active_color[4];
+ float bone_pose_active_unselect_color[4]; /* mix */
+ float text_hi_color[4];
+ float text_color[4];
+ float vertex_select_color[4];
+ float vertex_color[4];
+
+ /* not a theme, this is an override */
+ const float *const_color;
+} g_theme;
+
+/** See: 'set_pchan_color' */
+static void update_color(const float const_color[4])
+{
+ g_theme.const_color = const_color;
+
+#define NO_ALPHA(c) (((c)[3] = 1.0f), (c))
+
+ UI_GetThemeColor3fv(TH_SELECT, NO_ALPHA(g_theme.select_color));
+ UI_GetThemeColor3fv(TH_EDGE_SELECT, NO_ALPHA(g_theme.edge_select_color));
+ UI_GetThemeColorShade3fv(TH_EDGE_SELECT, -20, NO_ALPHA(g_theme.bone_select_color));
+ UI_GetThemeColor3fv(TH_WIRE, NO_ALPHA(g_theme.wire_color));
+ UI_GetThemeColor3fv(TH_WIRE_EDIT, NO_ALPHA(g_theme.wire_edit_color));
+ UI_GetThemeColor3fv(TH_BONE_SOLID, NO_ALPHA(g_theme.bone_solid_color));
+ UI_GetThemeColorBlendShade3fv(TH_WIRE_EDIT, TH_EDGE_SELECT, 0.15f, 0, NO_ALPHA(g_theme.bone_active_unselect_color));
+ UI_GetThemeColor3fv(TH_BONE_POSE, NO_ALPHA(g_theme.bone_pose_color));
+ UI_GetThemeColor3fv(TH_BONE_POSE_ACTIVE, NO_ALPHA(g_theme.bone_pose_active_color));
+ UI_GetThemeColorBlendShade3fv(TH_WIRE, TH_BONE_POSE, 0.15f, 0, NO_ALPHA(g_theme.bone_pose_active_unselect_color));
+ UI_GetThemeColor3fv(TH_TEXT_HI, NO_ALPHA(g_theme.text_hi_color));
+ UI_GetThemeColor3fv(TH_TEXT, NO_ALPHA(g_theme.text_color));
+ UI_GetThemeColor3fv(TH_VERTEX_SELECT, NO_ALPHA(g_theme.vertex_select_color));
+ UI_GetThemeColor3fv(TH_VERTEX, NO_ALPHA(g_theme.vertex_color));
+
+#undef NO_ALPHA
+}
+
+static const float *get_bone_solid_color(
+ const EditBone *eBone, const bPoseChannel *pchan, const bArmature *arm,
+ const int boneflag, const short constflag)
+{
+ if (g_theme.const_color)
+ return g_theme.bone_solid_color;
+
+#ifdef USE_SOLID_COLOR
+ /* Edit Mode */
+ if (eBone) {
+ bool is_active = (arm->act_edbone == eBone);
+ if (eBone->flag & BONE_SELECTED) {
+ if (is_active) {
+ return g_theme.edge_select_color;
+ }
+ else {
+ return g_theme.bone_select_color;
+ }
+ }
+ }
+ else if (arm->flag & ARM_POSEMODE) {
+ bool is_active = (arm->act_bone == pchan->bone);
+ if (pchan->bone->flag & BONE_SELECTED) {
+ if (is_active) {
+ return g_theme.bone_pose_active_color;
+ }
+ else {
+ return g_theme.bone_pose_color;
+ }
+ }
+ }
+#else
+ if (arm->drawtype == ARM_ENVELOPE) {
+ /* Edit Mode */
+ if (eBone) {
+ bool is_active = (arm->act_edbone == eBone);
+ if (eBone->flag & BONE_SELECTED) {
+ if (is_active) {
+ return g_theme.edge_select_color;
+ }
+ else {
+ return g_theme.bone_select_color;
+ }
+ }
+ }
+ else if (arm->flag & ARM_POSEMODE) {
+ bool is_active = (arm->act_bone == pchan->bone);
+ if (pchan->bone->flag & BONE_SELECTED) {
+ if (is_active) {
+ return g_theme.bone_pose_active_color;
+ }
+ else {
+ return g_theme.bone_pose_color;
+ }
+ }
+ }
+ }
+#endif
+
+ if (arm->flag & ARM_POSEMODE) {
+ float *disp_color = pchan->draw_data->solid_color;
+ set_pchan_color(PCHAN_COLOR_SOLID, boneflag, constflag, disp_color);
+ disp_color[3] = 1.0;
+ return disp_color;
+ }
+
+ return g_theme.bone_solid_color;
+}
+
+static const float *get_bone_wire_color(
+ const EditBone *eBone, const bPoseChannel *pchan, const bArmature *arm,
+ const int boneflag, const short constflag)
+{
+ if (g_theme.const_color)
+ return g_theme.const_color;
+
+ if (eBone) {
+ if (boneflag & BONE_SELECTED) {
+ if (boneflag & BONE_DRAW_ACTIVE) {
+ return g_theme.edge_select_color;
+ }
+ else {
+ return g_theme.bone_select_color;
+ }
+ }
+ else {
+ if (boneflag & BONE_DRAW_ACTIVE) {
+ return g_theme.bone_active_unselect_color;
+ }
+ else {
+ return g_theme.wire_edit_color;
+ }
+ }
+ }
+ else if (arm->flag & ARM_POSEMODE) {
+ float *disp_color = pchan->draw_data->wire_color;
+ set_pchan_color(PCHAN_COLOR_NORMAL, boneflag, constflag, disp_color);
+ disp_color[3] = 1.0;
+ return disp_color;
+
+
+#if 0
+ if (boneflag & BONE_SELECTED) {
+ if (is_active) {
+ return g_theme.bone_pose_active_color;
+ }
+ else {
+ return g_theme.bone_pose_color;
+ }
+ }
+ else {
+ if (is_active) {
+ return g_theme.bone_pose_active_unselect_color;
+ }
+ else {
+ return g_theme.wire_color;
+ }
+ }
+#endif
+ }
+
+ return g_theme.vertex_color;
+}
+
+/** \} */
+
+
+/* -------------------------------------------------------------------- */
+
+/** \name Helper Utils
+ * \{ */
+
+static void pchan_draw_data_init(bPoseChannel *pchan)
+{
+ if (pchan->draw_data != NULL) {
+ if (pchan->draw_data->bbone_matrix_len != pchan->bone->segments) {
+ MEM_SAFE_FREE(pchan->draw_data);
+ }
+ }
+
+ if (pchan->draw_data == NULL) {
+ pchan->draw_data = MEM_mallocN(sizeof(*pchan->draw_data) + sizeof(Mat4) * pchan->bone->segments, __func__);
+ pchan->draw_data->bbone_matrix_len = pchan->bone->segments;
+ }
+}
+
+static void draw_bone_update_disp_matrix_default(EditBone *eBone, bPoseChannel *pchan)
+{
+ float s[4][4], ebmat[4][4];
+ float length;
+ float (*bone_mat)[4];
+ float (*disp_mat)[4];
+ float (*disp_tail_mat)[4];
+
+ /* TODO: This should be moved to depsgraph or armature refresh
+ * and not be tied to the draw pass creation.
+ * This would refresh the armature without invalidating the draw cache. */
+ if (pchan) {
+ length = pchan->bone->length;
+ bone_mat = pchan->pose_mat;
+ disp_mat = pchan->disp_mat;
+ disp_tail_mat = pchan->disp_tail_mat;
+ }
+ else {
+ eBone->length = len_v3v3(eBone->tail, eBone->head);
+ ED_armature_ebone_to_mat4(eBone, ebmat);
+
+ length = eBone->length;
+ bone_mat = ebmat;
+ disp_mat = eBone->disp_mat;
+ disp_tail_mat = eBone->disp_tail_mat;
+ }
+
+ scale_m4_fl(s, length);
+ mul_m4_m4m4(disp_mat, bone_mat, s);
+ copy_m4_m4(disp_tail_mat, disp_mat);
+ translate_m4(disp_tail_mat, 0.0f, 1.0f, 0.0f);
+}
+
+/* XXX Direct copy from drawarmature.c... This is ugly! */
+/* A partial copy of b_bone_spline_setup(), with just the parts for previewing editmode curve settings
+ *
+ * This assumes that prev/next bones don't have any impact (since they should all still be in the "straight"
+ * position here anyway), and that we can simply apply the bbone settings to get the desired effect...
+ */
+static void ebone_spline_preview(EditBone *ebone, float result_array[MAX_BBONE_SUBDIV][4][4])
+{
+ float h1[3], h2[3], length, hlength1, hlength2, roll1 = 0.0f, roll2 = 0.0f;
+ float mat3[3][3];
+ float data[MAX_BBONE_SUBDIV + 1][4], *fp;
+ int a;
+
+ length = ebone->length;
+
+ hlength1 = ebone->ease1 * length * 0.390464f; /* 0.5f * sqrt(2) * kappa, the handle length for near-perfect circles */
+ hlength2 = ebone->ease2 * length * 0.390464f;
+
+ /* find the handle points, since this is inside bone space, the
+ * first point = (0, 0, 0)
+ * last point = (0, length, 0)
+ *
+ * we also just apply all the "extra effects", since they're the whole reason we're doing this...
+ */
+ h1[0] = ebone->curveInX;
+ h1[1] = hlength1;
+ h1[2] = ebone->curveInY;
+ roll1 = ebone->roll1;
+
+ h2[0] = ebone->curveOutX;
+ h2[1] = -hlength2;
+ h2[2] = ebone->curveOutY;
+ roll2 = ebone->roll2;
+
+ /* make curve */
+ if (ebone->segments > MAX_BBONE_SUBDIV)
+ ebone->segments = MAX_BBONE_SUBDIV;
+
+ BKE_curve_forward_diff_bezier(0.0f, h1[0], h2[0], 0.0f, data[0], MAX_BBONE_SUBDIV, 4 * sizeof(float));
+ BKE_curve_forward_diff_bezier(0.0f, h1[1], length + h2[1], length, data[0] + 1, MAX_BBONE_SUBDIV, 4 * sizeof(float));
+ BKE_curve_forward_diff_bezier(0.0f, h1[2], h2[2], 0.0f, data[0] + 2, MAX_BBONE_SUBDIV, 4 * sizeof(float));
+ BKE_curve_forward_diff_bezier(roll1, roll1 + 0.390464f * (roll2 - roll1), roll2 - 0.390464f * (roll2 - roll1), roll2, data[0] + 3, MAX_BBONE_SUBDIV, 4 * sizeof(float));
+
+ equalize_bbone_bezier(data[0], ebone->segments); /* note: does stride 4! */
+
+ /* make transformation matrices for the segments for drawing */
+ for (a = 0, fp = data[0]; a < ebone->segments; a++, fp += 4) {
+ sub_v3_v3v3(h1, fp + 4, fp);
+ vec_roll_to_mat3(h1, fp[3], mat3); /* fp[3] is roll */
+
+ copy_m4_m3(result_array[a], mat3);
+ copy_v3_v3(result_array[a][3], fp);
+
+ /* "extra" scale facs... */
+ {
+ const int num_segments = ebone->segments;
+
+ const float scaleFactorIn = 1.0f + (ebone->scaleIn - 1.0f) * ((float)(num_segments - a) / (float)num_segments);
+ const float scaleFactorOut = 1.0f + (ebone->scaleOut - 1.0f) * ((float)(a + 1) / (float)num_segments);
+
+ const float scalefac = scaleFactorIn * scaleFactorOut;
+ float bscalemat[4][4], bscale[3];
+
+ bscale[0] = scalefac;
+ bscale[1] = 1.0f;
+ bscale[2] = scalefac;
+
+ size_to_mat4(bscalemat, bscale);
+
+ /* Note: don't multiply by inverse scale mat here, as it causes problems with scaling shearing and breaking segment chains */
+ mul_m4_series(result_array[a], result_array[a], bscalemat);
+ }
+ }
+}
+
+static void draw_bone_update_disp_matrix_bbone(EditBone *eBone, bPoseChannel *pchan)
+{
+ float s[4][4], ebmat[4][4];
+ float length, xwidth, zwidth;
+ float (*bone_mat)[4];
+ short bbone_segments;
+
+ /* TODO: This should be moved to depsgraph or armature refresh
+ * and not be tied to the draw pass creation.
+ * This would refresh the armature without invalidating the draw cache. */
+ if (pchan) {
+ length = pchan->bone->length;
+ xwidth = pchan->bone->xwidth;
+ zwidth = pchan->bone->zwidth;
+ bone_mat = pchan->pose_mat;
+ bbone_segments = pchan->bone->segments;
+ }
+ else {
+ eBone->length = len_v3v3(eBone->tail, eBone->head);
+ ED_armature_ebone_to_mat4(eBone, ebmat);
+
+ length = eBone->length;
+ xwidth = eBone->xwidth;
+ zwidth = eBone->zwidth;
+ bone_mat = ebmat;
+ bbone_segments = eBone->segments;
+ }
+
+ size_to_mat4(s, (const float[3]){xwidth, length / bbone_segments, zwidth});
+
+ /* Compute BBones segment matrices... */
+ /* Note that we need this even for one-segment bones, because box drawing needs a specific weirdo matrix for the box,
+ * which we cannot use to draw end points & co. */
+ if (pchan) {
+ Mat4 *bbones_mat = (Mat4 *)pchan->draw_data->bbone_matrix;
+ if (bbone_segments > 1) {
+ b_bone_spline_setup(pchan, 0, bbones_mat);
+
+ for (int i = bbone_segments; i--; bbones_mat++) {
+ mul_m4_m4m4(bbones_mat->mat, bbones_mat->mat, s);
+ mul_m4_m4m4(bbones_mat->mat, bone_mat, bbones_mat->mat);
+ }
+ }
+ else {
+ mul_m4_m4m4(bbones_mat->mat, bone_mat, s);
+ }
+ }
+ else {
+ float (*bbones_mat)[4][4] = eBone->disp_bbone_mat;
+
+ if (bbone_segments > 1) {
+ ebone_spline_preview(eBone, bbones_mat);
+
+ for (int i = bbone_segments; i--; bbones_mat++) {
+ mul_m4_m4m4(*bbones_mat, *bbones_mat, s);
+ mul_m4_m4m4(*bbones_mat, bone_mat, *bbones_mat);
+ }
+ }
+ else {
+ mul_m4_m4m4(*bbones_mat, bone_mat, s);
+ }
+ }
+
+ /* Grrr... We need the default display matrix to draw end points, axes, etc. :( */
+ draw_bone_update_disp_matrix_default(eBone, pchan);
+}
+
+static void draw_bone_update_disp_matrix_custom(bPoseChannel *pchan)
+{
+ float s[4][4];
+ float length;
+ float (*bone_mat)[4];
+ float (*disp_mat)[4];
+ float (*disp_tail_mat)[4];
+
+ /* See TODO above */
+ length = PCHAN_CUSTOM_DRAW_SIZE(pchan);
+ bone_mat = pchan->custom_tx ? pchan->custom_tx->pose_mat : pchan->pose_mat;
+ disp_mat = pchan->disp_mat;
+ disp_tail_mat = pchan->disp_tail_mat;
+
+ scale_m4_fl(s, length);
+ mul_m4_m4m4(disp_mat, bone_mat, s);
+ copy_m4_m4(disp_tail_mat, disp_mat);
+ translate_m4(disp_tail_mat, 0.0f, 1.0f, 0.0f);
+}
+
+static void draw_axes(EditBone *eBone, bPoseChannel *pchan)
+{
+ const float *col = (g_theme.const_color) ? g_theme.const_color :
+ (BONE_FLAG(eBone, pchan) & BONE_SELECTED) ? g_theme.text_hi_color : g_theme.text_color;
+
+ drw_shgroup_bone_axes(BONE_VAR(eBone, pchan, disp_mat), col);
+}
+
+static void draw_points(
+ const EditBone *eBone, const bPoseChannel *pchan, const bArmature *arm,
+ const int boneflag, const short constflag,
+ const int select_id)
+{
+ const float *col_solid_root = g_theme.bone_solid_color;
+ const float *col_solid_tail = g_theme.bone_solid_color;
+ const float *col_wire_root = (g_theme.const_color) ? g_theme.const_color : g_theme.vertex_color;
+ const float *col_wire_tail = (g_theme.const_color) ? g_theme.const_color : g_theme.vertex_color;
+
+ const bool is_envelope_draw = (arm->drawtype == ARM_ENVELOPE);
+ static const float envelope_ignore = -1.0f;
+
+ /* Edit bone points can be selected */
+ if (eBone) {
+ if (eBone->flag & BONE_ROOTSEL) {
+#ifdef USE_SOLID_COLOR
+ col_solid_root = g_theme.vertex_select_color;
+#else
+ if (is_envelope_draw) {
+ col_solid_root = g_theme.vertex_select_color;
+ }
+#endif
+ col_wire_root = g_theme.vertex_select_color;
+ }
+ if (eBone->flag & BONE_TIPSEL) {
+#ifdef USE_SOLID_COLOR
+ col_solid_tail = g_theme.vertex_select_color;
+#else
+ if (is_envelope_draw) {
+ col_solid_tail = g_theme.vertex_select_color;
+ }
+#endif
+ col_wire_tail = g_theme.vertex_select_color;
+ }
+ }
+ else if (arm->flag & ARM_POSEMODE) {
+ col_solid_root = col_solid_tail = get_bone_solid_color(eBone, pchan, arm, boneflag, constflag);
+ col_wire_root = col_wire_tail = get_bone_wire_color(eBone, pchan, arm, boneflag, constflag);
+ }
+
+ /* Draw the root point if the bone is not connected and its parent is not hidden */
+ if ((BONE_FLAG(eBone, pchan) & BONE_CONNECTED) == 0) {
+ if (select_id != -1) {
+ DRW_select_load_id(select_id | BONESEL_ROOT);
+ }
+
+ if (eBone) {
+ if (!((eBone->parent) && !EBONE_VISIBLE(arm, eBone->parent))) {
+ if (is_envelope_draw) {
+ drw_shgroup_bone_envelope_solid(eBone->disp_mat, col_solid_root,
+ &eBone->rad_head, &envelope_ignore);
+ drw_shgroup_bone_envelope_head_wire(eBone->disp_mat, col_wire_root,
+ &eBone->rad_head, &envelope_ignore, &envelope_ignore);
+ }
+ else {
+ drw_shgroup_bone_point_solid(eBone->disp_mat, col_solid_root);
+ drw_shgroup_bone_point_wire(eBone->disp_mat, col_wire_root);
+ }
+ }
+ }
+ else {
+ Bone *bone = pchan->bone;
+ if (!((bone->parent) && (bone->parent->flag & (BONE_HIDDEN_P | BONE_HIDDEN_PG)))) {
+ if (is_envelope_draw) {
+ drw_shgroup_bone_envelope_solid(pchan->disp_mat, col_solid_root,
+ &bone->rad_head, &envelope_ignore);
+ drw_shgroup_bone_envelope_head_wire(pchan->disp_mat, col_wire_root,
+ &bone->rad_head, &envelope_ignore, &envelope_ignore);
+ }
+ else {
+ drw_shgroup_bone_point_solid(pchan->disp_mat, col_solid_root);
+ drw_shgroup_bone_point_wire(pchan->disp_mat, col_wire_root);
+ }
+ }
+ }
+ }
+
+ /* Draw tip point */
+ if (select_id != -1) {
+ DRW_select_load_id(select_id | BONESEL_TIP);
+ }
+
+ if (is_envelope_draw) {
+ const float *rad_tail = eBone ? &eBone->rad_tail : &pchan->bone->rad_tail;
+ drw_shgroup_bone_envelope_solid(
+ BONE_VAR(eBone, pchan, disp_mat), col_solid_tail, &envelope_ignore, rad_tail);
+ drw_shgroup_bone_envelope_head_wire(
+ BONE_VAR(eBone, pchan, disp_mat), col_wire_tail, &envelope_ignore, rad_tail, &envelope_ignore);
+ }
+ else {
+ drw_shgroup_bone_point_solid(BONE_VAR(eBone, pchan, disp_tail_mat), col_solid_tail);
+ drw_shgroup_bone_point_wire(BONE_VAR(eBone, pchan, disp_tail_mat), col_wire_tail);
+ }
+
+ if (select_id != -1) {
+ DRW_select_load_id(-1);
+ }
+}
+
+/** \} */
+
+
+/* -------------------------------------------------------------------- */
+
+/** \name Draw Bones
+ * \{ */
+
+static void draw_bone_custom_shape(
+ EditBone *eBone, bPoseChannel *pchan, bArmature *arm,
+ const int boneflag, const short constflag,
+ const int select_id)
+{
+ const float *col_solid = get_bone_solid_color(eBone, pchan, arm, boneflag, constflag);
+ const float *col_wire = get_bone_wire_color(eBone, pchan, arm, boneflag, constflag);
+ const float (*disp_mat)[4] = pchan->disp_mat;
+
+ if (select_id != -1) {
+ DRW_select_load_id(select_id | BONESEL_BONE);
+ }
+
+ drw_shgroup_bone_custom_solid(disp_mat, col_solid, pchan->custom);
+ drw_shgroup_bone_custom_wire(disp_mat, col_wire, pchan->custom);
+
+ if (select_id != -1) {
+ DRW_select_load_id(-1);
+ }
+}
+
+static void draw_bone_envelope(
+ EditBone *eBone, bPoseChannel *pchan, bArmature *arm,
+ const int boneflag, const short constflag,
+ const int select_id)
+{
+ const float *col_solid = get_bone_solid_color(eBone, pchan, arm, boneflag, constflag);
+ const float *col_wire = get_bone_wire_color(eBone, pchan, arm, boneflag, constflag);
+
+ static const float col_white[4] = {1.0f, 1.0f, 1.0f, 0.2f};
+
+ float *rad_head, *rad_tail, *distance;
+ if (eBone) {
+ rad_tail = &eBone->rad_tail;
+ distance = &eBone->dist;
+ rad_head = (eBone->parent && (boneflag & BONE_CONNECTED)) ? &eBone->parent->rad_tail : &eBone->rad_head;
+ }
+ else {
+ rad_tail = &pchan->bone->rad_tail;
+ distance = &pchan->bone->dist;
+ rad_head = (pchan->parent && (boneflag & BONE_CONNECTED)) ? &pchan->parent->bone->rad_tail : &pchan->bone->rad_head;
+ }
+
+ if ((boneflag & BONE_NO_DEFORM) == 0 &&
+ ((boneflag & BONE_SELECTED) || (eBone && (boneflag & (BONE_ROOTSEL | BONE_TIPSEL)))))
+ {
+ drw_shgroup_bone_envelope_distance(BONE_VAR(eBone, pchan, disp_mat), col_white, rad_head, rad_tail, distance);
+ }
+
+ if (select_id != -1) {
+ DRW_select_load_id(select_id | BONESEL_BONE);
+ }
+
+ drw_shgroup_bone_envelope_solid(BONE_VAR(eBone, pchan, disp_mat), col_solid, rad_head, rad_tail);
+ drw_shgroup_bone_envelope_wire(BONE_VAR(eBone, pchan, disp_mat), col_wire, rad_head, rad_tail, distance);
+
+ if (select_id != -1) {
+ DRW_select_load_id(-1);
+ }
+
+ draw_points(eBone, pchan, arm, boneflag, constflag, select_id);
+}
+
+static void draw_bone_line(
+ EditBone *UNUSED(eBone), bPoseChannel *UNUSED(pchan), bArmature *UNUSED(arm),
+ const int UNUSED(boneflag), const short UNUSED(constflag),
+ const int UNUSED(select_id))
+{
+ /* work in progress -- fclem */
+}
+
+static void draw_bone_wire(
+ EditBone *eBone, bPoseChannel *pchan, bArmature *arm,
+ const int boneflag, const short constflag,
+ const int select_id)
+{
+ const float *col_wire = get_bone_wire_color(eBone, pchan, arm, boneflag, constflag);
+
+ if (select_id != -1) {
+ DRW_select_load_id(select_id | BONESEL_BONE);
+ }
+
+ if (pchan) {
+ Mat4 *bbones_mat = (Mat4 *)pchan->draw_data->bbone_matrix;
+ BLI_assert(bbones_mat != NULL);
+
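+ /* One matrix per B-Bone segment. */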
+ for (int i = pchan->bone->segments; i--; bbones_mat++) {
+ drw_shgroup_bone_wire_wire(bbones_mat->mat, col_wire);
+ }
+ }
+ else if (eBone) {
+ for (int i = 0; i < eBone->segments; i++) {
+ drw_shgroup_bone_wire_wire(eBone->disp_bbone_mat[i], col_wire);
+ }
+ }
+
+ if (select_id != -1) {
+ DRW_select_load_id(-1);
+ }
+
+ if (eBone) {
+ draw_points(eBone, pchan, arm, boneflag, constflag, select_id);
+ }
+}
+
+static void draw_bone_box(
+ EditBone *eBone, bPoseChannel *pchan, bArmature *arm,
+ const int boneflag, const short constflag,
+ const int select_id)
+{
+ const float *col_solid = get_bone_solid_color(eBone, pchan, arm, boneflag, constflag);
+ const float *col_wire = get_bone_wire_color(eBone, pchan, arm, boneflag, constflag);
+
+ if (select_id != -1) {
+ DRW_select_load_id(select_id | BONESEL_BONE);
+ }
+
+ if (pchan) {
+ Mat4 *bbones_mat = (Mat4 *)pchan->draw_data->bbone_matrix;
+ BLI_assert(bbones_mat != NULL);
+
+ for (int i = pchan->bone->segments; i--; bbones_mat++) {
+ drw_shgroup_bone_box_solid(bbones_mat->mat, col_solid);
+ drw_shgroup_bone_box_wire(bbones_mat->mat, col_wire);
+ }
+ }
+ else if (eBone) {
+ for (int i = 0; i < eBone->segments; i++) {
+ drw_shgroup_bone_box_solid(eBone->disp_bbone_mat[i], col_solid);
+ drw_shgroup_bone_box_wire(eBone->disp_bbone_mat[i], col_wire);
+ }
+ }
+
+ if (select_id != -1) {
+ DRW_select_load_id(-1);
+ }
+
+ if (eBone) {
+ draw_points(eBone, pchan, arm, boneflag, constflag, select_id);
+ }
+}
+
+static void draw_bone_octahedral(
+ EditBone *eBone, bPoseChannel *pchan, bArmature *arm,
+ const int boneflag, const short constflag,
+ const int select_id)
+{
+ const float *col_solid = get_bone_solid_color(eBone, pchan, arm, boneflag, constflag);
+ const float *col_wire = get_bone_wire_color(eBone, pchan, arm, boneflag, constflag);
+
+ if (select_id != -1) {
+ DRW_select_load_id(select_id | BONESEL_BONE);
+ }
+
+ drw_shgroup_bone_octahedral_solid(BONE_VAR(eBone, pchan, disp_mat), col_solid);
+ drw_shgroup_bone_octahedral_wire(BONE_VAR(eBone, pchan, disp_mat), col_wire);
+
+ if (select_id != -1) {
+ DRW_select_load_id(-1);
+ }
+
+ draw_points(eBone, pchan, arm, boneflag, constflag, select_id);
+}
+
+/** \} */
+
+
+/* -------------------------------------------------------------------- */
+
+/** \name Main Draw Loops
+ * \{ */
+
+static void draw_armature_edit(Object *ob)
+{
+ EditBone *eBone;
+ bArmature *arm = ob->data;
+ int index;
+ const bool is_select = DRW_state_is_select();
+
+ update_color(NULL);
+
+ const bool show_text = DRW_state_show_text();
+
+ for (eBone = arm->edbo->first, index = 0; eBone; eBone = eBone->next, index++) {
+ if (eBone->layer & arm->layer) {
+ if ((eBone->flag & BONE_HIDDEN_A) == 0) {
+ const int select_id = is_select ? index : (unsigned int)-1;
+
+ const short constflag = 0;
+
+ /* catch exception for bone with hidden parent */
+ int boneflag = eBone->flag;
+ if ((eBone->parent) && !EBONE_VISIBLE(arm, eBone->parent)) {
+ boneflag &= ~BONE_CONNECTED;
+ }
+
+ /* set temporary flag for drawing bone as active, but only if selected */
+ if (eBone == arm->act_edbone) {
+ boneflag |= BONE_DRAW_ACTIVE;
+ }
+
+ if (arm->drawtype == ARM_ENVELOPE) {
+ draw_bone_update_disp_matrix_default(eBone, NULL);
+ draw_bone_envelope(eBone, NULL, arm, boneflag, constflag, select_id);
+ }
+ else if (arm->drawtype == ARM_LINE) {
+ draw_bone_update_disp_matrix_default(eBone, NULL);
+ draw_bone_line(eBone, NULL, arm, boneflag, constflag, select_id);
+ }
+ else if (arm->drawtype == ARM_WIRE) {
+ draw_bone_update_disp_matrix_bbone(eBone, NULL);
+ draw_bone_wire(eBone, NULL, arm, boneflag, constflag, select_id);
+ }
+ else if (arm->drawtype == ARM_B_BONE) {
+ draw_bone_update_disp_matrix_bbone(eBone, NULL);
+ draw_bone_box(eBone, NULL, arm, boneflag, constflag, select_id);
+ }
+ else {
+ draw_bone_update_disp_matrix_default(eBone, NULL);
+ draw_bone_octahedral(eBone, NULL, arm, boneflag, constflag, select_id);
+ }
+
+ /* Draw the bone's name */
+ if (show_text && (arm->flag & ARM_DRAWNAMES)) {
+ unsigned char color[4];
+ UI_GetThemeColor4ubv((eBone->flag & BONE_SELECTED) ? TH_TEXT_HI : TH_TEXT, color);
+
+ float vec[3];
+ mid_v3_v3v3(vec, eBone->head, eBone->tail);
+ mul_m4_v3(ob->obmat, vec);
+
+ struct DRWTextStore *dt = DRW_text_cache_ensure();
+ DRW_text_cache_add(
+ dt, vec, eBone->name, strlen(eBone->name),
+ 10, DRW_TEXT_CACHE_GLOBALSPACE | DRW_TEXT_CACHE_STRING_PTR, color);
+ }
+
+ /* Draw additional axes */
+ if (arm->flag & ARM_DRAWAXES) {
+ draw_axes(eBone, NULL);
+ }
+ }
+ }
+ }
+}
+
+/* If const_color is NULL, use pose-mode coloring. */
+static void draw_armature_pose(Object *ob, const float const_color[4])
+{
+ bArmature *arm = ob->data;
+ bPoseChannel *pchan;
+ int index = -1;
+ Bone *bone;
+
+ update_color(const_color);
+
+ /* We can't safely draw a non-updated pose: it might contain NULL bone pointers... */
+ if (ob->pose->flag & POSE_RECALC) {
+ BKE_pose_rebuild(ob, arm);
+ }
+
+ // if (!(base->flag & OB_FROMDUPLI)) // TODO
+ {
+ const DRWContextState *draw_ctx = DRW_context_state_get();
+
+ if ((draw_ctx->object_mode & OB_MODE_POSE) || (ob == draw_ctx->object_pose)) {
+ arm->flag |= ARM_POSEMODE;
+ }
+
+ if (arm->flag & ARM_POSEMODE) {
+ index = ob->select_color;
+ }
+ }
+
+ const bool is_pose_select = (arm->flag & ARM_POSEMODE) && DRW_state_is_select();
+ const bool show_text = DRW_state_show_text();
+
+ /* being set below */
+ arm->layer_used = 0;
+
+ for (pchan = ob->pose->chanbase.first; pchan; pchan = pchan->next) {
+ bone = pchan->bone;
+ arm->layer_used |= bone->layer;
+
+ /* bone must be visible */
+ if ((bone->flag & (BONE_HIDDEN_P | BONE_HIDDEN_PG)) == 0) {
+ if (bone->layer & arm->layer) {
+ const int select_id = is_pose_select ? index : (unsigned int)-1;
+
+ const short constflag = pchan->constflag;
+
+ pchan_draw_data_init(pchan);
+
+ if (const_color) {
+ /* keep color */
+ }
+ else {
+ /* set color-set to use */
+ set_pchan_colorset(ob, pchan);
+ }
+
+ /* catch exception for bone with hidden parent */
+ int boneflag = bone->flag;
+ if ((bone->parent) && (bone->parent->flag & (BONE_HIDDEN_P | BONE_HIDDEN_PG))) {
+ boneflag &= ~BONE_CONNECTED;
+ }
+
+ /* set temporary flag for drawing bone as active, but only if selected */
+ if (bone == arm->act_bone)
+ boneflag |= BONE_DRAW_ACTIVE;
+
+
+ if ((pchan->custom) && !(arm->flag & ARM_NO_CUSTOM)) {
+ draw_bone_update_disp_matrix_custom(pchan);
+ draw_bone_custom_shape(NULL, pchan, arm, boneflag, constflag, select_id);
+ }
+ else if (arm->drawtype == ARM_ENVELOPE) {
+ draw_bone_update_disp_matrix_default(NULL, pchan);
+ draw_bone_envelope(NULL, pchan, arm, boneflag, constflag, select_id);
+ }
+ else if (arm->drawtype == ARM_LINE) {
+ draw_bone_update_disp_matrix_default(NULL, pchan);
+ draw_bone_line(NULL, pchan, arm, boneflag, constflag, select_id);
+ }
+ else if (arm->drawtype == ARM_WIRE) {
+ draw_bone_update_disp_matrix_bbone(NULL, pchan);
+ draw_bone_wire(NULL, pchan, arm, boneflag, constflag, select_id);
+ }
+ else if (arm->drawtype == ARM_B_BONE) {
+ draw_bone_update_disp_matrix_bbone(NULL, pchan);
+ draw_bone_box(NULL, pchan, arm, boneflag, constflag, select_id);
+ }
+ else {
+ draw_bone_update_disp_matrix_default(NULL, pchan);
+ draw_bone_octahedral(NULL, pchan, arm, boneflag, constflag, select_id);
+ }
+
+ /* Draw the bone's name */
+ if (show_text && (arm->flag & ARM_DRAWNAMES)) {
+ unsigned char color[4];
+ UI_GetThemeColor4ubv((arm->flag & ARM_POSEMODE) &&
+ (bone->flag & BONE_SELECTED) ? TH_TEXT_HI : TH_TEXT, color);
+ float vec[3];
+ mid_v3_v3v3(vec, pchan->pose_head, pchan->pose_tail);
+ mul_m4_v3(ob->obmat, vec);
+
+ struct DRWTextStore *dt = DRW_text_cache_ensure();
+ DRW_text_cache_add(
+ dt, vec, pchan->name, strlen(pchan->name),
+ 10, DRW_TEXT_CACHE_GLOBALSPACE | DRW_TEXT_CACHE_STRING_PTR, color);
+ }
+
+ /* Draw additional axes */
+ if (arm->flag & ARM_DRAWAXES) {
+ draw_axes(NULL, pchan);
+ }
+ }
+ }
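+ /* Each bone occupies its own 0x10000 block of selection ids. */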
+ if (is_pose_select) {
+ index += 0x10000;
+ }
+ }
+
+ arm->flag &= ~ARM_POSEMODE;
+}
+
+/**
+ * This function sets the object space to use for all subsequent `DRW_shgroup_bone_*` calls.
+ */
+static void drw_shgroup_armature(
+ Object *ob, DRWPass *pass_bone_solid, DRWPass *pass_bone_wire, DRWPass *pass_bone_envelope,
+ DRWShadingGroup *shgrp_relationship_lines)
+{
+ memset(&g_data, 0x0, sizeof(g_data));
+ g_data.ob = ob;
+
+ g_data.pass_bone_solid = pass_bone_solid;
+ g_data.pass_bone_wire = pass_bone_wire;
+ g_data.pass_bone_envelope = pass_bone_envelope;
+ g_data.relationship_lines = shgrp_relationship_lines;
+
+ memset(&g_color, 0x0, sizeof(g_color));
+}
+
+void DRW_shgroup_armature_object(
+ Object *ob, ViewLayer *view_layer, DRWPass *pass_bone_solid, DRWPass *pass_bone_wire, DRWPass *UNUSED(pass_bone_envelope),
+ DRWShadingGroup *shgrp_relationship_lines)
+{
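+ /* Draw with a single theme wire color; the envelope pass is unused here. */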
+ float *color;
+ DRW_object_wire_theme_get(ob, view_layer, &color);
+
+ drw_shgroup_armature(ob, pass_bone_solid, pass_bone_wire, NULL, shgrp_relationship_lines);
+ draw_armature_pose(ob, color);
+}
+
+void DRW_shgroup_armature_pose(
+ Object *ob, DRWPass *pass_bone_solid, DRWPass *pass_bone_wire, DRWPass *pass_bone_envelope,
+ DRWShadingGroup *shgrp_relationship_lines)
+{
+ drw_shgroup_armature(ob, pass_bone_solid, pass_bone_wire, pass_bone_envelope, shgrp_relationship_lines);
+ draw_armature_pose(ob, NULL);
+}
+
+void DRW_shgroup_armature_edit(
+ Object *ob, DRWPass *pass_bone_solid, DRWPass *pass_bone_wire, DRWPass *pass_bone_envelope,
+ DRWShadingGroup *shgrp_relationship_lines)
+{
+ drw_shgroup_armature(ob, pass_bone_solid, pass_bone_wire, pass_bone_envelope, shgrp_relationship_lines);
+ draw_armature_edit(ob);
+}
+
+/** \} */
diff --git a/source/blender/draw/intern/draw_cache.c b/source/blender/draw/intern/draw_cache.c
new file mode 100644
index 00000000000..301a39d053f
--- /dev/null
+++ b/source/blender/draw/intern/draw_cache.c
@@ -0,0 +1,2802 @@
+/*
+ * Copyright 2016, Blender Foundation.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * Contributor(s): Blender Institute
+ *
+ */
+
+/** \file draw_cache.c
+ * \ingroup draw
+ */
+
+
+#include "DNA_scene_types.h"
+#include "DNA_mesh_types.h"
+#include "DNA_curve_types.h"
+#include "DNA_object_types.h"
+#include "DNA_particle_types.h"
+#include "DNA_modifier_types.h"
+#include "DNA_lattice_types.h"
+
+#include "UI_resources.h"
+
+#include "BLI_utildefines.h"
+#include "BLI_math.h"
+
+#include "GPU_batch.h"
+
+#include "draw_cache.h"
+#include "draw_cache_impl.h"
+
+/* Batches only (freed as an array) */
+static struct DRWShapeCache {
+ Gwn_Batch *drw_single_vertice;
+ Gwn_Batch *drw_cursor;
+ Gwn_Batch *drw_fullscreen_quad;
+ Gwn_Batch *drw_quad;
+ Gwn_Batch *drw_sphere;
+ Gwn_Batch *drw_screenspace_circle;
+ Gwn_Batch *drw_plain_axes;
+ Gwn_Batch *drw_single_arrow;
+ Gwn_Batch *drw_cube;
+ Gwn_Batch *drw_circle;
+ Gwn_Batch *drw_square;
+ Gwn_Batch *drw_line;
+ Gwn_Batch *drw_line_endpoints;
+ Gwn_Batch *drw_empty_sphere;
+ Gwn_Batch *drw_empty_cone;
+ Gwn_Batch *drw_arrows;
+ Gwn_Batch *drw_axis_names;
+ Gwn_Batch *drw_image_plane;
+ Gwn_Batch *drw_image_plane_wire;
+ Gwn_Batch *drw_field_wind;
+ Gwn_Batch *drw_field_force;
+ Gwn_Batch *drw_field_vortex;
+ Gwn_Batch *drw_field_tube_limit;
+ Gwn_Batch *drw_field_cone_limit;
+ Gwn_Batch *drw_lamp;
+ Gwn_Batch *drw_lamp_shadows;
+ Gwn_Batch *drw_lamp_sunrays;
+ Gwn_Batch *drw_lamp_area;
+ Gwn_Batch *drw_lamp_hemi;
+ Gwn_Batch *drw_lamp_spot;
+ Gwn_Batch *drw_lamp_spot_square;
+ Gwn_Batch *drw_speaker;
+ Gwn_Batch *drw_lightprobe_cube;
+ Gwn_Batch *drw_lightprobe_planar;
+ Gwn_Batch *drw_lightprobe_grid;
+ Gwn_Batch *drw_bone_octahedral;
+ Gwn_Batch *drw_bone_octahedral_wire;
+ Gwn_Batch *drw_bone_box;
+ Gwn_Batch *drw_bone_box_wire;
+ Gwn_Batch *drw_bone_wire_wire;
+ Gwn_Batch *drw_bone_envelope;
+ Gwn_Batch *drw_bone_envelope_distance;
+ Gwn_Batch *drw_bone_envelope_wire;
+ Gwn_Batch *drw_bone_envelope_head_wire;
+ Gwn_Batch *drw_bone_point;
+ Gwn_Batch *drw_bone_point_wire;
+ Gwn_Batch *drw_bone_arrows;
+ Gwn_Batch *drw_camera;
+ Gwn_Batch *drw_camera_frame;
+ Gwn_Batch *drw_camera_tria;
+ Gwn_Batch *drw_camera_focus;
+ Gwn_Batch *drw_particle_cross;
+ Gwn_Batch *drw_particle_circle;
+ Gwn_Batch *drw_particle_axis;
+} SHC = {NULL};
+
+void DRW_shape_cache_free(void)
+{
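+ /* SHC holds only Gwn_Batch pointers, so it can be walked as a flat array. */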
+ uint i = sizeof(SHC) / sizeof(Gwn_Batch *);
+ Gwn_Batch **batch = (Gwn_Batch **)&SHC;
+ while (i--) {
+ GWN_BATCH_DISCARD_SAFE(*batch);
+ batch++;
+ }
+}
+
+
+/* -------------------------------------------------------------------- */
+
+/** \name Helper functions
+ * \{ */
+
+static void add_fancy_edge(
+ Gwn_VertBuf *vbo, unsigned int pos_id, unsigned int n1_id, unsigned int n2_id,
+ unsigned int *v_idx, const float co1[3], const float co2[3],
+ const float n1[3], const float n2[3])
+{
+ GWN_vertbuf_attr_set(vbo, n1_id, *v_idx, n1);
+ GWN_vertbuf_attr_set(vbo, n2_id, *v_idx, n2);
+ GWN_vertbuf_attr_set(vbo, pos_id, (*v_idx)++, co1);
+
+ GWN_vertbuf_attr_set(vbo, n1_id, *v_idx, n1);
+ GWN_vertbuf_attr_set(vbo, n2_id, *v_idx, n2);
+ GWN_vertbuf_attr_set(vbo, pos_id, (*v_idx)++, co2);
+}
+
+static void add_lat_lon_vert(
+ Gwn_VertBuf *vbo, unsigned int pos_id, unsigned int nor_id,
+ unsigned int *v_idx, const float rad, const float lat, const float lon)
+{
+ float pos[3], nor[3];
+ nor[0] = sinf(lat) * cosf(lon);
+ nor[1] = cosf(lat);
+ nor[2] = sinf(lat) * sinf(lon);
+ mul_v3_v3fl(pos, nor, rad);
+
+ GWN_vertbuf_attr_set(vbo, nor_id, *v_idx, nor);
+ GWN_vertbuf_attr_set(vbo, pos_id, (*v_idx)++, pos);
+}
+
+static Gwn_VertBuf *fill_arrows_vbo(const float scale)
+{
+ /* Position Only 3D format */
+ static Gwn_VertFormat format = { 0 };
+ static struct { uint pos; } attr_id;
+ if (format.attrib_ct == 0) {
+ attr_id.pos = GWN_vertformat_attr_add(&format, "pos", GWN_COMP_F32, 3, GWN_FETCH_FLOAT);
+ }
+
+ /* Line */
+ Gwn_VertBuf *vbo = GWN_vertbuf_create_with_format(&format);
+ GWN_vertbuf_data_alloc(vbo, 6 * 3);
+
+ float v1[3] = {0.0, 0.0, 0.0};
+ float v2[3] = {0.0, 0.0, 0.0};
+ float vtmp1[3], vtmp2[3];
+
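+ /* Three line segments per axis: the shaft plus two arrow-head strokes (6 vertices each). */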
+ for (int axis = 0; axis < 3; axis++) {
+ const int arrow_axis = (axis == 0) ? 1 : 0;
+
+ v2[axis] = 1.0f;
+ mul_v3_v3fl(vtmp1, v1, scale);
+ mul_v3_v3fl(vtmp2, v2, scale);
+ GWN_vertbuf_attr_set(vbo, attr_id.pos, axis * 6 + 0, vtmp1);
+ GWN_vertbuf_attr_set(vbo, attr_id.pos, axis * 6 + 1, vtmp2);
+
+ v1[axis] = 0.85f;
+ v1[arrow_axis] = -0.08f;
+ mul_v3_v3fl(vtmp1, v1, scale);
+ mul_v3_v3fl(vtmp2, v2, scale);
+ GWN_vertbuf_attr_set(vbo, attr_id.pos, axis * 6 + 2, vtmp1);
+ GWN_vertbuf_attr_set(vbo, attr_id.pos, axis * 6 + 3, vtmp2);
+
+ v1[arrow_axis] = 0.08f;
+ mul_v3_v3fl(vtmp1, v1, scale);
+ mul_v3_v3fl(vtmp2, v2, scale);
+ GWN_vertbuf_attr_set(vbo, attr_id.pos, axis * 6 + 4, vtmp1);
+ GWN_vertbuf_attr_set(vbo, attr_id.pos, axis * 6 + 5, vtmp2);
+
+ /* reset v1 & v2 to zero */
+ v1[arrow_axis] = v1[axis] = v2[axis] = 0.0f;
+ }
+
+ return vbo;
+}
+
+static Gwn_VertBuf *sphere_wire_vbo(const float rad)
+{
+#define NSEGMENTS 32
+ /* Position Only 3D format */
+ static Gwn_VertFormat format = { 0 };
+ static struct { uint pos; } attr_id;
+ if (format.attrib_ct == 0) {
+ attr_id.pos = GWN_vertformat_attr_add(&format, "pos", GWN_COMP_F32, 3, GWN_FETCH_FLOAT);
+ }
+
+ Gwn_VertBuf *vbo = GWN_vertbuf_create_with_format(&format);
+ GWN_vertbuf_data_alloc(vbo, NSEGMENTS * 2 * 3);
+
+ /* a single ring of vertices */
+ float p[NSEGMENTS][2];
+ for (int i = 0; i < NSEGMENTS; ++i) {
+ float angle = 2 * M_PI * ((float)i / (float)NSEGMENTS);
+ p[i][0] = rad * cosf(angle);
+ p[i][1] = rad * sinf(angle);
+ }
+
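+ /* Place the ring in the XY, XZ and YZ planes to form three orthogonal wire circles. */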
+ for (int axis = 0; axis < 3; ++axis) {
+ for (int i = 0; i < NSEGMENTS; ++i) {
+ for (int j = 0; j < 2; ++j) {
+ float cv[2], v[3];
+
+ cv[0] = p[(i + j) % NSEGMENTS][0];
+ cv[1] = p[(i + j) % NSEGMENTS][1];
+
+ if (axis == 0)
+ v[0] = cv[0], v[1] = cv[1], v[2] = 0.0f;
+ else if (axis == 1)
+ v[0] = cv[0], v[1] = 0.0f, v[2] = cv[1];
+ else
+ v[0] = 0.0f, v[1] = cv[0], v[2] = cv[1];
+
+ GWN_vertbuf_attr_set(vbo, attr_id.pos, i * 2 + j + (NSEGMENTS * 2 * axis), v);
+ }
+ }
+ }
+
+ return vbo;
+#undef NSEGMENTS
+}
+
+/* Quads */
+/* Use this one for rendering fullscreen passes. For 3D objects use DRW_cache_quad_get(). */
+Gwn_Batch *DRW_cache_fullscreen_quad_get(void)
+{
+ if (!SHC.drw_fullscreen_quad) {
+ /* Use a triangle instead of a real quad */
+ /* https://www.slideshare.net/DevCentralAMD/vertex-shader-tricks-bill-bilodeau - slide 14 */
+ float pos[3][2] = {{-1.0f, -1.0f}, { 3.0f, -1.0f}, {-1.0f, 3.0f}};
+ float uvs[3][2] = {{ 0.0f, 0.0f}, { 2.0f, 0.0f}, { 0.0f, 2.0f}};
+
+ /* Position Only 2D format */
+ static Gwn_VertFormat format = { 0 };
+ static struct { uint pos, uvs; } attr_id;
+ if (format.attrib_ct == 0) {
+ attr_id.pos = GWN_vertformat_attr_add(&format, "pos", GWN_COMP_F32, 2, GWN_FETCH_FLOAT);
+ attr_id.uvs = GWN_vertformat_attr_add(&format, "uvs", GWN_COMP_F32, 2, GWN_FETCH_FLOAT);
+ }
+
+ Gwn_VertBuf *vbo = GWN_vertbuf_create_with_format(&format);
+ GWN_vertbuf_data_alloc(vbo, 3);
+
+ for (int i = 0; i < 3; ++i) {
+ GWN_vertbuf_attr_set(vbo, attr_id.pos, i, pos[i]);
+ GWN_vertbuf_attr_set(vbo, attr_id.uvs, i, uvs[i]);
+ }
+
+ SHC.drw_fullscreen_quad = GWN_batch_create_ex(GWN_PRIM_TRIS, vbo, NULL, GWN_BATCH_OWNS_VBO);
+ }
+ return SHC.drw_fullscreen_quad;
+}
+
+/* Just a regular quad with 4 vertices. */
+Gwn_Batch *DRW_cache_quad_get(void)
+{
+ if (!SHC.drw_quad) {
+ float pos[4][2] = {{-1.0f, -1.0f}, { 1.0f, -1.0f}, {1.0f, 1.0f}, {-1.0f, 1.0f}};
+ float uvs[4][2] = {{ 0.0f, 0.0f}, { 1.0f, 0.0f}, {1.0f, 1.0f}, { 0.0f, 1.0f}};
+
+ /* Position Only 2D format */
+ static Gwn_VertFormat format = { 0 };
+ static struct { uint pos, uvs; } attr_id;
+ if (format.attrib_ct == 0) {
+ attr_id.pos = GWN_vertformat_attr_add(&format, "pos", GWN_COMP_F32, 2, GWN_FETCH_FLOAT);
+ attr_id.uvs = GWN_vertformat_attr_add(&format, "uvs", GWN_COMP_F32, 2, GWN_FETCH_FLOAT);
+ }
+
+ Gwn_VertBuf *vbo = GWN_vertbuf_create_with_format(&format);
+ GWN_vertbuf_data_alloc(vbo, 4);
+
+ for (int i = 0; i < 4; ++i) {
+ GWN_vertbuf_attr_set(vbo, attr_id.pos, i, pos[i]);
+ GWN_vertbuf_attr_set(vbo, attr_id.uvs, i, uvs[i]);
+ }
+
+ SHC.drw_quad = GWN_batch_create_ex(GWN_PRIM_TRI_FAN, vbo, NULL, GWN_BATCH_OWNS_VBO);
+ }
+ return SHC.drw_quad;
+}
+
+/* Sphere */
+Gwn_Batch *DRW_cache_sphere_get(void)
+{
+ if (!SHC.drw_sphere) {
+ SHC.drw_sphere = gpu_batch_sphere(32, 24);
+ }
+ return SHC.drw_sphere;
+}
+
+/** \} */
+
+/* -------------------------------------------------------------------- */
+
+/** \name Common
+ * \{ */
+
+Gwn_Batch *DRW_cache_cube_get(void)
+{
+ if (!SHC.drw_cube) {
+ const GLfloat verts[8][3] = {
+ {-1.0f, -1.0f, -1.0f},
+ {-1.0f, -1.0f, 1.0f},
+ {-1.0f, 1.0f, -1.0f},
+ {-1.0f, 1.0f, 1.0f},
+ { 1.0f, -1.0f, -1.0f},
+ { 1.0f, -1.0f, 1.0f},
+ { 1.0f, 1.0f, -1.0f},
+ { 1.0f, 1.0f, 1.0f}
+ };
+
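+ /* The 12 cube edges as a line list (24 indices). */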
+ const GLubyte indices[24] = {0, 1, 1, 3, 3, 2, 2, 0, 0, 4, 4, 5, 5, 7, 7, 6, 6, 4, 1, 5, 3, 7, 2, 6};
+
+ /* Position Only 3D format */
+ static Gwn_VertFormat format = { 0 };
+ static struct { uint pos; } attr_id;
+ if (format.attrib_ct == 0) {
+ attr_id.pos = GWN_vertformat_attr_add(&format, "pos", GWN_COMP_F32, 3, GWN_FETCH_FLOAT);
+ }
+
+ Gwn_VertBuf *vbo = GWN_vertbuf_create_with_format(&format);
+ GWN_vertbuf_data_alloc(vbo, 24);
+
+ for (int i = 0; i < 24; ++i) {
+ GWN_vertbuf_attr_set(vbo, attr_id.pos, i, verts[indices[i]]);
+ }
+
+ SHC.drw_cube = GWN_batch_create_ex(GWN_PRIM_LINES, vbo, NULL, GWN_BATCH_OWNS_VBO);
+ }
+ return SHC.drw_cube;
+}
+
+Gwn_Batch *DRW_cache_circle_get(void)
+{
+#define CIRCLE_RESOL 32
+ if (!SHC.drw_circle) {
+ float v[3] = {0.0f, 0.0f, 0.0f};
+
+ /* Position Only 3D format */
+ static Gwn_VertFormat format = { 0 };
+ static struct { uint pos; } attr_id;
+ if (format.attrib_ct == 0) {
+ attr_id.pos = GWN_vertformat_attr_add(&format, "pos", GWN_COMP_F32, 3, GWN_FETCH_FLOAT);
+ }
+
+ Gwn_VertBuf *vbo = GWN_vertbuf_create_with_format(&format);
+ GWN_vertbuf_data_alloc(vbo, CIRCLE_RESOL * 2);
+
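+ /* Circle in the XZ plane, expanded into a line list. */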
+ for (int a = 0; a < CIRCLE_RESOL; a++) {
+ v[0] = sinf((2.0f * M_PI * a) / ((float)CIRCLE_RESOL));
+ v[2] = cosf((2.0f * M_PI * a) / ((float)CIRCLE_RESOL));
+ v[1] = 0.0f;
+ GWN_vertbuf_attr_set(vbo, attr_id.pos, a * 2, v);
+
+ v[0] = sinf((2.0f * M_PI * (a + 1)) / ((float)CIRCLE_RESOL));
+ v[2] = cosf((2.0f * M_PI * (a + 1)) / ((float)CIRCLE_RESOL));
+ v[1] = 0.0f;
+ GWN_vertbuf_attr_set(vbo, attr_id.pos, a * 2 + 1, v);
+ }
+
+ SHC.drw_circle = GWN_batch_create_ex(GWN_PRIM_LINES, vbo, NULL, GWN_BATCH_OWNS_VBO);
+ }
+ return SHC.drw_circle;
+#undef CIRCLE_RESOL
+}
+
+Gwn_Batch *DRW_cache_square_get(void)
+{
+ if (!SHC.drw_square) {
+ float p[4][3] = {{ 1.0f, 0.0f, 1.0f},
+ { 1.0f, 0.0f, -1.0f},
+ {-1.0f, 0.0f, -1.0f},
+ {-1.0f, 0.0f, 1.0f}};
+
+ /* Position Only 3D format */
+ static Gwn_VertFormat format = { 0 };
+ static struct { uint pos; } attr_id;
+ if (format.attrib_ct == 0) {
+ attr_id.pos = GWN_vertformat_attr_add(&format, "pos", GWN_COMP_F32, 3, GWN_FETCH_FLOAT);
+ }
+
+ Gwn_VertBuf *vbo = GWN_vertbuf_create_with_format(&format);
+ GWN_vertbuf_data_alloc(vbo, 8);
+
+ for (int i = 0; i < 4; i++) {
+ GWN_vertbuf_attr_set(vbo, attr_id.pos, i * 2, p[i % 4]);
+ GWN_vertbuf_attr_set(vbo, attr_id.pos, i * 2 + 1, p[(i + 1) % 4]);
+ }
+
+ SHC.drw_square = GWN_batch_create_ex(GWN_PRIM_LINES, vbo, NULL, GWN_BATCH_OWNS_VBO);
+ }
+ return SHC.drw_square;
+}
+
+Gwn_Batch *DRW_cache_single_line_get(void)
+{
+ /* Z axis line */
+ if (!SHC.drw_line) {
+ float v1[3] = {0.0f, 0.0f, 0.0f};
+ float v2[3] = {0.0f, 0.0f, 1.0f};
+
+ /* Position Only 3D format */
+ static Gwn_VertFormat format = { 0 };
+ static struct { uint pos; } attr_id;
+ if (format.attrib_ct == 0) {
+ attr_id.pos = GWN_vertformat_attr_add(&format, "pos", GWN_COMP_F32, 3, GWN_FETCH_FLOAT);
+ }
+
+ Gwn_VertBuf *vbo = GWN_vertbuf_create_with_format(&format);
+ GWN_vertbuf_data_alloc(vbo, 2);
+
+ GWN_vertbuf_attr_set(vbo, attr_id.pos, 0, v1);
+ GWN_vertbuf_attr_set(vbo, attr_id.pos, 1, v2);
+
+ SHC.drw_line = GWN_batch_create_ex(GWN_PRIM_LINES, vbo, NULL, GWN_BATCH_OWNS_VBO);
+ }
+ return SHC.drw_line;
+}
+
+Gwn_Batch *DRW_cache_single_line_endpoints_get(void)
+{
+ /* Z axis line */
+ if (!SHC.drw_line_endpoints) {
+ float v1[3] = {0.0f, 0.0f, 0.0f};
+ float v2[3] = {0.0f, 0.0f, 1.0f};
+
+ /* Position Only 3D format */
+ static Gwn_VertFormat format = { 0 };
+ static struct { uint pos; } attr_id;
+ if (format.attrib_ct == 0) {
+ attr_id.pos = GWN_vertformat_attr_add(&format, "pos", GWN_COMP_F32, 3, GWN_FETCH_FLOAT);
+ }
+
+ Gwn_VertBuf *vbo = GWN_vertbuf_create_with_format(&format);
+ GWN_vertbuf_data_alloc(vbo, 2);
+
+ GWN_vertbuf_attr_set(vbo, attr_id.pos, 0, v1);
+ GWN_vertbuf_attr_set(vbo, attr_id.pos, 1, v2);
+
+ SHC.drw_line_endpoints = GWN_batch_create_ex(GWN_PRIM_POINTS, vbo, NULL, GWN_BATCH_OWNS_VBO);
+ }
+ return SHC.drw_line_endpoints;
+}
+
+Gwn_Batch *DRW_cache_screenspace_circle_get(void)
+{
+#define CIRCLE_RESOL 32
+ if (!SHC.drw_screenspace_circle) {
+ float v[3] = {0.0f, 0.0f, 0.0f};
+
+ /* Position Only 3D format */
+ static Gwn_VertFormat format = { 0 };
+ static struct { uint pos; } attr_id;
+ if (format.attrib_ct == 0) {
+ attr_id.pos = GWN_vertformat_attr_add(&format, "pos", GWN_COMP_F32, 3, GWN_FETCH_FLOAT);
+ }
+
+ Gwn_VertBuf *vbo = GWN_vertbuf_create_with_format(&format);
+ GWN_vertbuf_data_alloc(vbo, CIRCLE_RESOL + 1);
+
+ for (int a = 0; a <= CIRCLE_RESOL; a++) {
+ v[0] = sinf((2.0f * M_PI * a) / ((float)CIRCLE_RESOL));
+ v[1] = cosf((2.0f * M_PI * a) / ((float)CIRCLE_RESOL));
+ GWN_vertbuf_attr_set(vbo, attr_id.pos, a, v);
+ }
+
+ SHC.drw_screenspace_circle = GWN_batch_create_ex(GWN_PRIM_LINE_STRIP, vbo, NULL, GWN_BATCH_OWNS_VBO);
+ }
+ return SHC.drw_screenspace_circle;
+#undef CIRCLE_RESOL
+}
+
+/** \} */
+
+/* -------------------------------------------------------------------- */
+
+/** \name Common Object API
+ * \{ */
+
+Gwn_Batch *DRW_cache_object_wire_outline_get(Object *ob)
+{
+ switch (ob->type) {
+ case OB_MESH:
+ return DRW_cache_mesh_wire_outline_get(ob);
+
+ /* TODO, should match 'DRW_cache_object_surface_get' */
+ default:
+ return NULL;
+ }
+}
+
+Gwn_Batch *DRW_cache_object_surface_get(Object *ob)
+{
+ switch (ob->type) {
+ case OB_MESH:
+ return DRW_cache_mesh_surface_get(ob);
+ case OB_CURVE:
+ return DRW_cache_curve_surface_get(ob);
+ case OB_SURF:
+ return DRW_cache_surf_surface_get(ob);
+ case OB_FONT:
+ return DRW_cache_text_surface_get(ob);
+ case OB_MBALL:
+ return DRW_cache_mball_surface_get(ob);
+ default:
+ return NULL;
+ }
+}
+
+Gwn_Batch **DRW_cache_object_surface_material_get(
+ struct Object *ob, struct GPUMaterial **gpumat_array, uint gpumat_array_len)
+{
+ switch (ob->type) {
+ case OB_MESH:
+ return DRW_cache_mesh_surface_shaded_get(ob, gpumat_array, gpumat_array_len);
+ case OB_CURVE:
+ return DRW_cache_curve_surface_shaded_get(ob, gpumat_array, gpumat_array_len);
+ case OB_SURF:
+ return DRW_cache_surf_surface_shaded_get(ob, gpumat_array, gpumat_array_len);
+ case OB_FONT:
+ return DRW_cache_text_surface_shaded_get(ob, gpumat_array, gpumat_array_len);
+ default:
+ return NULL;
+ }
+}
+
+/** \} */
+
+
+/* -------------------------------------------------------------------- */
+
+/** \name Empties
+ * \{ */
+
+Gwn_Batch *DRW_cache_plain_axes_get(void)
+{
+ if (!SHC.drw_plain_axes) {
+ int axis;
+ float v1[3] = {0.0f, 0.0f, 0.0f};
+ float v2[3] = {0.0f, 0.0f, 0.0f};
+
+ /* Position Only 3D format */
+ static Gwn_VertFormat format = { 0 };
+ static struct { uint pos; } attr_id;
+ if (format.attrib_ct == 0) {
+ attr_id.pos = GWN_vertformat_attr_add(&format, "pos", GWN_COMP_F32, 3, GWN_FETCH_FLOAT);
+ }
+
+ Gwn_VertBuf *vbo = GWN_vertbuf_create_with_format(&format);
+ GWN_vertbuf_data_alloc(vbo, 6);
+
+ for (axis = 0; axis < 3; axis++) {
+ v1[axis] = 1.0f;
+ v2[axis] = -1.0f;
+
+ GWN_vertbuf_attr_set(vbo, attr_id.pos, axis * 2, v1);
+ GWN_vertbuf_attr_set(vbo, attr_id.pos, axis * 2 + 1, v2);
+
+ /* reset v1 & v2 to zero for next axis */
+ v1[axis] = v2[axis] = 0.0f;
+ }
+
+ SHC.drw_plain_axes = GWN_batch_create_ex(GWN_PRIM_LINES, vbo, NULL, GWN_BATCH_OWNS_VBO);
+ }
+ return SHC.drw_plain_axes;
+}
+
+Gwn_Batch *DRW_cache_single_arrow_get(void)
+{
+ if (!SHC.drw_single_arrow) {
+ float v1[3] = {0.0f, 0.0f, 1.0f}, v2[3], v3[3];
+
+ /* Position Only 3D format */
+ static Gwn_VertFormat format = { 0 };
+ static struct { uint pos; } attr_id;
+ if (format.attrib_ct == 0) {
+ attr_id.pos = GWN_vertformat_attr_add(&format, "pos", GWN_COMP_F32, 3, GWN_FETCH_FLOAT);
+ }
+
+ /* Square Pyramid */
+ Gwn_VertBuf *vbo = GWN_vertbuf_create_with_format(&format);
+ GWN_vertbuf_data_alloc(vbo, 12);
+
+ v2[0] = 0.035f; v2[1] = 0.035f;
+ v3[0] = -0.035f; v3[1] = 0.035f;
+ v2[2] = v3[2] = 0.75f;
+
+ for (int sides = 0; sides < 4; sides++) {
+ if (sides % 2 == 1) {
+ v2[0] = -v2[0];
+ v3[1] = -v3[1];
+ }
+ else {
+ v2[1] = -v2[1];
+ v3[0] = -v3[0];
+ }
+
+ GWN_vertbuf_attr_set(vbo, attr_id.pos, sides * 3 + 0, v1);
+ GWN_vertbuf_attr_set(vbo, attr_id.pos, sides * 3 + 1, v2);
+ GWN_vertbuf_attr_set(vbo, attr_id.pos, sides * 3 + 2, v3);
+ }
+
+ SHC.drw_single_arrow = GWN_batch_create_ex(GWN_PRIM_TRIS, vbo, NULL, GWN_BATCH_OWNS_VBO);
+ }
+ return SHC.drw_single_arrow;
+}
+
+Gwn_Batch *DRW_cache_empty_sphere_get(void)
+{
+ if (!SHC.drw_empty_sphere) {
+ Gwn_VertBuf *vbo = sphere_wire_vbo(1.0f);
+ SHC.drw_empty_sphere = GWN_batch_create_ex(GWN_PRIM_LINES, vbo, NULL, GWN_BATCH_OWNS_VBO);
+ }
+ return SHC.drw_empty_sphere;
+}
+
+Gwn_Batch *DRW_cache_empty_cone_get(void)
+{
+#define NSEGMENTS 8
+ if (!SHC.drw_empty_cone) {
+ /* a single ring of vertices */
+ float p[NSEGMENTS][2];
+ for (int i = 0; i < NSEGMENTS; ++i) {
+ float angle = 2 * M_PI * ((float)i / (float)NSEGMENTS);
+ p[i][0] = cosf(angle);
+ p[i][1] = sinf(angle);
+ }
+
+ /* Position Only 3D format */
+ static Gwn_VertFormat format = { 0 };
+ static struct { uint pos; } attr_id;
+ if (format.attrib_ct == 0) {
+ attr_id.pos = GWN_vertformat_attr_add(&format, "pos", GWN_COMP_F32, 3, GWN_FETCH_FLOAT);
+ }
+
+ Gwn_VertBuf *vbo = GWN_vertbuf_create_with_format(&format);
+ GWN_vertbuf_data_alloc(vbo, NSEGMENTS * 4);
+
+ for (int i = 0; i < NSEGMENTS; ++i) {
+ float cv[2], v[3];
+ cv[0] = p[(i) % NSEGMENTS][0];
+ cv[1] = p[(i) % NSEGMENTS][1];
+
+ /* cone sides */
+ v[0] = cv[0], v[1] = 0.0f, v[2] = cv[1];
+ GWN_vertbuf_attr_set(vbo, attr_id.pos, i * 4, v);
+ v[0] = 0.0f, v[1] = 2.0f, v[2] = 0.0f;
+ GWN_vertbuf_attr_set(vbo, attr_id.pos, i * 4 + 1, v);
+
+ /* end ring */
+ v[0] = cv[0], v[1] = 0.0f, v[2] = cv[1];
+ GWN_vertbuf_attr_set(vbo, attr_id.pos, i * 4 + 2, v);
+ cv[0] = p[(i + 1) % NSEGMENTS][0];
+ cv[1] = p[(i + 1) % NSEGMENTS][1];
+ v[0] = cv[0], v[1] = 0.0f, v[2] = cv[1];
+ GWN_vertbuf_attr_set(vbo, attr_id.pos, i * 4 + 3, v);
+ }
+
+ SHC.drw_empty_cone = GWN_batch_create_ex(GWN_PRIM_LINES, vbo, NULL, GWN_BATCH_OWNS_VBO);
+ }
+ return SHC.drw_empty_cone;
+#undef NSEGMENTS
+}
+
+Gwn_Batch *DRW_cache_arrows_get(void)
+{
+ if (!SHC.drw_arrows) {
+ Gwn_VertBuf *vbo = fill_arrows_vbo(1.0f);
+
+ SHC.drw_arrows = GWN_batch_create_ex(GWN_PRIM_LINES, vbo, NULL, GWN_BATCH_OWNS_VBO);
+ }
+ return SHC.drw_arrows;
+}
+
+Gwn_Batch *DRW_cache_axis_names_get(void)
+{
+ if (!SHC.drw_axis_names) {
+ const float size = 0.1f;
+ float v1[3], v2[3];
+
+ /* Position Only 3D format */
+ static Gwn_VertFormat format = { 0 };
+ static struct { uint pos; } attr_id;
+ if (format.attrib_ct == 0) {
+ /* Using 3rd component as axis indicator */
+ attr_id.pos = GWN_vertformat_attr_add(&format, "pos", GWN_COMP_F32, 3, GWN_FETCH_FLOAT);
+ }
+
+ /* Line */
+ Gwn_VertBuf *vbo = GWN_vertbuf_create_with_format(&format);
+ GWN_vertbuf_data_alloc(vbo, 14);
+
+ /* X */
+ copy_v3_fl3(v1, -size, size, 0.0f);
+ copy_v3_fl3(v2, size, -size, 0.0f);
+ GWN_vertbuf_attr_set(vbo, attr_id.pos, 0, v1);
+ GWN_vertbuf_attr_set(vbo, attr_id.pos, 1, v2);
+
+ copy_v3_fl3(v1, size, size, 0.0f);
+ copy_v3_fl3(v2, -size, -size, 0.0f);
+ GWN_vertbuf_attr_set(vbo, attr_id.pos, 2, v1);
+ GWN_vertbuf_attr_set(vbo, attr_id.pos, 3, v2);
+
+ /* Y */
+ copy_v3_fl3(v1, -size + 0.25f * size, size, 1.0f);
+ copy_v3_fl3(v2, 0.0f, 0.0f, 1.0f);
+ GWN_vertbuf_attr_set(vbo, attr_id.pos, 4, v1);
+ GWN_vertbuf_attr_set(vbo, attr_id.pos, 5, v2);
+
+ copy_v3_fl3(v1, size - 0.25f * size, size, 1.0f);
+ copy_v3_fl3(v2, -size + 0.25f * size, -size, 1.0f);
+ GWN_vertbuf_attr_set(vbo, attr_id.pos, 6, v1);
+ GWN_vertbuf_attr_set(vbo, attr_id.pos, 7, v2);
+
+ /* Z */
+ copy_v3_fl3(v1, -size, size, 2.0f);
+ copy_v3_fl3(v2, size, size, 2.0f);
+ GWN_vertbuf_attr_set(vbo, attr_id.pos, 8, v1);
+ GWN_vertbuf_attr_set(vbo, attr_id.pos, 9, v2);
+
+ copy_v3_fl3(v1, size, size, 2.0f);
+ copy_v3_fl3(v2, -size, -size, 2.0f);
+ GWN_vertbuf_attr_set(vbo, attr_id.pos, 10, v1);
+ GWN_vertbuf_attr_set(vbo, attr_id.pos, 11, v2);
+
+ copy_v3_fl3(v1, -size, -size, 2.0f);
+ copy_v3_fl3(v2, size, -size, 2.0f);
+ GWN_vertbuf_attr_set(vbo, attr_id.pos, 12, v1);
+ GWN_vertbuf_attr_set(vbo, attr_id.pos, 13, v2);
+
+ SHC.drw_axis_names = GWN_batch_create_ex(GWN_PRIM_LINES, vbo, NULL, GWN_BATCH_OWNS_VBO);
+ }
+ return SHC.drw_axis_names;
+}
+
+Gwn_Batch *DRW_cache_image_plane_get(void)
+{
+ if (!SHC.drw_image_plane) {
+ const float quad[4][2] = {{0, 0}, {1, 0}, {1, 1}, {0, 1}};
+ static Gwn_VertFormat format = { 0 };
+ static struct { uint pos, texCoords; } attr_id;
+ if (format.attrib_ct == 0) {
+ attr_id.pos = GWN_vertformat_attr_add(&format, "pos", GWN_COMP_F32, 2, GWN_FETCH_FLOAT);
+ attr_id.texCoords = GWN_vertformat_attr_add(&format, "texCoord", GWN_COMP_F32, 2, GWN_FETCH_FLOAT);
+ }
+ Gwn_VertBuf *vbo = GWN_vertbuf_create_with_format(&format);
+ GWN_vertbuf_data_alloc(vbo, 4);
+ for (uint j = 0; j < 4; j++) {
+ GWN_vertbuf_attr_set(vbo, attr_id.pos, j, quad[j]);
+ GWN_vertbuf_attr_set(vbo, attr_id.texCoords, j, quad[j]);
+ }
+ SHC.drw_image_plane = GWN_batch_create_ex(GWN_PRIM_TRI_FAN, vbo, NULL, GWN_BATCH_OWNS_VBO);
+ }
+ return SHC.drw_image_plane;
+}
+
+Gwn_Batch *DRW_cache_image_plane_wire_get(void)
+{
+ if (!SHC.drw_image_plane_wire) {
+ const float quad[4][2] = {{0, 0}, {1, 0}, {1, 1}, {0, 1}};
+ static Gwn_VertFormat format = { 0 };
+ static struct { uint pos; } attr_id;
+ if (format.attrib_ct == 0) {
+ attr_id.pos = GWN_vertformat_attr_add(&format, "pos", GWN_COMP_F32, 2, GWN_FETCH_FLOAT);
+ }
+ Gwn_VertBuf *vbo = GWN_vertbuf_create_with_format(&format);
+ GWN_vertbuf_data_alloc(vbo, 4);
+ for (uint j = 0; j < 4; j++) {
+ GWN_vertbuf_attr_set(vbo, attr_id.pos, j, quad[j]);
+ }
+ SHC.drw_image_plane_wire = GWN_batch_create_ex(GWN_PRIM_LINE_LOOP, vbo, NULL, GWN_BATCH_OWNS_VBO);
+ }
+ return SHC.drw_image_plane_wire;
+}
+
+/* Force Field */
+Gwn_Batch *DRW_cache_field_wind_get(void)
+{
+#define CIRCLE_RESOL 32
+ if (!SHC.drw_field_wind) {
+ float v[3] = {0.0f, 0.0f, 0.0f};
+
+ /* Position Only 3D format */
+ static Gwn_VertFormat format = { 0 };
+ static struct { uint pos; } attr_id;
+ if (format.attrib_ct == 0) {
+ attr_id.pos = GWN_vertformat_attr_add(&format, "pos", GWN_COMP_F32, 3, GWN_FETCH_FLOAT);
+ }
+
+ Gwn_VertBuf *vbo = GWN_vertbuf_create_with_format(&format);
+ GWN_vertbuf_data_alloc(vbo, CIRCLE_RESOL * 2 * 4);
+
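+ /* Four circles stacked along +Z. */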
+ for (int i = 0; i < 4; i++) {
+ float z = 0.05f * (float)i;
+ for (int a = 0; a < CIRCLE_RESOL; a++) {
+ v[0] = sinf((2.0f * M_PI * a) / ((float)CIRCLE_RESOL));
+ v[1] = cosf((2.0f * M_PI * a) / ((float)CIRCLE_RESOL));
+ v[2] = z;
+ GWN_vertbuf_attr_set(vbo, attr_id.pos, i * CIRCLE_RESOL * 2 + a * 2, v);
+
+ v[0] = sinf((2.0f * M_PI * (a + 1)) / ((float)CIRCLE_RESOL));
+ v[1] = cosf((2.0f * M_PI * (a + 1)) / ((float)CIRCLE_RESOL));
+ v[2] = z;
+ GWN_vertbuf_attr_set(vbo, attr_id.pos, i * CIRCLE_RESOL * 2 + a * 2 + 1, v);
+ }
+ }
+
+ SHC.drw_field_wind = GWN_batch_create_ex(GWN_PRIM_LINES, vbo, NULL, GWN_BATCH_OWNS_VBO);
+ }
+ return SHC.drw_field_wind;
+#undef CIRCLE_RESOL
+}
+
+Gwn_Batch *DRW_cache_field_force_get(void)
+{
+#define CIRCLE_RESOL 32
+ if (!SHC.drw_field_force) {
+ float v[3] = {0.0f, 0.0f, 0.0f};
+
+ /* Position Only 3D format */
+ static Gwn_VertFormat format = { 0 };
+ static struct { uint pos; } attr_id;
+ if (format.attrib_ct == 0) {
+ attr_id.pos = GWN_vertformat_attr_add(&format, "pos", GWN_COMP_F32, 3, GWN_FETCH_FLOAT);
+ }
+
+ Gwn_VertBuf *vbo = GWN_vertbuf_create_with_format(&format);
+ GWN_vertbuf_data_alloc(vbo, CIRCLE_RESOL * 2 * 3);
+
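+ /* Three concentric circles of increasing radius. */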
+ for (int i = 0; i < 3; i++) {
+ float radius = 1.0f + 0.5f * (float)i;
+ for (int a = 0; a < CIRCLE_RESOL; a++) {
+ v[0] = radius * sinf((2.0f * M_PI * a) / ((float)CIRCLE_RESOL));
+ v[1] = radius * cosf((2.0f * M_PI * a) / ((float)CIRCLE_RESOL));
+ v[2] = 0.0f;
+ GWN_vertbuf_attr_set(vbo, attr_id.pos, i * CIRCLE_RESOL * 2 + a * 2, v);
+
+ v[0] = radius * sinf((2.0f * M_PI * (a + 1)) / ((float)CIRCLE_RESOL));
+ v[1] = radius * cosf((2.0f * M_PI * (a + 1)) / ((float)CIRCLE_RESOL));
+ v[2] = 0.0f;
+ GWN_vertbuf_attr_set(vbo, attr_id.pos, i * CIRCLE_RESOL * 2 + a * 2 + 1, v);
+ }
+ }
+
+ SHC.drw_field_force = GWN_batch_create_ex(GWN_PRIM_LINES, vbo, NULL, GWN_BATCH_OWNS_VBO);
+ }
+ return SHC.drw_field_force;
+#undef CIRCLE_RESOL
+}
+
+Gwn_Batch *DRW_cache_field_vortex_get(void)
+{
+#define SPIRAL_RESOL 32
+ if (!SHC.drw_field_vortex) {
+ float v[3] = {0.0f, 0.0f, 0.0f};
+ unsigned int v_idx = 0;
+
+ /* Position Only 3D format */
+ static Gwn_VertFormat format = { 0 };
+ static struct { uint pos; } attr_id;
+ if (format.attrib_ct == 0) {
+ attr_id.pos = GWN_vertformat_attr_add(&format, "pos", GWN_COMP_F32, 3, GWN_FETCH_FLOAT);
+ }
+
+ Gwn_VertBuf *vbo = GWN_vertbuf_create_with_format(&format);
+ GWN_vertbuf_data_alloc(vbo, SPIRAL_RESOL * 2 + 1);
+
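+ /* Two mirrored spiral arms meeting at the center. */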
+ for (int a = SPIRAL_RESOL; a > -1; a--) {
+ v[0] = sinf((2.0f * M_PI * a) / ((float)SPIRAL_RESOL)) * (a / (float)SPIRAL_RESOL);
+ v[1] = cosf((2.0f * M_PI * a) / ((float)SPIRAL_RESOL)) * (a / (float)SPIRAL_RESOL);
+
+ GWN_vertbuf_attr_set(vbo, attr_id.pos, v_idx++, v);
+ }
+
+ for (int a = 1; a <= SPIRAL_RESOL; a++) {
+ v[0] = -sinf((2.0f * M_PI * a) / ((float)SPIRAL_RESOL)) * (a / (float)SPIRAL_RESOL);
+ v[1] = -cosf((2.0f * M_PI * a) / ((float)SPIRAL_RESOL)) * (a / (float)SPIRAL_RESOL);
+
+ GWN_vertbuf_attr_set(vbo, attr_id.pos, v_idx++, v);
+ }
+
+ SHC.drw_field_vortex = GWN_batch_create_ex(GWN_PRIM_LINE_STRIP, vbo, NULL, GWN_BATCH_OWNS_VBO);
+ }
+ return SHC.drw_field_vortex;
+#undef SPIRAL_RESOL
+}
+
+Gwn_Batch *DRW_cache_field_tube_limit_get(void)
+{
+#define CIRCLE_RESOL 32
+ if (!SHC.drw_field_tube_limit) {
+ float v[3] = {0.0f, 0.0f, 0.0f};
+ unsigned int v_idx = 0;
+
+ /* Position Only 3D format */
+ static Gwn_VertFormat format = { 0 };
+ static struct { uint pos; } attr_id;
+ if (format.attrib_ct == 0) {
+ attr_id.pos = GWN_vertformat_attr_add(&format, "pos", GWN_COMP_F32, 3, GWN_FETCH_FLOAT);
+ }
+
+ Gwn_VertBuf *vbo = GWN_vertbuf_create_with_format(&format);
+ GWN_vertbuf_data_alloc(vbo, CIRCLE_RESOL * 2 * 2 + 8);
+
+ /* Caps */
+ for (int i = 0; i < 2; i++) {
+ float z = (float)i * 2.0f - 1.0f;
+ for (int a = 0; a < CIRCLE_RESOL; a++) {
+ v[0] = sinf((2.0f * M_PI * a) / ((float)CIRCLE_RESOL));
+ v[1] = cosf((2.0f * M_PI * a) / ((float)CIRCLE_RESOL));
+ v[2] = z;
+ GWN_vertbuf_attr_set(vbo, attr_id.pos, v_idx++, v);
+
+ v[0] = sinf((2.0f * M_PI * (a + 1)) / ((float)CIRCLE_RESOL));
+ v[1] = cosf((2.0f * M_PI * (a + 1)) / ((float)CIRCLE_RESOL));
+ v[2] = z;
+ GWN_vertbuf_attr_set(vbo, attr_id.pos, v_idx++, v);
+ }
+ }
+ /* Side Edges */
+ for (int a = 0; a < 4; a++) {
+ for (int i = 0; i < 2; i++) {
+ float z = (float)i * 2.0f - 1.0f;
+ v[0] = sinf((2.0f * M_PI * a) / 4.0f);
+ v[1] = cosf((2.0f * M_PI * a) / 4.0f);
+ v[2] = z;
+ GWN_vertbuf_attr_set(vbo, attr_id.pos, v_idx++, v);
+ }
+ }
+
+ SHC.drw_field_tube_limit = GWN_batch_create_ex(GWN_PRIM_LINES, vbo, NULL, GWN_BATCH_OWNS_VBO);
+ }
+ return SHC.drw_field_tube_limit;
+#undef CIRCLE_RESOL
+}
+
+Gwn_Batch *DRW_cache_field_cone_limit_get(void)
+{
+#define CIRCLE_RESOL 32
+ if (!SHC.drw_field_cone_limit) {
+ float v[3] = {0.0f, 0.0f, 0.0f};
+ unsigned int v_idx = 0;
+
+ /* Position Only 3D format */
+ static Gwn_VertFormat format = { 0 };
+ static struct { uint pos; } attr_id;
+ if (format.attrib_ct == 0) {
+ attr_id.pos = GWN_vertformat_attr_add(&format, "pos", GWN_COMP_F32, 3, GWN_FETCH_FLOAT);
+ }
+
+ Gwn_VertBuf *vbo = GWN_vertbuf_create_with_format(&format);
+ GWN_vertbuf_data_alloc(vbo, CIRCLE_RESOL * 2 * 2 + 8);
+
+ /* Caps */
+ for (int i = 0; i < 2; i++) {
+ float z = (float)i * 2.0f - 1.0f;
+ for (int a = 0; a < CIRCLE_RESOL; a++) {
+ v[0] = sinf((2.0f * M_PI * a) / ((float)CIRCLE_RESOL));
+ v[1] = cosf((2.0f * M_PI * a) / ((float)CIRCLE_RESOL));
+ v[2] = z;
+ GWN_vertbuf_attr_set(vbo, attr_id.pos, v_idx++, v);
+
+ v[0] = sinf((2.0f * M_PI * (a + 1)) / ((float)CIRCLE_RESOL));
+ v[1] = cosf((2.0f * M_PI * (a + 1)) / ((float)CIRCLE_RESOL));
+ v[2] = z;
+ GWN_vertbuf_attr_set(vbo, attr_id.pos, v_idx++, v);
+ }
+ }
+ /* Side Edges */
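+ /* Each side edge is scaled by z, so it passes through the origin and connects opposite points of the two end circles. */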
+ for (int a = 0; a < 4; a++) {
+ for (int i = 0; i < 2; i++) {
+ float z = (float)i * 2.0f - 1.0f;
+ v[0] = z * sinf((2.0f * M_PI * a) / 4.0f);
+ v[1] = z * cosf((2.0f * M_PI * a) / 4.0f);
+ v[2] = z;
+ GWN_vertbuf_attr_set(vbo, attr_id.pos, v_idx++, v);
+ }
+ }
+
+ SHC.drw_field_cone_limit = GWN_batch_create_ex(GWN_PRIM_LINES, vbo, NULL, GWN_BATCH_OWNS_VBO);
+ }
+ return SHC.drw_field_cone_limit;
+#undef CIRCLE_RESOL
+}
+
+/** \} */
+
+/* -------------------------------------------------------------------- */
+
+/** \name Lamps
+ * \{ */
+
+Gwn_Batch *DRW_cache_lamp_get(void)
+{
+#define NSEGMENTS 8
+ if (!SHC.drw_lamp) {
+ float v[2];
+
+ /* Position Only 3D format */
+ static Gwn_VertFormat format = { 0 };
+ static struct { uint pos; } attr_id;
+ if (format.attrib_ct == 0) {
+ attr_id.pos = GWN_vertformat_attr_add(&format, "pos", GWN_COMP_F32, 2, GWN_FETCH_FLOAT);
+ }
+
+ Gwn_VertBuf *vbo = GWN_vertbuf_create_with_format(&format);
+ GWN_vertbuf_data_alloc(vbo, NSEGMENTS * 2);
+
+ for (int a = 0; a < NSEGMENTS * 2; a += 2) {
+ v[0] = sinf((2.0f * M_PI * a) / ((float)NSEGMENTS * 2));
+ v[1] = cosf((2.0f * M_PI * a) / ((float)NSEGMENTS * 2));
+ GWN_vertbuf_attr_set(vbo, attr_id.pos, a, v);
+
+ v[0] = sinf((2.0f * M_PI * (a + 1)) / ((float)NSEGMENTS * 2));
+ v[1] = cosf((2.0f * M_PI * (a + 1)) / ((float)NSEGMENTS * 2));
+ GWN_vertbuf_attr_set(vbo, attr_id.pos, a + 1, v);
+ }
+
+ SHC.drw_lamp = GWN_batch_create_ex(GWN_PRIM_LINES, vbo, NULL, GWN_BATCH_OWNS_VBO);
+ }
+ return SHC.drw_lamp;
+#undef NSEGMENTS
+}
+
+Gwn_Batch *DRW_cache_lamp_shadows_get(void)
+{
+#define NSEGMENTS 10
+ if (!SHC.drw_lamp_shadows) {
+ float v[2];
+
+ /* Position Only 3D format */
+ static Gwn_VertFormat format = { 0 };
+ static struct { uint pos; } attr_id;
+ if (format.attrib_ct == 0) {
+ attr_id.pos = GWN_vertformat_attr_add(&format, "pos", GWN_COMP_F32, 2, GWN_FETCH_FLOAT);
+ }
+
+ Gwn_VertBuf *vbo = GWN_vertbuf_create_with_format(&format);
+ GWN_vertbuf_data_alloc(vbo, NSEGMENTS * 2);
+
+ for (int a = 0; a < NSEGMENTS * 2; a += 2) {
+ v[0] = sinf((2.0f * M_PI * a) / ((float)NSEGMENTS * 2));
+ v[1] = cosf((2.0f * M_PI * a) / ((float)NSEGMENTS * 2));
+ GWN_vertbuf_attr_set(vbo, attr_id.pos, a, v);
+
+ v[0] = sinf((2.0f * M_PI * (a + 1)) / ((float)NSEGMENTS * 2));
+ v[1] = cosf((2.0f * M_PI * (a + 1)) / ((float)NSEGMENTS * 2));
+ GWN_vertbuf_attr_set(vbo, attr_id.pos, a + 1, v);
+ }
+
+ SHC.drw_lamp_shadows = GWN_batch_create_ex(GWN_PRIM_LINES, vbo, NULL, GWN_BATCH_OWNS_VBO);
+ }
+ return SHC.drw_lamp_shadows;
+#undef NSEGMENTS
+}
+
+Gwn_Batch *DRW_cache_lamp_sunrays_get(void)
+{
+ if (!SHC.drw_lamp_sunrays) {
+ float v[2], v1[2], v2[2];
+
+ /* Position Only 2D format */
+ static Gwn_VertFormat format = { 0 };
+ static struct { uint pos; } attr_id;
+ if (format.attrib_ct == 0) {
+ attr_id.pos = GWN_vertformat_attr_add(&format, "pos", GWN_COMP_F32, 2, GWN_FETCH_FLOAT);
+ }
+
+ Gwn_VertBuf *vbo = GWN_vertbuf_create_with_format(&format);
+ GWN_vertbuf_data_alloc(vbo, 32);
+
+ for (int a = 0; a < 8; a++) {
+ v[0] = sinf((2.0f * M_PI * a) / 8.0f);
+ v[1] = cosf((2.0f * M_PI * a) / 8.0f);
+
+ mul_v2_v2fl(v1, v, 1.6f);
+ mul_v2_v2fl(v2, v, 1.9f);
+ GWN_vertbuf_attr_set(vbo, attr_id.pos, a * 4, v1);
+ GWN_vertbuf_attr_set(vbo, attr_id.pos, a * 4 + 1, v2);
+
+ mul_v2_v2fl(v1, v, 2.2f);
+ mul_v2_v2fl(v2, v, 2.5f);
+ GWN_vertbuf_attr_set(vbo, attr_id.pos, a * 4 + 2, v1);
+ GWN_vertbuf_attr_set(vbo, attr_id.pos, a * 4 + 3, v2);
+ }
+
+ SHC.drw_lamp_sunrays = GWN_batch_create_ex(GWN_PRIM_LINES, vbo, NULL, GWN_BATCH_OWNS_VBO);
+ }
+ return SHC.drw_lamp_sunrays;
+}
+
+Gwn_Batch *DRW_cache_lamp_area_get(void)
+{
+ if (!SHC.drw_lamp_area) {
+ float v1[3] = {0.0f, 0.0f, 0.0f};
+
+ /* Position Only 3D format */
+ static Gwn_VertFormat format = { 0 };
+ static struct { uint pos; } attr_id;
+ if (format.attrib_ct == 0) {
+ attr_id.pos = GWN_vertformat_attr_add(&format, "pos", GWN_COMP_F32, 3, GWN_FETCH_FLOAT);
+ }
+
+ Gwn_VertBuf *vbo = GWN_vertbuf_create_with_format(&format);
+ GWN_vertbuf_data_alloc(vbo, 8);
+
+ v1[0] = v1[1] = 0.5f;
+ GWN_vertbuf_attr_set(vbo, attr_id.pos, 0, v1);
+ v1[0] = -0.5f;
+ GWN_vertbuf_attr_set(vbo, attr_id.pos, 1, v1);
+ GWN_vertbuf_attr_set(vbo, attr_id.pos, 2, v1);
+ v1[1] = -0.5f;
+ GWN_vertbuf_attr_set(vbo, attr_id.pos, 3, v1);
+ GWN_vertbuf_attr_set(vbo, attr_id.pos, 4, v1);
+ v1[0] = 0.5f;
+ GWN_vertbuf_attr_set(vbo, attr_id.pos, 5, v1);
+ GWN_vertbuf_attr_set(vbo, attr_id.pos, 6, v1);
+ v1[1] = 0.5f;
+ GWN_vertbuf_attr_set(vbo, attr_id.pos, 7, v1);
+
+ SHC.drw_lamp_area = GWN_batch_create_ex(GWN_PRIM_LINES, vbo, NULL, GWN_BATCH_OWNS_VBO);
+ }
+ return SHC.drw_lamp_area;
+}
+
+Gwn_Batch *DRW_cache_lamp_hemi_get(void)
+{
+#define CIRCLE_RESOL 32
+ if (!SHC.drw_lamp_hemi) {
+ float v[3];
+ int vidx = 0;
+
+ /* Position Only 3D format */
+ static Gwn_VertFormat format = { 0 };
+ static struct { uint pos; } attr_id;
+ if (format.attrib_ct == 0) {
+ attr_id.pos = GWN_vertformat_attr_add(&format, "pos", GWN_COMP_F32, 3, GWN_FETCH_FLOAT);
+ }
+
+ Gwn_VertBuf *vbo = GWN_vertbuf_create_with_format(&format);
+ GWN_vertbuf_data_alloc(vbo, CIRCLE_RESOL * 2 * 2 - 6 * 2 * 2);
+
+ /* XZ plane */
+ for (int a = 3; a < CIRCLE_RESOL / 2 - 3; a++) {
+ v[0] = sinf((2.0f * M_PI * a) / ((float)CIRCLE_RESOL) - M_PI / 2);
+ v[2] = cosf((2.0f * M_PI * a) / ((float)CIRCLE_RESOL) - M_PI / 2) - 1.0f;
+ v[1] = 0.0f;
+ GWN_vertbuf_attr_set(vbo, attr_id.pos, vidx++, v);
+
+ v[0] = sinf((2.0f * M_PI * (a + 1)) / ((float)CIRCLE_RESOL) - M_PI / 2);
+ v[2] = cosf((2.0f * M_PI * (a + 1)) / ((float)CIRCLE_RESOL) - M_PI / 2) - 1.0f;
+ v[1] = 0.0f;
+ GWN_vertbuf_attr_set(vbo, attr_id.pos, vidx++, v);
+ }
+
+ /* XY plane */
+ for (int a = 3; a < CIRCLE_RESOL / 2 - 3; a++) {
+ v[2] = sinf((2.0f * M_PI * a) / ((float)CIRCLE_RESOL)) - 1.0f;
+ v[1] = cosf((2.0f * M_PI * a) / ((float)CIRCLE_RESOL));
+ v[0] = 0.0f;
+ GWN_vertbuf_attr_set(vbo, attr_id.pos, vidx++, v);
+
+ v[2] = sinf((2.0f * M_PI * (a + 1)) / ((float)CIRCLE_RESOL)) - 1.0f;
+ v[1] = cosf((2.0f * M_PI * (a + 1)) / ((float)CIRCLE_RESOL));
+ v[0] = 0.0f;
+ GWN_vertbuf_attr_set(vbo, attr_id.pos, vidx++, v);
+ }
+
+ /* YZ plane full circle */
+ /* leave v[2] as it is */
+ const float rad = cosf((2.0f * M_PI * 3) / ((float)CIRCLE_RESOL));
+ for (int a = 0; a < CIRCLE_RESOL; a++) {
+ v[1] = rad * sinf((2.0f * M_PI * a) / ((float)CIRCLE_RESOL));
+ v[0] = rad * cosf((2.0f * M_PI * a) / ((float)CIRCLE_RESOL));
+ GWN_vertbuf_attr_set(vbo, attr_id.pos, vidx++, v);
+
+ v[1] = rad * sinf((2.0f * M_PI * (a + 1)) / ((float)CIRCLE_RESOL));
+ v[0] = rad * cosf((2.0f * M_PI * (a + 1)) / ((float)CIRCLE_RESOL));
+ GWN_vertbuf_attr_set(vbo, attr_id.pos, vidx++, v);
+ }
+
+
+ SHC.drw_lamp_hemi = GWN_batch_create_ex(GWN_PRIM_LINES, vbo, NULL, GWN_BATCH_OWNS_VBO);
+ }
+ return SHC.drw_lamp_hemi;
+#undef CIRCLE_RESOL
+}
+
+
+Gwn_Batch *DRW_cache_lamp_spot_get(void)
+{
+#define NSEGMENTS 32
+ if (!SHC.drw_lamp_spot) {
+ /* a single ring of vertices */
+ float p[NSEGMENTS][2];
+ float n[NSEGMENTS][3];
+ float neg[NSEGMENTS][3];
+ float half_angle = 2 * M_PI / ((float)NSEGMENTS * 2);
+ for (int i = 0; i < NSEGMENTS; ++i) {
+ float angle = 2 * M_PI * ((float)i / (float)NSEGMENTS);
+ p[i][0] = cosf(angle);
+ p[i][1] = sinf(angle);
+
+ n[i][0] = cosf(angle - half_angle);
+ n[i][1] = sinf(angle - half_angle);
+ n[i][2] = cosf(M_PI / 16.0f); /* slope of the cone */
+ normalize_v3(n[i]); /* necessary ? */
+ negate_v3_v3(neg[i], n[i]);
+ }
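+ /* N1/N2 store a pair of face normals per edge (for the end ring, the second is negated). */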
+
+ /* Position Only 3D format */
+ static Gwn_VertFormat format = { 0 };
+ static struct { uint pos, n1, n2; } attr_id;
+ if (format.attrib_ct == 0) {
+ attr_id.pos = GWN_vertformat_attr_add(&format, "pos", GWN_COMP_F32, 3, GWN_FETCH_FLOAT);
+ attr_id.n1 = GWN_vertformat_attr_add(&format, "N1", GWN_COMP_F32, 3, GWN_FETCH_FLOAT);
+ attr_id.n2 = GWN_vertformat_attr_add(&format, "N2", GWN_COMP_F32, 3, GWN_FETCH_FLOAT);
+ }
+
+ Gwn_VertBuf *vbo = GWN_vertbuf_create_with_format(&format);
+ GWN_vertbuf_data_alloc(vbo, NSEGMENTS * 4);
+
+ for (int i = 0; i < NSEGMENTS; ++i) {
+ float cv[2], v[3];
+ cv[0] = p[i % NSEGMENTS][0];
+ cv[1] = p[i % NSEGMENTS][1];
+
+ /* cone sides */
+ v[0] = cv[0], v[1] = cv[1], v[2] = -1.0f;
+ GWN_vertbuf_attr_set(vbo, attr_id.pos, i * 4, v);
+ v[0] = 0.0f, v[1] = 0.0f, v[2] = 0.0f;
+ GWN_vertbuf_attr_set(vbo, attr_id.pos, i * 4 + 1, v);
+
+ GWN_vertbuf_attr_set(vbo, attr_id.n1, i * 4, n[(i) % NSEGMENTS]);
+ GWN_vertbuf_attr_set(vbo, attr_id.n1, i * 4 + 1, n[(i) % NSEGMENTS]);
+ GWN_vertbuf_attr_set(vbo, attr_id.n2, i * 4, n[(i + 1) % NSEGMENTS]);
+ GWN_vertbuf_attr_set(vbo, attr_id.n2, i * 4 + 1, n[(i + 1) % NSEGMENTS]);
+
+ /* end ring */
+ v[0] = cv[0], v[1] = cv[1], v[2] = -1.0f;
+ GWN_vertbuf_attr_set(vbo, attr_id.pos, i * 4 + 2, v);
+ cv[0] = p[(i + 1) % NSEGMENTS][0];
+ cv[1] = p[(i + 1) % NSEGMENTS][1];
+ v[0] = cv[0], v[1] = cv[1], v[2] = -1.0f;
+ GWN_vertbuf_attr_set(vbo, attr_id.pos, i * 4 + 3, v);
+
+ GWN_vertbuf_attr_set(vbo, attr_id.n1, i * 4 + 2, n[(i) % NSEGMENTS]);
+ GWN_vertbuf_attr_set(vbo, attr_id.n1, i * 4 + 3, n[(i) % NSEGMENTS]);
+ GWN_vertbuf_attr_set(vbo, attr_id.n2, i * 4 + 2, neg[(i) % NSEGMENTS]);
+ GWN_vertbuf_attr_set(vbo, attr_id.n2, i * 4 + 3, neg[(i) % NSEGMENTS]);
+ }
+
+ SHC.drw_lamp_spot = GWN_batch_create_ex(GWN_PRIM_LINES, vbo, NULL, GWN_BATCH_OWNS_VBO);
+ }
+ return SHC.drw_lamp_spot;
+#undef NSEGMENTS
+}
+
+Gwn_Batch *DRW_cache_lamp_spot_square_get(void)
+{
+ if (!SHC.drw_lamp_spot_square) {
+ float p[5][3] = {{ 0.0f, 0.0f, 0.0f},
+ { 1.0f, 1.0f, -1.0f},
+ { 1.0f, -1.0f, -1.0f},
+ {-1.0f, -1.0f, -1.0f},
+ {-1.0f, 1.0f, -1.0f}};
+
+ unsigned int v_idx = 0;
+
+ /* Position Only 3D format */
+ static Gwn_VertFormat format = { 0 };
+ static struct { uint pos; } attr_id;
+ if (format.attrib_ct == 0) {
+ attr_id.pos = GWN_vertformat_attr_add(&format, "pos", GWN_COMP_F32, 3, GWN_FETCH_FLOAT);
+ }
+
+ Gwn_VertBuf *vbo = GWN_vertbuf_create_with_format(&format);
+ GWN_vertbuf_data_alloc(vbo, 16);
+
+ /* pyramid sides */
+ for (int i = 1; i <= 4; ++i) {
+ GWN_vertbuf_attr_set(vbo, attr_id.pos, v_idx++, p[0]);
+ GWN_vertbuf_attr_set(vbo, attr_id.pos, v_idx++, p[i]);
+
+ GWN_vertbuf_attr_set(vbo, attr_id.pos, v_idx++, p[(i % 4) + 1]);
+ GWN_vertbuf_attr_set(vbo, attr_id.pos, v_idx++, p[((i + 1) % 4) + 1]);
+ }
+
+ SHC.drw_lamp_spot_square = GWN_batch_create_ex(GWN_PRIM_LINES, vbo, NULL, GWN_BATCH_OWNS_VBO);
+ }
+ return SHC.drw_lamp_spot_square;
+}
+
+/** \} */
+
+/* -------------------------------------------------------------------- */
+
+/** \name Speaker
+ * \{ */
+
+Gwn_Batch *DRW_cache_speaker_get(void)
+{
+ if (!SHC.drw_speaker) {
+ float v[3];
+ const int segments = 16;
+ int vidx = 0;
+
+ /* Position Only 3D format */
+ static Gwn_VertFormat format = { 0 };
+ static struct { uint pos; } attr_id;
+ if (format.attrib_ct == 0) {
+ attr_id.pos = GWN_vertformat_attr_add(&format, "pos", GWN_COMP_F32, 3, GWN_FETCH_FLOAT);
+ }
+
+ Gwn_VertBuf *vbo = GWN_vertbuf_create_with_format(&format);
+ GWN_vertbuf_data_alloc(vbo, 3 * segments * 2 + 4 * 4);
+
+ for (int j = 0; j < 3; j++) {
+ float z = 0.25f * j - 0.125f;
+ float r = (j == 0 ? 0.5f : 0.25f);
+
+ copy_v3_fl3(v, r, 0.0f, z);
+ GWN_vertbuf_attr_set(vbo, attr_id.pos, vidx++, v);
+ for (int i = 1; i < segments; i++) {
+ float x = cosf(2.f * (float)M_PI * i / segments) * r;
+ float y = sinf(2.f * (float)M_PI * i / segments) * r;
+ copy_v3_fl3(v, x, y, z);
+ GWN_vertbuf_attr_set(vbo, attr_id.pos, vidx++, v);
+ GWN_vertbuf_attr_set(vbo, attr_id.pos, vidx++, v);
+ }
+ copy_v3_fl3(v, r, 0.0f, z);
+ GWN_vertbuf_attr_set(vbo, attr_id.pos, vidx++, v);
+ }
+
+ for (int j = 0; j < 4; j++) {
+ float x = (((j + 1) % 2) * (j - 1)) * 0.5f;
+ float y = ((j % 2) * (j - 2)) * 0.5f;
+ for (int i = 0; i < 3; i++) {
+ if (i == 1) {
+ x *= 0.5f;
+ y *= 0.5f;
+ }
+
+ float z = 0.25f * i - 0.125f;
+ copy_v3_fl3(v, x, y, z);
+ GWN_vertbuf_attr_set(vbo, attr_id.pos, vidx++, v);
+ if (i == 1) {
+ GWN_vertbuf_attr_set(vbo, attr_id.pos, vidx++, v);
+ }
+ }
+ }
+
+ SHC.drw_speaker = GWN_batch_create_ex(GWN_PRIM_LINES, vbo, NULL, GWN_BATCH_OWNS_VBO);
+ }
+ return SHC.drw_speaker;
+}
+
+/** \} */
+
+/* -------------------------------------------------------------------- */
+
+/** \name Probe
+ * \{ */
+
+Gwn_Batch *DRW_cache_lightprobe_cube_get(void)
+{
+ if (!SHC.drw_lightprobe_cube) {
+ int v_idx = 0;
+ const float sin_pi_3 = 0.86602540378f;
+ const float cos_pi_3 = 0.5f;
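+ /* Hexagon outline plus three spokes to a center vertex: an isometric-cube glyph. */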
+ float v[7][3] = {
+ {0.0f, 1.0f, 0.0f},
+ {sin_pi_3, cos_pi_3, 0.0f},
+ {sin_pi_3, -cos_pi_3, 0.0f},
+ {0.0f, -1.0f, 0.0f},
+ {-sin_pi_3, -cos_pi_3, 0.0f},
+ {-sin_pi_3, cos_pi_3, 0.0f},
+ {0.0f, 0.0f, 0.0f},
+ };
+
+ /* Position Only 3D format */
+ static Gwn_VertFormat format = { 0 };
+ static struct { uint pos; } attr_id;
+ if (format.attrib_ct == 0) {
+ attr_id.pos = GWN_vertformat_attr_add(&format, "pos", GWN_COMP_F32, 3, GWN_FETCH_FLOAT);
+ }
+
+ Gwn_VertBuf *vbo = GWN_vertbuf_create_with_format(&format);
+ GWN_vertbuf_data_alloc(vbo, (6 + 3) * 2);
+
+ for (int i = 0; i < 6; ++i) {
+ GWN_vertbuf_attr_set(vbo, attr_id.pos, v_idx++, v[i]);
+ GWN_vertbuf_attr_set(vbo, attr_id.pos, v_idx++, v[(i + 1) % 6]);
+ }
+
+ GWN_vertbuf_attr_set(vbo, attr_id.pos, v_idx++, v[1]);
+ GWN_vertbuf_attr_set(vbo, attr_id.pos, v_idx++, v[6]);
+
+ GWN_vertbuf_attr_set(vbo, attr_id.pos, v_idx++, v[5]);
+ GWN_vertbuf_attr_set(vbo, attr_id.pos, v_idx++, v[6]);
+
+ GWN_vertbuf_attr_set(vbo, attr_id.pos, v_idx++, v[3]);
+ GWN_vertbuf_attr_set(vbo, attr_id.pos, v_idx++, v[6]);
+
+ SHC.drw_lightprobe_cube = GWN_batch_create_ex(GWN_PRIM_LINES, vbo, NULL, GWN_BATCH_OWNS_VBO);
+ }
+ return SHC.drw_lightprobe_cube;
+}
+
+Gwn_Batch *DRW_cache_lightprobe_grid_get(void)
+{
+ if (!SHC.drw_lightprobe_grid) {
+ int v_idx = 0;
+ const float sin_pi_3 = 0.86602540378f;
+ const float cos_pi_3 = 0.5f;
+ const float v[7][3] = {
+ {0.0f, 1.0f, 0.0f},
+ {sin_pi_3, cos_pi_3, 0.0f},
+ {sin_pi_3, -cos_pi_3, 0.0f},
+ {0.0f, -1.0f, 0.0f},
+ {-sin_pi_3, -cos_pi_3, 0.0f},
+ {-sin_pi_3, cos_pi_3, 0.0f},
+ {0.0f, 0.0f, 0.0f},
+ };
+
+ /* Position Only 3D format */
+ static Gwn_VertFormat format = { 0 };
+ static struct { uint pos; } attr_id;
+ if (format.attrib_ct == 0) {
+ attr_id.pos = GWN_vertformat_attr_add(&format, "pos", GWN_COMP_F32, 3, GWN_FETCH_FLOAT);
+ }
+
+ Gwn_VertBuf *vbo = GWN_vertbuf_create_with_format(&format);
+ GWN_vertbuf_data_alloc(vbo, (6 * 2 + 3) * 2);
+
+ for (int i = 0; i < 6; ++i) {
+ float tmp_v1[3], tmp_v2[3], tmp_tr[3];
+ copy_v3_v3(tmp_v1, v[i]);
+ copy_v3_v3(tmp_v2, v[(i + 1) % 6]);
+ GWN_vertbuf_attr_set(vbo, attr_id.pos, v_idx++, tmp_v1);
+ GWN_vertbuf_attr_set(vbo, attr_id.pos, v_idx++, tmp_v2);
+
+ /* Internal wires. */
+ for (int j = 1; j < 2; ++j) {
+ mul_v3_v3fl(tmp_tr, v[(i / 2) * 2 + 1], -0.5f * j);
+ add_v3_v3v3(tmp_v1, v[i], tmp_tr);
+ add_v3_v3v3(tmp_v2, v[(i + 1) % 6], tmp_tr);
+ GWN_vertbuf_attr_set(vbo, attr_id.pos, v_idx++, tmp_v1);
+ GWN_vertbuf_attr_set(vbo, attr_id.pos, v_idx++, tmp_v2);
+ }
+ }
+
+ GWN_vertbuf_attr_set(vbo, attr_id.pos, v_idx++, v[1]);
+ GWN_vertbuf_attr_set(vbo, attr_id.pos, v_idx++, v[6]);
+
+ GWN_vertbuf_attr_set(vbo, attr_id.pos, v_idx++, v[5]);
+ GWN_vertbuf_attr_set(vbo, attr_id.pos, v_idx++, v[6]);
+
+ GWN_vertbuf_attr_set(vbo, attr_id.pos, v_idx++, v[3]);
+ GWN_vertbuf_attr_set(vbo, attr_id.pos, v_idx++, v[6]);
+
+ SHC.drw_lightprobe_grid = GWN_batch_create_ex(GWN_PRIM_LINES, vbo, NULL, GWN_BATCH_OWNS_VBO);
+ }
+ return SHC.drw_lightprobe_grid;
+}
+
+Gwn_Batch *DRW_cache_lightprobe_planar_get(void)
+{
+ if (!SHC.drw_lightprobe_planar) {
+ int v_idx = 0;
+ const float sin_pi_3 = 0.86602540378f;
+ float v[4][3] = {
+ {0.0f, 0.5f, 0.0f},
+ {sin_pi_3, 0.0f, 0.0f},
+ {0.0f, -0.5f, 0.0f},
+ {-sin_pi_3, 0.0f, 0.0f},
+ };
+
+ /* Position Only 3D format */
+ static Gwn_VertFormat format = { 0 };
+ static struct { uint pos; } attr_id;
+ if (format.attrib_ct == 0) {
+ attr_id.pos = GWN_vertformat_attr_add(&format, "pos", GWN_COMP_F32, 3, GWN_FETCH_FLOAT);
+ }
+
+ Gwn_VertBuf *vbo = GWN_vertbuf_create_with_format(&format);
+ GWN_vertbuf_data_alloc(vbo, 4 * 2);
+
+ for (int i = 0; i < 4; ++i) {
+ GWN_vertbuf_attr_set(vbo, attr_id.pos, v_idx++, v[i]);
+ GWN_vertbuf_attr_set(vbo, attr_id.pos, v_idx++, v[(i + 1) % 4]);
+ }
+
+ SHC.drw_lightprobe_planar = GWN_batch_create_ex(GWN_PRIM_LINES, vbo, NULL, GWN_BATCH_OWNS_VBO);
+ }
+ return SHC.drw_lightprobe_planar;
+}
+
+/** \} */
+
+/* -------------------------------------------------------------------- */
+
+/** \name Armature Bones
+ * \{ */
+
+static const float bone_octahedral_verts[6][3] = {
+ { 0.0f, 0.0f, 0.0f},
+ { 0.1f, 0.1f, 0.1f},
+ { 0.1f, 0.1f, -0.1f},
+ {-0.1f, 0.1f, -0.1f},
+ {-0.1f, 0.1f, 0.1f},
+ { 0.0f, 1.0f, 0.0f}
+};
+
+static const unsigned int bone_octahedral_wire[24] = {
+ 0, 1, 1, 5, 5, 3, 3, 0,
+ 0, 4, 4, 5, 5, 2, 2, 0,
+ 1, 2, 2, 3, 3, 4, 4, 1,
+};
+
+/* Aligned with bone_octahedral_wire.
+ * Contains the adjacent face-normal indices. */
+static const unsigned int bone_octahedral_wire_adjacent_face[24] = {
+ 0, 3, 4, 7, 5, 6, 1, 2,
+ 2, 3, 6, 7, 4, 5, 0, 1,
+ 0, 4, 1, 5, 2, 6, 3, 7,
+};
+
+static const unsigned int bone_octahedral_solid_tris[8][3] = {
+ {2, 1, 0}, /* bottom */
+ {3, 2, 0},
+ {4, 3, 0},
+ {1, 4, 0},
+
+ {5, 1, 2}, /* top */
+ {5, 2, 3},
+ {5, 3, 4},
+ {5, 4, 1}
+};
+
+/* aligned with bone_octahedral_solid_tris */
+static const float bone_octahedral_solid_normals[8][3] = {
+ { M_SQRT1_2, -M_SQRT1_2, 0.00000000f},
+ {-0.00000000f, -M_SQRT1_2, -M_SQRT1_2},
+ {-M_SQRT1_2, -M_SQRT1_2, 0.00000000f},
+ { 0.00000000f, -M_SQRT1_2, M_SQRT1_2},
+ { 0.99388373f, 0.11043154f, -0.00000000f},
+ { 0.00000000f, 0.11043154f, -0.99388373f},
+ {-0.99388373f, 0.11043154f, 0.00000000f},
+ { 0.00000000f, 0.11043154f, 0.99388373f}
+};
+
+Gwn_Batch *DRW_cache_bone_octahedral_get(void)
+{
+ if (!SHC.drw_bone_octahedral) {
+ unsigned int v_idx = 0;
+
+ static Gwn_VertFormat format = { 0 };
+ static struct { uint pos, nor; } attr_id;
+ if (format.attrib_ct == 0) {
+ attr_id.pos = GWN_vertformat_attr_add(&format, "pos", GWN_COMP_F32, 3, GWN_FETCH_FLOAT);
+ attr_id.nor = GWN_vertformat_attr_add(&format, "nor", GWN_COMP_F32, 3, GWN_FETCH_FLOAT);
+ }
+
+ /* Vertices */
+ Gwn_VertBuf *vbo = GWN_vertbuf_create_with_format(&format);
+ GWN_vertbuf_data_alloc(vbo, 24);
+
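+ /* One flat normal per triangle, repeated on its 3 verts: 8 tris * 3 = 24 verts. */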
+ for (int i = 0; i < 8; i++) {
+ GWN_vertbuf_attr_set(vbo, attr_id.nor, v_idx, bone_octahedral_solid_normals[i]);
+ GWN_vertbuf_attr_set(vbo, attr_id.pos, v_idx++, bone_octahedral_verts[bone_octahedral_solid_tris[i][0]]);
+ GWN_vertbuf_attr_set(vbo, attr_id.nor, v_idx, bone_octahedral_solid_normals[i]);
+ GWN_vertbuf_attr_set(vbo, attr_id.pos, v_idx++, bone_octahedral_verts[bone_octahedral_solid_tris[i][1]]);
+ GWN_vertbuf_attr_set(vbo, attr_id.nor, v_idx, bone_octahedral_solid_normals[i]);
+ GWN_vertbuf_attr_set(vbo, attr_id.pos, v_idx++, bone_octahedral_verts[bone_octahedral_solid_tris[i][2]]);
+ }
+
+ SHC.drw_bone_octahedral = GWN_batch_create_ex(GWN_PRIM_TRIS, vbo, NULL, GWN_BATCH_OWNS_VBO);
+ }
+ return SHC.drw_bone_octahedral;
+}
+
+Gwn_Batch *DRW_cache_bone_octahedral_wire_outline_get(void)
+{
+ if (!SHC.drw_bone_octahedral_wire) {
+ unsigned int v_idx = 0;
+
+ static Gwn_VertFormat format = { 0 };
+ static struct { uint pos, n1, n2; } attr_id;
+ if (format.attrib_ct == 0) {
+ attr_id.pos = GWN_vertformat_attr_add(&format, "pos", GWN_COMP_F32, 3, GWN_FETCH_FLOAT);
+ attr_id.n1 = GWN_vertformat_attr_add(&format, "N1", GWN_COMP_F32, 3, GWN_FETCH_FLOAT);
+ attr_id.n2 = GWN_vertformat_attr_add(&format, "N2", GWN_COMP_F32, 3, GWN_FETCH_FLOAT);
+ }
+
+ /* Vertices */
+ Gwn_VertBuf *vbo = GWN_vertbuf_create_with_format(&format);
+ GWN_vertbuf_data_alloc(vbo, 12 * 2);
+
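+ /* Each edge stores its two adjacent face normals (N1/N2); presumably the outline shader uses them to keep only silhouette edges. */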
+ for (int i = 0; i < 12; i++) {
+ const float *co1 = bone_octahedral_verts[bone_octahedral_wire[i * 2]];
+ const float *co2 = bone_octahedral_verts[bone_octahedral_wire[i * 2 + 1]];
+ const float *n1 = bone_octahedral_solid_normals[bone_octahedral_wire_adjacent_face[i * 2]];
+ const float *n2 = bone_octahedral_solid_normals[bone_octahedral_wire_adjacent_face[i * 2 + 1]];
+ add_fancy_edge(vbo, attr_id.pos, attr_id.n1, attr_id.n2, &v_idx, co1, co2, n1, n2);
+ }
+
+ SHC.drw_bone_octahedral_wire = GWN_batch_create_ex(GWN_PRIM_LINES, vbo, NULL, GWN_BATCH_OWNS_VBO);
+ }
+ return SHC.drw_bone_octahedral_wire;
+}
+
+
+/* XXX TODO: move this 1-unit cube to a more common/generic place? */
+static const float bone_box_verts[8][3] = {
+ { 1.0f, 0.0f, 1.0f},
+ { 1.0f, 0.0f, -1.0f},
+ {-1.0f, 0.0f, -1.0f},
+ {-1.0f, 0.0f, 1.0f},
+ { 1.0f, 1.0f, 1.0f},
+ { 1.0f, 1.0f, -1.0f},
+ {-1.0f, 1.0f, -1.0f},
+ {-1.0f, 1.0f, 1.0f}
+};
+
+static const unsigned int bone_box_wire[24] = {
+ 0, 1, 1, 2, 2, 3, 3, 0,
+ 4, 5, 5, 6, 6, 7, 7, 4,
+ 0, 4, 1, 5, 2, 6, 3, 7,
+};
+
+/* Aligned with bone_box_wire.
+ * Contains the adjacent face-normal indices. */
+static const unsigned int bone_box_wire_adjacent_face[24] = {
+ 0, 2, 0, 4, 1, 6, 1, 8,
+ 3, 10, 5, 10, 7, 11, 9, 11,
+ 3, 8, 2, 5, 4, 7, 6, 9,
+};
+
+static const unsigned int bone_box_solid_tris[12][3] = {
+ {0, 1, 2}, /* bottom */
+ {0, 2, 3},
+
+ {0, 1, 5}, /* sides */
+ {0, 5, 4},
+
+ {1, 2, 6},
+ {1, 6, 5},
+
+ {2, 3, 7},
+ {2, 7, 6},
+
+ {3, 0, 4},
+ {3, 4, 7},
+
+ {4, 5, 6}, /* top */
+ {4, 6, 7},
+};
+
+/* aligned with bone_box_solid_tris */
+static const float bone_box_solid_normals[12][3] = {
+ { 0.0f, -1.0f, 0.0f},
+ { 0.0f, -1.0f, 0.0f},
+
+ { 1.0f, 0.0f, 0.0f},
+ { 1.0f, 0.0f, 0.0f},
+
+ { 0.0f, 0.0f, -1.0f},
+ { 0.0f, 0.0f, -1.0f},
+
+ {-1.0f, 0.0f, 0.0f},
+ {-1.0f, 0.0f, 0.0f},
+
+ { 0.0f, 0.0f, 1.0f},
+ { 0.0f, 0.0f, 1.0f},
+
+ { 0.0f, 1.0f, 0.0f},
+ { 0.0f, 1.0f, 0.0f},
+};
+
+Gwn_Batch *DRW_cache_bone_box_get(void)
+{
+ if (!SHC.drw_bone_box) {
+ unsigned int v_idx = 0;
+
+ static Gwn_VertFormat format = { 0 };
+ static struct { uint pos, nor; } attr_id;
+ if (format.attrib_ct == 0) {
+ attr_id.pos = GWN_vertformat_attr_add(&format, "pos", GWN_COMP_F32, 3, GWN_FETCH_FLOAT);
+ attr_id.nor = GWN_vertformat_attr_add(&format, "nor", GWN_COMP_F32, 3, GWN_FETCH_FLOAT);
+ }
+
+ /* Vertices */
+ Gwn_VertBuf *vbo = GWN_vertbuf_create_with_format(&format);
+ GWN_vertbuf_data_alloc(vbo, 36);
+
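+ /* 12 tris * 3 verts = 36, each vert carrying its triangle's flat normal. */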
+ for (int i = 0; i < 12; i++) {
+ for (int j = 0; j < 3; j++) {
+ GWN_vertbuf_attr_set(vbo, attr_id.nor, v_idx, bone_box_solid_normals[i]);
+ GWN_vertbuf_attr_set(vbo, attr_id.pos, v_idx++, bone_box_verts[bone_box_solid_tris[i][j]]);
+ }
+ }
+
+ SHC.drw_bone_box = GWN_batch_create_ex(GWN_PRIM_TRIS, vbo, NULL, GWN_BATCH_OWNS_VBO);
+ }
+ return SHC.drw_bone_box;
+}
+
+Gwn_Batch *DRW_cache_bone_box_wire_outline_get(void)
+{
+ if (!SHC.drw_bone_box_wire) {
+ unsigned int v_idx = 0;
+
+ static Gwn_VertFormat format = { 0 };
+ static struct { uint pos, n1, n2; } attr_id;
+ if (format.attrib_ct == 0) {
+ attr_id.pos = GWN_vertformat_attr_add(&format, "pos", GWN_COMP_F32, 3, GWN_FETCH_FLOAT);
+ attr_id.n1 = GWN_vertformat_attr_add(&format, "N1", GWN_COMP_F32, 3, GWN_FETCH_FLOAT);
+ attr_id.n2 = GWN_vertformat_attr_add(&format, "N2", GWN_COMP_F32, 3, GWN_FETCH_FLOAT);
+ }
+
+ /* Vertices */
+ Gwn_VertBuf *vbo = GWN_vertbuf_create_with_format(&format);
+ GWN_vertbuf_data_alloc(vbo, 12 * 2);
+
+ for (int i = 0; i < 12; i++) {
+ const float *co1 = bone_box_verts[bone_box_wire[i * 2]];
+ const float *co2 = bone_box_verts[bone_box_wire[i * 2 + 1]];
+ const float *n1 = bone_box_solid_normals[bone_box_wire_adjacent_face[i * 2]];
+ const float *n2 = bone_box_solid_normals[bone_box_wire_adjacent_face[i * 2 + 1]];
+ add_fancy_edge(vbo, attr_id.pos, attr_id.n1, attr_id.n2, &v_idx, co1, co2, n1, n2);
+ }
+
+ SHC.drw_bone_box_wire = GWN_batch_create_ex(GWN_PRIM_LINES, vbo, NULL, GWN_BATCH_OWNS_VBO);
+ }
+ return SHC.drw_bone_box_wire;
+}
+
+
+Gwn_Batch *DRW_cache_bone_wire_wire_outline_get(void)
+{
+ if (!SHC.drw_bone_wire_wire) {
+ unsigned int v_idx = 0;
+
+ static Gwn_VertFormat format = { 0 };
+ static struct { uint pos, n1, n2; } attr_id;
+ if (format.attrib_ct == 0) {
+ attr_id.pos = GWN_vertformat_attr_add(&format, "pos", GWN_COMP_F32, 3, GWN_FETCH_FLOAT);
+ attr_id.n1 = GWN_vertformat_attr_add(&format, "N1", GWN_COMP_F32, 3, GWN_FETCH_FLOAT);
+ attr_id.n2 = GWN_vertformat_attr_add(&format, "N2", GWN_COMP_F32, 3, GWN_FETCH_FLOAT);
+ }
+
+ /* Vertices */
+ Gwn_VertBuf *vbo = GWN_vertbuf_create_with_format(&format);
+ GWN_vertbuf_data_alloc(vbo, 2);
+
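+ /* A single segment from the bone head (origin) to the tail at (0, 1, 0). */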
+ const float co1[3] = {0.0f, 0.0f, 0.0f};
+ const float co2[3] = {0.0f, 1.0f, 0.0f};
+ const float n[3] = {1.0f, 0.0f, 0.0f};
+ add_fancy_edge(vbo, attr_id.pos, attr_id.n1, attr_id.n2, &v_idx, co1, co2, n, n);
+
+ SHC.drw_bone_wire_wire = GWN_batch_create_ex(GWN_PRIM_LINES, vbo, NULL, GWN_BATCH_OWNS_VBO);
+ }
+ return SHC.drw_bone_wire_wire;
+}
+
+
+/* Helpers for envelope bone's solid sphere-with-hidden-equatorial-cylinder.
+ * Note that here we only encode head/tail in the fourth component of the vector. */
+static void benv_lat_lon_to_co(const float lat, const float lon, float r_nor[3])
+{
+ /* Poles are along Y axis. */
+ r_nor[0] = sinf(lat) * cosf(lon);
+ r_nor[1] = cosf(lat);
+ r_nor[2] = sinf(lat) * sinf(lon);
+}
+
+static void benv_add_tri(Gwn_VertBuf *vbo, uint pos_id, uint *v_idx, float *co1, float *co2, float *co3)
+{
+ /* Add the given tri and its seven mirrors along the X/Y/Z axes. */
+ for (int x = -1; x <= 1; x += 2) {
+ for (int y = -1; y <= 1; y += 2) {
+ const float head_tail = (y == -1) ? 0.0f : 1.0f;
+ for (int z = -1; z <= 1; z += 2) {
+ GWN_vertbuf_attr_set(vbo, pos_id, (*v_idx)++,
+ (const float[4]){co1[0] * x, co1[1] * y, co1[2] * z, head_tail});
+ GWN_vertbuf_attr_set(vbo, pos_id, (*v_idx)++,
+ (const float[4]){co2[0] * x, co2[1] * y, co2[2] * z, head_tail});
+ GWN_vertbuf_attr_set(vbo, pos_id, (*v_idx)++,
+ (const float[4]){co3[0] * x, co3[1] * y, co3[2] * z, head_tail});
+ }
+ }
+ }
+}
+
+Gwn_Batch *DRW_cache_bone_envelope_solid_get(void)
+{
+#define CIRCLE_RESOL 32 /* Must be multiple of 4 */
+ if (!SHC.drw_bone_envelope) {
+ const int lon_res = CIRCLE_RESOL / 4;
+ const int lat_res = CIRCLE_RESOL / 4;
+ const float lon_inc = M_PI_2 / lon_res;
+ const float lat_inc = M_PI_2 / lat_res;
+ unsigned int v_idx = 0;
+
+ static Gwn_VertFormat format = { 0 };
+ static struct { uint pos; } attr_id;
+ if (format.attrib_ct == 0) {
+ attr_id.pos = GWN_vertformat_attr_add(&format, "pos", GWN_COMP_F32, 4, GWN_FETCH_FLOAT);
+ }
+
+ /* Vertices */
+ Gwn_VertBuf *vbo = GWN_vertbuf_create_with_format(&format);
+ GWN_vertbuf_data_alloc(vbo, lat_res * lon_res * 8 * 6);
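+ /* Each sphere cell emits up to 2 tris * 8 mirrors * 3 verts = 48; the tri skipped at the pole is made up by the equatorial 'cylinder' quad, giving lat_res * lon_res * 8 * 6. */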
+
+ float lon = 0.0f;
+ for (int i = 0; i < lon_res; i++, lon += lon_inc) {
+ float lat = 0.0f;
+ float co1[3], co2[3], co3[3], co4[3];
+
+ for (int j = 0; j < lat_res; j++, lat += lat_inc) {
+ benv_lat_lon_to_co(lat, lon, co1);
+ benv_lat_lon_to_co(lat, lon + lon_inc, co2);
+ benv_lat_lon_to_co(lat + lat_inc, lon + lon_inc, co3);
+ benv_lat_lon_to_co(lat + lat_inc, lon, co4);
+
+ if (j != 0) { /* At the pole, co1 and co2 are identical, so skip the degenerate tri. */
+ benv_add_tri(vbo, attr_id.pos, &v_idx, co1, co2, co3);
+ }
+ benv_add_tri(vbo, attr_id.pos, &v_idx, co1, co3, co4);
+ }
+
+ /* lat is now at the equator (i.e. lat == pi / 2). */
+ /* Add the 'cylinder' part between the two equators (along the XZ plane). */
+ for (int x = -1; x <= 1; x += 2) {
+ for (int z = -1; z <= 1; z += 2) {
+ GWN_vertbuf_attr_set(vbo, attr_id.pos, v_idx++,
+ (const float[4]){co3[0] * x, co3[1], co3[2] * z, 0.0f});
+ GWN_vertbuf_attr_set(vbo, attr_id.pos, v_idx++,
+ (const float[4]){co4[0] * x, co4[1], co4[2] * z, 0.0f});
+ GWN_vertbuf_attr_set(vbo, attr_id.pos, v_idx++,
+ (const float[4]){co4[0] * x, co4[1], co4[2] * z, 1.0f});
+
+ GWN_vertbuf_attr_set(vbo, attr_id.pos, v_idx++,
+ (const float[4]){co3[0] * x, co3[1], co3[2] * z, 0.0f});
+ GWN_vertbuf_attr_set(vbo, attr_id.pos, v_idx++,
+ (const float[4]){co4[0] * x, co4[1], co4[2] * z, 1.0f});
+ GWN_vertbuf_attr_set(vbo, attr_id.pos, v_idx++,
+ (const float[4]){co3[0] * x, co3[1], co3[2] * z, 1.0f});
+ }
+ }
+ }
+
+ SHC.drw_bone_envelope = GWN_batch_create_ex(GWN_PRIM_TRIS, vbo, NULL, GWN_BATCH_OWNS_VBO);
+ }
+ return SHC.drw_bone_envelope;
+#undef CIRCLE_RESOL
+}
+
+
+Gwn_Batch *DRW_cache_bone_envelope_distance_outline_get(void)
+{
+#define CIRCLE_RESOL 32 /* Must be multiple of 2 */
+ if (!SHC.drw_bone_envelope_distance) {
+ unsigned int v_idx = 0;
+
+ static Gwn_VertFormat format = { 0 };
+ static unsigned int pos_id;
+ if (format.attrib_ct == 0) {
+ pos_id = GWN_vertformat_attr_add(&format, "pos", GWN_COMP_F32, 4, GWN_FETCH_FLOAT);
+ }
+
+ /* Vertices */
+ Gwn_VertBuf *vbo = GWN_vertbuf_create_with_format(&format);
+ GWN_vertbuf_data_alloc(vbo, CIRCLE_RESOL * 2 + 6);
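+ /* 2 verts per step over CIRCLE_RESOL + 1 steps, plus 2 extra pairs at the head/tail transitions: 2 * CIRCLE_RESOL + 6. */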
+
+ /* Encoded triangle strip; the vertex shader computes the final positions. */
+ for (int i = 0; i < CIRCLE_RESOL + 1; i++) {
+ const bool is_headtail_transition = ELEM(i, CIRCLE_RESOL / 2, CIRCLE_RESOL);
+ const float head_tail = (i > CIRCLE_RESOL / 2) ? 1.0f : 0.0f;
+ const float alpha = 2.0f * M_PI * i / CIRCLE_RESOL;
+ const float x = cosf(alpha);
+ const float y = -sinf(alpha);
+
+ /* { X, Y, head/tail, inner/outer border } */
+ GWN_vertbuf_attr_set(vbo, pos_id, v_idx++, (const float[4]){x, y, head_tail, 0.0f});
+ GWN_vertbuf_attr_set(vbo, pos_id, v_idx++, (const float[4]){x, y, head_tail, 1.0f});
+ if (is_headtail_transition) {
+ GWN_vertbuf_attr_set(vbo, pos_id, v_idx++, (const float[4]){x, y, 1.0f - head_tail, 0.0f});
+ GWN_vertbuf_attr_set(vbo, pos_id, v_idx++, (const float[4]){x, y, 1.0f - head_tail, 1.0f});
+ }
+ }
+
+ SHC.drw_bone_envelope_distance = GWN_batch_create_ex(GWN_PRIM_TRI_STRIP, vbo, NULL, GWN_BATCH_OWNS_VBO);
+ }
+ return SHC.drw_bone_envelope_distance;
+#undef CIRCLE_RESOL
+}
+
+
+/* Bone body. */
+Gwn_Batch *DRW_cache_bone_envelope_wire_outline_get(void)
+{
+ if (!SHC.drw_bone_envelope_wire) {
+ unsigned int v_idx = 0;
+
+ static Gwn_VertFormat format = { 0 };
+ static unsigned int pos_id;
+ if (format.attrib_ct == 0) {
+ pos_id = GWN_vertformat_attr_add(&format, "pos", GWN_COMP_F32, 4, GWN_FETCH_FLOAT);
+ }
+
+ /* Vertices */
+ Gwn_VertBuf *vbo = GWN_vertbuf_create_with_format(&format);
+ GWN_vertbuf_data_alloc(vbo, 4);
+
+ /* Two lines between head and tail circles. */
+ /* Encoded lines; the vertex shader computes the final positions. */
+ /* { X, Y, head/tail, inner/outer border } */
+ GWN_vertbuf_attr_set(vbo, pos_id, v_idx++, (const float[4]){ 1.0f, 0.0f, 0.0f, 0.0f});
+ GWN_vertbuf_attr_set(vbo, pos_id, v_idx++, (const float[4]){ 1.0f, 0.0f, 1.0f, 0.0f});
+ GWN_vertbuf_attr_set(vbo, pos_id, v_idx++, (const float[4]){-1.0f, 0.0f, 0.0f, 0.0f});
+ GWN_vertbuf_attr_set(vbo, pos_id, v_idx++, (const float[4]){-1.0f, 0.0f, 1.0f, 0.0f});
+
+ SHC.drw_bone_envelope_wire = GWN_batch_create_ex(GWN_PRIM_LINES, vbo, NULL, GWN_BATCH_OWNS_VBO);
+ }
+ return SHC.drw_bone_envelope_wire;
+}
+
+
+/* Bone head and tail. */
+Gwn_Batch *DRW_cache_bone_envelope_head_wire_outline_get(void)
+{
+#define CIRCLE_RESOL 32 /* Must be multiple of 2 */
+ if (!SHC.drw_bone_envelope_head_wire) {
+ unsigned int v_idx = 0;
+
+ static Gwn_VertFormat format = { 0 };
+ static unsigned int pos_id;
+ if (format.attrib_ct == 0) {
+ pos_id = GWN_vertformat_attr_add(&format, "pos", GWN_COMP_F32, 4, GWN_FETCH_FLOAT);
+ }
+
+ /* Vertices */
+ Gwn_VertBuf *vbo = GWN_vertbuf_create_with_format(&format);
+ GWN_vertbuf_data_alloc(vbo, CIRCLE_RESOL);
+
+ /* Encoded lines; the vertex shader computes the final positions. */
+ /* Only head circle (tail is drawn in disp_tail_mat space as a head one by draw_armature.c's draw_point()). */
+ for (int i = 0; i < CIRCLE_RESOL; i++) {
+ const float alpha = 2.0f * M_PI * i / CIRCLE_RESOL;
+ const float x = cosf(alpha);
+ const float y = -sinf(alpha);
+
+ /* { X, Y, head/tail, inner/outer border } */
+ GWN_vertbuf_attr_set(vbo, pos_id, v_idx++, (const float[4]){ x, y, 0.0f, 0.0f});
+ }
+
+ SHC.drw_bone_envelope_head_wire = GWN_batch_create_ex(GWN_PRIM_LINE_LOOP, vbo, NULL, GWN_BATCH_OWNS_VBO);
+ }
+ return SHC.drw_bone_envelope_head_wire;
+#undef CIRCLE_RESOL
+}
+
+
+Gwn_Batch *DRW_cache_bone_point_get(void)
+{
+ if (!SHC.drw_bone_point) {
+ const int lon_res = 16;
+ const int lat_res = 8;
+ const float rad = 0.05f;
+ const float lon_inc = 2 * M_PI / lon_res;
+ const float lat_inc = M_PI / lat_res;
+ unsigned int v_idx = 0;
+
+ static Gwn_VertFormat format = { 0 };
+ static struct { uint pos, nor; } attr_id;
+ if (format.attrib_ct == 0) {
+ attr_id.pos = GWN_vertformat_attr_add(&format, "pos", GWN_COMP_F32, 3, GWN_FETCH_FLOAT);
+ attr_id.nor = GWN_vertformat_attr_add(&format, "nor", GWN_COMP_F32, 3, GWN_FETCH_FLOAT);
+ }
+
+ /* Vertices */
+ Gwn_VertBuf *vbo = GWN_vertbuf_create_with_format(&format);
+ GWN_vertbuf_data_alloc(vbo, (lat_res - 1) * lon_res * 6);
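+ /* 2 tris (6 verts) per lat/lon cell; each pole row drops one degenerate tri, giving (lat_res - 1) * lon_res * 6. */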
+
+ float lon = 0.0f;
+ for (int i = 0; i < lon_res; i++, lon += lon_inc) {
+ float lat = 0.0f;
+ for (int j = 0; j < lat_res; j++, lat += lat_inc) {
+ if (j != lat_res - 1) { /* Pole */
+ add_lat_lon_vert(vbo, attr_id.pos, attr_id.nor, &v_idx, rad, lat + lat_inc, lon + lon_inc);
+ add_lat_lon_vert(vbo, attr_id.pos, attr_id.nor, &v_idx, rad, lat + lat_inc, lon);
+ add_lat_lon_vert(vbo, attr_id.pos, attr_id.nor, &v_idx, rad, lat, lon);
+ }
+
+ if (j != 0) { /* Pole */
+ add_lat_lon_vert(vbo, attr_id.pos, attr_id.nor, &v_idx, rad, lat, lon + lon_inc);
+ add_lat_lon_vert(vbo, attr_id.pos, attr_id.nor, &v_idx, rad, lat + lat_inc, lon + lon_inc);
+ add_lat_lon_vert(vbo, attr_id.pos, attr_id.nor, &v_idx, rad, lat, lon);
+ }
+ }
+ }
+
+ SHC.drw_bone_point = GWN_batch_create_ex(GWN_PRIM_TRIS, vbo, NULL, GWN_BATCH_OWNS_VBO);
+ }
+ return SHC.drw_bone_point;
+}
+
+Gwn_Batch *DRW_cache_bone_point_wire_outline_get(void)
+{
+ if (!SHC.drw_bone_point_wire) {
+ Gwn_VertBuf *vbo = sphere_wire_vbo(0.05f);
+ SHC.drw_bone_point_wire = GWN_batch_create_ex(GWN_PRIM_LINES, vbo, NULL, GWN_BATCH_OWNS_VBO);
+ }
+ return SHC.drw_bone_point_wire;
+}
+
+Gwn_Batch *DRW_cache_bone_arrows_get(void)
+{
+ if (!SHC.drw_bone_arrows) {
+ Gwn_VertBuf *vbo = fill_arrows_vbo(0.25f);
+ SHC.drw_bone_arrows = GWN_batch_create_ex(GWN_PRIM_LINES, vbo, NULL, GWN_BATCH_OWNS_VBO);
+ }
+ return SHC.drw_bone_arrows;
+}
+
+/** \} */
+
+/* -------------------------------------------------------------------- */
+
+/** \name Camera
+ * \{ */
+
+/**
+ * We could make these more generic functions,
+ * although filling 1D lines is not common.
+ *
+ * \note Use the x coordinate to identify the vertex; the vertex shader takes care of placing it appropriately.
+ */
+
+static const float camera_coords_frame_bounds[5] = {
+ 0.0f, /* center point */
+ 1.0f, /* + X + Y */
+ 2.0f, /* + X - Y */
+ 3.0f, /* - X - Y */
+ 4.0f, /* - X + Y */
+};
+
+static const float camera_coords_frame_tri[3] = {
+ 5.0f, /* tria + X */
+ 6.0f, /* tria - X */
+ 7.0f, /* tria + Y */
+};
+
+/** Draw a loop of lines. */
+static void camera_fill_lines_loop_fl_v1(
+ Gwn_VertBufRaw *pos_step,
+ const float *coords, const uint coords_len)
+{
+ for (uint i = 0, i_prev = coords_len - 1; i < coords_len; i_prev = i++) {
+ *((float *)GWN_vertbuf_raw_step(pos_step)) = coords[i_prev];
+ *((float *)GWN_vertbuf_raw_step(pos_step)) = coords[i];
+ }
+}
+
+/** Fan lines out from the first vertex. */
+static void camera_fill_lines_fan_fl_v1(
+ Gwn_VertBufRaw *pos_step,
+ const float *coords, const uint coords_len)
+{
+ for (uint i = 1; i < coords_len; i++) {
+ *((float *)GWN_vertbuf_raw_step(pos_step)) = coords[0];
+ *((float *)GWN_vertbuf_raw_step(pos_step)) = coords[i];
+ }
+}
+
+/** Simply fill the array. */
+static void camera_fill_array_fl_v1(
+ Gwn_VertBufRaw *pos_step,
+ const float *coords, const uint coords_len)
+{
+ for (uint i = 0; i < coords_len; i++) {
+ *((float *)GWN_vertbuf_raw_step(pos_step)) = coords[i];
+ }
+}
+
+
+Gwn_Batch *DRW_cache_camera_get(void)
+{
+ if (!SHC.drw_camera) {
+ static Gwn_VertFormat format = { 0 };
+ static struct { uint pos; } attr_id;
+ if (format.attrib_ct == 0) {
+ attr_id.pos = GWN_vertformat_attr_add(&format, "pos", GWN_COMP_F32, 1, GWN_FETCH_FLOAT);
+ }
+
+ /* Vertices */
+ Gwn_VertBuf *vbo = GWN_vertbuf_create_with_format(&format);
+ const int vbo_len_capacity = 22;
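+ /* 4 cone lines + 4 frame lines + 3 triangle lines, 2 verts each. */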
+ GWN_vertbuf_data_alloc(vbo, vbo_len_capacity);
+ Gwn_VertBufRaw pos_step;
+ GWN_vertbuf_attr_get_raw_data(vbo, attr_id.pos, &pos_step);
+
+ /* camera cone (from center to frame) */
+ camera_fill_lines_fan_fl_v1(&pos_step, camera_coords_frame_bounds, ARRAY_SIZE(camera_coords_frame_bounds));
+
+ /* camera frame (skip center) */
+ camera_fill_lines_loop_fl_v1(&pos_step, &camera_coords_frame_bounds[1], ARRAY_SIZE(camera_coords_frame_bounds) - 1);
+
+ /* camera triangle (above the frame) */
+ camera_fill_lines_loop_fl_v1(&pos_step, camera_coords_frame_tri, ARRAY_SIZE(camera_coords_frame_tri));
+
+ BLI_assert(vbo_len_capacity == GWN_vertbuf_raw_used(&pos_step));
+
+ SHC.drw_camera = GWN_batch_create_ex(GWN_PRIM_LINES, vbo, NULL, GWN_BATCH_OWNS_VBO);
+ }
+ return SHC.drw_camera;
+}
+
+Gwn_Batch *DRW_cache_camera_frame_get(void)
+{
+ if (!SHC.drw_camera_frame) {
+
+ static Gwn_VertFormat format = { 0 };
+ static struct { uint pos; } attr_id;
+ if (format.attrib_ct == 0) {
+ attr_id.pos = GWN_vertformat_attr_add(&format, "pos", GWN_COMP_F32, 1, GWN_FETCH_FLOAT);
+ }
+
+ /* Vertices */
+ Gwn_VertBuf *vbo = GWN_vertbuf_create_with_format(&format);
+ const int vbo_len_capacity = 8;
+ GWN_vertbuf_data_alloc(vbo, vbo_len_capacity);
+ Gwn_VertBufRaw pos_step;
+ GWN_vertbuf_attr_get_raw_data(vbo, attr_id.pos, &pos_step);
+
+ /* camera frame (skip center) */
+ camera_fill_lines_loop_fl_v1(&pos_step, &camera_coords_frame_bounds[1], ARRAY_SIZE(camera_coords_frame_bounds) - 1);
+
+ BLI_assert(vbo_len_capacity == GWN_vertbuf_raw_used(&pos_step));
+
+ SHC.drw_camera_frame = GWN_batch_create_ex(GWN_PRIM_LINES, vbo, NULL, GWN_BATCH_OWNS_VBO);
+ }
+ return SHC.drw_camera_frame;
+}
+
+Gwn_Batch *DRW_cache_camera_tria_get(void)
+{
+ if (!SHC.drw_camera_tria) {
+ static Gwn_VertFormat format = { 0 };
+ static struct { uint pos; } attr_id;
+ if (format.attrib_ct == 0) {
+ attr_id.pos = GWN_vertformat_attr_add(&format, "pos", GWN_COMP_F32, 1, GWN_FETCH_FLOAT);
+ }
+
+ /* Vertices */
+ Gwn_VertBuf *vbo = GWN_vertbuf_create_with_format(&format);
+ const int vbo_len_capacity = 3;
+ GWN_vertbuf_data_alloc(vbo, vbo_len_capacity);
+ Gwn_VertBufRaw pos_step;
+ GWN_vertbuf_attr_get_raw_data(vbo, attr_id.pos, &pos_step);
+
+ /* camera triangle (above the frame) */
+ camera_fill_array_fl_v1(&pos_step, camera_coords_frame_tri, ARRAY_SIZE(camera_coords_frame_tri));
+
+ BLI_assert(vbo_len_capacity == GWN_vertbuf_raw_used(&pos_step));
+
+ SHC.drw_camera_tria = GWN_batch_create_ex(GWN_PRIM_TRIS, vbo, NULL, GWN_BATCH_OWNS_VBO);
+ }
+ return SHC.drw_camera_tria;
+}
+
+/** \} */
+
+/* -------------------------------------------------------------------- */
+
+/** \name Object Mode Helpers
+ * \{ */
+
+/* Object Center */
+Gwn_Batch *DRW_cache_single_vert_get(void)
+{
+ if (!SHC.drw_single_vertice) {
+ float v1[3] = {0.0f, 0.0f, 0.0f};
+
+ /* Position Only 3D format */
+ static Gwn_VertFormat format = { 0 };
+ static struct { uint pos; } attr_id;
+ if (format.attrib_ct == 0) {
+ attr_id.pos = GWN_vertformat_attr_add(&format, "pos", GWN_COMP_F32, 3, GWN_FETCH_FLOAT);
+ }
+
+ Gwn_VertBuf *vbo = GWN_vertbuf_create_with_format(&format);
+ GWN_vertbuf_data_alloc(vbo, 1);
+
+ GWN_vertbuf_attr_set(vbo, attr_id.pos, 0, v1);
+
+ SHC.drw_single_vertice = GWN_batch_create_ex(GWN_PRIM_POINTS, vbo, NULL, GWN_BATCH_OWNS_VBO);
+ }
+ return SHC.drw_single_vertice;
+}
+
+/** \} */
+
+/* -------------------------------------------------------------------- */
+
+/** \name Meshes
+ * \{ */
+
+Gwn_Batch *DRW_cache_mesh_surface_overlay_get(Object *ob)
+{
+ BLI_assert(ob->type == OB_MESH);
+ Mesh *me = ob->data;
+ return DRW_mesh_batch_cache_get_all_triangles(me);
+}
+
+void DRW_cache_mesh_wire_overlay_get(
+ Object *ob,
+ Gwn_Batch **r_tris, Gwn_Batch **r_ledges, Gwn_Batch **r_lverts)
+{
+ BLI_assert(ob->type == OB_MESH);
+
+ Mesh *me = ob->data;
+
+ *r_tris = DRW_mesh_batch_cache_get_overlay_triangles(me);
+ *r_ledges = DRW_mesh_batch_cache_get_overlay_loose_edges(me);
+ *r_lverts = DRW_mesh_batch_cache_get_overlay_loose_verts(me);
+}
+
+void DRW_cache_mesh_normals_overlay_get(
+ Object *ob,
+ Gwn_Batch **r_tris, Gwn_Batch **r_ledges, Gwn_Batch **r_lverts)
+{
+ BLI_assert(ob->type == OB_MESH);
+
+ Mesh *me = ob->data;
+
+ *r_tris = DRW_mesh_batch_cache_get_overlay_triangles_nor(me);
+ *r_ledges = DRW_mesh_batch_cache_get_overlay_loose_edges_nor(me);
+ *r_lverts = DRW_mesh_batch_cache_get_overlay_loose_verts(me);
+}
+
+Gwn_Batch *DRW_cache_face_centers_get(Object *ob)
+{
+ BLI_assert(ob->type == OB_MESH);
+
+ Mesh *me = ob->data;
+
+ return DRW_mesh_batch_cache_get_overlay_facedots(me);
+}
+
+Gwn_Batch *DRW_cache_mesh_wire_outline_get(Object *ob)
+{
+ BLI_assert(ob->type == OB_MESH);
+
+ Mesh *me = ob->data;
+ return DRW_mesh_batch_cache_get_fancy_edges(me);
+}
+
+Gwn_Batch *DRW_cache_mesh_surface_get(Object *ob)
+{
+ BLI_assert(ob->type == OB_MESH);
+
+ Mesh *me = ob->data;
+ return DRW_mesh_batch_cache_get_triangles_with_normals(me);
+}
+
+Gwn_Batch *DRW_cache_mesh_surface_weights_get(Object *ob)
+{
+ BLI_assert(ob->type == OB_MESH);
+
+ Mesh *me = ob->data;
+ return DRW_mesh_batch_cache_get_triangles_with_normals_and_weights(me, ob->actdef - 1);
+}
+
+Gwn_Batch *DRW_cache_mesh_surface_vert_colors_get(Object *ob)
+{
+ BLI_assert(ob->type == OB_MESH);
+
+ Mesh *me = ob->data;
+ return DRW_mesh_batch_cache_get_triangles_with_normals_and_vert_colors(me);
+}
+
+/* Return list of batches */
+Gwn_Batch **DRW_cache_mesh_surface_shaded_get(
+ Object *ob, struct GPUMaterial **gpumat_array, uint gpumat_array_len)
+{
+ BLI_assert(ob->type == OB_MESH);
+
+ Mesh *me = ob->data;
+ return DRW_mesh_batch_cache_get_surface_shaded(me, gpumat_array, gpumat_array_len);
+}
+
+/* Return list of batches */
+Gwn_Batch **DRW_cache_mesh_surface_texpaint_get(Object *ob)
+{
+ BLI_assert(ob->type == OB_MESH);
+
+ Mesh *me = ob->data;
+ return DRW_mesh_batch_cache_get_surface_texpaint(me);
+}
+
+Gwn_Batch *DRW_cache_mesh_surface_texpaint_single_get(Object *ob)
+{
+ BLI_assert(ob->type == OB_MESH);
+
+ Mesh *me = ob->data;
+ return DRW_mesh_batch_cache_get_surface_texpaint_single(me);
+}
+
+Gwn_Batch *DRW_cache_mesh_surface_verts_get(Object *ob)
+{
+ BLI_assert(ob->type == OB_MESH);
+
+ Mesh *me = ob->data;
+ return DRW_mesh_batch_cache_get_points_with_normals(me);
+}
+
+Gwn_Batch *DRW_cache_mesh_edges_get(Object *ob)
+{
+ BLI_assert(ob->type == OB_MESH);
+
+ Mesh *me = ob->data;
+ return DRW_mesh_batch_cache_get_all_edges(me);
+}
+
+Gwn_Batch *DRW_cache_mesh_verts_get(Object *ob)
+{
+ BLI_assert(ob->type == OB_MESH);
+
+ Mesh *me = ob->data;
+ return DRW_mesh_batch_cache_get_all_verts(me);
+}
+
+Gwn_Batch *DRW_cache_mesh_edges_paint_overlay_get(Object *ob, bool use_wire, bool use_sel)
+{
+ BLI_assert(ob->type == OB_MESH);
+
+ Mesh *me = ob->data;
+ return DRW_mesh_batch_cache_get_weight_overlay_edges(me, use_wire, use_sel);
+}
+
+Gwn_Batch *DRW_cache_mesh_faces_weight_overlay_get(Object *ob)
+{
+ BLI_assert(ob->type == OB_MESH);
+
+ Mesh *me = ob->data;
+ return DRW_mesh_batch_cache_get_weight_overlay_faces(me);
+}
+
+Gwn_Batch *DRW_cache_mesh_verts_weight_overlay_get(Object *ob)
+{
+ BLI_assert(ob->type == OB_MESH);
+
+ Mesh *me = ob->data;
+ return DRW_mesh_batch_cache_get_weight_overlay_verts(me);
+}
+
+void DRW_cache_mesh_sculpt_coords_ensure(Object *ob)
+{
+ BLI_assert(ob->type == OB_MESH);
+
+ Mesh *me = ob->data;
+ DRW_mesh_cache_sculpt_coords_ensure(me);
+}
+
+/** \} */
+
+/* -------------------------------------------------------------------- */
+
+/** \name Curve
+ * \{ */
+
+Gwn_Batch *DRW_cache_curve_edge_wire_get(Object *ob)
+{
+ BLI_assert(ob->type == OB_CURVE);
+
+ struct Curve *cu = ob->data;
+ return DRW_curve_batch_cache_get_wire_edge(cu, ob->curve_cache);
+}
+
+Gwn_Batch *DRW_cache_curve_edge_normal_get(Object *ob, float normal_size)
+{
+ BLI_assert(ob->type == OB_CURVE);
+
+ struct Curve *cu = ob->data;
+ return DRW_curve_batch_cache_get_normal_edge(cu, ob->curve_cache, normal_size);
+}
+
+Gwn_Batch *DRW_cache_curve_edge_overlay_get(Object *ob)
+{
+ BLI_assert(ob->type == OB_CURVE);
+
+ struct Curve *cu = ob->data;
+ return DRW_curve_batch_cache_get_overlay_edges(cu);
+}
+
+Gwn_Batch *DRW_cache_curve_vert_overlay_get(Object *ob)
+{
+ BLI_assert(ob->type == OB_CURVE);
+
+ struct Curve *cu = ob->data;
+ return DRW_curve_batch_cache_get_overlay_verts(cu);
+}
+
+Gwn_Batch *DRW_cache_curve_surface_get(Object *ob)
+{
+ BLI_assert(ob->type == OB_CURVE);
+
+ struct Curve *cu = ob->data;
+ return DRW_curve_batch_cache_get_triangles_with_normals(cu, ob->curve_cache);
+}
+
+/* Return list of batches */
+Gwn_Batch **DRW_cache_curve_surface_shaded_get(
+ Object *ob, struct GPUMaterial **gpumat_array, uint gpumat_array_len)
+{
+ BLI_assert(ob->type == OB_CURVE);
+
+ struct Curve *cu = ob->data;
+ return DRW_curve_batch_cache_get_surface_shaded(cu, ob->curve_cache, gpumat_array, gpumat_array_len);
+}
+
+/** \} */
+
+/* -------------------------------------------------------------------- */
+
+/** \name MetaBall
+ * \{ */
+
+Gwn_Batch *DRW_cache_mball_surface_get(Object *ob)
+{
+ BLI_assert(ob->type == OB_MBALL);
+ return DRW_metaball_batch_cache_get_triangles_with_normals(ob);
+}
+
+/** \} */
+
+/* -------------------------------------------------------------------- */
+
+/** \name Font
+ * \{ */
+
+Gwn_Batch *DRW_cache_text_edge_wire_get(Object *ob)
+{
+ BLI_assert(ob->type == OB_FONT);
+
+ struct Curve *cu = ob->data;
+ return DRW_curve_batch_cache_get_wire_edge(cu, ob->curve_cache);
+}
+
+Gwn_Batch *DRW_cache_text_surface_get(Object *ob)
+{
+ BLI_assert(ob->type == OB_FONT);
+ struct Curve *cu = ob->data;
+ if (cu->editfont && (cu->flag & CU_FAST)) {
+ return NULL;
+ }
+ return DRW_curve_batch_cache_get_triangles_with_normals(cu, ob->curve_cache);
+}
+
+Gwn_Batch **DRW_cache_text_surface_shaded_get(
+ Object *ob, struct GPUMaterial **gpumat_array, uint gpumat_array_len)
+{
+ BLI_assert(ob->type == OB_FONT);
+ struct Curve *cu = ob->data;
+ if (cu->editfont && (cu->flag & CU_FAST)) {
+ return NULL;
+ }
+ return DRW_curve_batch_cache_get_surface_shaded(cu, ob->curve_cache, gpumat_array, gpumat_array_len);
+}
+
+Gwn_Batch *DRW_cache_text_cursor_overlay_get(Object *ob)
+{
+ BLI_assert(ob->type == OB_FONT);
+ struct Curve *cu = ob->data;
+ return DRW_curve_batch_cache_get_overlay_cursor(cu);
+}
+
+Gwn_Batch *DRW_cache_text_select_overlay_get(Object *ob)
+{
+ BLI_assert(ob->type == OB_FONT);
+ struct Curve *cu = ob->data;
+ return DRW_curve_batch_cache_get_overlay_select(cu);
+}
+
+/** \} */
+
+/* -------------------------------------------------------------------- */
+
+/** \name Surface
+ * \{ */
+
+Gwn_Batch *DRW_cache_surf_surface_get(Object *ob)
+{
+ BLI_assert(ob->type == OB_SURF);
+
+ struct Curve *cu = ob->data;
+ return DRW_curve_batch_cache_get_triangles_with_normals(cu, ob->curve_cache);
+}
+
+/* Return list of batches */
+Gwn_Batch **DRW_cache_surf_surface_shaded_get(
+ Object *ob, struct GPUMaterial **gpumat_array, uint gpumat_array_len)
+{
+ BLI_assert(ob->type == OB_SURF);
+
+ struct Curve *cu = ob->data;
+ return DRW_curve_batch_cache_get_surface_shaded(cu, ob->curve_cache, gpumat_array, gpumat_array_len);
+}
+
+/** \} */
+
+/* -------------------------------------------------------------------- */
+
+/** \name Lattice
+ * \{ */
+
+Gwn_Batch *DRW_cache_lattice_verts_get(Object *ob)
+{
+ BLI_assert(ob->type == OB_LATTICE);
+
+ struct Lattice *lt = ob->data;
+ return DRW_lattice_batch_cache_get_all_verts(lt);
+}
+
+Gwn_Batch *DRW_cache_lattice_wire_get(Object *ob, bool use_weight)
+{
+ BLI_assert(ob->type == OB_LATTICE);
+
+ Lattice *lt = ob->data;
+ int actdef = -1;
+
+ if (use_weight && ob->defbase.first && lt->editlatt->latt->dvert) {
+ actdef = ob->actdef - 1;
+ }
+
+ return DRW_lattice_batch_cache_get_all_edges(lt, use_weight, actdef);
+}
+
+Gwn_Batch *DRW_cache_lattice_vert_overlay_get(Object *ob)
+{
+ BLI_assert(ob->type == OB_LATTICE);
+
+ struct Lattice *lt = ob->data;
+ return DRW_lattice_batch_cache_get_overlay_verts(lt);
+}
+
+/** \} */
+
+/* -------------------------------------------------------------------- */
+
+/** \name Particles
+ * \{ */
+
+Gwn_Batch *DRW_cache_particles_get_hair(ParticleSystem *psys, ModifierData *md)
+{
+ return DRW_particles_batch_cache_get_hair(psys, md);
+}
+
+Gwn_Batch *DRW_cache_particles_get_dots(Object *object, ParticleSystem *psys)
+{
+ return DRW_particles_batch_cache_get_dots(object, psys);
+}
+
+Gwn_Batch *DRW_cache_particles_get_prim(int type)
+{
+ switch (type) {
+ case PART_DRAW_CROSS:
+ if (!SHC.drw_particle_cross) {
+ static Gwn_VertFormat format = { 0 };
+ static unsigned pos_id, axis_id;
+
+ if (format.attrib_ct == 0) {
+ pos_id = GWN_vertformat_attr_add(&format, "inst_pos", GWN_COMP_F32, 3, GWN_FETCH_FLOAT);
+ axis_id = GWN_vertformat_attr_add(&format, "axis", GWN_COMP_I32, 1, GWN_FETCH_INT);
+ }
+
+ Gwn_VertBuf *vbo = GWN_vertbuf_create_with_format(&format);
+ GWN_vertbuf_data_alloc(vbo, 6);
+
+ /* X axis */
+ float co[3] = {-1.0f, 0.0f, 0.0f};
+ int axis = -1;
+ GWN_vertbuf_attr_set(vbo, pos_id, 0, co);
+ GWN_vertbuf_attr_set(vbo, axis_id, 0, &axis);
+
+ co[0] = 1.0f;
+ GWN_vertbuf_attr_set(vbo, pos_id, 1, co);
+ GWN_vertbuf_attr_set(vbo, axis_id, 1, &axis);
+
+ /* Y axis */
+ co[0] = 0.0f;
+ co[1] = -1.0f;
+ GWN_vertbuf_attr_set(vbo, pos_id, 2, co);
+ GWN_vertbuf_attr_set(vbo, axis_id, 2, &axis);
+
+ co[1] = 1.0f;
+ GWN_vertbuf_attr_set(vbo, pos_id, 3, co);
+ GWN_vertbuf_attr_set(vbo, axis_id, 3, &axis);
+
+ /* Z axis */
+ co[1] = 0.0f;
+ co[2] = -1.0f;
+ GWN_vertbuf_attr_set(vbo, pos_id, 4, co);
+ GWN_vertbuf_attr_set(vbo, axis_id, 4, &axis);
+
+ co[2] = 1.0f;
+ GWN_vertbuf_attr_set(vbo, pos_id, 5, co);
+ GWN_vertbuf_attr_set(vbo, axis_id, 5, &axis);
+
+ SHC.drw_particle_cross = GWN_batch_create_ex(GWN_PRIM_LINES, vbo, NULL, GWN_BATCH_OWNS_VBO);
+ }
+
+ return SHC.drw_particle_cross;
+ case PART_DRAW_AXIS:
+ if (!SHC.drw_particle_axis) {
+ static Gwn_VertFormat format = { 0 };
+ static unsigned pos_id, axis_id;
+
+ if (format.attrib_ct == 0) {
+ pos_id = GWN_vertformat_attr_add(&format, "inst_pos", GWN_COMP_F32, 3, GWN_FETCH_FLOAT);
+ axis_id = GWN_vertformat_attr_add(&format, "axis", GWN_COMP_I32, 1, GWN_FETCH_INT);
+ }
+
+ Gwn_VertBuf *vbo = GWN_vertbuf_create_with_format(&format);
+ GWN_vertbuf_data_alloc(vbo, 6);
+
+ /* X axis */
+ float co[3] = {0.0f, 0.0f, 0.0f};
+ int axis = 0;
+ GWN_vertbuf_attr_set(vbo, pos_id, 0, co);
+ GWN_vertbuf_attr_set(vbo, axis_id, 0, &axis);
+
+ co[0] = 1.0f;
+ GWN_vertbuf_attr_set(vbo, pos_id, 1, co);
+ GWN_vertbuf_attr_set(vbo, axis_id, 1, &axis);
+
+ /* Y axis */
+ co[0] = 0.0f;
+ axis = 1;
+ GWN_vertbuf_attr_set(vbo, pos_id, 2, co);
+ GWN_vertbuf_attr_set(vbo, axis_id, 2, &axis);
+
+ co[1] = 1.0f;
+ GWN_vertbuf_attr_set(vbo, pos_id, 3, co);
+ GWN_vertbuf_attr_set(vbo, axis_id, 3, &axis);
+
+ /* Z axis */
+ co[1] = 0.0f;
+ axis = 2;
+ GWN_vertbuf_attr_set(vbo, pos_id, 4, co);
+ GWN_vertbuf_attr_set(vbo, axis_id, 4, &axis);
+
+ co[2] = 1.0f;
+ GWN_vertbuf_attr_set(vbo, pos_id, 5, co);
+ GWN_vertbuf_attr_set(vbo, axis_id, 5, &axis);
+
+ SHC.drw_particle_axis = GWN_batch_create_ex(GWN_PRIM_LINES, vbo, NULL, GWN_BATCH_OWNS_VBO);
+ }
+
+ return SHC.drw_particle_axis;
+ case PART_DRAW_CIRC:
+#define CIRCLE_RESOL 32
+ if (!SHC.drw_particle_circle) {
+ float v[3] = {0.0f, 0.0f, 0.0f};
+ int axis = -1;
+
+ static Gwn_VertFormat format = { 0 };
+ static unsigned pos_id, axis_id;
+
+ if (format.attrib_ct == 0) {
+ pos_id = GWN_vertformat_attr_add(&format, "inst_pos", GWN_COMP_F32, 3, GWN_FETCH_FLOAT);
+ axis_id = GWN_vertformat_attr_add(&format, "axis", GWN_COMP_I32, 1, GWN_FETCH_INT);
+ }
+
+ Gwn_VertBuf *vbo = GWN_vertbuf_create_with_format(&format);
+ GWN_vertbuf_data_alloc(vbo, CIRCLE_RESOL);
+
+ for (int a = 0; a < CIRCLE_RESOL; a++) {
+ v[0] = sinf((2.0f * M_PI * a) / ((float)CIRCLE_RESOL));
+ v[1] = cosf((2.0f * M_PI * a) / ((float)CIRCLE_RESOL));
+ v[2] = 0.0f;
+ GWN_vertbuf_attr_set(vbo, pos_id, a, v);
+ GWN_vertbuf_attr_set(vbo, axis_id, a, &axis);
+ }
+
+ SHC.drw_particle_circle = GWN_batch_create_ex(GWN_PRIM_LINE_LOOP, vbo, NULL, GWN_BATCH_OWNS_VBO);
+ }
+
+ return SHC.drw_particle_circle;
+#undef CIRCLE_RESOL
+ default:
+ BLI_assert(false);
+ break;
+ }
+
+ return NULL;
+}
+
+/* 3D cursor */
+Gwn_Batch *DRW_cache_cursor_get(void)
+{
+ if (!SHC.drw_cursor) {
+ const float f5 = 0.25f;
+ const float f10 = 0.5f;
+ const float f20 = 1.0f;
+
+ const int segments = 16;
+ const int vert_ct = segments + 8;
+ const int index_ct = vert_ct + 5;
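+ /* Extra 5 indices: one to close the circle loop plus 4 primitive restarts. */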
+
+ unsigned char red[3] = {255, 0, 0};
+ unsigned char white[3] = {255, 255, 255};
+ unsigned char crosshair_color[3];
+ UI_GetThemeColor3ubv(TH_VIEW_OVERLAY, crosshair_color);
+
+ static Gwn_VertFormat format = { 0 };
+ static struct { uint pos, color; } attr_id;
+ if (format.attrib_ct == 0) {
+ attr_id.pos = GWN_vertformat_attr_add(&format, "pos", GWN_COMP_F32, 2, GWN_FETCH_FLOAT);
+ attr_id.color = GWN_vertformat_attr_add(&format, "color", GWN_COMP_U8, 3, GWN_FETCH_INT_TO_FLOAT_UNIT);
+ }
+
+ Gwn_IndexBufBuilder elb;
+ GWN_indexbuf_init_ex(&elb, GWN_PRIM_LINE_STRIP, index_ct, vert_ct, true);
+
+ Gwn_VertBuf *vbo = GWN_vertbuf_create_with_format(&format);
+ GWN_vertbuf_data_alloc(vbo, vert_ct);
+
+ int v = 0;
+ for (int i = 0; i < segments; ++i) {
+ float angle = (float)(2 * M_PI) * ((float)i / (float)segments);
+ float x = f10 * cosf(angle);
+ float y = f10 * sinf(angle);
+
+ if (i % 2 == 0)
+ GWN_vertbuf_attr_set(vbo, attr_id.color, v, red);
+ else
+ GWN_vertbuf_attr_set(vbo, attr_id.color, v, white);
+
+ GWN_vertbuf_attr_set(vbo, attr_id.pos, v, (const float[2]){x, y});
+ GWN_indexbuf_add_generic_vert(&elb, v++);
+ }
+ GWN_indexbuf_add_generic_vert(&elb, 0);
+ GWN_indexbuf_add_primitive_restart(&elb);
+
+ GWN_vertbuf_attr_set(vbo, attr_id.pos, v, (const float[2]){-f20, 0});
+ GWN_vertbuf_attr_set(vbo, attr_id.color, v, crosshair_color);
+ GWN_indexbuf_add_generic_vert(&elb, v++);
+ GWN_vertbuf_attr_set(vbo, attr_id.pos, v, (const float[2]){-f5, 0});
+ GWN_vertbuf_attr_set(vbo, attr_id.color, v, crosshair_color);
+ GWN_indexbuf_add_generic_vert(&elb, v++);
+
+ GWN_indexbuf_add_primitive_restart(&elb);
+
+ GWN_vertbuf_attr_set(vbo, attr_id.pos, v, (const float[2]){+f5, 0});
+ GWN_vertbuf_attr_set(vbo, attr_id.color, v, crosshair_color);
+ GWN_indexbuf_add_generic_vert(&elb, v++);
+ GWN_vertbuf_attr_set(vbo, attr_id.pos, v, (const float[2]){+f20, 0});
+ GWN_vertbuf_attr_set(vbo, attr_id.color, v, crosshair_color);
+ GWN_indexbuf_add_generic_vert(&elb, v++);
+
+ GWN_indexbuf_add_primitive_restart(&elb);
+
+ GWN_vertbuf_attr_set(vbo, attr_id.pos, v, (const float[2]){0, -f20});
+ GWN_vertbuf_attr_set(vbo, attr_id.color, v, crosshair_color);
+ GWN_indexbuf_add_generic_vert(&elb, v++);
+ GWN_vertbuf_attr_set(vbo, attr_id.pos, v, (const float[2]){0, -f5});
+ GWN_vertbuf_attr_set(vbo, attr_id.color, v, crosshair_color);
+ GWN_indexbuf_add_generic_vert(&elb, v++);
+
+ GWN_indexbuf_add_primitive_restart(&elb);
+
+ GWN_vertbuf_attr_set(vbo, attr_id.pos, v, (const float[2]){0, +f5});
+ GWN_vertbuf_attr_set(vbo, attr_id.color, v, crosshair_color);
+ GWN_indexbuf_add_generic_vert(&elb, v++);
+ GWN_vertbuf_attr_set(vbo, attr_id.pos, v, (const float[2]){0, +f20});
+ GWN_vertbuf_attr_set(vbo, attr_id.color, v, crosshair_color);
+ GWN_indexbuf_add_generic_vert(&elb, v++);
+
+ Gwn_IndexBuf *ibo = GWN_indexbuf_build(&elb);
+
+ SHC.drw_cursor = GWN_batch_create_ex(GWN_PRIM_LINE_STRIP, vbo, ibo, GWN_BATCH_OWNS_VBO | GWN_BATCH_OWNS_INDEX);
+ }
+ return SHC.drw_cursor;
+} \ No newline at end of file
diff --git a/source/blender/draw/intern/draw_cache.h b/source/blender/draw/intern/draw_cache.h
new file mode 100644
index 00000000000..2ef57884a44
--- /dev/null
+++ b/source/blender/draw/intern/draw_cache.h
@@ -0,0 +1,174 @@
+/*
+ * Copyright 2016, Blender Foundation.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * Contributor(s): Blender Institute
+ *
+ */
+
+/** \file draw_cache.h
+ * \ingroup draw
+ */
+
+#ifndef __DRAW_CACHE_H__
+#define __DRAW_CACHE_H__
+
+struct Gwn_Batch;
+struct GPUMaterial;
+struct Object;
+struct ModifierData;
+
+void DRW_shape_cache_free(void);
+
+/* 3D cursor */
+struct Gwn_Batch *DRW_cache_cursor_get(void);
+
+/* Common Shapes */
+struct Gwn_Batch *DRW_cache_fullscreen_quad_get(void);
+struct Gwn_Batch *DRW_cache_quad_get(void);
+struct Gwn_Batch *DRW_cache_sphere_get(void);
+struct Gwn_Batch *DRW_cache_single_vert_get(void);
+struct Gwn_Batch *DRW_cache_single_line_get(void);
+struct Gwn_Batch *DRW_cache_single_line_endpoints_get(void);
+struct Gwn_Batch *DRW_cache_screenspace_circle_get(void);
+
+/* Common Object */
+struct Gwn_Batch *DRW_cache_object_wire_outline_get(struct Object *ob);
+struct Gwn_Batch *DRW_cache_object_surface_get(struct Object *ob);
+struct Gwn_Batch **DRW_cache_object_surface_material_get(
+ struct Object *ob, struct GPUMaterial **gpumat_array, uint gpumat_array_len);
+
+/* Empties */
+struct Gwn_Batch *DRW_cache_plain_axes_get(void);
+struct Gwn_Batch *DRW_cache_single_arrow_get(void);
+struct Gwn_Batch *DRW_cache_cube_get(void);
+struct Gwn_Batch *DRW_cache_circle_get(void);
+struct Gwn_Batch *DRW_cache_square_get(void);
+struct Gwn_Batch *DRW_cache_empty_sphere_get(void);
+struct Gwn_Batch *DRW_cache_empty_cone_get(void);
+struct Gwn_Batch *DRW_cache_arrows_get(void);
+struct Gwn_Batch *DRW_cache_axis_names_get(void);
+struct Gwn_Batch *DRW_cache_image_plane_get(void);
+struct Gwn_Batch *DRW_cache_image_plane_wire_get(void);
+
+/* Force Field */
+struct Gwn_Batch *DRW_cache_field_wind_get(void);
+struct Gwn_Batch *DRW_cache_field_force_get(void);
+struct Gwn_Batch *DRW_cache_field_vortex_get(void);
+struct Gwn_Batch *DRW_cache_field_tube_limit_get(void);
+struct Gwn_Batch *DRW_cache_field_cone_limit_get(void);
+
+/* Lamps */
+struct Gwn_Batch *DRW_cache_lamp_get(void);
+struct Gwn_Batch *DRW_cache_lamp_shadows_get(void);
+struct Gwn_Batch *DRW_cache_lamp_sunrays_get(void);
+struct Gwn_Batch *DRW_cache_lamp_area_get(void);
+struct Gwn_Batch *DRW_cache_lamp_hemi_get(void);
+struct Gwn_Batch *DRW_cache_lamp_spot_get(void);
+struct Gwn_Batch *DRW_cache_lamp_spot_square_get(void);
+
+/* Camera */
+struct Gwn_Batch *DRW_cache_camera_get(void);
+struct Gwn_Batch *DRW_cache_camera_frame_get(void);
+struct Gwn_Batch *DRW_cache_camera_tria_get(void);
+
+/* Speaker */
+struct Gwn_Batch *DRW_cache_speaker_get(void);
+
+/* Probe */
+struct Gwn_Batch *DRW_cache_lightprobe_cube_get(void);
+struct Gwn_Batch *DRW_cache_lightprobe_grid_get(void);
+struct Gwn_Batch *DRW_cache_lightprobe_planar_get(void);
+
+/* Bones */
+struct Gwn_Batch *DRW_cache_bone_octahedral_get(void);
+struct Gwn_Batch *DRW_cache_bone_octahedral_wire_outline_get(void);
+struct Gwn_Batch *DRW_cache_bone_box_get(void);
+struct Gwn_Batch *DRW_cache_bone_box_wire_outline_get(void);
+struct Gwn_Batch *DRW_cache_bone_wire_wire_outline_get(void);
+struct Gwn_Batch *DRW_cache_bone_envelope_solid_get(void);
+struct Gwn_Batch *DRW_cache_bone_envelope_distance_outline_get(void);
+struct Gwn_Batch *DRW_cache_bone_envelope_wire_outline_get(void);
+struct Gwn_Batch *DRW_cache_bone_envelope_head_wire_outline_get(void);
+struct Gwn_Batch *DRW_cache_bone_point_get(void);
+struct Gwn_Batch *DRW_cache_bone_point_wire_outline_get(void);
+struct Gwn_Batch *DRW_cache_bone_arrows_get(void);
+
+/* Meshes */
+struct Gwn_Batch *DRW_cache_mesh_surface_overlay_get(struct Object *ob);
+void DRW_cache_mesh_wire_overlay_get(
+ struct Object *ob,
+ struct Gwn_Batch **r_tris, struct Gwn_Batch **r_ledges, struct Gwn_Batch **r_lverts);
+void DRW_cache_mesh_normals_overlay_get(
+ struct Object *ob,
+ struct Gwn_Batch **r_tris, struct Gwn_Batch **r_ledges, struct Gwn_Batch **r_lverts);
+struct Gwn_Batch *DRW_cache_face_centers_get(struct Object *ob);
+struct Gwn_Batch *DRW_cache_mesh_wire_outline_get(struct Object *ob);
+struct Gwn_Batch *DRW_cache_mesh_surface_get(struct Object *ob);
+struct Gwn_Batch *DRW_cache_mesh_surface_weights_get(struct Object *ob);
+struct Gwn_Batch *DRW_cache_mesh_surface_vert_colors_get(struct Object *ob);
+struct Gwn_Batch *DRW_cache_mesh_surface_verts_get(struct Object *ob);
+struct Gwn_Batch *DRW_cache_mesh_edges_get(struct Object *ob);
+struct Gwn_Batch *DRW_cache_mesh_verts_get(struct Object *ob);
+struct Gwn_Batch *DRW_cache_mesh_edges_paint_overlay_get(struct Object *ob, bool use_wire, bool use_sel);
+struct Gwn_Batch *DRW_cache_mesh_faces_weight_overlay_get(struct Object *ob);
+struct Gwn_Batch *DRW_cache_mesh_verts_weight_overlay_get(struct Object *ob);
+struct Gwn_Batch **DRW_cache_mesh_surface_shaded_get(
+ struct Object *ob, struct GPUMaterial **gpumat_array, uint gpumat_array_len);
+struct Gwn_Batch **DRW_cache_mesh_surface_texpaint_get(struct Object *ob);
+struct Gwn_Batch *DRW_cache_mesh_surface_texpaint_single_get(struct Object *ob);
+
+void DRW_cache_mesh_sculpt_coords_ensure(struct Object *ob);
+
+/* Curve */
+struct Gwn_Batch *DRW_cache_curve_surface_get(struct Object *ob);
+struct Gwn_Batch **DRW_cache_curve_surface_shaded_get(
+ struct Object *ob, struct GPUMaterial **gpumat_array, uint gpumat_array_len);
+struct Gwn_Batch *DRW_cache_curve_surface_verts_get(struct Object *ob);
+struct Gwn_Batch *DRW_cache_curve_edge_wire_get(struct Object *ob);
+/* edit-mode */
+struct Gwn_Batch *DRW_cache_curve_edge_normal_get(struct Object *ob, float normal_size);
+struct Gwn_Batch *DRW_cache_curve_edge_overlay_get(struct Object *ob);
+struct Gwn_Batch *DRW_cache_curve_vert_overlay_get(struct Object *ob);
+
+/* Font */
+struct Gwn_Batch *DRW_cache_text_edge_wire_get(struct Object *ob);
+struct Gwn_Batch *DRW_cache_text_surface_get(struct Object *ob);
+struct Gwn_Batch **DRW_cache_text_surface_shaded_get(
+ struct Object *ob, struct GPUMaterial **gpumat_array, uint gpumat_array_len);
+/* edit-mode */
+struct Gwn_Batch *DRW_cache_text_cursor_overlay_get(struct Object *ob);
+struct Gwn_Batch *DRW_cache_text_select_overlay_get(struct Object *ob);
+
+/* Surface */
+struct Gwn_Batch *DRW_cache_surf_surface_get(struct Object *ob);
+struct Gwn_Batch **DRW_cache_surf_surface_shaded_get(
+ struct Object *ob, struct GPUMaterial **gpumat_array, uint gpumat_array_len);
+
+/* Lattice */
+struct Gwn_Batch *DRW_cache_lattice_verts_get(struct Object *ob);
+struct Gwn_Batch *DRW_cache_lattice_wire_get(struct Object *ob, bool use_weight);
+struct Gwn_Batch *DRW_cache_lattice_vert_overlay_get(struct Object *ob);
+
+/* Particles */
+struct Gwn_Batch *DRW_cache_particles_get_hair(struct ParticleSystem *psys, struct ModifierData *md);
+struct Gwn_Batch *DRW_cache_particles_get_dots(struct Object *object, struct ParticleSystem *psys);
+struct Gwn_Batch *DRW_cache_particles_get_prim(int type);
+
+/* Metaball */
+struct Gwn_Batch *DRW_cache_mball_surface_get(struct Object *ob);
+
+#endif /* __DRAW_CACHE_H__ */
diff --git a/source/blender/draw/intern/draw_cache_impl.h b/source/blender/draw/intern/draw_cache_impl.h
new file mode 100644
index 00000000000..83cc87307b5
--- /dev/null
+++ b/source/blender/draw/intern/draw_cache_impl.h
@@ -0,0 +1,127 @@
+/*
+ * Copyright 2016, Blender Foundation.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * Contributor(s): Blender Institute
+ *
+ */
+
+/** \file draw_cache_impl.h
+ * \ingroup draw
+ */
+
+#ifndef __DRAW_CACHE_IMPL_H__
+#define __DRAW_CACHE_IMPL_H__
+
+struct CurveCache;
+struct GPUMaterial;
+struct Gwn_Batch;
+struct Gwn_IndexBuf;
+struct Gwn_VertBuf;
+struct ListBase;
+struct ModifierData;
+struct ParticleSystem;
+
+struct Curve;
+struct Lattice;
+struct Mesh;
+struct MetaBall;
+
+/* Expose via BKE callbacks */
+void DRW_mball_batch_cache_dirty(struct MetaBall *mb, int mode);
+void DRW_mball_batch_cache_free(struct MetaBall *mb);
+
+void DRW_curve_batch_cache_dirty(struct Curve *cu, int mode);
+void DRW_curve_batch_cache_free(struct Curve *cu);
+
+void DRW_mesh_batch_cache_dirty(struct Mesh *me, int mode);
+void DRW_mesh_batch_cache_free(struct Mesh *me);
+
+void DRW_lattice_batch_cache_dirty(struct Lattice *lt, int mode);
+void DRW_lattice_batch_cache_free(struct Lattice *lt);
+
+void DRW_particle_batch_cache_dirty(struct ParticleSystem *psys, int mode);
+void DRW_particle_batch_cache_free(struct ParticleSystem *psys);
+
+/* Curve */
+struct Gwn_Batch *DRW_curve_batch_cache_get_wire_edge(struct Curve *cu, struct CurveCache *ob_curve_cache);
+struct Gwn_Batch *DRW_curve_batch_cache_get_normal_edge(
+ struct Curve *cu, struct CurveCache *ob_curve_cache, float normal_size);
+struct Gwn_Batch *DRW_curve_batch_cache_get_overlay_edges(struct Curve *cu);
+struct Gwn_Batch *DRW_curve_batch_cache_get_overlay_verts(struct Curve *cu);
+
+struct Gwn_Batch *DRW_curve_batch_cache_get_triangles_with_normals(struct Curve *cu, struct CurveCache *ob_curve_cache);
+struct Gwn_Batch **DRW_curve_batch_cache_get_surface_shaded(
+ struct Curve *cu, struct CurveCache *ob_curve_cache,
+ struct GPUMaterial **gpumat_array, uint gpumat_array_len);
+
+/* Metaball */
+struct Gwn_Batch *DRW_metaball_batch_cache_get_triangles_with_normals(struct Object *ob);
+
+/* Curve (Font) */
+struct Gwn_Batch *DRW_curve_batch_cache_get_overlay_cursor(struct Curve *cu);
+struct Gwn_Batch *DRW_curve_batch_cache_get_overlay_select(struct Curve *cu);
+
+/* DispList */
+struct Gwn_VertBuf *DRW_displist_vertbuf_calc_pos_with_normals(struct ListBase *lb);
+struct Gwn_IndexBuf *DRW_displist_indexbuf_calc_triangles_in_order(struct ListBase *lb);
+struct Gwn_IndexBuf **DRW_displist_indexbuf_calc_triangles_in_order_split_by_material(
+ struct ListBase *lb, uint gpumat_array_len);
+struct Gwn_Batch **DRW_displist_batch_calc_tri_pos_normals_and_uv_split_by_material(
+ struct ListBase *lb, uint gpumat_array_len);
+
+/* Lattice */
+struct Gwn_Batch *DRW_lattice_batch_cache_get_all_edges(struct Lattice *lt, bool use_weight, const int actdef);
+struct Gwn_Batch *DRW_lattice_batch_cache_get_all_verts(struct Lattice *lt);
+struct Gwn_Batch *DRW_lattice_batch_cache_get_overlay_verts(struct Lattice *lt);
+
+/* Mesh */
+
+struct Gwn_Batch **DRW_mesh_batch_cache_get_surface_shaded(
+ struct Mesh *me, struct GPUMaterial **gpumat_array, uint gpumat_array_len);
+struct Gwn_Batch **DRW_mesh_batch_cache_get_surface_texpaint(struct Mesh *me);
+struct Gwn_Batch *DRW_mesh_batch_cache_get_surface_texpaint_single(struct Mesh *me);
+struct Gwn_Batch *DRW_mesh_batch_cache_get_weight_overlay_edges(struct Mesh *me, bool use_wire, bool use_sel);
+struct Gwn_Batch *DRW_mesh_batch_cache_get_weight_overlay_faces(struct Mesh *me);
+struct Gwn_Batch *DRW_mesh_batch_cache_get_weight_overlay_verts(struct Mesh *me);
+struct Gwn_Batch *DRW_mesh_batch_cache_get_all_edges(struct Mesh *me);
+struct Gwn_Batch *DRW_mesh_batch_cache_get_all_triangles(struct Mesh *me);
+struct Gwn_Batch *DRW_mesh_batch_cache_get_triangles_with_normals(struct Mesh *me);
+struct Gwn_Batch *DRW_mesh_batch_cache_get_triangles_with_normals_and_weights(struct Mesh *me, int defgroup);
+struct Gwn_Batch *DRW_mesh_batch_cache_get_triangles_with_normals_and_vert_colors(struct Mesh *me);
+struct Gwn_Batch *DRW_mesh_batch_cache_get_triangles_with_select_id(struct Mesh *me, bool use_hide, uint select_id_offset);
+struct Gwn_Batch *DRW_mesh_batch_cache_get_triangles_with_select_mask(struct Mesh *me, bool use_hide);
+struct Gwn_Batch *DRW_mesh_batch_cache_get_points_with_normals(struct Mesh *me);
+struct Gwn_Batch *DRW_mesh_batch_cache_get_all_verts(struct Mesh *me);
+struct Gwn_Batch *DRW_mesh_batch_cache_get_fancy_edges(struct Mesh *me);
+struct Gwn_Batch *DRW_mesh_batch_cache_get_overlay_triangles(struct Mesh *me);
+struct Gwn_Batch *DRW_mesh_batch_cache_get_overlay_triangles_nor(struct Mesh *me);
+struct Gwn_Batch *DRW_mesh_batch_cache_get_overlay_loose_edges(struct Mesh *me);
+struct Gwn_Batch *DRW_mesh_batch_cache_get_overlay_loose_edges_nor(struct Mesh *me);
+struct Gwn_Batch *DRW_mesh_batch_cache_get_overlay_loose_verts(struct Mesh *me);
+struct Gwn_Batch *DRW_mesh_batch_cache_get_overlay_facedots(struct Mesh *me);
+/* edit-mesh selection (use generic function for faces) */
+struct Gwn_Batch *DRW_mesh_batch_cache_get_facedots_with_select_id(struct Mesh *me, uint select_id_offset);
+struct Gwn_Batch *DRW_mesh_batch_cache_get_edges_with_select_id(struct Mesh *me, uint select_id_offset);
+struct Gwn_Batch *DRW_mesh_batch_cache_get_verts_with_select_id(struct Mesh *me, uint select_id_offset);
+
+void DRW_mesh_cache_sculpt_coords_ensure(struct Mesh *me);
+
+/* Particles */
+struct Gwn_Batch *DRW_particles_batch_cache_get_hair(struct ParticleSystem *psys, struct ModifierData *md);
+struct Gwn_Batch *DRW_particles_batch_cache_get_dots(struct Object *object, struct ParticleSystem *psys);
+
+#endif /* __DRAW_CACHE_IMPL_H__ */
diff --git a/source/blender/draw/intern/draw_cache_impl_curve.c b/source/blender/draw/intern/draw_cache_impl_curve.c
new file mode 100644
index 00000000000..3939ea062e9
--- /dev/null
+++ b/source/blender/draw/intern/draw_cache_impl_curve.c
@@ -0,0 +1,1118 @@
+/*
+ * ***** BEGIN GPL LICENSE BLOCK *****
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * The Original Code is Copyright (C) 2017 by Blender Foundation.
+ * All rights reserved.
+ *
+ * ***** END GPL LICENSE BLOCK *****
+ */
+
+/** \file draw_cache_impl_curve.c
+ * \ingroup draw
+ *
+ * \brief Curve API for render engines
+ */
+
+#include "MEM_guardedalloc.h"
+
+#include "BLI_utildefines.h"
+#include "BLI_math_vector.h"
+
+#include "DNA_curve_types.h"
+
+#include "BKE_curve.h"
+
+#include "BKE_font.h"
+
+#include "GPU_batch.h"
+
+#include "UI_resources.h"
+
+#include "draw_cache_impl.h" /* own include */
+
+#define SELECT 1
+#define ACTIVE_NURB (1 << 7) /* last char bit */
+#define HANDLE_SEL_OFFSET (TH_HANDLE_SEL_FREE - TH_HANDLE_FREE)
+
+/* Used as values of `color_id` in `edit_curve_overlay_handle_geom.glsl` */
+enum {
+ COLOR_NURB_ULINE_ID = TH_HANDLE_SEL_AUTOCLAMP - TH_HANDLE_FREE + 1,
+ COLOR_NURB_SEL_ULINE_ID,
+ COLOR_ACTIVE_SPLINE,
+
+ TOT_HANDLE_COL,
+};
+
+/**
+ * TODO
+ * - Ensure `CurveCache`, `SEQUENCER_DAG_WORKAROUND`.
+ * - Check number of verts/edges to see if cache is valid.
+ * - Check if 'overlay.edges' can use a single attribute per edge, not 2 (for selection drawing).
+ */
+
+static void curve_batch_cache_clear(Curve *cu);
+
+/* ---------------------------------------------------------------------- */
+/* Curve Interface, direct access to basic data. */
+
+static void curve_render_overlay_verts_edges_len_get(
+ ListBase *lb, bool hide_handles,
+ int *r_vert_len, int *r_edge_len)
+{
+ BLI_assert(r_vert_len || r_edge_len);
+ int vert_len = 0;
+ int edge_len = 0;
+ for (Nurb *nu = lb->first; nu; nu = nu->next) {
+ if (nu->bezt) {
+ vert_len += hide_handles ? nu->pntsu : (nu->pntsu * 3);
+ /* 2x handles per point */
+ edge_len += 2 * nu->pntsu;
+ }
+ else if (nu->bp) {
+ vert_len += nu->pntsu;
+ /* segments between points */
+ edge_len += nu->pntsu - 1;
+ }
+ }
+ if (r_vert_len) {
+ *r_vert_len = vert_len;
+ }
+ if (r_edge_len) {
+ *r_edge_len = edge_len;
+ }
+}
+
+static void curve_render_wire_verts_edges_len_get(
+ const CurveCache *ob_curve_cache,
+ int *r_vert_len, int *r_edge_len)
+{
+ BLI_assert(r_vert_len || r_edge_len);
+ int vert_len = 0;
+ int edge_len = 0;
+ for (const BevList *bl = ob_curve_cache->bev.first; bl; bl = bl->next) {
+ if (bl->nr > 0) {
+ const bool is_cyclic = bl->poly != -1;
+
+ /* verts */
+ vert_len += bl->nr;
+
+ /* edges */
+ edge_len += bl->nr;
+ if (!is_cyclic) {
+ edge_len -= 1;
+ }
+ }
+ }
+ if (r_vert_len) {
+ *r_vert_len = vert_len;
+ }
+ if (r_edge_len) {
+ *r_edge_len = edge_len;
+ }
+}
+
+static int curve_render_normal_len_get(const ListBase *lb, const CurveCache *ob_curve_cache)
+{
+ int normal_len = 0;
+ const BevList *bl;
+ const Nurb *nu;
+ for (bl = ob_curve_cache->bev.first, nu = lb->first; nu && bl; bl = bl->next, nu = nu->next) {
+ int nr = bl->nr;
+ int skip = nu->resolu / 16;
+#if 0
+ while (nr-- > 0) { /* accounts for empty bevel lists */
+ normal_len += 1;
+ nr -= skip;
+ }
+#else
+ /* Same as loop above */
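+		/* i.e. one normal per (skip + 1) bevel points, rounded up (ceiling division). */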
+ normal_len += (nr / (skip + 1)) + ((nr % (skip + 1)) != 0);
+#endif
+ }
+ return normal_len;
+}
+
+/* ---------------------------------------------------------------------- */
+/* Curve Interface, indirect, partially cached access to complex data. */
+
+typedef struct CurveRenderData {
+ int types;
+
+ struct {
+ int vert_len;
+ int edge_len;
+ } overlay;
+
+ struct {
+ int vert_len;
+ int edge_len;
+ } wire;
+
+	/* edit mode normals */
+ struct {
+ /* 'edge_len == len * 2'
+ * 'vert_len == len * 3' */
+ int len;
+ } normal;
+
+ struct {
+ EditFont *edit_font;
+ } text;
+
+ bool hide_handles;
+ bool hide_normals;
+
+ /* borrow from 'Object' */
+ CurveCache *ob_curve_cache;
+
+ /* borrow from 'Curve' */
+ ListBase *nurbs;
+
+ /* edit, index in nurb list */
+ int actnu;
+ /* edit, index in active nurb (BPoint or BezTriple) */
+ int actvert;
+} CurveRenderData;
+
+enum {
+ /* Wire center-line */
+ CU_DATATYPE_WIRE = 1 << 0,
+ /* Edit-mode verts and optionally handles */
+ CU_DATATYPE_OVERLAY = 1 << 1,
+ /* Edit-mode normals */
+ CU_DATATYPE_NORMAL = 1 << 2,
+ /* Geometry */
+ CU_DATATYPE_SURFACE = 1 << 3,
+ /* Text */
+ CU_DATATYPE_TEXT_SELECT = 1 << 4,
+};
+
+/*
+ * ob_curve_cache may be NULL; it is only required for CU_DATATYPE_WIRE, CU_DATATYPE_NORMAL and CU_DATATYPE_SURFACE.
+ */
+static CurveRenderData *curve_render_data_create(Curve *cu, CurveCache *ob_curve_cache, const int types)
+{
+ CurveRenderData *rdata = MEM_callocN(sizeof(*rdata), __func__);
+ rdata->types = types;
+ ListBase *nurbs;
+
+ rdata->hide_handles = (cu->drawflag & CU_HIDE_HANDLES) != 0;
+ rdata->hide_normals = (cu->drawflag & CU_HIDE_NORMALS) != 0;
+
+ rdata->actnu = cu->actnu;
+ rdata->actvert = cu->actvert;
+
+ rdata->ob_curve_cache = ob_curve_cache;
+
+ if (types & CU_DATATYPE_WIRE) {
+ curve_render_wire_verts_edges_len_get(
+ rdata->ob_curve_cache,
+ &rdata->wire.vert_len, &rdata->wire.edge_len);
+ }
+
+ if (cu->editnurb) {
+ EditNurb *editnurb = cu->editnurb;
+ nurbs = &editnurb->nurbs;
+
+ if (types & CU_DATATYPE_OVERLAY) {
+ curve_render_overlay_verts_edges_len_get(
+ nurbs, rdata->hide_handles,
+ &rdata->overlay.vert_len,
+ rdata->hide_handles ? NULL : &rdata->overlay.edge_len);
+
+ rdata->actnu = cu->actnu;
+ rdata->actvert = cu->actvert;
+ }
+ if (types & CU_DATATYPE_NORMAL) {
+ rdata->normal.len = curve_render_normal_len_get(nurbs, rdata->ob_curve_cache);
+ }
+ }
+ else {
+ nurbs = &cu->nurb;
+ }
+
+ rdata->nurbs = nurbs;
+
+ rdata->text.edit_font = cu->editfont;
+
+ return rdata;
+}
+
+static void curve_render_data_free(CurveRenderData *rdata)
+{
+#if 0
+ if (rdata->loose_verts) {
+ MEM_freeN(rdata->loose_verts);
+ }
+#endif
+ MEM_freeN(rdata);
+}
+
+static int curve_render_data_overlay_verts_len_get(const CurveRenderData *rdata)
+{
+ BLI_assert(rdata->types & CU_DATATYPE_OVERLAY);
+ return rdata->overlay.vert_len;
+}
+
+static int curve_render_data_overlay_edges_len_get(const CurveRenderData *rdata)
+{
+ BLI_assert(rdata->types & CU_DATATYPE_OVERLAY);
+ return rdata->overlay.edge_len;
+}
+
+static int curve_render_data_wire_verts_len_get(const CurveRenderData *rdata)
+{
+ BLI_assert(rdata->types & CU_DATATYPE_WIRE);
+ return rdata->wire.vert_len;
+}
+
+static int curve_render_data_wire_edges_len_get(const CurveRenderData *rdata)
+{
+ BLI_assert(rdata->types & CU_DATATYPE_WIRE);
+ return rdata->wire.edge_len;
+}
+
+static int curve_render_data_normal_len_get(const CurveRenderData *rdata)
+{
+ BLI_assert(rdata->types & CU_DATATYPE_NORMAL);
+ return rdata->normal.len;
+}
+
+enum {
+ VFLAG_VERTEX_SELECTED = 1 << 0,
+ VFLAG_VERTEX_ACTIVE = 1 << 1,
+};
+
+/* ---------------------------------------------------------------------- */
+/* Curve Gwn_Batch Cache */
+
+typedef struct CurveBatchCache {
+ /* center-line */
+ struct {
+ Gwn_VertBuf *verts;
+ Gwn_VertBuf *edges;
+ Gwn_Batch *batch;
+ Gwn_IndexBuf *elem;
+ } wire;
+
+ /* normals */
+ struct {
+ Gwn_VertBuf *verts;
+ Gwn_VertBuf *edges;
+ Gwn_Batch *batch;
+ Gwn_IndexBuf *elem;
+ } normal;
+
+ /* control handles and vertices */
+ struct {
+ Gwn_Batch *edges;
+ Gwn_Batch *verts;
+ } overlay;
+
+ struct {
+ Gwn_VertBuf *verts;
+ Gwn_IndexBuf *triangles_in_order;
+ Gwn_Batch **shaded_triangles;
+ Gwn_Batch *batch;
+ int mat_len;
+ } surface;
+
+ /* 3d text */
+ struct {
+ Gwn_Batch *select;
+ Gwn_Batch *cursor;
+ } text;
+
+ /* settings to determine if cache is invalid */
+ bool is_dirty;
+
+ bool hide_handles;
+ bool hide_normals;
+
+ float normal_size;
+
+ bool is_editmode;
+} CurveBatchCache;
+
+/* Gwn_Batch cache management. */
+
+static bool curve_batch_cache_valid(Curve *cu)
+{
+ CurveBatchCache *cache = cu->batch_cache;
+
+ if (cache == NULL) {
+ return false;
+ }
+
+ if (cache->is_dirty) {
+ return false;
+ }
+
+ if (cache->is_editmode != ((cu->editnurb != NULL) || (cu->editfont != NULL))) {
+ return false;
+ }
+
+ if (cache->is_editmode) {
+ if (cu->editnurb) {
+ if ((cache->hide_handles != ((cu->drawflag & CU_HIDE_HANDLES) != 0))) {
+ return false;
+ }
+ else if ((cache->hide_normals != ((cu->drawflag & CU_HIDE_NORMALS) != 0))) {
+ return false;
+ }
+ }
+ else if (cu->editfont) {
+ /* TODO */
+ }
+ }
+
+ return true;
+}
+
+static void curve_batch_cache_init(Curve *cu)
+{
+ CurveBatchCache *cache = cu->batch_cache;
+
+ if (!cache) {
+ cache = cu->batch_cache = MEM_callocN(sizeof(*cache), __func__);
+ }
+ else {
+ memset(cache, 0, sizeof(*cache));
+ }
+
+ cache->hide_handles = (cu->drawflag & CU_HIDE_HANDLES) != 0;
+ cache->hide_normals = (cu->drawflag & CU_HIDE_NORMALS) != 0;
+
+#if 0
+ ListBase *nurbs;
+ if (cu->editnurb) {
+ EditNurb *editnurb = cu->editnurb;
+ nurbs = &editnurb->nurbs;
+ }
+ else {
+ nurbs = &cu->nurb;
+ }
+#endif
+
+ cache->is_editmode = (cu->editnurb != NULL) || (cu->editfont != NULL);
+
+ cache->is_dirty = false;
+}
+
+static CurveBatchCache *curve_batch_cache_get(Curve *cu)
+{
+ if (!curve_batch_cache_valid(cu)) {
+ curve_batch_cache_clear(cu);
+ curve_batch_cache_init(cu);
+ }
+ return cu->batch_cache;
+}
+
+void DRW_curve_batch_cache_dirty(Curve *cu, int mode)
+{
+ CurveBatchCache *cache = cu->batch_cache;
+ if (cache == NULL) {
+ return;
+ }
+ switch (mode) {
+ case BKE_CURVE_BATCH_DIRTY_ALL:
+ cache->is_dirty = true;
+ break;
+ case BKE_CURVE_BATCH_DIRTY_SELECT:
+ /* editnurb */
+ GWN_BATCH_DISCARD_SAFE(cache->overlay.verts);
+ GWN_BATCH_DISCARD_SAFE(cache->overlay.edges);
+
+ /* editfont */
+ GWN_BATCH_DISCARD_SAFE(cache->text.select);
+ GWN_BATCH_DISCARD_SAFE(cache->text.cursor);
+ break;
+ default:
+ BLI_assert(0);
+ }
+}
+
+static void curve_batch_cache_clear(Curve *cu)
+{
+ CurveBatchCache *cache = cu->batch_cache;
+ if (!cache) {
+ return;
+ }
+
+ GWN_BATCH_DISCARD_SAFE(cache->overlay.verts);
+ GWN_BATCH_DISCARD_SAFE(cache->overlay.edges);
+
+ GWN_VERTBUF_DISCARD_SAFE(cache->surface.verts);
+ GWN_INDEXBUF_DISCARD_SAFE(cache->surface.triangles_in_order);
+ if (cache->surface.shaded_triangles) {
+ for (int i = 0; i < cache->surface.mat_len; ++i) {
+ GWN_BATCH_DISCARD_SAFE(cache->surface.shaded_triangles[i]);
+ }
+ }
+ MEM_SAFE_FREE(cache->surface.shaded_triangles);
+ GWN_BATCH_DISCARD_SAFE(cache->surface.batch);
+
+	/* the batch does not own the vbo & elem, so discard them explicitly */
+ GWN_BATCH_DISCARD_SAFE(cache->wire.batch);
+ GWN_VERTBUF_DISCARD_SAFE(cache->wire.verts);
+ GWN_VERTBUF_DISCARD_SAFE(cache->wire.edges);
+ GWN_INDEXBUF_DISCARD_SAFE(cache->wire.elem);
+
+	/* the batch does not own the vbo & elem, so discard them explicitly */
+ GWN_BATCH_DISCARD_SAFE(cache->normal.batch);
+ GWN_VERTBUF_DISCARD_SAFE(cache->normal.verts);
+ GWN_VERTBUF_DISCARD_SAFE(cache->normal.edges);
+ GWN_INDEXBUF_DISCARD_SAFE(cache->normal.elem);
+
+ /* 3d text */
+ GWN_BATCH_DISCARD_SAFE(cache->text.cursor);
+ GWN_BATCH_DISCARD_SAFE(cache->text.select);
+}
+
+void DRW_curve_batch_cache_free(Curve *cu)
+{
+ curve_batch_cache_clear(cu);
+ MEM_SAFE_FREE(cu->batch_cache);
+}
+
+/* -------------------------------------------------------------------- */
+
+/** \name Private Curve Cache API
+ * \{ */
+
+/* Gwn_Batch cache usage. */
+static Gwn_VertBuf *curve_batch_cache_get_wire_verts(CurveRenderData *rdata, CurveBatchCache *cache)
+{
+ BLI_assert(rdata->types & CU_DATATYPE_WIRE);
+ BLI_assert(rdata->ob_curve_cache != NULL);
+
+ if (cache->wire.verts == NULL) {
+ static Gwn_VertFormat format = { 0 };
+ static struct { uint pos; } attr_id;
+ if (format.attrib_ct == 0) {
+ /* initialize vertex format */
+ attr_id.pos = GWN_vertformat_attr_add(&format, "pos", GWN_COMP_F32, 3, GWN_FETCH_FLOAT);
+ }
+
+ const int vert_len = curve_render_data_wire_verts_len_get(rdata);
+
+ Gwn_VertBuf *vbo = cache->wire.verts = GWN_vertbuf_create_with_format(&format);
+ GWN_vertbuf_data_alloc(vbo, vert_len);
+ int vbo_len_used = 0;
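+		/* Fill positions straight from the evaluated bevel points (the curve center-line). */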
+ for (const BevList *bl = rdata->ob_curve_cache->bev.first; bl; bl = bl->next) {
+ if (bl->nr > 0) {
+ const int i_end = vbo_len_used + bl->nr;
+ for (const BevPoint *bevp = bl->bevpoints; vbo_len_used < i_end; vbo_len_used++, bevp++) {
+ GWN_vertbuf_attr_set(vbo, attr_id.pos, vbo_len_used, bevp->vec);
+ }
+ }
+ }
+ BLI_assert(vbo_len_used == vert_len);
+ }
+
+ return cache->wire.verts;
+}
+
+static Gwn_IndexBuf *curve_batch_cache_get_wire_edges(CurveRenderData *rdata, CurveBatchCache *cache)
+{
+ BLI_assert(rdata->types & CU_DATATYPE_WIRE);
+ BLI_assert(rdata->ob_curve_cache != NULL);
+
+ if (cache->wire.edges == NULL) {
+ const int vert_len = curve_render_data_wire_verts_len_get(rdata);
+ const int edge_len = curve_render_data_wire_edges_len_get(rdata);
+ int edge_len_used = 0;
+
+ Gwn_IndexBufBuilder elb;
+ GWN_indexbuf_init(&elb, GWN_PRIM_LINES, edge_len, vert_len);
+
+ int i = 0;
+ for (const BevList *bl = rdata->ob_curve_cache->bev.first; bl; bl = bl->next) {
+ if (bl->nr > 0) {
+ const bool is_cyclic = bl->poly != -1;
+ const int i_end = i + (bl->nr);
+ int i_prev;
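+				/* Cyclic splines start with the edge that closes the loop;
+				 * otherwise the first point is skipped since it has no previous point. */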
+ if (is_cyclic) {
+ i_prev = i + (bl->nr - 1);
+ }
+ else {
+ i_prev = i;
+ i += 1;
+ }
+ for (; i < i_end; i_prev = i++) {
+ GWN_indexbuf_add_line_verts(&elb, i_prev, i);
+ edge_len_used += 1;
+ }
+ }
+ }
+
+ if (rdata->hide_handles) {
+ BLI_assert(edge_len_used <= edge_len);
+ }
+ else {
+ BLI_assert(edge_len_used == edge_len);
+ }
+
+ cache->wire.elem = GWN_indexbuf_build(&elb);
+ }
+
+ return cache->wire.elem;
+}
+
+static Gwn_VertBuf *curve_batch_cache_get_normal_verts(CurveRenderData *rdata, CurveBatchCache *cache)
+{
+ BLI_assert(rdata->types & CU_DATATYPE_NORMAL);
+ BLI_assert(rdata->ob_curve_cache != NULL);
+
+ if (cache->normal.verts == NULL) {
+ static Gwn_VertFormat format = { 0 };
+ static struct { uint pos; } attr_id;
+ if (format.attrib_ct == 0) {
+ /* initialize vertex format */
+ attr_id.pos = GWN_vertformat_attr_add(&format, "pos", GWN_COMP_F32, 3, GWN_FETCH_FLOAT);
+ }
+
+ const int normal_len = curve_render_data_normal_len_get(rdata);
+ const int vert_len = normal_len * 3;
+
+ Gwn_VertBuf *vbo = cache->normal.verts = GWN_vertbuf_create_with_format(&format);
+ GWN_vertbuf_data_alloc(vbo, vert_len);
+ int vbo_len_used = 0;
+
+ const BevList *bl;
+ const Nurb *nu;
+
+ for (bl = rdata->ob_curve_cache->bev.first, nu = rdata->nurbs->first;
+ nu && bl;
+ bl = bl->next, nu = nu->next)
+ {
+ const BevPoint *bevp = bl->bevpoints;
+ int nr = bl->nr;
+ int skip = nu->resolu / 16;
+
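+			/* Each displayed normal uses 3 verts: the two offset tips and the bevel point between them. */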
+ while (nr-- > 0) { /* accounts for empty bevel lists */
+ const float fac = bevp->radius * cache->normal_size;
+ float vec_a[3]; /* Offset perpendicular to the curve */
+ float vec_b[3]; /* Delta along the curve */
+
+ vec_a[0] = fac;
+ vec_a[1] = 0.0f;
+ vec_a[2] = 0.0f;
+
+ mul_qt_v3(bevp->quat, vec_a);
+ madd_v3_v3fl(vec_a, bevp->dir, -fac);
+
+ reflect_v3_v3v3(vec_b, vec_a, bevp->dir);
+ negate_v3(vec_b);
+
+ add_v3_v3(vec_a, bevp->vec);
+ add_v3_v3(vec_b, bevp->vec);
+
+ GWN_vertbuf_attr_set(vbo, attr_id.pos, vbo_len_used++, vec_a);
+ GWN_vertbuf_attr_set(vbo, attr_id.pos, vbo_len_used++, bevp->vec);
+ GWN_vertbuf_attr_set(vbo, attr_id.pos, vbo_len_used++, vec_b);
+
+ bevp += skip + 1;
+ nr -= skip;
+ }
+ }
+ BLI_assert(vbo_len_used == vert_len);
+ }
+
+ return cache->normal.verts;
+}
+
+static Gwn_IndexBuf *curve_batch_cache_get_normal_edges(CurveRenderData *rdata, CurveBatchCache *cache)
+{
+ BLI_assert(rdata->types & CU_DATATYPE_NORMAL);
+ BLI_assert(rdata->ob_curve_cache != NULL);
+
+ if (cache->normal.edges == NULL) {
+ const int normal_len = curve_render_data_normal_len_get(rdata);
+ const int vert_len = normal_len * 3;
+ const int edge_len = normal_len * 2;
+
+ Gwn_IndexBufBuilder elb;
+ GWN_indexbuf_init(&elb, GWN_PRIM_LINES, edge_len, vert_len);
+
+ int vbo_len_used = 0;
+ for (int i = 0; i < normal_len; i++) {
+ GWN_indexbuf_add_line_verts(&elb, vbo_len_used + 0, vbo_len_used + 1);
+ GWN_indexbuf_add_line_verts(&elb, vbo_len_used + 1, vbo_len_used + 2);
+ vbo_len_used += 3;
+ }
+
+ BLI_assert(vbo_len_used == vert_len);
+
+ cache->normal.elem = GWN_indexbuf_build(&elb);
+ }
+
+ return cache->normal.elem;
+}
+
+static void curve_batch_cache_create_overlay_batches(Curve *cu)
+{
+ /* Since CU_DATATYPE_OVERLAY is slow to generate, generate them all at once */
+ int options = CU_DATATYPE_OVERLAY;
+
+ CurveBatchCache *cache = curve_batch_cache_get(cu);
+ CurveRenderData *rdata = curve_render_data_create(cu, NULL, options);
+
+ if (cache->overlay.verts == NULL) {
+ static Gwn_VertFormat format = { 0 };
+ static struct { uint pos, data; } attr_id;
+ if (format.attrib_ct == 0) {
+ /* initialize vertex format */
+ attr_id.pos = GWN_vertformat_attr_add(&format, "pos", GWN_COMP_F32, 3, GWN_FETCH_FLOAT);
+ attr_id.data = GWN_vertformat_attr_add(&format, "data", GWN_COMP_U8, 1, GWN_FETCH_INT);
+ }
+
+ Gwn_VertBuf *vbo = GWN_vertbuf_create_with_format(&format);
+ const int vbo_len_capacity = curve_render_data_overlay_verts_len_get(rdata);
+ int vbo_len_used = 0;
+ GWN_vertbuf_data_alloc(vbo, vbo_len_capacity);
+ int i = 0;
+ for (Nurb *nu = rdata->nurbs->first; nu; nu = nu->next) {
+ if (nu->bezt) {
+ int a = 0;
+ for (const BezTriple *bezt = nu->bezt; a < nu->pntsu; a++, bezt++) {
+ if (bezt->hide == false) {
+ const bool is_active = (i == rdata->actvert);
+ char vflag;
+
+ if (rdata->hide_handles) {
+ vflag = (bezt->f2 & SELECT) ?
+ (is_active ? VFLAG_VERTEX_ACTIVE : VFLAG_VERTEX_SELECTED) : 0;
+ GWN_vertbuf_attr_set(vbo, attr_id.pos, vbo_len_used, bezt->vec[1]);
+ GWN_vertbuf_attr_set(vbo, attr_id.data, vbo_len_used, &vflag);
+ vbo_len_used += 1;
+ }
+ else {
+ for (int j = 0; j < 3; j++) {
+ vflag = ((&bezt->f1)[j] & SELECT) ?
+ (is_active ? VFLAG_VERTEX_ACTIVE : VFLAG_VERTEX_SELECTED) : 0;
+ GWN_vertbuf_attr_set(vbo, attr_id.pos, vbo_len_used, bezt->vec[j]);
+ GWN_vertbuf_attr_set(vbo, attr_id.data, vbo_len_used, &vflag);
+ vbo_len_used += 1;
+ }
+ }
+ }
+ i += 1;
+ }
+ }
+ else if (nu->bp) {
+ int a = 0;
+ for (const BPoint *bp = nu->bp; a < nu->pntsu; a++, bp++) {
+ if (bp->hide == false) {
+ const bool is_active = (i == rdata->actvert);
+ char vflag;
+ vflag = (bp->f1 & SELECT) ? (is_active ? VFLAG_VERTEX_ACTIVE : VFLAG_VERTEX_SELECTED) : 0;
+ GWN_vertbuf_attr_set(vbo, attr_id.pos, vbo_len_used, bp->vec);
+ GWN_vertbuf_attr_set(vbo, attr_id.data, vbo_len_used, &vflag);
+ vbo_len_used += 1;
+ }
+ i += 1;
+ }
+ }
+ i += nu->pntsu;
+ }
+ if (vbo_len_capacity != vbo_len_used) {
+ GWN_vertbuf_data_resize(vbo, vbo_len_used);
+ }
+
+ cache->overlay.verts = GWN_batch_create_ex(GWN_PRIM_POINTS, vbo, NULL, GWN_BATCH_OWNS_VBO);
+ }
+
+
+ if ((cache->overlay.edges == NULL) && (rdata->hide_handles == false)) {
+		/* Note: we could instead reuse the vertices created above via an index buffer */
+
+ static Gwn_VertFormat format = { 0 };
+ static struct { uint pos, data; } attr_id;
+ if (format.attrib_ct == 0) {
+ /* initialize vertex format */
+ attr_id.pos = GWN_vertformat_attr_add(&format, "pos", GWN_COMP_F32, 3, GWN_FETCH_FLOAT);
+ attr_id.data = GWN_vertformat_attr_add(&format, "data", GWN_COMP_U8, 1, GWN_FETCH_INT);
+ }
+
+ Gwn_VertBuf *vbo = GWN_vertbuf_create_with_format(&format);
+ const int edge_len = curve_render_data_overlay_edges_len_get(rdata);
+ const int vbo_len_capacity = edge_len * 2;
+ int vbo_len_used = 0;
+ GWN_vertbuf_data_alloc(vbo, vbo_len_capacity);
+ int i = 0;
+ for (Nurb *nu = rdata->nurbs->first; nu; nu = nu->next, i++) {
+ const bool is_active_nurb = (i == cu->actnu);
+
+ if (nu->bezt) {
+ int a = 0;
+ for (const BezTriple *bezt = nu->bezt; a < nu->pntsu; a++, bezt++) {
+ if (bezt->hide == false) {
+ char col_id;
+
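+					/* col_id starts from the handle type (which doubles as the theme color index),
+					 * gets HANDLE_SEL_OFFSET when the handle is selected and the ACTIVE_NURB bit
+					 * when this is the active spline. */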
+ for (int j = 0; j < 2; j += 1) {
+ /* same vertex twice, only check different selection */
+ GWN_vertbuf_attr_set(vbo, attr_id.pos, vbo_len_used, bezt->vec[1]);
+ vbo_len_used += 1;
+
+ col_id = (&bezt->h1)[j];
+ if ((&bezt->f1)[j * 2] & SELECT) {
+ col_id += HANDLE_SEL_OFFSET;
+ }
+ if (is_active_nurb) {
+ col_id |= ACTIVE_NURB;
+ }
+
+ GWN_vertbuf_attr_set(vbo, attr_id.pos, vbo_len_used, bezt->vec[j * 2]);
+ GWN_vertbuf_attr_set(vbo, attr_id.data, vbo_len_used, &col_id);
+ vbo_len_used += 1;
+ }
+ }
+ }
+ }
+ else if (nu->bp) {
+ int a = 1;
+ for (const BPoint *bp_prev = nu->bp, *bp_curr = &nu->bp[1]; a < nu->pntsu; a++, bp_prev = bp_curr++) {
+ if ((bp_prev->hide == false) && (bp_curr->hide == false)) {
+ char col_id = ((bp_prev->f1 & SELECT) && (bp_curr->f1 & SELECT)) ? COLOR_NURB_SEL_ULINE_ID : COLOR_NURB_ULINE_ID;
+
+ if (is_active_nurb) {
+ col_id |= ACTIVE_NURB;
+ }
+
+ GWN_vertbuf_attr_set(vbo, attr_id.pos, vbo_len_used, bp_prev->vec);
+ vbo_len_used += 1;
+
+ GWN_vertbuf_attr_set(vbo, attr_id.pos, vbo_len_used, bp_curr->vec);
+ GWN_vertbuf_attr_set(vbo, attr_id.data, vbo_len_used, &col_id);
+ vbo_len_used += 1;
+
+ }
+ }
+ }
+ }
+ if (vbo_len_capacity != vbo_len_used) {
+ GWN_vertbuf_data_resize(vbo, vbo_len_used);
+ }
+
+ cache->overlay.edges = GWN_batch_create_ex(GWN_PRIM_LINES, vbo, NULL, GWN_BATCH_OWNS_VBO);
+ }
+
+ curve_render_data_free(rdata);
+}
+
+static Gwn_Batch *curve_batch_cache_get_pos_and_normals(CurveRenderData *rdata, CurveBatchCache *cache)
+{
+ BLI_assert(rdata->types & CU_DATATYPE_SURFACE);
+ if (cache->surface.batch == NULL) {
+ ListBase *lb = &rdata->ob_curve_cache->disp;
+
+ if (cache->surface.verts == NULL) {
+ cache->surface.verts = DRW_displist_vertbuf_calc_pos_with_normals(lb);
+ }
+ if (cache->surface.triangles_in_order == NULL) {
+ cache->surface.triangles_in_order = DRW_displist_indexbuf_calc_triangles_in_order(lb);
+ }
+ cache->surface.batch = GWN_batch_create(
+ GWN_PRIM_TRIS, cache->surface.verts, cache->surface.triangles_in_order);
+ }
+
+ return cache->surface.batch;
+}
+
+/** \} */
+
+
+/* -------------------------------------------------------------------- */
+
+/** \name Private Object/Font Cache API
+ * \{ */
+
+
+static Gwn_Batch *curve_batch_cache_get_overlay_select(CurveRenderData *rdata, CurveBatchCache *cache)
+{
+ BLI_assert(rdata->types & CU_DATATYPE_TEXT_SELECT);
+ if (cache->text.select == NULL) {
+ EditFont *ef = rdata->text.edit_font;
+ static Gwn_VertFormat format = { 0 };
+ static struct { uint pos; } attr_id;
+ if (format.attrib_ct == 0) {
+ attr_id.pos = GWN_vertformat_attr_add(&format, "pos", GWN_COMP_F32, 3, GWN_FETCH_FLOAT);
+ }
+
+ Gwn_VertBuf *vbo = GWN_vertbuf_create_with_format(&format);
+ const int vbo_len_capacity = ef->selboxes_len * 6;
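+		/* Two triangles (6 verts) per selection box. */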
+ int vbo_len_used = 0;
+ GWN_vertbuf_data_alloc(vbo, vbo_len_capacity);
+
+ float box[4][3];
+
+ /* fill in xy below */
+		box[0][2] = box[1][2] = box[2][2] = box[3][2] = 0.001f;
+
+ for (int i = 0; i < ef->selboxes_len; i++) {
+ EditFontSelBox *sb = &ef->selboxes[i];
+
+ float selboxw;
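+			/* Stretch the box to the start of the next selection box when it is on the same
+			 * text line, otherwise use the box's own width. */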
+ if (i + 1 != ef->selboxes_len) {
+ if (ef->selboxes[i + 1].y == sb->y)
+ selboxw = ef->selboxes[i + 1].x - sb->x;
+ else
+ selboxw = sb->w;
+ }
+ else {
+ selboxw = sb->w;
+ }
+
+ if (sb->rot == 0.0f) {
+ copy_v2_fl2(box[0], sb->x, sb->y);
+ copy_v2_fl2(box[1], sb->x + selboxw, sb->y);
+ copy_v2_fl2(box[2], sb->x + selboxw, sb->y + sb->h);
+ copy_v2_fl2(box[3], sb->x, sb->y + sb->h);
+ }
+ else {
+ float mat[2][2];
+
+ angle_to_mat2(mat, sb->rot);
+
+ copy_v2_fl2(box[0], sb->x, sb->y);
+
+ copy_v2_fl2(box[1], selboxw, 0.0f);
+ mul_m2v2(mat, box[1]);
+ add_v2_v2(box[1], &sb->x);
+
+ copy_v2_fl2(box[2], selboxw, sb->h);
+ mul_m2v2(mat, box[2]);
+ add_v2_v2(box[2], &sb->x);
+
+ copy_v2_fl2(box[3], 0.0f, sb->h);
+ mul_m2v2(mat, box[3]);
+ add_v2_v2(box[3], &sb->x);
+ }
+
+ GWN_vertbuf_attr_set(vbo, attr_id.pos, vbo_len_used++, box[0]);
+ GWN_vertbuf_attr_set(vbo, attr_id.pos, vbo_len_used++, box[1]);
+ GWN_vertbuf_attr_set(vbo, attr_id.pos, vbo_len_used++, box[2]);
+
+ GWN_vertbuf_attr_set(vbo, attr_id.pos, vbo_len_used++, box[0]);
+ GWN_vertbuf_attr_set(vbo, attr_id.pos, vbo_len_used++, box[2]);
+ GWN_vertbuf_attr_set(vbo, attr_id.pos, vbo_len_used++, box[3]);
+ }
+ BLI_assert(vbo_len_used == vbo_len_capacity);
+ cache->text.select = GWN_batch_create_ex(GWN_PRIM_TRIS, vbo, NULL, GWN_BATCH_OWNS_VBO);
+ }
+ return cache->text.select;
+}
+
+static Gwn_Batch *curve_batch_cache_get_overlay_cursor(CurveRenderData *rdata, CurveBatchCache *cache)
+{
+ BLI_assert(rdata->types & CU_DATATYPE_TEXT_SELECT);
+ if (cache->text.cursor == NULL) {
+ static Gwn_VertFormat format = { 0 };
+ static struct { uint pos; } attr_id;
+ if (format.attrib_ct == 0) {
+ attr_id.pos = GWN_vertformat_attr_add(&format, "pos", GWN_COMP_F32, 2, GWN_FETCH_FLOAT);
+ }
+
+ Gwn_VertBuf *vbo = GWN_vertbuf_create_with_format(&format);
+ const int vbo_len_capacity = 4;
+ GWN_vertbuf_data_alloc(vbo, vbo_len_capacity);
+ for (int i = 0; i < 4; i++) {
+ GWN_vertbuf_attr_set(vbo, attr_id.pos, i, rdata->text.edit_font->textcurs[i]);
+ }
+ cache->text.cursor = GWN_batch_create_ex(GWN_PRIM_TRI_FAN, vbo, NULL, GWN_BATCH_OWNS_VBO);
+ }
+ return cache->text.cursor;
+}
+
+/** \} */
+
+/* -------------------------------------------------------------------- */
+
+/** \name Public Object/Curve API
+ * \{ */
+
+Gwn_Batch *DRW_curve_batch_cache_get_wire_edge(Curve *cu, CurveCache *ob_curve_cache)
+{
+ CurveBatchCache *cache = curve_batch_cache_get(cu);
+
+ if (cache->wire.batch == NULL) {
+ /* create batch from Curve */
+ CurveRenderData *rdata = curve_render_data_create(cu, ob_curve_cache, CU_DATATYPE_WIRE);
+
+ cache->wire.batch = GWN_batch_create(
+ GWN_PRIM_LINES,
+ curve_batch_cache_get_wire_verts(rdata, cache),
+ curve_batch_cache_get_wire_edges(rdata, cache));
+
+ curve_render_data_free(rdata);
+ }
+ return cache->wire.batch;
+}
+
+Gwn_Batch *DRW_curve_batch_cache_get_normal_edge(Curve *cu, CurveCache *ob_curve_cache, float normal_size)
+{
+ CurveBatchCache *cache = curve_batch_cache_get(cu);
+
+ if (cache->normal.batch != NULL) {
+		/* Discard the cached batch when the requested normal display size changed. */
+		if (cache->normal_size != normal_size) {
+ GWN_BATCH_DISCARD_SAFE(cache->normal.batch);
+ GWN_VERTBUF_DISCARD_SAFE(cache->normal.edges);
+ }
+ }
+ cache->normal_size = normal_size;
+
+ if (cache->normal.batch == NULL) {
+ /* create batch from Curve */
+ CurveRenderData *rdata = curve_render_data_create(cu, ob_curve_cache, CU_DATATYPE_NORMAL);
+
+ cache->normal.batch = GWN_batch_create(
+ GWN_PRIM_LINES,
+ curve_batch_cache_get_normal_verts(rdata, cache),
+ curve_batch_cache_get_normal_edges(rdata, cache));
+
+ curve_render_data_free(rdata);
+ cache->normal_size = normal_size;
+ }
+ return cache->normal.batch;
+}
+
+Gwn_Batch *DRW_curve_batch_cache_get_overlay_edges(Curve *cu)
+{
+ CurveBatchCache *cache = curve_batch_cache_get(cu);
+
+ if (cache->overlay.edges == NULL) {
+ curve_batch_cache_create_overlay_batches(cu);
+ }
+
+ return cache->overlay.edges;
+}
+
+Gwn_Batch *DRW_curve_batch_cache_get_overlay_verts(Curve *cu)
+{
+ CurveBatchCache *cache = curve_batch_cache_get(cu);
+
+ if (cache->overlay.verts == NULL) {
+ curve_batch_cache_create_overlay_batches(cu);
+ }
+
+ return cache->overlay.verts;
+}
+
+Gwn_Batch *DRW_curve_batch_cache_get_triangles_with_normals(
+ struct Curve *cu, struct CurveCache *ob_curve_cache)
+{
+ CurveBatchCache *cache = curve_batch_cache_get(cu);
+
+ if (cache->surface.batch == NULL) {
+ CurveRenderData *rdata = curve_render_data_create(cu, ob_curve_cache, CU_DATATYPE_SURFACE);
+
+ curve_batch_cache_get_pos_and_normals(rdata, cache);
+
+ curve_render_data_free(rdata);
+ }
+
+ return cache->surface.batch;
+}
+
+Gwn_Batch **DRW_curve_batch_cache_get_surface_shaded(
+ struct Curve *cu, struct CurveCache *ob_curve_cache,
+ struct GPUMaterial **UNUSED(gpumat_array), uint gpumat_array_len)
+{
+ CurveBatchCache *cache = curve_batch_cache_get(cu);
+
+ if (cache->surface.mat_len != gpumat_array_len) {
+ /* TODO: deduplicate code */
+ if (cache->surface.shaded_triangles) {
+ for (int i = 0; i < cache->surface.mat_len; ++i) {
+ GWN_BATCH_DISCARD_SAFE(cache->surface.shaded_triangles[i]);
+ }
+ }
+ MEM_SAFE_FREE(cache->surface.shaded_triangles);
+ }
+
+ if (cache->surface.shaded_triangles == NULL) {
+ CurveRenderData *rdata = curve_render_data_create(cu, ob_curve_cache, CU_DATATYPE_SURFACE);
+ ListBase *lb = &rdata->ob_curve_cache->disp;
+
+ cache->surface.mat_len = gpumat_array_len;
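+		/* When CU_UV_ORCO is set, build per-material batches that include UV coords;
+		 * otherwise share a single pos/normal vertex buffer and only split the index
+		 * buffers per material. */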
+ if (cu->flag & CU_UV_ORCO) {
+ cache->surface.shaded_triangles = DRW_displist_batch_calc_tri_pos_normals_and_uv_split_by_material(
+ lb, gpumat_array_len);
+ }
+ else {
+ cache->surface.shaded_triangles = MEM_mallocN(
+ sizeof(*cache->surface.shaded_triangles) * gpumat_array_len, __func__);
+ Gwn_IndexBuf **el = DRW_displist_indexbuf_calc_triangles_in_order_split_by_material(
+ lb, gpumat_array_len);
+
+ if (cache->surface.verts == NULL) {
+ cache->surface.verts = DRW_displist_vertbuf_calc_pos_with_normals(lb);
+ }
+
+ for (int i = 0; i < gpumat_array_len; ++i) {
+ cache->surface.shaded_triangles[i] = GWN_batch_create_ex(
+ GWN_PRIM_TRIS, cache->surface.verts, el[i], GWN_BATCH_OWNS_INDEX);
+ }
+
+ MEM_freeN(el); /* Save `el` in cache? */
+ }
+
+ curve_render_data_free(rdata);
+ }
+
+ return cache->surface.shaded_triangles;
+}
+
+
+/* -------------------------------------------------------------------- */
+
+/** \name Public Object/Font API
+ * \{ */
+
+Gwn_Batch *DRW_curve_batch_cache_get_overlay_select(Curve *cu)
+{
+ CurveBatchCache *cache = curve_batch_cache_get(cu);
+
+ if (cache->text.select == NULL) {
+ CurveRenderData *rdata = curve_render_data_create(cu, NULL, CU_DATATYPE_TEXT_SELECT);
+
+ curve_batch_cache_get_overlay_select(rdata, cache);
+
+ curve_render_data_free(rdata);
+ }
+
+ return cache->text.select;
+}
+
+Gwn_Batch *DRW_curve_batch_cache_get_overlay_cursor(Curve *cu)
+{
+ CurveBatchCache *cache = curve_batch_cache_get(cu);
+
+ if (cache->text.cursor == NULL) {
+ CurveRenderData *rdata = curve_render_data_create(cu, NULL, CU_DATATYPE_TEXT_SELECT);
+
+ curve_batch_cache_get_overlay_cursor(rdata, cache);
+
+ curve_render_data_free(rdata);
+ }
+
+ return cache->text.cursor;
+}
+
+/** \} */
diff --git a/source/blender/draw/intern/draw_cache_impl_displist.c b/source/blender/draw/intern/draw_cache_impl_displist.c
new file mode 100644
index 00000000000..627fb38d9d6
--- /dev/null
+++ b/source/blender/draw/intern/draw_cache_impl_displist.c
@@ -0,0 +1,398 @@
+/*
+ * ***** BEGIN GPL LICENSE BLOCK *****
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * The Original Code is Copyright (C) 2017 by Blender Foundation.
+ * All rights reserved.
+ *
+ * ***** END GPL LICENSE BLOCK *****
+ */
+
+/** \file draw_cache_impl_displist.c
+ * \ingroup draw
+ *
+ * \brief DispList API for render engines
+ *
+ * \note DispList may be removed soon! This is a utility for object types that render via DispList.
+ */
+
+#include "MEM_guardedalloc.h"
+
+#include "BLI_alloca.h"
+#include "BLI_utildefines.h"
+#include "BLI_math_vector.h"
+
+#include "DNA_curve_types.h"
+
+#include "BKE_displist.h"
+
+#include "GPU_batch.h"
+
+#include "draw_cache_impl.h" /* own include */
+
+static int dl_vert_len(const DispList *dl)
+{
+ switch (dl->type) {
+ case DL_INDEX3:
+ case DL_INDEX4:
+ return dl->nr;
+ case DL_SURF:
+ return dl->parts * dl->nr;
+ }
+ return 0;
+}
+
+static int dl_tri_len(const DispList *dl)
+{
+ switch (dl->type) {
+ case DL_INDEX3:
+ return dl->parts;
+ case DL_INDEX4:
+ return dl->parts * 2;
+ case DL_SURF:
+ return dl->totindex * 2;
+ }
+ return 0;
+}
+
+/* see: displist_get_allverts */
+static int curve_render_surface_vert_len_get(const ListBase *lb)
+{
+ int vert_len = 0;
+ for (const DispList *dl = lb->first; dl; dl = dl->next) {
+ vert_len += dl_vert_len(dl);
+ }
+ return vert_len;
+}
+
+static int curve_render_surface_tri_len_get(const ListBase *lb)
+{
+ int tri_len = 0;
+ for (const DispList *dl = lb->first; dl; dl = dl->next) {
+ tri_len += dl_tri_len(dl);
+ }
+ return tri_len;
+}
+
+static void displist_indexbufbuilder_set(Gwn_IndexBufBuilder *elb, const DispList *dl, const int ofs)
+{
+ if (ELEM(dl->type, DL_INDEX3, DL_INDEX4, DL_SURF)) {
+ const int *idx = dl->index;
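+		/* DL_INDEX3 stores triangles; DL_SURF and DL_INDEX4 store quads that are split into
+		 * two triangles (degenerate DL_INDEX4 quads, where idx[2] == idx[3], emit only one). */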
+ if (dl->type == DL_INDEX3) {
+ const int i_end = dl->parts;
+ for (int i = 0; i < i_end; i++, idx += 3) {
+ GWN_indexbuf_add_tri_verts(elb, idx[0] + ofs, idx[2] + ofs, idx[1] + ofs);
+ }
+ }
+ else if (dl->type == DL_SURF) {
+ const int i_end = dl->totindex;
+ for (int i = 0; i < i_end; i++, idx += 4) {
+ GWN_indexbuf_add_tri_verts(elb, idx[0] + ofs, idx[2] + ofs, idx[1] + ofs);
+ GWN_indexbuf_add_tri_verts(elb, idx[0] + ofs, idx[3] + ofs, idx[2] + ofs);
+ }
+ }
+ else {
+ BLI_assert(dl->type == DL_INDEX4);
+ const int i_end = dl->parts;
+ for (int i = 0; i < i_end; i++, idx += 4) {
+ GWN_indexbuf_add_tri_verts(elb, idx[0] + ofs, idx[1] + ofs, idx[2] + ofs);
+
+ if (idx[2] != idx[3]) {
+ GWN_indexbuf_add_tri_verts(elb, idx[0] + ofs, idx[2] + ofs, idx[3] + ofs);
+ }
+ }
+ }
+ }
+}
+
+Gwn_VertBuf *DRW_displist_vertbuf_calc_pos_with_normals(ListBase *lb)
+{
+ static Gwn_VertFormat format = { 0 };
+ static struct { uint pos, nor; } attr_id;
+ if (format.attrib_ct == 0) {
+ /* initialize vertex format */
+ attr_id.pos = GWN_vertformat_attr_add(&format, "pos", GWN_COMP_F32, 3, GWN_FETCH_FLOAT);
+ attr_id.nor = GWN_vertformat_attr_add(&format, "nor", GWN_COMP_F32, 3, GWN_FETCH_FLOAT);
+ }
+
+ Gwn_VertBuf *vbo = GWN_vertbuf_create_with_format(&format);
+ GWN_vertbuf_data_alloc(vbo, curve_render_surface_vert_len_get(lb));
+
+ BKE_displist_normals_add(lb);
+
+ int vbo_len_used = 0;
+ for (const DispList *dl = lb->first; dl; dl = dl->next) {
+ const bool ndata_is_single = dl->type == DL_INDEX3;
+ if (ELEM(dl->type, DL_INDEX3, DL_INDEX4, DL_SURF)) {
+ const float *fp_co = dl->verts;
+ const float *fp_no = dl->nors;
+ const int vbo_end = vbo_len_used + dl_vert_len(dl);
+ while (vbo_len_used < vbo_end) {
+ GWN_vertbuf_attr_set(vbo, attr_id.pos, vbo_len_used, fp_co);
+ if (fp_no) {
+ GWN_vertbuf_attr_set(vbo, attr_id.nor, vbo_len_used, fp_no);
+ if (ndata_is_single == false) {
+ fp_no += 3;
+ }
+ }
+ fp_co += 3;
+ vbo_len_used += 1;
+ }
+ }
+ }
+
+ return vbo;
+}
+
+Gwn_IndexBuf *DRW_displist_indexbuf_calc_triangles_in_order(ListBase *lb)
+{
+ const int tri_len = curve_render_surface_tri_len_get(lb);
+ const int vert_len = curve_render_surface_vert_len_get(lb);
+
+ Gwn_IndexBufBuilder elb;
+ GWN_indexbuf_init(&elb, GWN_PRIM_TRIS, tri_len, vert_len);
+
+ int ofs = 0;
+ for (const DispList *dl = lb->first; dl; dl = dl->next) {
+ displist_indexbufbuilder_set(&elb, dl, ofs);
+ ofs += dl_vert_len(dl);
+ }
+
+ return GWN_indexbuf_build(&elb);
+}
+
+Gwn_IndexBuf **DRW_displist_indexbuf_calc_triangles_in_order_split_by_material(ListBase *lb, uint gpumat_array_len)
+{
+ Gwn_IndexBuf **shaded_triangles_in_order = MEM_callocN(sizeof(*shaded_triangles_in_order) * gpumat_array_len, __func__);
+ Gwn_IndexBufBuilder *elb = BLI_array_alloca(elb, gpumat_array_len);
+
+ const int tri_len = curve_render_surface_tri_len_get(lb);
+ const int vert_len = curve_render_surface_vert_len_get(lb);
+ int i;
+
+ /* Init each index buffer builder */
+ for (i = 0; i < gpumat_array_len; i++) {
+ GWN_indexbuf_init(&elb[i], GWN_PRIM_TRIS, tri_len, vert_len);
+ }
+
+ /* calc each index buffer builder */
+ int ofs = 0;
+ for (const DispList *dl = lb->first; dl; dl = dl->next) {
+ displist_indexbufbuilder_set(&elb[dl->col], dl, ofs);
+ ofs += dl_vert_len(dl);
+ }
+
+ /* build each indexbuf */
+ for (i = 0; i < gpumat_array_len; i++) {
+ shaded_triangles_in_order[i] = GWN_indexbuf_build(&elb[i]);
+ }
+
+ return shaded_triangles_in_order;
+}
+
+static void displist_vertbuf_attr_set_tri_pos_normals_and_uv(
+ Gwn_VertBufRaw *pos_step, Gwn_VertBufRaw *nor_step, Gwn_VertBufRaw *uv_step,
+ const float v1[3], const float v2[3], const float v3[3],
+ const float n1[3], const float n2[3], const float n3[3],
+ const float uv1[2], const float uv2[2], const float uv3[2])
+{
+ copy_v3_v3(GWN_vertbuf_raw_step(pos_step), v1);
+ copy_v3_v3(GWN_vertbuf_raw_step(nor_step), n1);
+ copy_v2_v2(GWN_vertbuf_raw_step(uv_step), uv1);
+
+ copy_v3_v3(GWN_vertbuf_raw_step(pos_step), v2);
+ copy_v3_v3(GWN_vertbuf_raw_step(nor_step), n2);
+ copy_v2_v2(GWN_vertbuf_raw_step(uv_step), uv2);
+
+ copy_v3_v3(GWN_vertbuf_raw_step(pos_step), v3);
+ copy_v3_v3(GWN_vertbuf_raw_step(nor_step), n3);
+ copy_v2_v2(GWN_vertbuf_raw_step(uv_step), uv3);
+}
+
+Gwn_Batch **DRW_displist_batch_calc_tri_pos_normals_and_uv_split_by_material(ListBase *lb, uint gpumat_array_len)
+{
+ static Gwn_VertFormat shaded_triangles_format = { 0 };
+ static struct { uint pos, nor, uv; } attr_id;
+
+ if (shaded_triangles_format.attrib_ct == 0) {
+ /* initialize vertex format */
+ attr_id.pos = GWN_vertformat_attr_add(&shaded_triangles_format, "pos", GWN_COMP_F32, 3, GWN_FETCH_FLOAT);
+ attr_id.nor = GWN_vertformat_attr_add(&shaded_triangles_format, "nor", GWN_COMP_F32, 3, GWN_FETCH_FLOAT);
+ attr_id.uv = GWN_vertformat_attr_add(&shaded_triangles_format, "u", GWN_COMP_F32, 2, GWN_FETCH_FLOAT);
+ }
+
+ Gwn_Batch **shaded_triangles = MEM_mallocN(sizeof(*shaded_triangles) * gpumat_array_len, __func__);
+
+ Gwn_VertBuf **vbo = BLI_array_alloca(vbo, gpumat_array_len);
+ uint *vbo_len_capacity = BLI_array_alloca(vbo_len_capacity, gpumat_array_len);
+
+ Gwn_VertBufRaw *pos_step, *nor_step, *uv_step;
+ pos_step = BLI_array_alloca(pos_step, gpumat_array_len);
+ nor_step = BLI_array_alloca(nor_step, gpumat_array_len);
+ uv_step = BLI_array_alloca(uv_step, gpumat_array_len);
+
+ /* Create each vertex buffer */
+ for (int i = 0; i < gpumat_array_len; i++) {
+ vbo[i] = GWN_vertbuf_create_with_format(&shaded_triangles_format);
+ vbo_len_capacity[i] = 0;
+ }
+
+ /* Calc `vbo_len_capacity` */
+ for (const DispList *dl = lb->first; dl; dl = dl->next) {
+ vbo_len_capacity[dl->col] += dl_tri_len(dl) * 3;
+ }
+
+ /* Alloc each vertex buffer and get each raw data */
+ for (int i = 0; i < gpumat_array_len; i++) {
+ GWN_vertbuf_data_alloc(vbo[i], vbo_len_capacity[i]);
+ GWN_vertbuf_attr_get_raw_data(vbo[i], attr_id.pos, &pos_step[i]);
+ GWN_vertbuf_attr_get_raw_data(vbo[i], attr_id.nor, &nor_step[i]);
+ GWN_vertbuf_attr_get_raw_data(vbo[i], attr_id.uv, &uv_step[i]);
+ }
+
+ BKE_displist_normals_add(lb);
+
+ for (const DispList *dl = lb->first; dl; dl = dl->next) {
+ if (ELEM(dl->type, DL_INDEX3, DL_INDEX4, DL_SURF)) {
+ const int col = dl->col;
+ const float(*verts)[3] = (float(*)[3])dl->verts;
+ const float(*nors)[3] = (float(*)[3])dl->nors;
+ const int *idx = dl->index;
+ float uv[4][2];
+
+ if (dl->type == DL_INDEX3) {
+ const float x_max = (float)(dl->nr - 1);
+ uv[0][1] = uv[1][1] = uv[2][1] = 0.0f;
+ const int i_end = dl->parts;
+ for (int i = 0; i < i_end; i++, idx += 3) {
+ uv[0][0] = idx[0] / x_max;
+ uv[1][0] = idx[2] / x_max;
+ uv[2][0] = idx[1] / x_max;
+
+ displist_vertbuf_attr_set_tri_pos_normals_and_uv(
+ &pos_step[col], &nor_step[col], &uv_step[col],
+ verts[idx[0]], verts[idx[2]], verts[idx[1]],
+ nors[idx[0]], nors[idx[2]], nors[idx[1]],
+ uv[0], uv[1], uv[2]);
+ }
+ }
+ else if (dl->type == DL_SURF) {
+ uint quad[4];
+ for (int a = 0; a < dl->parts; a++) {
+ if ((dl->flag & DL_CYCL_V) == 0 && a == dl->parts - 1) {
+ break;
+ }
+
+ int b;
+ if (dl->flag & DL_CYCL_U) {
+ quad[0] = dl->nr * a;
+ quad[3] = quad[0] + dl->nr - 1;
+ quad[1] = quad[0] + dl->nr;
+ quad[2] = quad[3] + dl->nr;
+ b = 0;
+ }
+ else {
+ quad[3] = dl->nr * a;
+ quad[0] = quad[3] + 1;
+ quad[2] = quad[3] + dl->nr;
+ quad[1] = quad[0] + dl->nr;
+ b = 1;
+ }
+ if ((dl->flag & DL_CYCL_V) && a == dl->parts - 1) {
+ quad[1] -= dl->parts * dl->nr;
+ quad[2] -= dl->parts * dl->nr;
+ }
+
+ for (; b < dl->nr; b++) {
+ int orco_sizeu = dl->nr - 1;
+ int orco_sizev = dl->parts - 1;
+
+ /* exception as handled in convertblender.c too */
+ if (dl->flag & DL_CYCL_U) {
+ orco_sizeu++;
+ }
+ if (dl->flag & DL_CYCL_V) {
+ orco_sizev++;
+ }
+
+ for (int i = 0; i < 4; i++) {
+ /* find uv based on vertex index into grid array */
+ uv[i][0] = (quad[i] / dl->nr) / (float)orco_sizev;
+ uv[i][1] = (quad[i] % dl->nr) / (float)orco_sizeu;
+
+ /* cyclic correction */
+ if ((i == 1 || i == 2) && uv[i][0] == 0.0f) {
+ uv[i][0] = 1.0f;
+ }
+ if ((i == 0 || i == 1) && uv[i][1] == 0.0f) {
+ uv[i][1] = 1.0f;
+ }
+ }
+
+ displist_vertbuf_attr_set_tri_pos_normals_and_uv(
+ &pos_step[col], &nor_step[col], &uv_step[col],
+ verts[quad[0]], verts[quad[1]], verts[quad[2]],
+ nors[quad[0]], nors[quad[1]], nors[quad[2]],
+ uv[0], uv[1], uv[2]);
+
+ displist_vertbuf_attr_set_tri_pos_normals_and_uv(
+ &pos_step[col], &nor_step[col], &uv_step[col],
+ verts[quad[0]], verts[quad[2]], verts[quad[3]],
+ nors[quad[0]], nors[quad[2]], nors[quad[3]],
+ uv[0], uv[2], uv[3]);
+
+ quad[2] = quad[1];
+ quad[1]++;
+ quad[3] = quad[0];
+ quad[0]++;
+ }
+ }
+ }
+ else {
+ BLI_assert(dl->type == DL_INDEX4);
+ uv[0][0] = uv[0][1] = uv[1][0] = uv[3][1] = 0.0f;
+ uv[1][1] = uv[2][0] = uv[2][1] = uv[3][0] = 1.0f;
+
+ const int i_end = dl->parts;
+ for (int i = 0; i < i_end; i++, idx += 4) {
+ displist_vertbuf_attr_set_tri_pos_normals_and_uv(
+ &pos_step[col], &nor_step[col], &uv_step[col],
+ verts[idx[0]], verts[idx[1]], verts[idx[2]],
+ nors[idx[0]], nors[idx[1]], nors[idx[2]],
+ uv[0], uv[1], uv[2]);
+
+ if (idx[2] != idx[3]) {
+ displist_vertbuf_attr_set_tri_pos_normals_and_uv(
+ &pos_step[col], &nor_step[col], &uv_step[col],
+ verts[idx[0]], verts[idx[2]], verts[idx[3]],
+ nors[idx[0]], nors[idx[2]], nors[idx[3]],
+ uv[0], uv[2], uv[3]);
+ }
+ }
+ }
+ }
+ }
+
+ for (int i = 0; i < gpumat_array_len; i++) {
+ uint vbo_len_used = GWN_vertbuf_raw_used(&pos_step[i]);
+ if (vbo_len_capacity[i] != vbo_len_used) {
+ GWN_vertbuf_data_resize(vbo[i], vbo_len_used);
+ }
+ shaded_triangles[i] = GWN_batch_create_ex(GWN_PRIM_TRIS, vbo[i], NULL, GWN_BATCH_OWNS_VBO);
+ }
+
+ return shaded_triangles;
+}
diff --git a/source/blender/draw/intern/draw_cache_impl_lattice.c b/source/blender/draw/intern/draw_cache_impl_lattice.c
new file mode 100644
index 00000000000..eed408de3cd
--- /dev/null
+++ b/source/blender/draw/intern/draw_cache_impl_lattice.c
@@ -0,0 +1,587 @@
+/*
+ * ***** BEGIN GPL LICENSE BLOCK *****
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * The Original Code is Copyright (C) 2017 by Blender Foundation.
+ * All rights reserved.
+ *
+ * Contributor(s): Blender Foundation, Mike Erwin, Dalai Felinto
+ *
+ * ***** END GPL LICENSE BLOCK *****
+ */
+
+/** \file draw_cache_impl_lattice.c
+ * \ingroup draw
+ *
+ * \brief Lattice API for render engines
+ */
+
+#include "MEM_guardedalloc.h"
+
+#include "BLI_utildefines.h"
+#include "BLI_math_vector.h"
+
+#include "DNA_curve_types.h"
+#include "DNA_lattice_types.h"
+#include "DNA_meshdata_types.h"
+#include "DNA_userdef_types.h"
+
+#include "BKE_lattice.h"
+#include "BKE_deform.h"
+#include "BKE_colorband.h"
+
+#include "GPU_batch.h"
+
+#include "draw_cache_impl.h" /* own include */
+
+#define SELECT 1
+
+/**
+ * TODO
+ * - 'DispList' is currently not used
+ * (we could avoid using since it will be removed)
+ */
+
+static void lattice_batch_cache_clear(Lattice *lt);
+
+/* ---------------------------------------------------------------------- */
+/* Lattice Interface, direct access to basic data. */
+
+static int vert_len_calc(int u, int v, int w)
+{
+ if (u <= 0 || v <= 0 || w <= 0) {
+ return 0;
+ }
+ return u * v * w;
+}
+
+static int edge_len_calc(int u, int v, int w)
+{
+ if (u <= 0 || v <= 0 || w <= 0) {
+ return 0;
+ }
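+	/* Edges along U and V inside each of the W levels, plus the edges along W linking consecutive levels. */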
+ return (((((u - 1) * v) +
+ ((v - 1) * u)) * w) +
+ ((w - 1) * (u * v)));
+}
+
+static int lattice_render_verts_len_get(Lattice *lt)
+{
+ if (lt->editlatt) {
+ lt = lt->editlatt->latt;
+ }
+
+ const int u = lt->pntsu;
+ const int v = lt->pntsv;
+ const int w = lt->pntsw;
+
+ if ((lt->flag & LT_OUTSIDE) == 0) {
+ return vert_len_calc(u, v, w);
+ }
+ else {
+ /* TODO remove internal coords */
+ return vert_len_calc(u, v, w);
+ }
+}
+
+static int lattice_render_edges_len_get(Lattice *lt)
+{
+ if (lt->editlatt) {
+ lt = lt->editlatt->latt;
+ }
+
+ const int u = lt->pntsu;
+ const int v = lt->pntsv;
+ const int w = lt->pntsw;
+
+ if ((lt->flag & LT_OUTSIDE) == 0) {
+ return edge_len_calc(u, v, w);
+ }
+ else {
+ /* TODO remove internal coords */
+ return edge_len_calc(u, v, w);
+ }
+}
+
+/* ---------------------------------------------------------------------- */
+/* Lattice Interface, indirect, partially cached access to complex data. */
+
+typedef struct LatticeRenderData {
+ int types;
+
+ int vert_len;
+ int edge_len;
+
+ struct {
+ int u_len, v_len, w_len;
+ } dims;
+ bool show_only_outside;
+
+ struct EditLatt *edit_latt;
+ BPoint *bp;
+
+ int actbp;
+
+ struct MDeformVert *dvert;
+} LatticeRenderData;
+
+enum {
+ LR_DATATYPE_VERT = 1 << 0,
+ LR_DATATYPE_EDGE = 1 << 1,
+ LR_DATATYPE_OVERLAY = 1 << 2,
+};
+
+static LatticeRenderData *lattice_render_data_create(Lattice *lt, const int types)
+{
+ LatticeRenderData *rdata = MEM_callocN(sizeof(*rdata), __func__);
+ rdata->types = types;
+
+ if (lt->editlatt) {
+ EditLatt *editlatt = lt->editlatt;
+ lt = editlatt->latt;
+
+ rdata->edit_latt = editlatt;
+
+ rdata->dvert = lt->dvert;
+
+ if (types & (LR_DATATYPE_VERT)) {
+ rdata->vert_len = lattice_render_verts_len_get(lt);
+ }
+ if (types & (LR_DATATYPE_EDGE)) {
+ rdata->edge_len = lattice_render_edges_len_get(lt);
+ }
+ if (types & LR_DATATYPE_OVERLAY) {
+ rdata->actbp = lt->actbp;
+ }
+ }
+ else {
+ rdata->dvert = NULL;
+
+ if (types & (LR_DATATYPE_VERT)) {
+ rdata->vert_len = lattice_render_verts_len_get(lt);
+ }
+ if (types & (LR_DATATYPE_EDGE)) {
+ rdata->edge_len = lattice_render_edges_len_get(lt);
+			/* no edge data */
+ }
+ }
+
+ rdata->bp = lt->def;
+
+ rdata->dims.u_len = lt->pntsu;
+ rdata->dims.v_len = lt->pntsv;
+ rdata->dims.w_len = lt->pntsw;
+
+ rdata->show_only_outside = (lt->flag & LT_OUTSIDE) != 0;
+ rdata->actbp = lt->actbp;
+
+ return rdata;
+}
+
+static void lattice_render_data_free(LatticeRenderData *rdata)
+{
+#if 0
+ if (rdata->loose_verts) {
+ MEM_freeN(rdata->loose_verts);
+ }
+#endif
+ MEM_freeN(rdata);
+}
+
+static int lattice_render_data_verts_len_get(const LatticeRenderData *rdata)
+{
+ BLI_assert(rdata->types & LR_DATATYPE_VERT);
+ return rdata->vert_len;
+}
+
+static int lattice_render_data_edges_len_get(const LatticeRenderData *rdata)
+{
+ BLI_assert(rdata->types & LR_DATATYPE_EDGE);
+ return rdata->edge_len;
+}
+
+static const BPoint *lattice_render_data_vert_bpoint(const LatticeRenderData *rdata, const int vert_idx)
+{
+ BLI_assert(rdata->types & LR_DATATYPE_VERT);
+ return &rdata->bp[vert_idx];
+}
+
+/* TODO, move into shader? */
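+/* Maps a weight in [0, 1] onto the usual blue -> cyan -> green -> yellow -> red ramp. */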
+static void rgb_from_weight(float r_rgb[3], const float weight)
+{
+ const float blend = ((weight / 2.0f) + 0.5f);
+
+ if (weight <= 0.25f) { /* blue->cyan */
+ r_rgb[0] = 0.0f;
+ r_rgb[1] = blend * weight * 4.0f;
+ r_rgb[2] = blend;
+ }
+ else if (weight <= 0.50f) { /* cyan->green */
+ r_rgb[0] = 0.0f;
+ r_rgb[1] = blend;
+ r_rgb[2] = blend * (1.0f - ((weight - 0.25f) * 4.0f));
+ }
+ else if (weight <= 0.75f) { /* green->yellow */
+ r_rgb[0] = blend * ((weight - 0.50f) * 4.0f);
+ r_rgb[1] = blend;
+ r_rgb[2] = 0.0f;
+ }
+ else if (weight <= 1.0f) { /* yellow->red */
+ r_rgb[0] = blend;
+ r_rgb[1] = blend * (1.0f - ((weight - 0.75f) * 4.0f));
+ r_rgb[2] = 0.0f;
+ }
+ else {
+ /* exceptional value, unclamped or nan,
+ * avoid uninitialized memory use */
+ r_rgb[0] = 1.0f;
+ r_rgb[1] = 0.0f;
+ r_rgb[2] = 1.0f;
+ }
+}
+
+static void lattice_render_data_weight_col_get(const LatticeRenderData *rdata, const int vert_idx,
+ const int actdef, float r_col[4])
+{
+ if (actdef > -1) {
+ float weight = defvert_find_weight(rdata->dvert + vert_idx, actdef);
+
+ if (U.flag & USER_CUSTOM_RANGE) {
+ BKE_colorband_evaluate(&U.coba_weight, weight, r_col);
+ }
+ else {
+ rgb_from_weight(r_col, weight);
+ }
+
+ r_col[3] = 1.0f;
+ }
+ else {
+ zero_v4(r_col);
+ }
+}
+
+enum {
+ VFLAG_VERTEX_SELECTED = 1 << 0,
+ VFLAG_VERTEX_ACTIVE = 1 << 1,
+};
+
+/* ---------------------------------------------------------------------- */
+/* Lattice Gwn_Batch Cache */
+
+typedef struct LatticeBatchCache {
+ Gwn_VertBuf *pos;
+ Gwn_IndexBuf *edges;
+
+ Gwn_Batch *all_verts;
+ Gwn_Batch *all_edges;
+
+ Gwn_Batch *overlay_verts;
+
+ /* settings to determine if cache is invalid */
+ bool is_dirty;
+
+ struct {
+ int u_len, v_len, w_len;
+ } dims;
+ bool show_only_outside;
+
+ bool is_editmode;
+} LatticeBatchCache;
+
+/* Gwn_Batch cache management. */
+
+static bool lattice_batch_cache_valid(Lattice *lt)
+{
+ LatticeBatchCache *cache = lt->batch_cache;
+
+ if (cache == NULL) {
+ return false;
+ }
+
+ if (cache->is_editmode != (lt->editlatt != NULL)) {
+ return false;
+ }
+
+ if (cache->is_dirty == false) {
+ return true;
+ }
+ else {
+ if (cache->is_editmode) {
+ return false;
+ }
+ else if ((cache->dims.u_len != lt->pntsu) ||
+ (cache->dims.v_len != lt->pntsv) ||
+ (cache->dims.w_len != lt->pntsw) ||
+ ((cache->show_only_outside != ((lt->flag & LT_OUTSIDE) != 0))))
+ {
+ return false;
+ }
+ }
+
+ return true;
+}
+
+static void lattice_batch_cache_init(Lattice *lt)
+{
+ LatticeBatchCache *cache = lt->batch_cache;
+
+ if (!cache) {
+ cache = lt->batch_cache = MEM_callocN(sizeof(*cache), __func__);
+ }
+ else {
+ memset(cache, 0, sizeof(*cache));
+ }
+
+ cache->dims.u_len = lt->pntsu;
+ cache->dims.v_len = lt->pntsv;
+ cache->dims.w_len = lt->pntsw;
+ cache->show_only_outside = (lt->flag & LT_OUTSIDE) != 0;
+
+ cache->is_editmode = lt->editlatt != NULL;
+
+ cache->is_dirty = false;
+}
+
+static LatticeBatchCache *lattice_batch_cache_get(Lattice *lt)
+{
+ if (!lattice_batch_cache_valid(lt)) {
+ lattice_batch_cache_clear(lt);
+ lattice_batch_cache_init(lt);
+ }
+ return lt->batch_cache;
+}
+
+void DRW_lattice_batch_cache_dirty(Lattice *lt, int mode)
+{
+ LatticeBatchCache *cache = lt->batch_cache;
+ if (cache == NULL) {
+ return;
+ }
+ switch (mode) {
+ case BKE_LATTICE_BATCH_DIRTY_ALL:
+ cache->is_dirty = true;
+ break;
+ case BKE_LATTICE_BATCH_DIRTY_SELECT:
+ /* TODO Separate Flag vbo */
+ GWN_BATCH_DISCARD_SAFE(cache->overlay_verts);
+ break;
+ default:
+ BLI_assert(0);
+ }
+}
+
+static void lattice_batch_cache_clear(Lattice *lt)
+{
+ LatticeBatchCache *cache = lt->batch_cache;
+ if (!cache) {
+ return;
+ }
+
+ GWN_BATCH_DISCARD_SAFE(cache->all_verts);
+ GWN_BATCH_DISCARD_SAFE(cache->all_edges);
+ GWN_BATCH_DISCARD_SAFE(cache->overlay_verts);
+
+ GWN_VERTBUF_DISCARD_SAFE(cache->pos);
+ GWN_INDEXBUF_DISCARD_SAFE(cache->edges);
+}
+
+void DRW_lattice_batch_cache_free(Lattice *lt)
+{
+ lattice_batch_cache_clear(lt);
+ MEM_SAFE_FREE(lt->batch_cache);
+}
+
+/* Gwn_Batch cache usage. */
+static Gwn_VertBuf *lattice_batch_cache_get_pos(LatticeRenderData *rdata, LatticeBatchCache *cache,
+ bool use_weight, const int actdef)
+{
+ BLI_assert(rdata->types & LR_DATATYPE_VERT);
+
+ if (cache->pos == NULL) {
+ static Gwn_VertFormat format = { 0 };
+ static struct { uint pos, col; } attr_id;
+
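+		/* The format is rebuilt on every call: the color attribute is only added
+		 * when 'use_weight' is set. */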
+ GWN_vertformat_clear(&format);
+
+ /* initialize vertex format */
+ attr_id.pos = GWN_vertformat_attr_add(&format, "pos", GWN_COMP_F32, 3, GWN_FETCH_FLOAT);
+
+ if (use_weight) {
+ attr_id.col = GWN_vertformat_attr_add(&format, "color", GWN_COMP_F32, 4, GWN_FETCH_FLOAT);
+ }
+
+ const int vert_len = lattice_render_data_verts_len_get(rdata);
+
+ cache->pos = GWN_vertbuf_create_with_format(&format);
+ GWN_vertbuf_data_alloc(cache->pos, vert_len);
+ for (int i = 0; i < vert_len; ++i) {
+ const BPoint *bp = lattice_render_data_vert_bpoint(rdata, i);
+ GWN_vertbuf_attr_set(cache->pos, attr_id.pos, i, bp->vec);
+
+ if (use_weight) {
+ float w_col[4];
+ lattice_render_data_weight_col_get(rdata, i, actdef, w_col);
+
+ GWN_vertbuf_attr_set(cache->pos, attr_id.col, i, w_col);
+ }
+ }
+ }
+
+ return cache->pos;
+}
+
+static Gwn_IndexBuf *lattice_batch_cache_get_edges(LatticeRenderData *rdata, LatticeBatchCache *cache)
+{
+ BLI_assert(rdata->types & (LR_DATATYPE_VERT | LR_DATATYPE_EDGE));
+
+ if (cache->edges == NULL) {
+ const int vert_len = lattice_render_data_verts_len_get(rdata);
+ const int edge_len = lattice_render_data_edges_len_get(rdata);
+ int edge_len_real = 0;
+
+ Gwn_IndexBufBuilder elb;
+ GWN_indexbuf_init(&elb, GWN_PRIM_LINES, edge_len, vert_len);
+
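+	/* Map (u, v, w) grid coordinates to the flat BPoint index: U varies fastest, then V, then W. */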
+#define LATT_INDEX(u, v, w) \
+ ((((w) * rdata->dims.v_len + (v)) * rdata->dims.u_len) + (u))
+
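+	/* uxt/vxt/wxt mark points on the lattice boundary along each axis; when
+	 * 'show_only_outside' is set, edges that do not lie on an outer face of the lattice are skipped. */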
+ for (int w = 0; w < rdata->dims.w_len; w++) {
+ int wxt = (w == 0 || w == rdata->dims.w_len - 1);
+ for (int v = 0; v < rdata->dims.v_len; v++) {
+ int vxt = (v == 0 || v == rdata->dims.v_len - 1);
+ for (int u = 0; u < rdata->dims.u_len; u++) {
+ int uxt = (u == 0 || u == rdata->dims.u_len - 1);
+
+ if (w && ((uxt || vxt) || !rdata->show_only_outside)) {
+ GWN_indexbuf_add_line_verts(&elb, LATT_INDEX(u, v, w - 1), LATT_INDEX(u, v, w));
+ BLI_assert(edge_len_real <= edge_len);
+ edge_len_real++;
+ }
+ if (v && ((uxt || wxt) || !rdata->show_only_outside)) {
+ GWN_indexbuf_add_line_verts(&elb, LATT_INDEX(u, v - 1, w), LATT_INDEX(u, v, w));
+ BLI_assert(edge_len_real <= edge_len);
+ edge_len_real++;
+ }
+ if (u && ((vxt || wxt) || !rdata->show_only_outside)) {
+ GWN_indexbuf_add_line_verts(&elb, LATT_INDEX(u - 1, v, w), LATT_INDEX(u, v, w));
+ BLI_assert(edge_len_real <= edge_len);
+ edge_len_real++;
+ }
+ }
+ }
+ }
+
+#undef LATT_INDEX
+
+ if (rdata->show_only_outside) {
+ BLI_assert(edge_len_real <= edge_len);
+ }
+ else {
+ BLI_assert(edge_len_real == edge_len);
+ }
+
+ cache->edges = GWN_indexbuf_build(&elb);
+ }
+
+ return cache->edges;
+}
+
+static void lattice_batch_cache_create_overlay_batches(Lattice *lt)
+{
+ /* Since LR_DATATYPE_OVERLAY is slow to generate, generate them all at once */
+ int options = LR_DATATYPE_VERT | LR_DATATYPE_OVERLAY;
+
+ LatticeBatchCache *cache = lattice_batch_cache_get(lt);
+ LatticeRenderData *rdata = lattice_render_data_create(lt, options);
+
+ if (cache->overlay_verts == NULL) {
+ static Gwn_VertFormat format = { 0 };
+ static struct { uint pos, data; } attr_id;
+ if (format.attrib_ct == 0) {
+ /* initialize vertex format */
+ attr_id.pos = GWN_vertformat_attr_add(&format, "pos", GWN_COMP_F32, 3, GWN_FETCH_FLOAT);
+ attr_id.data = GWN_vertformat_attr_add(&format, "data", GWN_COMP_U8, 1, GWN_FETCH_INT);
+ }
+
+ const int vert_len = lattice_render_data_verts_len_get(rdata);
+
+ Gwn_VertBuf *vbo = GWN_vertbuf_create_with_format(&format);
+ GWN_vertbuf_data_alloc(vbo, vert_len);
+ for (int i = 0; i < vert_len; ++i) {
+ const BPoint *bp = lattice_render_data_vert_bpoint(rdata, i);
+
+ char vflag = 0;
+ if (bp->f1 & SELECT) {
+ if (i == rdata->actbp) {
+ vflag |= VFLAG_VERTEX_ACTIVE;
+ }
+ else {
+ vflag |= VFLAG_VERTEX_SELECTED;
+ }
+ }
+
+ GWN_vertbuf_attr_set(vbo, attr_id.pos, i, bp->vec);
+ GWN_vertbuf_attr_set(vbo, attr_id.data, i, &vflag);
+ }
+
+ cache->overlay_verts = GWN_batch_create_ex(GWN_PRIM_POINTS, vbo, NULL, GWN_BATCH_OWNS_VBO);
+ }
+
+ lattice_render_data_free(rdata);
+}
+
+Gwn_Batch *DRW_lattice_batch_cache_get_all_edges(Lattice *lt, bool use_weight, const int actdef)
+{
+ LatticeBatchCache *cache = lattice_batch_cache_get(lt);
+
+ if (cache->all_edges == NULL) {
+ /* create batch from Lattice */
+ LatticeRenderData *rdata = lattice_render_data_create(lt, LR_DATATYPE_VERT | LR_DATATYPE_EDGE);
+
+ cache->all_edges = GWN_batch_create(GWN_PRIM_LINES, lattice_batch_cache_get_pos(rdata, cache, use_weight, actdef),
+ lattice_batch_cache_get_edges(rdata, cache));
+
+ lattice_render_data_free(rdata);
+ }
+
+ return cache->all_edges;
+}
+
+Gwn_Batch *DRW_lattice_batch_cache_get_all_verts(Lattice *lt)
+{
+ LatticeBatchCache *cache = lattice_batch_cache_get(lt);
+
+ if (cache->all_verts == NULL) {
+ LatticeRenderData *rdata = lattice_render_data_create(lt, LR_DATATYPE_VERT);
+
+ cache->all_verts = GWN_batch_create(GWN_PRIM_POINTS, lattice_batch_cache_get_pos(rdata, cache, false, -1), NULL);
+
+ lattice_render_data_free(rdata);
+ }
+
+ return cache->all_verts;
+}
+
+Gwn_Batch *DRW_lattice_batch_cache_get_overlay_verts(Lattice *lt)
+{
+ LatticeBatchCache *cache = lattice_batch_cache_get(lt);
+
+ if (cache->overlay_verts == NULL) {
+ lattice_batch_cache_create_overlay_batches(lt);
+ }
+
+ return cache->overlay_verts;
+}
diff --git a/source/blender/draw/intern/draw_cache_impl_mesh.c b/source/blender/draw/intern/draw_cache_impl_mesh.c
new file mode 100644
index 00000000000..f7a82c6d0c5
--- /dev/null
+++ b/source/blender/draw/intern/draw_cache_impl_mesh.c
@@ -0,0 +1,3932 @@
+/*
+ * ***** BEGIN GPL LICENSE BLOCK *****
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * The Original Code is Copyright (C) 2017 by Blender Foundation.
+ * All rights reserved.
+ *
+ * Contributor(s): Blender Foundation, Mike Erwin, Dalai Felinto
+ *
+ * ***** END GPL LICENSE BLOCK *****
+ */
+
+/** \file draw_cache_impl_mesh.c
+ * \ingroup draw
+ *
+ * \brief Mesh API for render engines
+ */
+
+#include "MEM_guardedalloc.h"
+
+#include "BLI_utildefines.h"
+#include "BLI_math_vector.h"
+#include "BLI_math_bits.h"
+#include "BLI_string.h"
+#include "BLI_alloca.h"
+
+#include "DNA_mesh_types.h"
+#include "DNA_meshdata_types.h"
+#include "DNA_object_types.h"
+
+#include "BKE_customdata.h"
+#include "BKE_deform.h"
+#include "BKE_DerivedMesh.h"
+#include "BKE_editmesh.h"
+#include "BKE_editmesh_tangent.h"
+#include "BKE_mesh.h"
+#include "BKE_mesh_tangent.h"
+#include "BKE_colorband.h"
+
+#include "bmesh.h"
+
+#include "GPU_batch.h"
+#include "GPU_draw.h"
+#include "GPU_material.h"
+
+#include "draw_cache_impl.h" /* own include */
+
+static void mesh_batch_cache_clear(Mesh *me);
+
+/* ---------------------------------------------------------------------- */
+
+/** \name Mesh/BMesh Interface (direct access to basic data).
+ * \{ */
+
+static int mesh_render_verts_len_get(Mesh *me)
+{
+ return me->edit_btmesh ? me->edit_btmesh->bm->totvert : me->totvert;
+}
+
+static int mesh_render_edges_len_get(Mesh *me)
+{
+ return me->edit_btmesh ? me->edit_btmesh->bm->totedge : me->totedge;
+}
+
+static int mesh_render_looptri_len_get(Mesh *me)
+{
+ return me->edit_btmesh ? me->edit_btmesh->tottri : poly_to_tri_count(me->totpoly, me->totloop);
+}
+
+static int mesh_render_polys_len_get(Mesh *me)
+{
+ return me->edit_btmesh ? me->edit_btmesh->bm->totface : me->totpoly;
+}
+
+static int mesh_render_mat_len_get(Mesh *me)
+{
+ return MAX2(1, me->totcol);
+}
+
+static int UNUSED_FUNCTION(mesh_render_loops_len_get)(Mesh *me)
+{
+ return me->edit_btmesh ? me->edit_btmesh->bm->totloop : me->totloop;
+}
+
+/** \} */
+
+
+/* ---------------------------------------------------------------------- */
+
+/** \name Mesh/BMesh Interface (indirect, partially cached access to complex data).
+ * \{ */
+
+typedef struct EdgeAdjacentPolys {
+ int count;
+ int face_index[2];
+} EdgeAdjacentPolys;
+
+typedef struct EdgeDrawAttr {
+ unsigned char v_flag;
+ unsigned char e_flag;
+ unsigned char crease;
+ unsigned char bweight;
+} EdgeDrawAttr;
+
+typedef struct MeshRenderData {
+ int types;
+
+ int vert_len;
+ int edge_len;
+ int tri_len;
+ int loop_len;
+ int poly_len;
+ int mat_len;
+ int loose_vert_len;
+ int loose_edge_len;
+
+ BMEditMesh *edit_bmesh;
+ MVert *mvert;
+ MEdge *medge;
+ MLoop *mloop;
+ MPoly *mpoly;
+ float (*orco)[3];
+ MDeformVert *dvert;
+ MLoopUV *mloopuv;
+ MLoopCol *mloopcol;
+ float (*loop_normals)[3];
+
+ /* CustomData 'cd' cache for efficient access. */
+ struct {
+ struct {
+ MLoopUV **uv;
+ int uv_len;
+ int uv_active;
+
+ MLoopCol **vcol;
+ int vcol_len;
+ int vcol_active;
+
+ float (**tangent)[4];
+ int tangent_len;
+ int tangent_active;
+
+ bool *auto_vcol;
+ } layers;
+
+ /* Custom-data offsets (only needed for BMesh access) */
+ struct {
+ int crease;
+ int bweight;
+ int *uv;
+ int *vcol;
+ } offset;
+
+ struct {
+ char (*auto_mix)[32];
+ char (*uv)[32];
+ char (*vcol)[32];
+ char (*tangent)[32];
+ } uuid;
+
+ /* for certain cases we need an output loop-data storage (bmesh tangents) */
+ struct {
+ CustomData ldata;
+ /* grr, special case variable (use in place of 'dm->tangent_mask') */
+ short tangent_mask;
+ } output;
+ } cd;
+
+ BMVert *eve_act;
+ BMEdge *eed_act;
+ BMFace *efa_act;
+
+ /* Data created on-demand (usually not for bmesh-based data). */
+ EdgeAdjacentPolys *edges_adjacent_polys;
+ MLoopTri *mlooptri;
+ int *loose_edges;
+ int *loose_verts;
+
+ float (*poly_normals)[3];
+ float (*vert_weight_color)[3];
+ char (*vert_color)[3];
+ Gwn_PackedNormal *poly_normals_pack;
+ Gwn_PackedNormal *vert_normals_pack;
+ bool *edge_select_bool;
+} MeshRenderData;
+
+enum {
+ MR_DATATYPE_VERT = 1 << 0,
+ MR_DATATYPE_EDGE = 1 << 1,
+ MR_DATATYPE_LOOPTRI = 1 << 2,
+ MR_DATATYPE_LOOP = 1 << 3,
+ MR_DATATYPE_POLY = 1 << 4,
+ MR_DATATYPE_OVERLAY = 1 << 5,
+ MR_DATATYPE_SHADING = 1 << 6,
+ MR_DATATYPE_DVERT = 1 << 7,
+ MR_DATATYPE_LOOPCOL = 1 << 8,
+ MR_DATATYPE_LOOPUV = 1 << 9,
+};
+
+/**
+ * These functions look like they would be slow but they will typically return true on the first iteration.
+ * Only false when all attached elements are hidden.
+ */
+static bool bm_vert_has_visible_edge(const BMVert *v)
+{
+ const BMEdge *e_iter, *e_first;
+
+ e_iter = e_first = v->e;
+ do {
+ if (!BM_elem_flag_test(e_iter, BM_ELEM_HIDDEN)) {
+ return true;
+ }
+ } while ((e_iter = BM_DISK_EDGE_NEXT(e_iter, v)) != e_first);
+ return false;
+}
+
+static bool bm_edge_has_visible_face(const BMEdge *e)
+{
+ const BMLoop *l_iter, *l_first;
+ l_iter = l_first = e->l;
+ do {
+ if (!BM_elem_flag_test(l_iter->f, BM_ELEM_HIDDEN)) {
+ return true;
+ }
+ } while ((l_iter = l_iter->radial_next) != l_first);
+ return false;
+}
+
+
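+/* Scan the vertex attributes requested by each GPU material and mark, as bitmasks in
+ * 'cd_vused' / 'cd_lused', which CustomData layers (UVs, vertex colors, tangents, orco)
+ * will be needed to build the shading data. */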
+static void mesh_cd_calc_used_gpu_layers(
+ CustomData *UNUSED(cd_vdata), uchar cd_vused[CD_NUMTYPES],
+ CustomData *cd_ldata, ushort cd_lused[CD_NUMTYPES],
+ struct GPUMaterial **gpumat_array, int gpumat_array_len)
+{
+ /* See: DM_vertex_attributes_from_gpu for similar logic */
+ GPUVertexAttribs gattribs = {{{0}}};
+
+ for (int i = 0; i < gpumat_array_len; i++) {
+ GPUMaterial *gpumat = gpumat_array[i];
+ if (gpumat) {
+ GPU_material_vertex_attributes(gpumat, &gattribs);
+ for (int j = 0; j < gattribs.totlayer; j++) {
+ const char *name = gattribs.layer[j].name;
+ int type = gattribs.layer[j].type;
+ int layer = -1;
+
+ if (type == CD_AUTO_FROM_NAME) {
+					/* We need to deduce which exact layer is used,
+					 * based on the specified name. */
+ if (name[0] != '\0') {
+ layer = CustomData_get_named_layer(cd_ldata, CD_MLOOPUV, name);
+ type = CD_MTFACE;
+
+ if (layer == -1) {
+ layer = CustomData_get_named_layer(cd_ldata, CD_MLOOPCOL, name);
+ type = CD_MCOL;
+ }
+#if 0 /* Tangents are always from UVs - this will never happen. */
+ if (layer == -1) {
+ layer = CustomData_get_named_layer(cd_ldata, CD_TANGENT, name);
+ type = CD_TANGENT;
+ }
+#endif
+ if (layer == -1) {
+ continue;
+ }
+ }
+ else {
+ /* Fall back to the UV layer, which matches old behavior. */
+ type = CD_MTFACE;
+ }
+ }
+
+ switch (type) {
+ case CD_MTFACE:
+ {
+ if (layer == -1) {
+ layer = (name[0] != '\0') ?
+ CustomData_get_named_layer(cd_ldata, CD_MLOOPUV, name) :
+ CustomData_get_active_layer(cd_ldata, CD_MLOOPUV);
+ }
+ if (layer != -1) {
+ cd_lused[CD_MLOOPUV] |= (1 << layer);
+ }
+ break;
+ }
+ case CD_TANGENT:
+ {
+ if (layer == -1) {
+ layer = (name[0] != '\0') ?
+ CustomData_get_named_layer(cd_ldata, CD_MLOOPUV, name) :
+ CustomData_get_active_layer(cd_ldata, CD_MLOOPUV);
+ }
+ if (layer != -1) {
+ cd_lused[CD_TANGENT] |= (1 << layer);
+ }
+ else {
+ /* no UV layers at all => requesting orco */
+ cd_lused[CD_TANGENT] |= DM_TANGENT_MASK_ORCO;
+ cd_vused[CD_ORCO] |= 1;
+ }
+ break;
+ }
+ case CD_MCOL:
+ {
+ if (layer == -1) {
+ layer = (name[0] != '\0') ?
+ CustomData_get_named_layer(cd_ldata, CD_MLOOPCOL, name) :
+ CustomData_get_active_layer(cd_ldata, CD_MLOOPCOL);
+ }
+ if (layer != -1) {
+ cd_lused[CD_MLOOPCOL] |= (1 << layer);
+ }
+ break;
+ }
+ case CD_ORCO:
+ {
+ cd_vused[CD_ORCO] |= 1;
+ break;
+ }
+ }
+ }
+ }
+ }
+}
+
+
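+/* Compute poly normals and split (auto-smooth) loop normals for a regular Mesh,
+ * storing the results in 'rdata'; only valid when ME_AUTOSMOOTH is set (see the assert). */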
+static void mesh_render_calc_normals_loop_and_poly(const Mesh *me, const float split_angle, MeshRenderData *rdata)
+{
+ BLI_assert((me->flag & ME_AUTOSMOOTH) != 0);
+
+ int totloop = me->totloop;
+ int totpoly = me->totpoly;
+ float (*loop_normals)[3] = MEM_mallocN(sizeof(*loop_normals) * totloop, __func__);
+ float (*poly_normals)[3] = MEM_mallocN(sizeof(*poly_normals) * totpoly, __func__);
+ short (*clnors)[2] = CustomData_get_layer(&me->ldata, CD_CUSTOMLOOPNORMAL);
+
+ BKE_mesh_calc_normals_poly(
+ me->mvert, NULL, me->totvert,
+ me->mloop, me->mpoly, totloop, totpoly, poly_normals, false);
+
+ BKE_mesh_normals_loop_split(
+ me->mvert, me->totvert, me->medge, me->totedge,
+ me->mloop, loop_normals, totloop, me->mpoly, poly_normals, totpoly,
+ true, split_angle, NULL, clnors, NULL);
+
+ rdata->loop_len = totloop;
+ rdata->poly_len = totpoly;
+ rdata->loop_normals = loop_normals;
+ rdata->poly_normals = poly_normals;
+}
+
+
+/**
+ * TODO(campbell): 'gpumat_array' may include materials linked to the object.
+ * While not default, object materials should be supported.
+ * Although this only impacts the data that's generated, not the materials that are displayed.
+ */
+static MeshRenderData *mesh_render_data_create_ex(
+ Mesh *me, const int types,
+ struct GPUMaterial **gpumat_array, uint gpumat_array_len)
+{
+ MeshRenderData *rdata = MEM_callocN(sizeof(*rdata), __func__);
+ rdata->types = types;
+ rdata->mat_len = mesh_render_mat_len_get(me);
+
+ CustomData_reset(&rdata->cd.output.ldata);
+
+ const bool is_auto_smooth = (me->flag & ME_AUTOSMOOTH) != 0;
+ const float split_angle = is_auto_smooth ? me->smoothresh : (float)M_PI;
+
+ if (me->edit_btmesh) {
+ BMEditMesh *embm = me->edit_btmesh;
+ BMesh *bm = embm->bm;
+
+ rdata->edit_bmesh = embm;
+
+ int bm_ensure_types = 0;
+ if (types & (MR_DATATYPE_VERT)) {
+ rdata->vert_len = bm->totvert;
+ bm_ensure_types |= BM_VERT;
+ }
+ if (types & (MR_DATATYPE_EDGE)) {
+ rdata->edge_len = bm->totedge;
+ bm_ensure_types |= BM_EDGE;
+ }
+ if (types & MR_DATATYPE_LOOPTRI) {
+ BKE_editmesh_tessface_calc(embm);
+ rdata->tri_len = embm->tottri;
+ }
+ if (types & MR_DATATYPE_LOOP) {
+ int totloop = bm->totloop;
+ if (is_auto_smooth) {
+ rdata->loop_normals = MEM_mallocN(sizeof(*rdata->loop_normals) * totloop, __func__);
+ BM_loops_calc_normal_vcos(bm, NULL, NULL, NULL, true, split_angle, rdata->loop_normals, NULL, NULL, -1);
+ }
+ rdata->loop_len = totloop;
+ bm_ensure_types |= BM_LOOP;
+ }
+ if (types & MR_DATATYPE_POLY) {
+ rdata->poly_len = bm->totface;
+ bm_ensure_types |= BM_FACE;
+ }
+ if (types & MR_DATATYPE_OVERLAY) {
+ rdata->efa_act = BM_mesh_active_face_get(bm, false, true);
+ rdata->eed_act = BM_mesh_active_edge_get(bm);
+ rdata->eve_act = BM_mesh_active_vert_get(bm);
+ rdata->cd.offset.crease = CustomData_get_offset(&bm->edata, CD_CREASE);
+ rdata->cd.offset.bweight = CustomData_get_offset(&bm->edata, CD_BWEIGHT);
+ }
+ if (types & (MR_DATATYPE_DVERT)) {
+ bm_ensure_types |= BM_VERT;
+ }
+
+ BM_mesh_elem_index_ensure(bm, bm_ensure_types);
+ BM_mesh_elem_table_ensure(bm, bm_ensure_types & ~BM_LOOP);
+ if (types & MR_DATATYPE_OVERLAY) {
+ rdata->loose_vert_len = rdata->loose_edge_len = 0;
+
+ int *lverts = rdata->loose_verts = MEM_mallocN(rdata->vert_len * sizeof(int), "Loose Vert");
+ int *ledges = rdata->loose_edges = MEM_mallocN(rdata->edge_len * sizeof(int), "Loose Edges");
+
+ {
+ BLI_assert((bm->elem_table_dirty & BM_VERT) == 0);
+ BMVert **vtable = bm->vtable;
+ for (int i = 0; i < bm->totvert; i++) {
+ const BMVert *eve = vtable[i];
+ if (!BM_elem_flag_test(eve, BM_ELEM_HIDDEN)) {
+ /* Loose vert */
+ if (eve->e == NULL || !bm_vert_has_visible_edge(eve)) {
+ lverts[rdata->loose_vert_len++] = i;
+ }
+ }
+ }
+ }
+
+ {
+ BLI_assert((bm->elem_table_dirty & BM_EDGE) == 0);
+ BMEdge **etable = bm->etable;
+ for (int i = 0; i < bm->totedge; i++) {
+ const BMEdge *eed = etable[i];
+ if (!BM_elem_flag_test(eed, BM_ELEM_HIDDEN)) {
+ /* Loose edge */
+ if (eed->l == NULL || !bm_edge_has_visible_face(eed)) {
+ ledges[rdata->loose_edge_len++] = i;
+ }
+ }
+ }
+ }
+
+ rdata->loose_verts = MEM_reallocN(rdata->loose_verts, rdata->loose_vert_len * sizeof(int));
+ rdata->loose_edges = MEM_reallocN(rdata->loose_edges, rdata->loose_edge_len * sizeof(int));
+ }
+ }
+ else {
+ if (types & (MR_DATATYPE_VERT)) {
+ rdata->vert_len = me->totvert;
+ rdata->mvert = CustomData_get_layer(&me->vdata, CD_MVERT);
+ }
+ if (types & (MR_DATATYPE_EDGE)) {
+ rdata->edge_len = me->totedge;
+ rdata->medge = CustomData_get_layer(&me->edata, CD_MEDGE);
+ }
+ if (types & MR_DATATYPE_LOOPTRI) {
+ const int tri_len = rdata->tri_len = poly_to_tri_count(me->totpoly, me->totloop);
+ rdata->mlooptri = MEM_mallocN(sizeof(*rdata->mlooptri) * tri_len, __func__);
+ BKE_mesh_recalc_looptri(me->mloop, me->mpoly, me->mvert, me->totloop, me->totpoly, rdata->mlooptri);
+ }
+ if (types & MR_DATATYPE_LOOP) {
+ rdata->loop_len = me->totloop;
+ rdata->mloop = CustomData_get_layer(&me->ldata, CD_MLOOP);
+
+ if (is_auto_smooth) {
+ mesh_render_calc_normals_loop_and_poly(me, split_angle, rdata);
+ }
+ }
+ if (types & MR_DATATYPE_POLY) {
+ rdata->poly_len = me->totpoly;
+ rdata->mpoly = CustomData_get_layer(&me->pdata, CD_MPOLY);
+ }
+ if (types & MR_DATATYPE_DVERT) {
+ rdata->vert_len = me->totvert;
+ rdata->dvert = CustomData_get_layer(&me->vdata, CD_MDEFORMVERT);
+ }
+ if (types & MR_DATATYPE_LOOPCOL) {
+ rdata->loop_len = me->totloop;
+ rdata->mloopcol = CustomData_get_layer(&me->ldata, CD_MLOOPCOL);
+ }
+ if (types & MR_DATATYPE_LOOPUV) {
+ rdata->loop_len = me->totloop;
+ rdata->mloopuv = CustomData_get_layer(&me->ldata, CD_MLOOPUV);
+ }
+ }
+
+ if (types & MR_DATATYPE_SHADING) {
+ CustomData *cd_vdata, *cd_ldata;
+
+ if (me->edit_btmesh) {
+ BMesh *bm = me->edit_btmesh->bm;
+ cd_vdata = &bm->vdata;
+ cd_ldata = &bm->ldata;
+ }
+ else {
+ cd_vdata = &me->vdata;
+ cd_ldata = &me->ldata;
+ }
+
+ /* Add edge/poly if we need them */
+ uchar cd_vused[CD_NUMTYPES] = {0};
+ ushort cd_lused[CD_NUMTYPES] = {0};
+
+ mesh_cd_calc_used_gpu_layers(
+ cd_vdata, cd_vused,
+ cd_ldata, cd_lused,
+ gpumat_array, gpumat_array_len);
+
+
+ rdata->cd.layers.uv_active = CustomData_get_active_layer(cd_ldata, CD_MLOOPUV);
+ rdata->cd.layers.vcol_active = CustomData_get_active_layer(cd_ldata, CD_MLOOPCOL);
+ rdata->cd.layers.tangent_active = rdata->cd.layers.uv_active;
+
+#define CD_VALIDATE_ACTIVE_LAYER(active_index, used) \
+ if ((active_index != -1) && (used & (1 << active_index)) == 0) { \
+ active_index = -1; \
+ } ((void)0)
+
+ CD_VALIDATE_ACTIVE_LAYER(rdata->cd.layers.uv_active, cd_lused[CD_MLOOPUV]);
+ CD_VALIDATE_ACTIVE_LAYER(rdata->cd.layers.tangent_active, cd_lused[CD_TANGENT]);
+ CD_VALIDATE_ACTIVE_LAYER(rdata->cd.layers.vcol_active, cd_lused[CD_MLOOPCOL]);
+
+#undef CD_VALIDATE_ACTIVE_LAYER
+
+ if (cd_vused[CD_ORCO] & 1) {
+ rdata->orco = CustomData_get_layer(cd_vdata, CD_ORCO);
+			/* If orco is not available, compute it ourselves. */
+ if (!rdata->orco) {
+ if (me->edit_btmesh) {
+ BMesh *bm = me->edit_btmesh->bm;
+ rdata->orco = MEM_mallocN(sizeof(*rdata->orco) * rdata->vert_len, "orco mesh");
+ BLI_assert((bm->elem_table_dirty & BM_VERT) == 0);
+ BMVert **vtable = bm->vtable;
+ for (int i = 0; i < bm->totvert; i++) {
+ copy_v3_v3(rdata->orco[i], vtable[i]->co);
+ }
+ BKE_mesh_orco_verts_transform(me, rdata->orco, rdata->vert_len, 0);
+ }
+ else {
+ rdata->orco = MEM_mallocN(sizeof(*rdata->orco) * rdata->vert_len, "orco mesh");
+ MVert *mvert = rdata->mvert;
+ for (int a = 0; a < rdata->vert_len; a++, mvert++) {
+ copy_v3_v3(rdata->orco[a], mvert->co);
+ }
+ BKE_mesh_orco_verts_transform(me, rdata->orco, rdata->vert_len, 0);
+ }
+ }
+ }
+ else {
+ rdata->orco = NULL;
+ }
+
+ /* don't access mesh directly, instead use vars taken from BMesh or Mesh */
+#define me DONT_USE_THIS
+#ifdef me /* quiet warning */
+#endif
+ struct {
+ uint uv_len;
+ uint vcol_len;
+ } cd_layers_src = {
+ .uv_len = CustomData_number_of_layers(cd_ldata, CD_MLOOPUV),
+ .vcol_len = CustomData_number_of_layers(cd_ldata, CD_MLOOPCOL),
+ };
+
+ rdata->cd.layers.uv_len = count_bits_i(cd_lused[CD_MLOOPUV]);
+ rdata->cd.layers.tangent_len = count_bits_i(cd_lused[CD_TANGENT]);
+ rdata->cd.layers.vcol_len = count_bits_i(cd_lused[CD_MLOOPCOL]);
+
+ rdata->cd.layers.uv = MEM_mallocN(sizeof(*rdata->cd.layers.uv) * rdata->cd.layers.uv_len, __func__);
+ rdata->cd.layers.vcol = MEM_mallocN(sizeof(*rdata->cd.layers.vcol) * rdata->cd.layers.vcol_len, __func__);
+ rdata->cd.layers.tangent = MEM_mallocN(sizeof(*rdata->cd.layers.tangent) * rdata->cd.layers.tangent_len, __func__);
+
+ rdata->cd.uuid.uv = MEM_mallocN(sizeof(*rdata->cd.uuid.uv) * rdata->cd.layers.uv_len, __func__);
+ rdata->cd.uuid.vcol = MEM_mallocN(sizeof(*rdata->cd.uuid.vcol) * rdata->cd.layers.vcol_len, __func__);
+ rdata->cd.uuid.tangent = MEM_mallocN(sizeof(*rdata->cd.uuid.tangent) * rdata->cd.layers.tangent_len, __func__);
+
+ rdata->cd.offset.uv = MEM_mallocN(sizeof(*rdata->cd.offset.uv) * rdata->cd.layers.uv_len, __func__);
+ rdata->cd.offset.vcol = MEM_mallocN(sizeof(*rdata->cd.offset.vcol) * rdata->cd.layers.vcol_len, __func__);
+
+ /* Allocate max */
+ rdata->cd.layers.auto_vcol = MEM_callocN(
+ sizeof(*rdata->cd.layers.auto_vcol) * rdata->cd.layers.vcol_len, __func__);
+ rdata->cd.uuid.auto_mix = MEM_mallocN(
+ sizeof(*rdata->cd.uuid.auto_mix) * (rdata->cd.layers.vcol_len + rdata->cd.layers.uv_len), __func__);
+
+ /* XXX FIXME XXX */
+ /* We use a hash to identify each data layer based on its name.
+		 * Gawain then searches for this name in the current shader and binds it if it exists.
+		 * NOTE: This is prone to hash collisions.
+		 * One solution would be to format the CD layer name
+		 * into a safe GLSL variable name, without name clashes.
+		 * NOTE 2: Replicate changes to code_generate_vertex_new() in gpu_codegen.c */
+ if (rdata->cd.layers.vcol_len != 0) {
+ for (int i_src = 0, i_dst = 0; i_src < cd_layers_src.vcol_len; i_src++, i_dst++) {
+ if ((cd_lused[CD_MLOOPCOL] & (1 << i_src)) == 0) {
+ i_dst--;
+ if (rdata->cd.layers.vcol_active >= i_src) {
+ rdata->cd.layers.vcol_active--;
+ }
+ }
+ else {
+ const char *name = CustomData_get_layer_name(cd_ldata, CD_MLOOPCOL, i_src);
+ unsigned int hash = BLI_ghashutil_strhash_p(name);
+ BLI_snprintf(rdata->cd.uuid.vcol[i_dst], sizeof(*rdata->cd.uuid.vcol), "c%u", hash);
+ rdata->cd.layers.vcol[i_dst] = CustomData_get_layer_n(cd_ldata, CD_MLOOPCOL, i_src);
+ if (rdata->edit_bmesh) {
+ rdata->cd.offset.vcol[i_dst] = CustomData_get_n_offset(
+ &rdata->edit_bmesh->bm->ldata, CD_MLOOPCOL, i_src);
+ }
+
+ /* Gather number of auto layers. */
+ /* We only do vcols that are not overridden by uvs */
+ if (CustomData_get_named_layer_index(cd_ldata, CD_MLOOPUV, name) == -1) {
+ BLI_snprintf(
+ rdata->cd.uuid.auto_mix[rdata->cd.layers.uv_len + i_dst],
+ sizeof(*rdata->cd.uuid.auto_mix), "a%u", hash);
+ rdata->cd.layers.auto_vcol[i_dst] = true;
+ }
+ }
+ }
+ }
+
+ /* Start Fresh */
+ CustomData_free_layers(cd_ldata, CD_TANGENT, rdata->loop_len);
+ CustomData_free_layers(cd_ldata, CD_MLOOPTANGENT, rdata->loop_len);
+
+ if (rdata->cd.layers.uv_len != 0) {
+ for (int i_src = 0, i_dst = 0; i_src < cd_layers_src.uv_len; i_src++, i_dst++) {
+ if ((cd_lused[CD_MLOOPUV] & (1 << i_src)) == 0) {
+ i_dst--;
+ if (rdata->cd.layers.uv_active >= i_src) {
+ rdata->cd.layers.uv_active--;
+ }
+ }
+ else {
+ const char *name = CustomData_get_layer_name(cd_ldata, CD_MLOOPUV, i_src);
+ unsigned int hash = BLI_ghashutil_strhash_p(name);
+
+ BLI_snprintf(rdata->cd.uuid.uv[i_dst], sizeof(*rdata->cd.uuid.uv), "u%u", hash);
+ rdata->cd.layers.uv[i_dst] = CustomData_get_layer_n(cd_ldata, CD_MLOOPUV, i_src);
+ if (rdata->edit_bmesh) {
+ rdata->cd.offset.uv[i_dst] = CustomData_get_n_offset(
+ &rdata->edit_bmesh->bm->ldata, CD_MLOOPUV, i_src);
+ }
+ BLI_snprintf(rdata->cd.uuid.auto_mix[i_dst], sizeof(*rdata->cd.uuid.auto_mix), "a%u", hash);
+ }
+ }
+ }
+
+ if (rdata->cd.layers.tangent_len != 0) {
+
+ /* -------------------------------------------------------------------- */
+ /* Pre-calculate tangents into 'rdata->cd.output.ldata' */
+
+ BLI_assert(!CustomData_has_layer(&rdata->cd.output.ldata, CD_TANGENT));
+
+ /* Tangent Names */
+ char tangent_names[MAX_MTFACE][MAX_NAME];
+ for (int i_src = 0, i_dst = 0; i_src < cd_layers_src.uv_len; i_src++, i_dst++) {
+ if ((cd_lused[CD_TANGENT] & (1 << i_src)) == 0) {
+ i_dst--;
+ }
+ else {
+ BLI_strncpy(
+ tangent_names[i_dst],
+ CustomData_get_layer_name(cd_ldata, CD_MLOOPUV, i_src), MAX_NAME);
+ }
+ }
+
+ /* If tangent from orco is requested, decrement tangent_len */
+ int actual_tangent_len = (cd_lused[CD_TANGENT] & DM_TANGENT_MASK_ORCO) ?
+ rdata->cd.layers.tangent_len - 1 : rdata->cd.layers.tangent_len;
+ if (rdata->edit_bmesh) {
+ BMEditMesh *em = rdata->edit_bmesh;
+ BMesh *bm = em->bm;
+
+ if (is_auto_smooth && rdata->loop_normals == NULL) {
+				/* Should we store the previous array of `loop_normals` somewhere? */
+ rdata->loop_len = bm->totloop;
+ rdata->loop_normals = MEM_mallocN(sizeof(*rdata->loop_normals) * rdata->loop_len, __func__);
+ BM_loops_calc_normal_vcos(bm, NULL, NULL, NULL, true, split_angle, rdata->loop_normals, NULL, NULL, -1);
+ }
+
+ bool calc_active_tangent = false;
+
+ BKE_editmesh_loop_tangent_calc(
+ em, calc_active_tangent,
+ tangent_names, actual_tangent_len,
+ rdata->poly_normals, rdata->loop_normals,
+ rdata->orco,
+ &rdata->cd.output.ldata, bm->totloop,
+ &rdata->cd.output.tangent_mask);
+ }
+ else {
+#undef me
+
+ if (is_auto_smooth && rdata->loop_normals == NULL) {
+ /* Should we store the previous array of `loop_normals` in CustomData? */
+ mesh_render_calc_normals_loop_and_poly(me, split_angle, rdata);
+ }
+
+ bool calc_active_tangent = false;
+
+ BKE_mesh_calc_loop_tangent_ex(
+ me->mvert,
+ me->mpoly, me->totpoly,
+ me->mloop,
+ rdata->mlooptri, rdata->tri_len,
+ cd_ldata,
+ calc_active_tangent,
+ tangent_names, actual_tangent_len,
+ rdata->poly_normals, rdata->loop_normals,
+ rdata->orco,
+ &rdata->cd.output.ldata, me->totloop,
+ &rdata->cd.output.tangent_mask);
+
+ /* If we store tangents in the mesh, set temporary. */
+#if 0
+ CustomData_set_layer_flag(cd_ldata, CD_TANGENT, CD_FLAG_TEMPORARY);
+#endif
+
+#define me DONT_USE_THIS
+#ifdef me /* quiet warning */
+#endif
+ }
+
+ /* End tangent calculation */
+ /* -------------------------------------------------------------------- */
+
+ BLI_assert(CustomData_number_of_layers(&rdata->cd.output.ldata, CD_TANGENT) == rdata->cd.layers.tangent_len);
+
+ int i_dst = 0;
+ for (int i_src = 0; i_src < cd_layers_src.uv_len; i_src++, i_dst++) {
+ if ((cd_lused[CD_TANGENT] & (1 << i_src)) == 0) {
+ i_dst--;
+ if (rdata->cd.layers.tangent_active >= i_src) {
+ rdata->cd.layers.tangent_active--;
+ }
+ }
+ else {
+ const char *name = CustomData_get_layer_name(cd_ldata, CD_MLOOPUV, i_src);
+ unsigned int hash = BLI_ghashutil_strhash_p(name);
+
+ BLI_snprintf(rdata->cd.uuid.tangent[i_dst], sizeof(*rdata->cd.uuid.tangent), "t%u", hash);
+
+ /* Done adding tangents. */
+
+ /* note: BKE_editmesh_loop_tangent_calc calculates 'CD_TANGENT',
+ * not 'CD_MLOOPTANGENT' (as done below). It's OK, they're compatible. */
+
+ /* note: normally we'd use 'i_src' here, but 'i_dst' is in sync with 'rdata->cd.output' */
+ rdata->cd.layers.tangent[i_dst] = CustomData_get_layer_n(&rdata->cd.output.ldata, CD_TANGENT, i_dst);
+ if (rdata->tri_len != 0) {
+ BLI_assert(rdata->cd.layers.tangent[i_dst] != NULL);
+ }
+ }
+ }
+ if (cd_lused[CD_TANGENT] & DM_TANGENT_MASK_ORCO) {
+ const char *name = CustomData_get_layer_name(&rdata->cd.output.ldata, CD_TANGENT, i_dst);
+ unsigned int hash = BLI_ghashutil_strhash_p(name);
+ BLI_snprintf(rdata->cd.uuid.tangent[i_dst], sizeof(*rdata->cd.uuid.tangent), "t%u", hash);
+
+ rdata->cd.layers.tangent[i_dst] = CustomData_get_layer_n(&rdata->cd.output.ldata, CD_TANGENT, i_dst);
+ }
+ }
+
+#undef me
+ }
+
+ return rdata;
+}
+
+static void mesh_render_data_free(MeshRenderData *rdata)
+{
+ MEM_SAFE_FREE(rdata->orco);
+ MEM_SAFE_FREE(rdata->cd.offset.uv);
+ MEM_SAFE_FREE(rdata->cd.offset.vcol);
+ MEM_SAFE_FREE(rdata->cd.uuid.auto_mix);
+ MEM_SAFE_FREE(rdata->cd.uuid.uv);
+ MEM_SAFE_FREE(rdata->cd.uuid.vcol);
+ MEM_SAFE_FREE(rdata->cd.uuid.tangent);
+ MEM_SAFE_FREE(rdata->cd.layers.uv);
+ MEM_SAFE_FREE(rdata->cd.layers.vcol);
+ MEM_SAFE_FREE(rdata->cd.layers.tangent);
+ MEM_SAFE_FREE(rdata->cd.layers.auto_vcol);
+ MEM_SAFE_FREE(rdata->loose_verts);
+ MEM_SAFE_FREE(rdata->loose_edges);
+ MEM_SAFE_FREE(rdata->edges_adjacent_polys);
+ MEM_SAFE_FREE(rdata->mlooptri);
+ MEM_SAFE_FREE(rdata->loop_normals);
+ MEM_SAFE_FREE(rdata->poly_normals);
+ MEM_SAFE_FREE(rdata->poly_normals_pack);
+ MEM_SAFE_FREE(rdata->vert_normals_pack);
+ MEM_SAFE_FREE(rdata->vert_weight_color);
+ MEM_SAFE_FREE(rdata->edge_select_bool);
+ MEM_SAFE_FREE(rdata->vert_color);
+
+ CustomData_free(&rdata->cd.output.ldata, rdata->loop_len);
+
+ MEM_freeN(rdata);
+}
+
+static MeshRenderData *mesh_render_data_create(Mesh *me, const int types)
+{
+ return mesh_render_data_create_ex(me, types, NULL, 0);
+}
+
+/** \} */
+
+
+/* ---------------------------------------------------------------------- */
+
+/** \name Accessor Functions
+ * \{ */
+
+static const char *mesh_render_data_uv_auto_layer_uuid_get(const MeshRenderData *rdata, int layer)
+{
+ BLI_assert(rdata->types & MR_DATATYPE_SHADING);
+ return rdata->cd.uuid.auto_mix[layer];
+}
+
+static const char *mesh_render_data_vcol_auto_layer_uuid_get(const MeshRenderData *rdata, int layer)
+{
+ BLI_assert(rdata->types & MR_DATATYPE_SHADING);
+ return rdata->cd.uuid.auto_mix[rdata->cd.layers.uv_len + layer];
+}
+
+static const char *mesh_render_data_uv_layer_uuid_get(const MeshRenderData *rdata, int layer)
+{
+ BLI_assert(rdata->types & MR_DATATYPE_SHADING);
+ return rdata->cd.uuid.uv[layer];
+}
+
+static const char *mesh_render_data_vcol_layer_uuid_get(const MeshRenderData *rdata, int layer)
+{
+ BLI_assert(rdata->types & MR_DATATYPE_SHADING);
+ return rdata->cd.uuid.vcol[layer];
+}
+
+static const char *mesh_render_data_tangent_layer_uuid_get(const MeshRenderData *rdata, int layer)
+{
+ BLI_assert(rdata->types & MR_DATATYPE_SHADING);
+ return rdata->cd.uuid.tangent[layer];
+}
+
+static int mesh_render_data_verts_len_get(const MeshRenderData *rdata)
+{
+ BLI_assert(rdata->types & MR_DATATYPE_VERT);
+ return rdata->vert_len;
+}
+
+static int mesh_render_data_loose_verts_len_get(const MeshRenderData *rdata)
+{
+ BLI_assert(rdata->types & MR_DATATYPE_OVERLAY);
+ return rdata->loose_vert_len;
+}
+
+static int mesh_render_data_edges_len_get(const MeshRenderData *rdata)
+{
+ BLI_assert(rdata->types & MR_DATATYPE_EDGE);
+ return rdata->edge_len;
+}
+
+static int mesh_render_data_loose_edges_len_get(const MeshRenderData *rdata)
+{
+ BLI_assert(rdata->types & MR_DATATYPE_OVERLAY);
+ return rdata->loose_edge_len;
+}
+
+static int mesh_render_data_looptri_len_get(const MeshRenderData *rdata)
+{
+ BLI_assert(rdata->types & MR_DATATYPE_LOOPTRI);
+ return rdata->tri_len;
+}
+
+static int mesh_render_data_mat_len_get(const MeshRenderData *rdata)
+{
+ BLI_assert(rdata->types & MR_DATATYPE_POLY);
+ return rdata->mat_len;
+}
+
+static int UNUSED_FUNCTION(mesh_render_data_loops_len_get)(const MeshRenderData *rdata)
+{
+ BLI_assert(rdata->types & MR_DATATYPE_LOOP);
+ return rdata->loop_len;
+}
+
+static int mesh_render_data_polys_len_get(const MeshRenderData *rdata)
+{
+ BLI_assert(rdata->types & MR_DATATYPE_POLY);
+ return rdata->poly_len;
+}
+
+/** \} */
+
+
+/* ---------------------------------------------------------------------- */
+
+/** \name Internal Cache (Lazy Initialization)
+ * \{ */
+
+/** Ensure #MeshRenderData.poly_normals_pack */
+static void mesh_render_data_ensure_poly_normals_pack(MeshRenderData *rdata)
+{
+ Gwn_PackedNormal *pnors_pack = rdata->poly_normals_pack;
+ if (pnors_pack == NULL) {
+ if (rdata->edit_bmesh) {
+ BMesh *bm = rdata->edit_bmesh->bm;
+ BMIter fiter;
+ BMFace *efa;
+ int i;
+
+ pnors_pack = rdata->poly_normals_pack = MEM_mallocN(sizeof(*pnors_pack) * rdata->poly_len, __func__);
+ BM_ITER_MESH_INDEX(efa, &fiter, bm, BM_FACES_OF_MESH, i) {
+ pnors_pack[i] = GWN_normal_convert_i10_v3(efa->no);
+ }
+ }
+ else {
+ float (*pnors)[3] = rdata->poly_normals;
+
+ if (!pnors) {
+ pnors = rdata->poly_normals = MEM_mallocN(sizeof(*pnors) * rdata->poly_len, __func__);
+ BKE_mesh_calc_normals_poly(
+ rdata->mvert, NULL, rdata->vert_len,
+ rdata->mloop, rdata->mpoly, rdata->loop_len, rdata->poly_len, pnors, true);
+ }
+
+ pnors_pack = rdata->poly_normals_pack = MEM_mallocN(sizeof(*pnors_pack) * rdata->poly_len, __func__);
+ for (int i = 0; i < rdata->poly_len; i++) {
+ pnors_pack[i] = GWN_normal_convert_i10_v3(pnors[i]);
+ }
+ }
+ }
+}
+
+/** Ensure #MeshRenderData.vert_normals_pack */
+static void mesh_render_data_ensure_vert_normals_pack(MeshRenderData *rdata)
+{
+ Gwn_PackedNormal *vnors_pack = rdata->vert_normals_pack;
+ if (vnors_pack == NULL) {
+ if (rdata->edit_bmesh) {
+ BMesh *bm = rdata->edit_bmesh->bm;
+ BMIter viter;
+ BMVert *eve;
+ int i;
+
+ vnors_pack = rdata->vert_normals_pack = MEM_mallocN(sizeof(*vnors_pack) * rdata->vert_len, __func__);
+ BM_ITER_MESH_INDEX(eve, &viter, bm, BM_VERT, i) {
+ vnors_pack[i] = GWN_normal_convert_i10_v3(eve->no);
+ }
+ }
+ else {
+ /* data from mesh used directly */
+ BLI_assert(0);
+ }
+ }
+}
+
+
+/** Ensure #MeshRenderData.vert_color */
+static void mesh_render_data_ensure_vert_color(MeshRenderData *rdata)
+{
+ char (*vcol)[3] = rdata->vert_color;
+ if (vcol == NULL) {
+ if (rdata->edit_bmesh) {
+ BMesh *bm = rdata->edit_bmesh->bm;
+ const int cd_loop_color_offset = CustomData_get_offset(&bm->ldata, CD_MLOOPCOL);
+ if (cd_loop_color_offset == -1) {
+ goto fallback;
+ }
+
+ vcol = rdata->vert_color = MEM_mallocN(sizeof(*vcol) * rdata->loop_len, __func__);
+
+ BMIter fiter;
+ BMFace *efa;
+ int i = 0;
+
+ BM_ITER_MESH(efa, &fiter, bm, BM_FACES_OF_MESH) {
+ BMLoop *l_iter, *l_first;
+ l_iter = l_first = BM_FACE_FIRST_LOOP(efa);
+ do {
+ const MLoopCol *lcol = BM_ELEM_CD_GET_VOID_P(l_iter, cd_loop_color_offset);
+ vcol[i][0] = lcol->r;
+ vcol[i][1] = lcol->g;
+ vcol[i][2] = lcol->b;
+ i += 1;
+ } while ((l_iter = l_iter->next) != l_first);
+ }
+ BLI_assert(i == rdata->loop_len);
+ }
+ else {
+ if (rdata->mloopcol == NULL) {
+ goto fallback;
+ }
+
+ vcol = rdata->vert_color = MEM_mallocN(sizeof(*vcol) * rdata->loop_len, __func__);
+
+ for (int i = 0; i < rdata->loop_len; i++) {
+ vcol[i][0] = rdata->mloopcol[i].r;
+ vcol[i][1] = rdata->mloopcol[i].g;
+ vcol[i][2] = rdata->mloopcol[i].b;
+ }
+ }
+ }
+ return;
+
+fallback:
+ vcol = rdata->vert_color = MEM_mallocN(sizeof(*vcol) * rdata->loop_len, __func__);
+
+ for (int i = 0; i < rdata->loop_len; i++) {
+ vcol[i][0] = 255;
+ vcol[i][1] = 255;
+ vcol[i][2] = 255;
+ }
+}
+
+/* TODO, move into shader? */
+static void rgb_from_weight(float r_rgb[3], const float weight)
+{
+ const float blend = ((weight / 2.0f) + 0.5f);
+
+ if (weight <= 0.25f) { /* blue->cyan */
+ r_rgb[0] = 0.0f;
+ r_rgb[1] = blend * weight * 4.0f;
+ r_rgb[2] = blend;
+ }
+ else if (weight <= 0.50f) { /* cyan->green */
+ r_rgb[0] = 0.0f;
+ r_rgb[1] = blend;
+ r_rgb[2] = blend * (1.0f - ((weight - 0.25f) * 4.0f));
+ }
+ else if (weight <= 0.75f) { /* green->yellow */
+ r_rgb[0] = blend * ((weight - 0.50f) * 4.0f);
+ r_rgb[1] = blend;
+ r_rgb[2] = 0.0f;
+ }
+ else if (weight <= 1.0f) { /* yellow->red */
+ r_rgb[0] = blend;
+ r_rgb[1] = blend * (1.0f - ((weight - 0.75f) * 4.0f));
+ r_rgb[2] = 0.0f;
+ }
+ else {
+ /* exceptional value, unclamped or nan,
+ * avoid uninitialized memory use */
+ r_rgb[0] = 1.0f;
+ r_rgb[1] = 0.0f;
+ r_rgb[2] = 1.0f;
+ }
+}
+
+
+/** Ensure #MeshRenderData.vert_weight_color */
+static void mesh_render_data_ensure_vert_weight_color(MeshRenderData *rdata, const int defgroup)
+{
+ float (*vweight)[3] = rdata->vert_weight_color;
+ if (vweight == NULL) {
+ if (defgroup == -1) {
+ goto fallback;
+ }
+
+ if (rdata->edit_bmesh) {
+ BMesh *bm = rdata->edit_bmesh->bm;
+ const int cd_dvert_offset = CustomData_get_offset(&bm->vdata, CD_MDEFORMVERT);
+ if (cd_dvert_offset == -1) {
+ goto fallback;
+ }
+
+ BMIter viter;
+ BMVert *eve;
+ int i;
+
+ vweight = rdata->vert_weight_color = MEM_mallocN(sizeof(*vweight) * rdata->vert_len, __func__);
+ BM_ITER_MESH_INDEX(eve, &viter, bm, BM_VERT, i) {
+ const MDeformVert *dvert = BM_ELEM_CD_GET_VOID_P(eve, cd_dvert_offset);
+ float weight = defvert_find_weight(dvert, defgroup);
+ if (U.flag & USER_CUSTOM_RANGE) {
+ BKE_colorband_evaluate(&U.coba_weight, weight, vweight[i]);
+ }
+ else {
+ rgb_from_weight(vweight[i], weight);
+ }
+ }
+ }
+ else {
+ if (rdata->dvert == NULL) {
+ goto fallback;
+ }
+
+ vweight = rdata->vert_weight_color = MEM_mallocN(sizeof(*vweight) * rdata->vert_len, __func__);
+ for (int i = 0; i < rdata->vert_len; i++) {
+ float weight = defvert_find_weight(&rdata->dvert[i], defgroup);
+ if (U.flag & USER_CUSTOM_RANGE) {
+ BKE_colorband_evaluate(&U.coba_weight, weight, vweight[i]);
+ }
+ else {
+ rgb_from_weight(vweight[i], weight);
+ }
+ }
+ }
+ }
+ return;
+
+fallback:
+ vweight = rdata->vert_weight_color = MEM_callocN(sizeof(*vweight) * rdata->vert_len, __func__);
+
+ for (int i = 0; i < rdata->vert_len; i++) {
+ vweight[i][2] = 0.5f;
+ }
+}
+
+/** Ensure #MeshRenderData.edge_select_bool */
+static void mesh_render_data_ensure_edge_select_bool(MeshRenderData *rdata, bool use_wire)
+{
+ bool *edge_select_bool = rdata->edge_select_bool;
+ if (edge_select_bool == NULL) {
+ edge_select_bool = rdata->edge_select_bool =
+ MEM_callocN(sizeof(*edge_select_bool) * rdata->edge_len, __func__);
+
+ for (int i = 0; i < rdata->poly_len; i++) {
+ MPoly *poly = &rdata->mpoly[i];
+
+ if (poly->flag & ME_FACE_SEL) {
+ for (int j = 0; j < poly->totloop; j++) {
+ MLoop *loop = &rdata->mloop[poly->loopstart + j];
+ if (use_wire) {
+ edge_select_bool[loop->e] = true;
+ }
+ else {
+ /* Not totally correct, will cause problems for edges with 3x faces. */
+ edge_select_bool[loop->e] = !edge_select_bool[loop->e];
+ }
+ }
+ }
+ }
+ }
+}
+
+/** \} */
+
+/* ---------------------------------------------------------------------- */
+
+/** \name Internal Cache Generation
+ * \{ */
+
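+/* Get a polygon's normal, center and selection state;
+ * returns false when the face is hidden in edit-mode. */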
+static bool mesh_render_data_pnors_pcenter_select_get(
+ MeshRenderData *rdata, const int poly,
+ float r_pnors[3], float r_center[3], bool *r_selected)
+{
+ BLI_assert(rdata->types & (MR_DATATYPE_VERT | MR_DATATYPE_LOOP | MR_DATATYPE_POLY));
+
+ if (rdata->edit_bmesh) {
+ const BMFace *efa = BM_face_at_index(rdata->edit_bmesh->bm, poly);
+ if (BM_elem_flag_test(efa, BM_ELEM_HIDDEN)) {
+ return false;
+ }
+ BM_face_calc_center_mean(efa, r_center);
+ copy_v3_v3(r_pnors, efa->no);
+ *r_selected = (BM_elem_flag_test(efa, BM_ELEM_SELECT) != 0) ? true : false;
+ }
+ else {
+ MVert *mvert = rdata->mvert;
+ const MPoly *mpoly = rdata->mpoly + poly;
+ const MLoop *mloop = rdata->mloop + mpoly->loopstart;
+
+ BKE_mesh_calc_poly_center(mpoly, mloop, mvert, r_center);
+ BKE_mesh_calc_poly_normal(mpoly, mloop, mvert, r_pnors);
+
+ *r_selected = false; /* No selection if not in edit mode */
+ }
+
+ return true;
+}
+
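+/* Get an edge's two vertex coordinates and, when the edge is manifold (exactly two
+ * adjacent faces), the normals of those faces; for non-edit meshes the edge-to-poly
+ * adjacency table is built lazily on first use. */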
+static bool mesh_render_data_edge_vcos_manifold_pnors(
+ MeshRenderData *rdata, const int edge_index,
+ float **r_vco1, float **r_vco2, float **r_pnor1, float **r_pnor2, bool *r_is_manifold)
+{
+ BLI_assert(rdata->types & (MR_DATATYPE_VERT | MR_DATATYPE_EDGE | MR_DATATYPE_LOOP | MR_DATATYPE_POLY));
+
+ if (rdata->edit_bmesh) {
+ BMesh *bm = rdata->edit_bmesh->bm;
+ BMEdge *eed = BM_edge_at_index(bm, edge_index);
+ if (BM_elem_flag_test(eed, BM_ELEM_HIDDEN)) {
+ return false;
+ }
+ *r_vco1 = eed->v1->co;
+ *r_vco2 = eed->v2->co;
+ if (BM_edge_is_manifold(eed)) {
+ *r_pnor1 = eed->l->f->no;
+ *r_pnor2 = eed->l->radial_next->f->no;
+ *r_is_manifold = true;
+ }
+ else {
+ *r_is_manifold = false;
+ }
+ }
+ else {
+ MVert *mvert = rdata->mvert;
+ MEdge *medge = rdata->medge;
+ EdgeAdjacentPolys *eap = rdata->edges_adjacent_polys;
+ float (*pnors)[3] = rdata->poly_normals;
+
+ if (!eap) {
+ const MLoop *mloop = rdata->mloop;
+ const MPoly *mpoly = rdata->mpoly;
+ const int poly_len = rdata->poly_len;
+ const bool do_pnors = (pnors == NULL);
+
+ eap = rdata->edges_adjacent_polys = MEM_callocN(sizeof(*eap) * rdata->edge_len, __func__);
+ if (do_pnors) {
+ pnors = rdata->poly_normals = MEM_mallocN(sizeof(*pnors) * poly_len, __func__);
+ }
+
+ for (int i = 0; i < poly_len; i++, mpoly++) {
+ if (do_pnors) {
+ BKE_mesh_calc_poly_normal(mpoly, mloop + mpoly->loopstart, mvert, pnors[i]);
+ }
+
+ const int loopend = mpoly->loopstart + mpoly->totloop;
+ for (int j = mpoly->loopstart; j < loopend; j++) {
+ const int edge_idx = mloop[j].e;
+ if (eap[edge_idx].count < 2) {
+ eap[edge_idx].face_index[eap[edge_idx].count] = i;
+ }
+ eap[edge_idx].count++;
+ }
+ }
+ }
+ BLI_assert(eap && pnors);
+
+ *r_vco1 = mvert[medge[edge_index].v1].co;
+ *r_vco2 = mvert[medge[edge_index].v2].co;
+ if (eap[edge_index].count == 2) {
+ *r_pnor1 = pnors[eap[edge_index].face_index[0]];
+ *r_pnor2 = pnors[eap[edge_index].face_index[1]];
+ *r_is_manifold = true;
+ }
+ else {
+ *r_is_manifold = false;
+ }
+ }
+
+ return true;
+}
+
+
+/* First 2 bytes are bit flags,
+ * 3rd is the crease value,
+ * 4th is the bevel weight (see EdgeDrawAttr). */
+enum {
+ VFLAG_VERTEX_ACTIVE = 1 << 0,
+ VFLAG_VERTEX_SELECTED = 1 << 1,
+ VFLAG_FACE_ACTIVE = 1 << 2,
+ VFLAG_FACE_SELECTED = 1 << 3,
+};
+
+enum {
+ VFLAG_EDGE_EXISTS = 1 << 0,
+ VFLAG_EDGE_ACTIVE = 1 << 1,
+ VFLAG_EDGE_SELECTED = 1 << 2,
+ VFLAG_EDGE_SEAM = 1 << 3,
+ VFLAG_EDGE_SHARP = 1 << 4,
+ /* Beware to not go over 1 << 7
+ * (see gpu_shader_edit_mesh_overlay_geom.glsl) */
+};
+
+static unsigned char mesh_render_data_looptri_flag(MeshRenderData *rdata, const BMFace *efa)
+{
+ unsigned char fflag = 0;
+
+ if (efa == rdata->efa_act)
+ fflag |= VFLAG_FACE_ACTIVE;
+
+ if (BM_elem_flag_test(efa, BM_ELEM_SELECT))
+ fflag |= VFLAG_FACE_SELECTED;
+
+ return fflag;
+}
+
+static void mesh_render_data_edge_flag(
+ const MeshRenderData *rdata, const BMEdge *eed,
+ EdgeDrawAttr *eattr)
+{
+ eattr->e_flag |= VFLAG_EDGE_EXISTS;
+
+ if (eed == rdata->eed_act)
+ eattr->e_flag |= VFLAG_EDGE_ACTIVE;
+
+ if (BM_elem_flag_test(eed, BM_ELEM_SELECT))
+ eattr->e_flag |= VFLAG_EDGE_SELECTED;
+
+ if (BM_elem_flag_test(eed, BM_ELEM_SEAM))
+ eattr->e_flag |= VFLAG_EDGE_SEAM;
+
+ if (!BM_elem_flag_test(eed, BM_ELEM_SMOOTH))
+ eattr->e_flag |= VFLAG_EDGE_SHARP;
+
+ /* Use a byte for value range */
+ if (rdata->cd.offset.crease != -1) {
+ float crease = BM_ELEM_CD_GET_FLOAT(eed, rdata->cd.offset.crease);
+ if (crease > 0) {
+ eattr->crease = (char)(crease * 255.0f);
+ }
+ }
+
+ /* Use a byte for value range */
+ if (rdata->cd.offset.bweight != -1) {
+ float bweight = BM_ELEM_CD_GET_FLOAT(eed, rdata->cd.offset.bweight);
+ if (bweight > 0) {
+ eattr->bweight = (char)(bweight * 255.0f);
+ }
+ }
+}
+
+static unsigned char mesh_render_data_vertex_flag(MeshRenderData *rdata, const BMVert *eve)
+{
+
+ unsigned char vflag = 0;
+
+ /* Current vertex */
+ if (eve == rdata->eve_act)
+ vflag |= VFLAG_VERTEX_ACTIVE;
+
+ if (BM_elem_flag_test(eve, BM_ELEM_SELECT))
+ vflag |= VFLAG_VERTEX_SELECTED;
+
+ return vflag;
+}
+
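+/* Write one triangle (3 corners) of the edit-mode overlay into the position, normal
+ * and flag VBOs. Edge flags are only set for corners lying on a real face edge,
+ * so edges introduced by triangulating n-gons stay unflagged. */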
+static void add_overlay_tri(
+ MeshRenderData *rdata, Gwn_VertBuf *vbo_pos, Gwn_VertBuf *vbo_nor, Gwn_VertBuf *vbo_data,
+ const unsigned int pos_id, const unsigned int vnor_id, const unsigned int lnor_id, const unsigned int data_id,
+ const BMLoop **bm_looptri, const int base_vert_idx)
+{
+ unsigned char fflag;
+ unsigned char vflag;
+
+ if (vbo_pos) {
+ for (uint i = 0; i < 3; i++) {
+ const float *pos = bm_looptri[i]->v->co;
+ GWN_vertbuf_attr_set(vbo_pos, pos_id, base_vert_idx + i, pos);
+ }
+ }
+
+ if (vbo_nor) {
+ /* TODO real loop normal */
+ Gwn_PackedNormal lnor = GWN_normal_convert_i10_v3(bm_looptri[0]->f->no);
+ for (uint i = 0; i < 3; i++) {
+ Gwn_PackedNormal vnor = GWN_normal_convert_i10_v3(bm_looptri[i]->v->no);
+ GWN_vertbuf_attr_set(vbo_nor, vnor_id, base_vert_idx + i, &vnor);
+ GWN_vertbuf_attr_set(vbo_nor, lnor_id, base_vert_idx + i, &lnor);
+ }
+ }
+
+ if (vbo_data) {
+ fflag = mesh_render_data_looptri_flag(rdata, bm_looptri[0]->f);
+ uint i_prev = 1, i = 2;
+ for (uint i_next = 0; i_next < 3; i_next++) {
+ vflag = mesh_render_data_vertex_flag(rdata, bm_looptri[i]->v);
+ EdgeDrawAttr eattr = {0};
+ if (bm_looptri[i_next] == bm_looptri[i_prev]->prev) {
+ mesh_render_data_edge_flag(rdata, bm_looptri[i_next]->e, &eattr);
+ }
+ eattr.v_flag = fflag | vflag;
+ GWN_vertbuf_attr_set(vbo_data, data_id, base_vert_idx + i, &eattr);
+
+ i_prev = i;
+ i = i_next;
+ }
+ }
+}
+
+static void add_overlay_loose_edge(
+ MeshRenderData *rdata, Gwn_VertBuf *vbo_pos, Gwn_VertBuf *vbo_nor, Gwn_VertBuf *vbo_data,
+ const unsigned int pos_id, const unsigned int vnor_id, const unsigned int data_id,
+ const BMEdge *eed, const int base_vert_idx)
+{
+ if (vbo_pos) {
+ for (int i = 0; i < 2; ++i) {
+ const float *pos = (&eed->v1)[i]->co;
+ GWN_vertbuf_attr_set(vbo_pos, pos_id, base_vert_idx + i, pos);
+ }
+ }
+
+ if (vbo_nor) {
+ for (int i = 0; i < 2; ++i) {
+ Gwn_PackedNormal vnor = GWN_normal_convert_i10_v3((&eed->v1)[i]->no);
+ GWN_vertbuf_attr_set(vbo_nor, vnor_id, base_vert_idx + i, &vnor);
+ }
+ }
+
+ if (vbo_data) {
+ EdgeDrawAttr eattr = {0};
+ mesh_render_data_edge_flag(rdata, eed, &eattr);
+ for (int i = 0; i < 2; ++i) {
+ eattr.v_flag = mesh_render_data_vertex_flag(rdata, (&eed->v1)[i]);
+ GWN_vertbuf_attr_set(vbo_data, data_id, base_vert_idx + i, &eattr);
+ }
+ }
+}
+
+static void add_overlay_loose_vert(
+ MeshRenderData *rdata, Gwn_VertBuf *vbo_pos, Gwn_VertBuf *vbo_nor, Gwn_VertBuf *vbo_data,
+ const unsigned int pos_id, const unsigned int vnor_id, const unsigned int data_id,
+ const BMVert *eve, const int base_vert_idx)
+{
+ if (vbo_pos) {
+ const float *pos = eve->co;
+ GWN_vertbuf_attr_set(vbo_pos, pos_id, base_vert_idx, pos);
+ }
+
+ if (vbo_nor) {
+ Gwn_PackedNormal vnor = GWN_normal_convert_i10_v3(eve->no);
+ GWN_vertbuf_attr_set(vbo_nor, vnor_id, base_vert_idx, &vnor);
+ }
+
+ if (vbo_data) {
+ unsigned char vflag[4] = {0, 0, 0, 0};
+ vflag[0] = mesh_render_data_vertex_flag(rdata, eve);
+ GWN_vertbuf_attr_set(vbo_data, data_id, base_vert_idx, vflag);
+ }
+}
+
+/** \} */
+
+
+/* ---------------------------------------------------------------------- */
+
+/** \name Mesh Gwn_Batch Cache
+ * \{ */
+
+typedef struct MeshBatchCache {
+ Gwn_VertBuf *pos_in_order;
+ Gwn_VertBuf *nor_in_order;
+ Gwn_IndexBuf *edges_in_order;
+ Gwn_IndexBuf *triangles_in_order;
+
+ Gwn_Batch *all_verts;
+ Gwn_Batch *all_edges;
+ Gwn_Batch *all_triangles;
+
+ Gwn_VertBuf *pos_with_normals;
+ Gwn_VertBuf *tri_aligned_uv; /* Active UV layer (mloopuv) */
+
+ /**
+ * Other uses are all positions or loose elements.
+ * This stores all visible elements, needed for selection.
+ */
+ Gwn_VertBuf *ed_fcenter_pos_with_nor_and_sel;
+ Gwn_VertBuf *ed_edge_pos;
+ Gwn_VertBuf *ed_vert_pos;
+
+ Gwn_Batch *triangles_with_normals;
+
+ /* Skip hidden (depending on paint select mode) */
+ Gwn_Batch *triangles_with_weights;
+ Gwn_Batch *triangles_with_vert_colors;
+ /* Always skip hidden */
+ Gwn_Batch *triangles_with_select_mask;
+ Gwn_Batch *triangles_with_select_id;
+ uint triangles_with_select_id_offset;
+
+ Gwn_Batch *facedot_with_select_id; /* shares vbo with 'overlay_facedots' */
+ Gwn_Batch *edges_with_select_id;
+ Gwn_Batch *verts_with_select_id;
+
+ uint facedot_with_select_id_offset;
+ uint edges_with_select_id_offset;
+ uint verts_with_select_id_offset;
+
+ Gwn_Batch *points_with_normals;
+ Gwn_Batch *fancy_edges; /* owns its vertex buffer (not shared) */
+
+ /* Maybe have shaded_triangles_data split into pos_nor and uv_tangent
+ * to minimise data transfer for skinned mesh. */
+ Gwn_VertFormat shaded_triangles_format;
+ Gwn_VertBuf *shaded_triangles_data;
+ Gwn_IndexBuf **shaded_triangles_in_order;
+ Gwn_Batch **shaded_triangles;
+
+	/* Texture Paint. */
+	/* Per-texture batch. */
+ Gwn_Batch **texpaint_triangles;
+ Gwn_Batch *texpaint_triangles_single;
+
+ /* Edit Cage Mesh buffers */
+ Gwn_VertBuf *ed_tri_pos;
+ Gwn_VertBuf *ed_tri_nor; /* LoopNor, VertNor */
+ Gwn_VertBuf *ed_tri_data;
+
+ Gwn_VertBuf *ed_ledge_pos;
+ Gwn_VertBuf *ed_ledge_nor; /* VertNor */
+ Gwn_VertBuf *ed_ledge_data;
+
+ Gwn_VertBuf *ed_lvert_pos;
+ Gwn_VertBuf *ed_lvert_nor; /* VertNor */
+ Gwn_VertBuf *ed_lvert_data;
+
+ Gwn_Batch *overlay_triangles;
+ Gwn_Batch *overlay_triangles_nor; /* GWN_PRIM_POINTS */
+ Gwn_Batch *overlay_loose_edges;
+ Gwn_Batch *overlay_loose_edges_nor; /* GWN_PRIM_POINTS */
+ Gwn_Batch *overlay_loose_verts;
+ Gwn_Batch *overlay_facedots;
+
+ Gwn_Batch *overlay_weight_faces;
+ Gwn_Batch *overlay_weight_verts;
+ Gwn_Batch *overlay_paint_edges;
+
+ /* settings to determine if cache is invalid */
+ bool is_maybe_dirty;
+ bool is_dirty; /* Instantly invalidates cache, skipping mesh check */
+ int edge_len;
+ int tri_len;
+ int poly_len;
+ int vert_len;
+ int mat_len;
+ bool is_editmode;
+
+ /* XXX, only keep for as long as sculpt mode uses shaded drawing. */
+ bool is_sculpt_points_tag;
+} MeshBatchCache;
+
+/* Gwn_Batch cache management. */
+
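+/* A cache is only reused when the mesh is still in the same (edit/object) mode,
+ * no dirty flag is set and, outside edit-mode, the element and material counts
+ * still match the mesh. */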
+static bool mesh_batch_cache_valid(Mesh *me)
+{
+ MeshBatchCache *cache = me->batch_cache;
+
+ if (cache == NULL) {
+ return false;
+ }
+
+ /* XXX find another place for this */
+ if (cache->mat_len != mesh_render_mat_len_get(me)) {
+ cache->is_maybe_dirty = true;
+ }
+
+ if (cache->is_editmode != (me->edit_btmesh != NULL)) {
+ return false;
+ }
+
+ if (cache->is_dirty) {
+ return false;
+ }
+
+ if (cache->is_maybe_dirty == false) {
+ return true;
+ }
+ else {
+ if (cache->is_editmode) {
+ return false;
+ }
+ else if ((cache->vert_len != mesh_render_verts_len_get(me)) ||
+ (cache->edge_len != mesh_render_edges_len_get(me)) ||
+ (cache->tri_len != mesh_render_looptri_len_get(me)) ||
+ (cache->poly_len != mesh_render_polys_len_get(me)) ||
+ (cache->mat_len != mesh_render_mat_len_get(me)))
+ {
+ return false;
+ }
+ }
+
+ return true;
+}
+
+static void mesh_batch_cache_init(Mesh *me)
+{
+ MeshBatchCache *cache = me->batch_cache;
+
+ if (!cache) {
+ cache = me->batch_cache = MEM_callocN(sizeof(*cache), __func__);
+ }
+ else {
+ memset(cache, 0, sizeof(*cache));
+ }
+
+ cache->is_editmode = me->edit_btmesh != NULL;
+
+ if (cache->is_editmode == false) {
+ cache->edge_len = mesh_render_edges_len_get(me);
+ cache->tri_len = mesh_render_looptri_len_get(me);
+ cache->poly_len = mesh_render_polys_len_get(me);
+ cache->vert_len = mesh_render_verts_len_get(me);
+ }
+
+ cache->mat_len = mesh_render_mat_len_get(me);
+
+ cache->is_maybe_dirty = false;
+ cache->is_dirty = false;
+}
+
+static MeshBatchCache *mesh_batch_cache_get(Mesh *me)
+{
+ if (!mesh_batch_cache_valid(me)) {
+ mesh_batch_cache_clear(me);
+ mesh_batch_cache_init(me);
+ }
+ return me->batch_cache;
+}
+
+void DRW_mesh_batch_cache_dirty(Mesh *me, int mode)
+{
+ MeshBatchCache *cache = me->batch_cache;
+ if (cache == NULL) {
+ return;
+ }
+ switch (mode) {
+ case BKE_MESH_BATCH_DIRTY_MAYBE_ALL:
+ cache->is_maybe_dirty = true;
+ break;
+ case BKE_MESH_BATCH_DIRTY_SELECT:
+ GWN_VERTBUF_DISCARD_SAFE(cache->ed_tri_data);
+ GWN_VERTBUF_DISCARD_SAFE(cache->ed_ledge_data);
+ GWN_VERTBUF_DISCARD_SAFE(cache->ed_lvert_data);
+ GWN_VERTBUF_DISCARD_SAFE(cache->ed_fcenter_pos_with_nor_and_sel); /* Contains select flag */
+ GWN_VERTBUF_DISCARD_SAFE(cache->ed_edge_pos);
+ GWN_VERTBUF_DISCARD_SAFE(cache->ed_vert_pos);
+
+ GWN_BATCH_DISCARD_SAFE(cache->overlay_triangles);
+ GWN_BATCH_DISCARD_SAFE(cache->overlay_loose_verts);
+ GWN_BATCH_DISCARD_SAFE(cache->overlay_loose_edges);
+ GWN_BATCH_DISCARD_SAFE(cache->overlay_facedots);
+ /* Edit mode selection. */
+ GWN_BATCH_DISCARD_SAFE(cache->facedot_with_select_id);
+ GWN_BATCH_DISCARD_SAFE(cache->edges_with_select_id);
+ GWN_BATCH_DISCARD_SAFE(cache->verts_with_select_id);
+ break;
+ case BKE_MESH_BATCH_DIRTY_ALL:
+ cache->is_dirty = true;
+ break;
+ case BKE_MESH_BATCH_DIRTY_SHADING:
+ /* TODO: This should only update UV and tangent data,
+ * and not free the entire cache. */
+ cache->is_dirty = true;
+ break;
+ case BKE_MESH_BATCH_DIRTY_SCULPT_COORDS:
+ cache->is_sculpt_points_tag = true;
+ break;
+ default:
+ BLI_assert(0);
+ }
+}
+
+/**
+ * This only clears the batches associated with the given vertex buffer.
+ **/
+static void mesh_batch_cache_clear_selective(Mesh *me, Gwn_VertBuf *vert)
+{
+ MeshBatchCache *cache = me->batch_cache;
+ if (!cache) {
+ return;
+ }
+
+ BLI_assert(vert != NULL);
+
+ if (cache->pos_with_normals == vert) {
+ GWN_BATCH_DISCARD_SAFE(cache->triangles_with_normals);
+ GWN_BATCH_DISCARD_SAFE(cache->triangles_with_weights);
+ GWN_BATCH_DISCARD_SAFE(cache->triangles_with_vert_colors);
+ GWN_BATCH_DISCARD_SAFE(cache->triangles_with_select_id);
+ GWN_BATCH_DISCARD_SAFE(cache->triangles_with_select_mask);
+ GWN_BATCH_DISCARD_SAFE(cache->points_with_normals);
+ if (cache->shaded_triangles) {
+ for (int i = 0; i < cache->mat_len; ++i) {
+ GWN_BATCH_DISCARD_SAFE(cache->shaded_triangles[i]);
+ }
+ }
+ MEM_SAFE_FREE(cache->shaded_triangles);
+ if (cache->texpaint_triangles) {
+ for (int i = 0; i < cache->mat_len; ++i) {
+ GWN_BATCH_DISCARD_SAFE(cache->texpaint_triangles[i]);
+ }
+ }
+ MEM_SAFE_FREE(cache->texpaint_triangles);
+ GWN_BATCH_DISCARD_SAFE(cache->texpaint_triangles_single);
+ }
+ /* TODO: add the other ones if needed. */
+ else {
+ /* Does not match any vertbuf in the batch cache! */
+ BLI_assert(0);
+ }
+}
+
+static void mesh_batch_cache_clear(Mesh *me)
+{
+ MeshBatchCache *cache = me->batch_cache;
+ if (!cache) {
+ return;
+ }
+
+ GWN_BATCH_DISCARD_SAFE(cache->all_verts);
+ GWN_BATCH_DISCARD_SAFE(cache->all_edges);
+ GWN_BATCH_DISCARD_SAFE(cache->all_triangles);
+
+ GWN_VERTBUF_DISCARD_SAFE(cache->pos_in_order);
+ GWN_INDEXBUF_DISCARD_SAFE(cache->edges_in_order);
+ GWN_INDEXBUF_DISCARD_SAFE(cache->triangles_in_order);
+
+ GWN_VERTBUF_DISCARD_SAFE(cache->ed_tri_pos);
+ GWN_VERTBUF_DISCARD_SAFE(cache->ed_tri_nor);
+ GWN_VERTBUF_DISCARD_SAFE(cache->ed_tri_data);
+ GWN_VERTBUF_DISCARD_SAFE(cache->ed_ledge_pos);
+ GWN_VERTBUF_DISCARD_SAFE(cache->ed_ledge_nor);
+ GWN_VERTBUF_DISCARD_SAFE(cache->ed_ledge_data);
+ GWN_VERTBUF_DISCARD_SAFE(cache->ed_lvert_pos);
+ GWN_VERTBUF_DISCARD_SAFE(cache->ed_lvert_nor);
+ GWN_VERTBUF_DISCARD_SAFE(cache->ed_lvert_data);
+ GWN_BATCH_DISCARD_SAFE(cache->overlay_triangles);
+ GWN_BATCH_DISCARD_SAFE(cache->overlay_triangles_nor);
+ GWN_BATCH_DISCARD_SAFE(cache->overlay_loose_verts);
+ GWN_BATCH_DISCARD_SAFE(cache->overlay_loose_edges);
+ GWN_BATCH_DISCARD_SAFE(cache->overlay_loose_edges_nor);
+
+ GWN_BATCH_DISCARD_SAFE(cache->overlay_weight_faces);
+ GWN_BATCH_DISCARD_SAFE(cache->overlay_weight_verts);
+ GWN_BATCH_DISCARD_SAFE(cache->overlay_paint_edges);
+ GWN_BATCH_DISCARD_SAFE(cache->overlay_facedots);
+
+ GWN_BATCH_DISCARD_SAFE(cache->triangles_with_normals);
+ GWN_BATCH_DISCARD_SAFE(cache->points_with_normals);
+ GWN_VERTBUF_DISCARD_SAFE(cache->pos_with_normals);
+ GWN_BATCH_DISCARD_SAFE(cache->triangles_with_weights);
+ GWN_BATCH_DISCARD_SAFE(cache->triangles_with_vert_colors);
+ GWN_VERTBUF_DISCARD_SAFE(cache->tri_aligned_uv);
+ GWN_VERTBUF_DISCARD_SAFE(cache->ed_fcenter_pos_with_nor_and_sel);
+ GWN_VERTBUF_DISCARD_SAFE(cache->ed_edge_pos);
+ GWN_VERTBUF_DISCARD_SAFE(cache->ed_vert_pos);
+ GWN_BATCH_DISCARD_SAFE(cache->triangles_with_select_mask);
+ GWN_BATCH_DISCARD_SAFE(cache->triangles_with_select_id);
+ GWN_BATCH_DISCARD_SAFE(cache->facedot_with_select_id);
+ GWN_BATCH_DISCARD_SAFE(cache->edges_with_select_id);
+ GWN_BATCH_DISCARD_SAFE(cache->verts_with_select_id);
+
+ GWN_BATCH_DISCARD_SAFE(cache->fancy_edges);
+
+ GWN_VERTBUF_DISCARD_SAFE(cache->shaded_triangles_data);
+ if (cache->shaded_triangles_in_order) {
+ for (int i = 0; i < cache->mat_len; ++i) {
+ GWN_INDEXBUF_DISCARD_SAFE(cache->shaded_triangles_in_order[i]);
+ }
+ }
+ if (cache->shaded_triangles) {
+ for (int i = 0; i < cache->mat_len; ++i) {
+ GWN_BATCH_DISCARD_SAFE(cache->shaded_triangles[i]);
+ }
+ }
+
+ MEM_SAFE_FREE(cache->shaded_triangles_in_order);
+ MEM_SAFE_FREE(cache->shaded_triangles);
+
+ if (cache->texpaint_triangles) {
+ for (int i = 0; i < cache->mat_len; ++i) {
+ GWN_BATCH_DISCARD_SAFE(cache->texpaint_triangles[i]);
+ }
+ }
+ MEM_SAFE_FREE(cache->texpaint_triangles);
+
+ GWN_BATCH_DISCARD_SAFE(cache->texpaint_triangles_single);
+
+}
+
+void DRW_mesh_batch_cache_free(Mesh *me)
+{
+ mesh_batch_cache_clear(me);
+ MEM_SAFE_FREE(me->batch_cache);
+}
+
+/* Gwn_Batch cache usage. */
+
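+/* Build the interleaved shading VBO: for every visible triangle, three corners worth
+ * of each requested UV, tangent and vertex-color layer, using the hashed layer names
+ * gathered in mesh_render_data_create_ex() as attribute identifiers. */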
+static Gwn_VertBuf *mesh_batch_cache_get_tri_shading_data(MeshRenderData *rdata, MeshBatchCache *cache)
+{
+ BLI_assert(rdata->types & (MR_DATATYPE_VERT | MR_DATATYPE_LOOPTRI | MR_DATATYPE_LOOP | MR_DATATYPE_POLY));
+#define USE_COMP_MESH_DATA
+
+ if (cache->shaded_triangles_data == NULL) {
+ const uint uv_len = rdata->cd.layers.uv_len;
+ const uint tangent_len = rdata->cd.layers.tangent_len;
+ const uint vcol_len = rdata->cd.layers.vcol_len;
+ const uint layers_combined_len = uv_len + vcol_len + tangent_len;
+
+ if (layers_combined_len == 0) {
+ return NULL;
+ }
+
+ Gwn_VertFormat *format = &cache->shaded_triangles_format;
+
+ GWN_vertformat_clear(format);
+
+ /* initialize vertex format */
+ uint *layers_combined_id = BLI_array_alloca(layers_combined_id, layers_combined_len);
+ uint *uv_id = layers_combined_id;
+ uint *tangent_id = uv_id + uv_len;
+ uint *vcol_id = tangent_id + tangent_len;
+
+ /* Not needed, just for sanity. */
+ if (uv_len == 0) { uv_id = NULL; }
+ if (tangent_len == 0) { tangent_id = NULL; }
+ if (vcol_len == 0) { vcol_id = NULL; }
+
+ for (uint i = 0; i < uv_len; i++) {
+ /* UV */
+ const char *attrib_name = mesh_render_data_uv_layer_uuid_get(rdata, i);
+#if defined(USE_COMP_MESH_DATA) && 0 /* these are clamped. Maybe use them as an option in the future */
+ uv_id[i] = GWN_vertformat_attr_add(format, attrib_name, GWN_COMP_I16, 2, GWN_FETCH_INT_TO_FLOAT_UNIT);
+#else
+ uv_id[i] = GWN_vertformat_attr_add(format, attrib_name, GWN_COMP_F32, 2, GWN_FETCH_FLOAT);
+#endif
+
+ /* Auto Name */
+ attrib_name = mesh_render_data_uv_auto_layer_uuid_get(rdata, i);
+ GWN_vertformat_alias_add(format, attrib_name);
+
+ if (i == rdata->cd.layers.uv_active) {
+ GWN_vertformat_alias_add(format, "u");
+ }
+ }
+
+ for (uint i = 0; i < tangent_len; i++) {
+ const char *attrib_name = mesh_render_data_tangent_layer_uuid_get(rdata, i);
+			/* WATCH IT: only specifying 3 components instead of 4 (4th is the sign).
+			 * That may cause problems, but I could not make it fail. (fclem) */
+#ifdef USE_COMP_MESH_DATA
+ /* Tangents need more precision than 10_10_10 */
+ tangent_id[i] = GWN_vertformat_attr_add(format, attrib_name, GWN_COMP_I16, 3, GWN_FETCH_INT_TO_FLOAT_UNIT);
+#else
+ tangent_id[i] = GWN_vertformat_attr_add(format, attrib_name, GWN_COMP_F32, 3, GWN_FETCH_FLOAT);
+#endif
+
+ if (i == rdata->cd.layers.tangent_active) {
+ GWN_vertformat_alias_add(format, "t");
+ }
+ }
+
+ for (uint i = 0; i < vcol_len; i++) {
+ const char *attrib_name = mesh_render_data_vcol_layer_uuid_get(rdata, i);
+ vcol_id[i] = GWN_vertformat_attr_add(format, attrib_name, GWN_COMP_U8, 3, GWN_FETCH_INT_TO_FLOAT_UNIT);
+
+ /* Auto layer */
+ if (rdata->cd.layers.auto_vcol[i]) {
+ attrib_name = mesh_render_data_vcol_auto_layer_uuid_get(rdata, i);
+ GWN_vertformat_alias_add(format, attrib_name);
+ }
+
+ if (i == rdata->cd.layers.vcol_active) {
+ GWN_vertformat_alias_add(format, "c");
+ }
+ }
+
+ const uint tri_len = mesh_render_data_looptri_len_get(rdata);
+
+ Gwn_VertBuf *vbo = cache->shaded_triangles_data = GWN_vertbuf_create_with_format(format);
+
+ const int vbo_len_capacity = tri_len * 3;
+ int vbo_len_used = 0;
+ GWN_vertbuf_data_alloc(vbo, vbo_len_capacity);
+
+ Gwn_VertBufRaw *layers_combined_step = BLI_array_alloca(layers_combined_step, layers_combined_len);
+
+ Gwn_VertBufRaw *uv_step = layers_combined_step;
+ Gwn_VertBufRaw *tangent_step = uv_step + uv_len;
+ Gwn_VertBufRaw *vcol_step = tangent_step + tangent_len;
+
+ /* Not needed, just for sanity. */
+ if (uv_len == 0) { uv_step = NULL; }
+ if (tangent_len == 0) { tangent_step = NULL; }
+ if (vcol_len == 0) { vcol_step = NULL; }
+
+ for (uint i = 0; i < uv_len; i++) {
+ GWN_vertbuf_attr_get_raw_data(vbo, uv_id[i], &uv_step[i]);
+ }
+ for (uint i = 0; i < tangent_len; i++) {
+ GWN_vertbuf_attr_get_raw_data(vbo, tangent_id[i], &tangent_step[i]);
+ }
+ for (uint i = 0; i < vcol_len; i++) {
+ GWN_vertbuf_attr_get_raw_data(vbo, vcol_id[i], &vcol_step[i]);
+ }
+
+ /* TODO deduplicate all verts and make use of Gwn_IndexBuf in
+ * mesh_batch_cache_get_triangles_in_order_split_by_material. */
+ if (rdata->edit_bmesh) {
+ for (uint i = 0; i < tri_len; i++) {
+ const BMLoop **bm_looptri = (const BMLoop **)rdata->edit_bmesh->looptris[i];
+ if (BM_elem_flag_test(bm_looptri[0]->f, BM_ELEM_HIDDEN)) {
+ continue;
+ }
+ /* UVs */
+ for (uint j = 0; j < uv_len; j++) {
+ const uint layer_offset = rdata->cd.offset.uv[j];
+ for (uint t = 0; t < 3; t++) {
+ const float *elem = ((MLoopUV *)BM_ELEM_CD_GET_VOID_P(bm_looptri[t], layer_offset))->uv;
+ copy_v2_v2(GWN_vertbuf_raw_step(&uv_step[j]), elem);
+ }
+ }
+ /* TANGENTs */
+ for (uint j = 0; j < tangent_len; j++) {
+ float (*layer_data)[4] = rdata->cd.layers.tangent[j];
+ for (uint t = 0; t < 3; t++) {
+ const float *elem = layer_data[BM_elem_index_get(bm_looptri[t])];
+ normal_float_to_short_v3(GWN_vertbuf_raw_step(&tangent_step[j]), elem);
+ }
+ }
+ /* VCOLs */
+ for (uint j = 0; j < vcol_len; j++) {
+ const uint layer_offset = rdata->cd.offset.vcol[j];
+ for (uint t = 0; t < 3; t++) {
+ const uchar *elem = &((MLoopCol *)BM_ELEM_CD_GET_VOID_P(bm_looptri[t], layer_offset))->r;
+ copy_v3_v3_uchar(GWN_vertbuf_raw_step(&vcol_step[j]), elem);
+ }
+ }
+ }
+ }
+ else {
+ for (uint i = 0; i < tri_len; i++) {
+ const MLoopTri *mlt = &rdata->mlooptri[i];
+
+ /* UVs */
+ for (uint j = 0; j < uv_len; j++) {
+ const MLoopUV *layer_data = rdata->cd.layers.uv[j];
+ for (uint t = 0; t < 3; t++) {
+ const float *elem = layer_data[mlt->tri[t]].uv;
+ copy_v2_v2(GWN_vertbuf_raw_step(&uv_step[j]), elem);
+ }
+ }
+ /* TANGENTs */
+ for (uint j = 0; j < tangent_len; j++) {
+ float (*layer_data)[4] = rdata->cd.layers.tangent[j];
+ for (uint t = 0; t < 3; t++) {
+ const float *elem = layer_data[mlt->tri[t]];
+#ifdef USE_COMP_MESH_DATA
+ normal_float_to_short_v3(GWN_vertbuf_raw_step(&tangent_step[j]), elem);
+#else
+ copy_v3_v3(GWN_vertbuf_raw_step(&tangent_step[j]), elem);
+#endif
+ }
+ }
+ /* VCOLs */
+ for (uint j = 0; j < vcol_len; j++) {
+ const MLoopCol *layer_data = rdata->cd.layers.vcol[j];
+ for (uint t = 0; t < 3; t++) {
+ const uchar *elem = &layer_data[mlt->tri[t]].r;
+ copy_v3_v3_uchar(GWN_vertbuf_raw_step(&vcol_step[j]), elem);
+ }
+ }
+ }
+ }
+
+ vbo_len_used = GWN_vertbuf_raw_used(&layers_combined_step[0]);
+
+#ifndef NDEBUG
+ /* Check all layers are write aligned. */
+ if (layers_combined_len > 1) {
+ for (uint i = 1; i < layers_combined_len; i++) {
+ BLI_assert(vbo_len_used == GWN_vertbuf_raw_used(&layers_combined_step[i]));
+ }
+ }
+#endif
+
+ if (vbo_len_capacity != vbo_len_used) {
+ GWN_vertbuf_data_resize(vbo, vbo_len_used);
+ }
+ }
+
+#undef USE_COMP_MESH_DATA
+
+ return cache->shaded_triangles_data;
+}
+
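+/* Build a VBO holding the active UV layer, laid out as 3 UVs per looptri;
+ * only valid for non-edit meshes (see the assert below). */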
+static Gwn_VertBuf *mesh_batch_cache_get_tri_uv_active(
+ MeshRenderData *rdata, MeshBatchCache *cache)
+{
+ BLI_assert(rdata->types & (MR_DATATYPE_VERT | MR_DATATYPE_LOOPTRI | MR_DATATYPE_LOOP | MR_DATATYPE_LOOPUV));
+ BLI_assert(rdata->edit_bmesh == NULL);
+
+ if (cache->tri_aligned_uv == NULL) {
+ unsigned int vidx = 0;
+
+ static Gwn_VertFormat format = { 0 };
+ static struct { uint uv; } attr_id;
+ if (format.attrib_ct == 0) {
+ attr_id.uv = GWN_vertformat_attr_add(&format, "uv", GWN_COMP_F32, 2, GWN_FETCH_FLOAT);
+ }
+
+ const int tri_len = mesh_render_data_looptri_len_get(rdata);
+
+ Gwn_VertBuf *vbo = cache->tri_aligned_uv = GWN_vertbuf_create_with_format(&format);
+
+ const int vbo_len_capacity = tri_len * 3;
+ int vbo_len_used = 0;
+ GWN_vertbuf_data_alloc(vbo, vbo_len_capacity);
+
+ const MLoopUV *mloopuv = rdata->mloopuv;
+
+ for (int i = 0; i < tri_len; i++) {
+ const MLoopTri *mlt = &rdata->mlooptri[i];
+ GWN_vertbuf_attr_set(vbo, attr_id.uv, vidx++, mloopuv[mlt->tri[0]].uv);
+ GWN_vertbuf_attr_set(vbo, attr_id.uv, vidx++, mloopuv[mlt->tri[1]].uv);
+ GWN_vertbuf_attr_set(vbo, attr_id.uv, vidx++, mloopuv[mlt->tri[2]].uv);
+ }
+ vbo_len_used = vidx;
+
+ BLI_assert(vbo_len_capacity == vbo_len_used);
+ UNUSED_VARS_NDEBUG(vbo_len_used);
+ }
+
+ return cache->tri_aligned_uv;
+}
+
+static Gwn_VertBuf *mesh_batch_cache_get_tri_pos_and_normals_ex(
+ MeshRenderData *rdata, const bool use_hide,
+ Gwn_VertBuf **r_vbo)
+{
+ BLI_assert(rdata->types & (MR_DATATYPE_VERT | MR_DATATYPE_LOOPTRI | MR_DATATYPE_LOOP | MR_DATATYPE_POLY));
+
+ if (*r_vbo == NULL) {
+ static Gwn_VertFormat format = { 0 };
+ static struct { uint pos, nor; } attr_id;
+ if (format.attrib_ct == 0) {
+ attr_id.pos = GWN_vertformat_attr_add(&format, "pos", GWN_COMP_F32, 3, GWN_FETCH_FLOAT);
+ attr_id.nor = GWN_vertformat_attr_add(&format, "nor", GWN_COMP_I10, 3, GWN_FETCH_INT_TO_FLOAT_UNIT);
+ }
+
+ const int tri_len = mesh_render_data_looptri_len_get(rdata);
+
+ Gwn_VertBuf *vbo = *r_vbo = GWN_vertbuf_create_with_format(&format);
+
+ const int vbo_len_capacity = tri_len * 3;
+ int vbo_len_used = 0;
+ GWN_vertbuf_data_alloc(vbo, vbo_len_capacity);
+
+ Gwn_VertBufRaw pos_step, nor_step;
+ GWN_vertbuf_attr_get_raw_data(vbo, attr_id.pos, &pos_step);
+ GWN_vertbuf_attr_get_raw_data(vbo, attr_id.nor, &nor_step);
+
+ float (*lnors)[3] = rdata->loop_normals;
+
+ if (rdata->edit_bmesh) {
+			/* Only dereferenced when 'lnors' is NULL (see the checks below). */
+			Gwn_PackedNormal *pnors_pack = NULL, *vnors_pack = NULL;
+
+ if (lnors == NULL) {
+ mesh_render_data_ensure_poly_normals_pack(rdata);
+ mesh_render_data_ensure_vert_normals_pack(rdata);
+
+ pnors_pack = rdata->poly_normals_pack;
+ vnors_pack = rdata->vert_normals_pack;
+ }
+
+ for (int i = 0; i < tri_len; i++) {
+ const BMLoop **bm_looptri = (const BMLoop **)rdata->edit_bmesh->looptris[i];
+ const BMFace *bm_face = bm_looptri[0]->f;
+
+ /* use_hide always for edit-mode */
+ if (BM_elem_flag_test(bm_face, BM_ELEM_HIDDEN)) {
+ continue;
+ }
+
+ if (lnors) {
+ for (uint t = 0; t < 3; t++) {
+ const float *nor = lnors[BM_elem_index_get(bm_looptri[t])];
+ *((Gwn_PackedNormal *)GWN_vertbuf_raw_step(&nor_step)) = GWN_normal_convert_i10_v3(nor);
+ }
+ }
+ else if (BM_elem_flag_test(bm_face, BM_ELEM_SMOOTH)) {
+ for (uint t = 0; t < 3; t++) {
+ *((Gwn_PackedNormal *)GWN_vertbuf_raw_step(&nor_step)) = vnors_pack[BM_elem_index_get(bm_looptri[t]->v)];
+ }
+ }
+ else {
+ const Gwn_PackedNormal *snor_pack = &pnors_pack[BM_elem_index_get(bm_face)];
+ for (uint t = 0; t < 3; t++) {
+ *((Gwn_PackedNormal *)GWN_vertbuf_raw_step(&nor_step)) = *snor_pack;
+ }
+ }
+
+ for (uint t = 0; t < 3; t++) {
+ copy_v3_v3(GWN_vertbuf_raw_step(&pos_step), bm_looptri[t]->v->co);
+ }
+ }
+ }
+ else {
+ if (lnors == NULL) {
+			/* No custom loop normals: smooth faces use the vertex normals stored in MVert,
+			 * flat faces need the packed poly normals, so ensure those are available. */
+ mesh_render_data_ensure_poly_normals_pack(rdata);
+ }
+
+ for (int i = 0; i < tri_len; i++) {
+ const MLoopTri *mlt = &rdata->mlooptri[i];
+ const MPoly *mp = &rdata->mpoly[mlt->poly];
+
+ if (use_hide && (mp->flag & ME_HIDE)) {
+ continue;
+ }
+
+ const uint vtri[3] = {
+ rdata->mloop[mlt->tri[0]].v,
+ rdata->mloop[mlt->tri[1]].v,
+ rdata->mloop[mlt->tri[2]].v,
+ };
+
+ if (lnors) {
+ for (uint t = 0; t < 3; t++) {
+ const float *nor = lnors[mlt->tri[t]];
+ *((Gwn_PackedNormal *)GWN_vertbuf_raw_step(&nor_step)) = GWN_normal_convert_i10_v3(nor);
+ }
+ }
+ else if (mp->flag & ME_SMOOTH) {
+ for (uint t = 0; t < 3; t++) {
+ const MVert *mv = &rdata->mvert[vtri[t]];
+ *((Gwn_PackedNormal *)GWN_vertbuf_raw_step(&nor_step)) = GWN_normal_convert_i10_s3(mv->no);
+ }
+ }
+ else {
+ const Gwn_PackedNormal *pnors_pack = &rdata->poly_normals_pack[mlt->poly];
+ for (uint t = 0; t < 3; t++) {
+ *((Gwn_PackedNormal *)GWN_vertbuf_raw_step(&nor_step)) = *pnors_pack;
+ }
+ }
+
+ for (uint t = 0; t < 3; t++) {
+ const MVert *mv = &rdata->mvert[vtri[t]];
+ copy_v3_v3(GWN_vertbuf_raw_step(&pos_step), mv->co);
+ }
+ }
+ }
+
+ vbo_len_used = GWN_vertbuf_raw_used(&pos_step);
+ BLI_assert(vbo_len_used == GWN_vertbuf_raw_used(&nor_step));
+
+ if (vbo_len_capacity != vbo_len_used) {
+ GWN_vertbuf_data_resize(vbo, vbo_len_used);
+ }
+ }
+ return *r_vbo;
+}
+
+static Gwn_VertBuf *mesh_batch_cache_get_tri_pos_and_normals(
+ MeshRenderData *rdata, MeshBatchCache *cache)
+{
+ return mesh_batch_cache_get_tri_pos_and_normals_ex(
+ rdata, false,
+ &cache->pos_with_normals);
+}
+static Gwn_VertBuf *mesh_create_tri_pos_and_normals_visible_only(
+ MeshRenderData *rdata)
+{
+ Gwn_VertBuf *vbo_dummy = NULL;
+ return mesh_batch_cache_get_tri_pos_and_normals_ex(
+ rdata, true,
+ &vbo_dummy);
+}
+
+static Gwn_VertBuf *mesh_batch_cache_get_facedot_pos_with_normals_and_flag(
+ MeshRenderData *rdata, MeshBatchCache *cache)
+{
+ BLI_assert(rdata->types & (MR_DATATYPE_VERT | MR_DATATYPE_LOOP | MR_DATATYPE_POLY));
+
+ if (cache->ed_fcenter_pos_with_nor_and_sel == NULL) {
+ static Gwn_VertFormat format = { 0 };
+ static struct { uint pos, data; } attr_id;
+ if (format.attrib_ct == 0) {
+ attr_id.pos = GWN_vertformat_attr_add(&format, "pos", GWN_COMP_F32, 3, GWN_FETCH_FLOAT);
+ attr_id.data = GWN_vertformat_attr_add(&format, "norAndFlag", GWN_COMP_I10, 4, GWN_FETCH_INT_TO_FLOAT_UNIT);
+ }
+
+ const int vbo_len_capacity = mesh_render_data_polys_len_get(rdata);
+ int vidx = 0;
+
+ Gwn_VertBuf *vbo = cache->ed_fcenter_pos_with_nor_and_sel = GWN_vertbuf_create_with_format(&format);
+ GWN_vertbuf_data_alloc(vbo, vbo_len_capacity);
+ for (int i = 0; i < vbo_len_capacity; ++i) {
+ float pcenter[3], pnor[3];
+ bool selected = false;
+
+ if (mesh_render_data_pnors_pcenter_select_get(rdata, i, pnor, pcenter, &selected)) {
+
+				Gwn_PackedNormal nor = GWN_normal_convert_i10_v3(pnor);
+ nor.w = selected ? 1 : 0;
+ GWN_vertbuf_attr_set(vbo, attr_id.data, vidx, &nor);
+
+ GWN_vertbuf_attr_set(vbo, attr_id.pos, vidx, pcenter);
+
+ vidx += 1;
+ }
+ }
+ const int vbo_len_used = vidx;
+ BLI_assert(vbo_len_used <= vbo_len_capacity);
+ if (vbo_len_used != vbo_len_capacity) {
+ GWN_vertbuf_data_resize(vbo, vbo_len_used);
+ }
+ }
+
+ return cache->ed_fcenter_pos_with_nor_and_sel;
+}
+
+static Gwn_VertBuf *mesh_batch_cache_get_edges_visible(
+ MeshRenderData *rdata, MeshBatchCache *cache)
+{
+ BLI_assert(rdata->types & (MR_DATATYPE_VERT | MR_DATATYPE_EDGE));
+
+ if (cache->ed_edge_pos == NULL) {
+ static Gwn_VertFormat format = { 0 };
+ static struct { uint pos, data; } attr_id;
+ if (format.attrib_ct == 0) {
+ attr_id.pos = GWN_vertformat_attr_add(&format, "pos", GWN_COMP_F32, 3, GWN_FETCH_FLOAT);
+ }
+
+ const int vbo_len_capacity = mesh_render_data_edges_len_get(rdata) * 2;
+ int vidx = 0;
+
+ Gwn_VertBuf *vbo = cache->ed_edge_pos = GWN_vertbuf_create_with_format(&format);
+ GWN_vertbuf_data_alloc(vbo, vbo_len_capacity);
+ if (rdata->edit_bmesh) {
+ BMesh *bm = rdata->edit_bmesh->bm;
+ BMIter iter;
+ BMEdge *eed;
+
+ BM_ITER_MESH (eed, &iter, bm, BM_EDGES_OF_MESH) {
+ if (!BM_elem_flag_test(eed, BM_ELEM_HIDDEN)) {
+ GWN_vertbuf_attr_set(vbo, attr_id.pos, vidx, eed->v1->co);
+ vidx += 1;
+ GWN_vertbuf_attr_set(vbo, attr_id.pos, vidx, eed->v2->co);
+ vidx += 1;
+ }
+ }
+ }
+ else {
+ /* not yet done! */
+ BLI_assert(0);
+ }
+ const int vbo_len_used = vidx;
+ if (vbo_len_used != vbo_len_capacity) {
+ GWN_vertbuf_data_resize(vbo, vbo_len_used);
+ }
+ UNUSED_VARS_NDEBUG(vbo_len_used);
+ }
+
+ return cache->ed_edge_pos;
+}
+
+static Gwn_VertBuf *mesh_batch_cache_get_verts_visible(
+ MeshRenderData *rdata, MeshBatchCache *cache)
+{
+ BLI_assert(rdata->types & MR_DATATYPE_VERT);
+
+ if (cache->ed_vert_pos == NULL) {
+ static Gwn_VertFormat format = { 0 };
+ static struct { uint pos, data; } attr_id;
+ if (format.attrib_ct == 0) {
+ attr_id.pos = GWN_vertformat_attr_add(&format, "pos", GWN_COMP_F32, 3, GWN_FETCH_FLOAT);
+ }
+
+ const int vbo_len_capacity = mesh_render_data_verts_len_get(rdata);
+ uint vidx = 0;
+
+ Gwn_VertBuf *vbo = cache->ed_vert_pos = GWN_vertbuf_create_with_format(&format);
+ GWN_vertbuf_data_alloc(vbo, vbo_len_capacity);
+ if (rdata->edit_bmesh) {
+ BMesh *bm = rdata->edit_bmesh->bm;
+ BMIter iter;
+ BMVert *eve;
+
+ BM_ITER_MESH (eve, &iter, bm, BM_VERTS_OF_MESH) {
+ if (!BM_elem_flag_test(eve, BM_ELEM_HIDDEN)) {
+ GWN_vertbuf_attr_set(vbo, attr_id.pos, vidx, eve->co);
+ vidx += 1;
+ }
+ }
+ }
+ else {
+ for (int i = 0; i < vbo_len_capacity; i++) {
+ const MVert *mv = &rdata->mvert[i];
+ if (!(mv->flag & ME_HIDE)) {
+ GWN_vertbuf_attr_set(vbo, attr_id.pos, vidx, mv->co);
+ vidx += 1;
+ }
+ }
+ }
+ const uint vbo_len_used = vidx;
+ if (vbo_len_used != vbo_len_capacity) {
+ GWN_vertbuf_data_resize(vbo, vbo_len_used);
+ }
+
+ UNUSED_VARS_NDEBUG(vbo_len_used);
+ }
+
+ return cache->ed_vert_pos;
+}
+
+static Gwn_VertBuf *mesh_create_facedot_select_id(
+ MeshRenderData *rdata, uint select_id_offset)
+{
+ BLI_assert(rdata->types & (MR_DATATYPE_VERT | MR_DATATYPE_LOOP | MR_DATATYPE_POLY));
+
+ Gwn_VertBuf *vbo;
+ {
+ static Gwn_VertFormat format = { 0 };
+ static struct { uint pos, col; } attr_id;
+ if (format.attrib_ct == 0) {
+ attr_id.col = GWN_vertformat_attr_add(&format, "color", GWN_COMP_I32, 1, GWN_FETCH_INT);
+ }
+
+ const int vbo_len_capacity = mesh_render_data_polys_len_get(rdata);
+ int vidx = 0;
+
+ vbo = GWN_vertbuf_create_with_format(&format);
+ GWN_vertbuf_data_alloc(vbo, vbo_len_capacity);
+ uint select_index = select_id_offset;
+
+ if (rdata->edit_bmesh) {
+ BMesh *bm = rdata->edit_bmesh->bm;
+ BMIter iter;
+			BMFace *efa;
+
+ BM_ITER_MESH (efa, &iter, bm, BM_FACES_OF_MESH) {
+ if (!BM_elem_flag_test(efa, BM_ELEM_HIDDEN)) {
+ int select_id;
+ GPU_select_index_get(select_index, &select_id);
+ GWN_vertbuf_attr_set(vbo, attr_id.col, vidx, &select_id);
+ vidx += 1;
+ }
+ select_index += 1;
+ }
+ }
+ else {
+ /* not yet done! */
+ BLI_assert(0);
+ }
+ const int vbo_len_used = vidx;
+ if (vbo_len_used != vbo_len_capacity) {
+ GWN_vertbuf_data_resize(vbo, vbo_len_used);
+ }
+ }
+
+ return vbo;
+}
+
+static Gwn_VertBuf *mesh_create_edges_select_id(
+ MeshRenderData *rdata, uint select_id_offset)
+{
+ BLI_assert(rdata->types & (MR_DATATYPE_VERT | MR_DATATYPE_EDGE));
+
+ Gwn_VertBuf *vbo;
+ {
+ static Gwn_VertFormat format = { 0 };
+ static struct { uint pos, col; } attr_id;
+ if (format.attrib_ct == 0) {
+ attr_id.col = GWN_vertformat_attr_add(&format, "color", GWN_COMP_I32, 1, GWN_FETCH_INT);
+ }
+
+ const int vbo_len_capacity = mesh_render_data_edges_len_get(rdata) * 2;
+ int vidx = 0;
+
+ vbo = GWN_vertbuf_create_with_format(&format);
+ GWN_vertbuf_data_alloc(vbo, vbo_len_capacity);
+ uint select_index = select_id_offset;
+
+ if (rdata->edit_bmesh) {
+ BMesh *bm = rdata->edit_bmesh->bm;
+ BMIter iter;
+ BMEdge *eed;
+
+ BM_ITER_MESH (eed, &iter, bm, BM_EDGES_OF_MESH) {
+ if (!BM_elem_flag_test(eed, BM_ELEM_HIDDEN)) {
+ int select_id;
+ GPU_select_index_get(select_index, &select_id);
+ GWN_vertbuf_attr_set(vbo, attr_id.col, vidx, &select_id);
+ vidx += 1;
+ GWN_vertbuf_attr_set(vbo, attr_id.col, vidx, &select_id);
+ vidx += 1;
+ }
+ select_index += 1;
+ }
+ }
+ else {
+ /* not yet done! */
+ BLI_assert(0);
+ }
+ const int vbo_len_used = vidx;
+ if (vbo_len_used != vbo_len_capacity) {
+ GWN_vertbuf_data_resize(vbo, vbo_len_used);
+ }
+ }
+
+ return vbo;
+}
+
+static Gwn_VertBuf *mesh_create_verts_select_id(
+ MeshRenderData *rdata, uint select_id_offset)
+{
+ BLI_assert(rdata->types & (MR_DATATYPE_VERT | MR_DATATYPE_LOOP | MR_DATATYPE_POLY));
+
+ Gwn_VertBuf *vbo;
+ {
+ static Gwn_VertFormat format = { 0 };
+ static struct { uint pos, col; } attr_id;
+ if (format.attrib_ct == 0) {
+ attr_id.col = GWN_vertformat_attr_add(&format, "color", GWN_COMP_I32, 1, GWN_FETCH_INT);
+ }
+
+ const int vbo_len_capacity = mesh_render_data_verts_len_get(rdata);
+ int vidx = 0;
+
+ vbo = GWN_vertbuf_create_with_format(&format);
+ GWN_vertbuf_data_alloc(vbo, vbo_len_capacity);
+ uint select_index = select_id_offset;
+
+ if (rdata->edit_bmesh) {
+ BMesh *bm = rdata->edit_bmesh->bm;
+ BMIter iter;
+ BMVert *eve;
+
+ BM_ITER_MESH (eve, &iter, bm, BM_VERTS_OF_MESH) {
+ if (!BM_elem_flag_test(eve, BM_ELEM_HIDDEN)) {
+ int select_id;
+ GPU_select_index_get(select_index, &select_id);
+ GWN_vertbuf_attr_set(vbo, attr_id.col, vidx, &select_id);
+ vidx += 1;
+ }
+ select_index += 1;
+ }
+ }
+ else {
+ for (int i = 0; i < vbo_len_capacity; i++) {
+ const MVert *mv = &rdata->mvert[i];
+ if (!(mv->flag & ME_HIDE)) {
+ int select_id;
+ GPU_select_index_get(select_index, &select_id);
+ GWN_vertbuf_attr_set(vbo, attr_id.col, vidx, &select_id);
+ vidx += 1;
+ }
+ select_index += 1;
+ }
+ }
+ const int vbo_len_used = vidx;
+ if (vbo_len_used != vbo_len_capacity) {
+ GWN_vertbuf_data_resize(vbo, vbo_len_used);
+ }
+ }
+
+ return vbo;
+}
+
+static Gwn_VertBuf *mesh_create_tri_weights(
+ MeshRenderData *rdata, bool use_hide, int defgroup)
+{
+ BLI_assert(
+ rdata->types &
+ (MR_DATATYPE_VERT | MR_DATATYPE_LOOPTRI | MR_DATATYPE_LOOP | MR_DATATYPE_POLY | MR_DATATYPE_DVERT));
+
+ Gwn_VertBuf *vbo;
+ {
+ unsigned int cidx = 0;
+
+ static Gwn_VertFormat format = { 0 };
+ static struct { uint col; } attr_id;
+ if (format.attrib_ct == 0) {
+ attr_id.col = GWN_vertformat_attr_add(&format, "color", GWN_COMP_F32, 3, GWN_FETCH_FLOAT);
+ }
+
+ vbo = GWN_vertbuf_create_with_format(&format);
+
+ const int tri_len = mesh_render_data_looptri_len_get(rdata);
+ const int vbo_len_capacity = tri_len * 3;
+ int vbo_len_used = 0;
+ GWN_vertbuf_data_alloc(vbo, vbo_len_capacity);
+
+ mesh_render_data_ensure_vert_weight_color(rdata, defgroup);
+ const float (*vert_weight_color)[3] = rdata->vert_weight_color;
+
+ if (rdata->edit_bmesh) {
+ for (int i = 0; i < tri_len; i++) {
+ const BMLoop **ltri = (const BMLoop **)rdata->edit_bmesh->looptris[i];
+ /* Assume 'use_hide' */
+ if (!BM_elem_flag_test(ltri[0]->f, BM_ELEM_HIDDEN)) {
+ for (uint tri_corner = 0; tri_corner < 3; tri_corner++) {
+ const int v_index = BM_elem_index_get(ltri[tri_corner]->v);
+ GWN_vertbuf_attr_set(vbo, attr_id.col, cidx++, vert_weight_color[v_index]);
+ }
+ }
+ }
+ }
+ else {
+ for (int i = 0; i < tri_len; i++) {
+ const MLoopTri *mlt = &rdata->mlooptri[i];
+ if (!(use_hide && (rdata->mpoly[mlt->poly].flag & ME_HIDE))) {
+ for (uint tri_corner = 0; tri_corner < 3; tri_corner++) {
+ const uint v_index = rdata->mloop[mlt->tri[tri_corner]].v;
+ GWN_vertbuf_attr_set(vbo, attr_id.col, cidx++, vert_weight_color[v_index]);
+ }
+ }
+ }
+ }
+ vbo_len_used = cidx;
+
+ if (vbo_len_capacity != vbo_len_used) {
+ GWN_vertbuf_data_resize(vbo, vbo_len_used);
+ }
+ }
+
+ return vbo;
+}
+
+static Gwn_VertBuf *mesh_create_tri_vert_colors(
+ MeshRenderData *rdata, bool use_hide)
+{
+ BLI_assert(
+ rdata->types &
+ (MR_DATATYPE_VERT | MR_DATATYPE_LOOPTRI | MR_DATATYPE_LOOP | MR_DATATYPE_POLY | MR_DATATYPE_LOOPCOL));
+
+ Gwn_VertBuf *vbo;
+ {
+ unsigned int cidx = 0;
+
+ static Gwn_VertFormat format = { 0 };
+ static struct { uint col; } attr_id;
+ if (format.attrib_ct == 0) {
+ attr_id.col = GWN_vertformat_attr_add(&format, "color", GWN_COMP_U8, 3, GWN_FETCH_INT_TO_FLOAT_UNIT);
+ }
+
+ const int tri_len = mesh_render_data_looptri_len_get(rdata);
+
+ vbo = GWN_vertbuf_create_with_format(&format);
+
+ const uint vbo_len_capacity = tri_len * 3;
+ GWN_vertbuf_data_alloc(vbo, vbo_len_capacity);
+
+ mesh_render_data_ensure_vert_color(rdata);
+ const char (*vert_color)[3] = rdata->vert_color;
+
+ if (rdata->edit_bmesh) {
+ for (int i = 0; i < tri_len; i++) {
+ const BMLoop **ltri = (const BMLoop **)rdata->edit_bmesh->looptris[i];
+ /* Assume 'use_hide' */
+ if (!BM_elem_flag_test(ltri[0]->f, BM_ELEM_HIDDEN)) {
+ for (uint tri_corner = 0; tri_corner < 3; tri_corner++) {
+ const int l_index = BM_elem_index_get(ltri[tri_corner]);
+ GWN_vertbuf_attr_set(vbo, attr_id.col, cidx++, vert_color[l_index]);
+ }
+ }
+ }
+ }
+ else {
+ for (int i = 0; i < tri_len; i++) {
+ const MLoopTri *mlt = &rdata->mlooptri[i];
+ if (!(use_hide && (rdata->mpoly[mlt->poly].flag & ME_HIDE))) {
+ for (uint tri_corner = 0; tri_corner < 3; tri_corner++) {
+ const uint l_index = mlt->tri[tri_corner];
+ GWN_vertbuf_attr_set(vbo, attr_id.col, cidx++, vert_color[l_index]);
+ }
+ }
+ }
+ }
+ const uint vbo_len_used = cidx;
+
+ if (vbo_len_capacity != vbo_len_used) {
+ GWN_vertbuf_data_resize(vbo, vbo_len_used);
+ }
+ }
+
+ return vbo;
+}
+
+static Gwn_VertBuf *mesh_create_tri_select_id(
+ MeshRenderData *rdata, bool use_hide, uint select_id_offset)
+{
+ BLI_assert(
+ rdata->types &
+ (MR_DATATYPE_VERT | MR_DATATYPE_LOOPTRI | MR_DATATYPE_LOOP | MR_DATATYPE_POLY));
+
+ Gwn_VertBuf *vbo;
+ {
+ unsigned int cidx = 0;
+
+ static Gwn_VertFormat format = { 0 };
+ static struct { uint col; } attr_id;
+ if (format.attrib_ct == 0) {
+ attr_id.col = GWN_vertformat_attr_add(&format, "color", GWN_COMP_I32, 1, GWN_FETCH_INT);
+ }
+
+ const int tri_len = mesh_render_data_looptri_len_get(rdata);
+
+ vbo = GWN_vertbuf_create_with_format(&format);
+
+ const int vbo_len_capacity = tri_len * 3;
+ int vbo_len_used = 0;
+ GWN_vertbuf_data_alloc(vbo, vbo_len_capacity);
+
+ if (rdata->edit_bmesh) {
+ for (int i = 0; i < tri_len; i++) {
+ const BMLoop **ltri = (const BMLoop **)rdata->edit_bmesh->looptris[i];
+ /* Assume 'use_hide' */
+ if (!BM_elem_flag_test(ltri[0]->f, BM_ELEM_HIDDEN)) {
+ const int poly_index = BM_elem_index_get(ltri[0]->f);
+ int select_id;
+ GPU_select_index_get(poly_index + select_id_offset, &select_id);
+ for (uint tri_corner = 0; tri_corner < 3; tri_corner++) {
+ GWN_vertbuf_attr_set(vbo, attr_id.col, cidx++, &select_id);
+ }
+ }
+ }
+ }
+ else {
+ for (int i = 0; i < tri_len; i++) {
+ const MLoopTri *mlt = &rdata->mlooptri[i];
+ const int poly_index = mlt->poly;
+ if (!(use_hide && (rdata->mpoly[poly_index].flag & ME_HIDE))) {
+ int select_id;
+ GPU_select_index_get(poly_index + select_id_offset, &select_id);
+ for (uint tri_corner = 0; tri_corner < 3; tri_corner++) {
+ GWN_vertbuf_attr_set(vbo, attr_id.col, cidx++, &select_id);
+ }
+ }
+ }
+ }
+ vbo_len_used = cidx;
+
+ if (vbo_len_capacity != vbo_len_used) {
+ GWN_vertbuf_data_resize(vbo, vbo_len_used);
+ }
+ }
+ return vbo;
+}
+
+static Gwn_VertBuf *mesh_batch_cache_get_vert_pos_and_nor_in_order(
+ MeshRenderData *rdata, MeshBatchCache *cache)
+{
+ BLI_assert(rdata->types & MR_DATATYPE_VERT);
+
+ if (cache->pos_in_order == NULL) {
+ static Gwn_VertFormat format = { 0 };
+ static struct { uint pos, nor; } attr_id;
+ if (format.attrib_ct == 0) {
+ attr_id.pos = GWN_vertformat_attr_add(&format, "pos", GWN_COMP_F32, 3, GWN_FETCH_FLOAT);
+ attr_id.nor = GWN_vertformat_attr_add(&format, "nor", GWN_COMP_I16, 3, GWN_FETCH_INT_TO_FLOAT_UNIT);
+ }
+
+ Gwn_VertBuf *vbo = cache->pos_in_order = GWN_vertbuf_create_with_format(&format);
+ const int vbo_len_capacity = mesh_render_data_verts_len_get(rdata);
+ GWN_vertbuf_data_alloc(vbo, vbo_len_capacity);
+
+ if (rdata->edit_bmesh) {
+ BMesh *bm = rdata->edit_bmesh->bm;
+ BMIter iter;
+ BMVert *eve;
+ uint i;
+
+ BM_ITER_MESH_INDEX (eve, &iter, bm, BM_VERTS_OF_MESH, i) {
+ static short no_short[3];
+ normal_float_to_short_v3(no_short, eve->no);
+
+ GWN_vertbuf_attr_set(vbo, attr_id.pos, i, eve->co);
+ GWN_vertbuf_attr_set(vbo, attr_id.nor, i, no_short);
+ }
+ BLI_assert(i == vbo_len_capacity);
+ }
+ else {
+ for (int i = 0; i < vbo_len_capacity; ++i) {
+ GWN_vertbuf_attr_set(vbo, attr_id.pos, i, rdata->mvert[i].co);
+ GWN_vertbuf_attr_set(vbo, attr_id.nor, i, rdata->mvert[i].no);
+ }
+ }
+ }
+
+ return cache->pos_in_order;
+}
+
+static Gwn_VertFormat *edit_mesh_overlay_pos_format(unsigned int *r_pos_id)
+{
+ static Gwn_VertFormat format_pos = { 0 };
+ static unsigned pos_id;
+ if (format_pos.attrib_ct == 0) {
+ pos_id = GWN_vertformat_attr_add(&format_pos, "pos", GWN_COMP_F32, 3, GWN_FETCH_FLOAT);
+ }
+ *r_pos_id = pos_id;
+ return &format_pos;
+}
+
+static Gwn_VertFormat *edit_mesh_overlay_nor_format(unsigned int *r_vnor_id, unsigned int *r_lnor_id)
+{
+ static Gwn_VertFormat format_nor = { 0 };
+ static Gwn_VertFormat format_nor_loop = { 0 };
+ static unsigned vnor_id, vnor_loop_id, lnor_id;
+ if (format_nor.attrib_ct == 0) {
+ vnor_id = GWN_vertformat_attr_add(&format_nor, "vnor", GWN_COMP_I10, 3, GWN_FETCH_INT_TO_FLOAT_UNIT);
+ vnor_loop_id = GWN_vertformat_attr_add(&format_nor_loop, "vnor", GWN_COMP_I10, 3, GWN_FETCH_INT_TO_FLOAT_UNIT);
+ lnor_id = GWN_vertformat_attr_add(&format_nor_loop, "lnor", GWN_COMP_I10, 3, GWN_FETCH_INT_TO_FLOAT_UNIT);
+ }
+ if (r_lnor_id) {
+ *r_vnor_id = vnor_loop_id;
+ *r_lnor_id = lnor_id;
+ return &format_nor_loop;
+ }
+ else {
+ *r_vnor_id = vnor_id;
+ return &format_nor;
+ }
+}
+
+static Gwn_VertFormat *edit_mesh_overlay_data_format(unsigned int *r_data_id)
+{
+ static Gwn_VertFormat format_flag = { 0 };
+ static unsigned data_id;
+ if (format_flag.attrib_ct == 0) {
+ data_id = GWN_vertformat_attr_add(&format_flag, "data", GWN_COMP_U8, 4, GWN_FETCH_INT);
+ }
+ *r_data_id = data_id;
+ return &format_flag;
+}
+
+static void mesh_batch_cache_create_overlay_tri_buffers(
+ MeshRenderData *rdata, MeshBatchCache *cache)
+{
+ BLI_assert(rdata->types & (MR_DATATYPE_VERT | MR_DATATYPE_LOOPTRI));
+
+ const int tri_len = mesh_render_data_looptri_len_get(rdata);
+
+ const int vbo_len_capacity = tri_len * 3;
+ int vbo_len_used = 0;
+
+ /* Positions */
+ Gwn_VertBuf *vbo_pos = NULL;
+ static struct { uint pos, vnor, lnor, data; } attr_id;
+ if (cache->ed_tri_pos == NULL) {
+ vbo_pos = cache->ed_tri_pos =
+ GWN_vertbuf_create_with_format(edit_mesh_overlay_pos_format(&attr_id.pos));
+ GWN_vertbuf_data_alloc(vbo_pos, vbo_len_capacity);
+ }
+
+ /* Normals */
+ Gwn_VertBuf *vbo_nor = NULL;
+ if (cache->ed_tri_nor == NULL) {
+ vbo_nor = cache->ed_tri_nor =
+ GWN_vertbuf_create_with_format(edit_mesh_overlay_nor_format(&attr_id.vnor, &attr_id.lnor));
+ GWN_vertbuf_data_alloc(vbo_nor, vbo_len_capacity);
+ }
+
+ /* Data */
+ Gwn_VertBuf *vbo_data = NULL;
+ if (cache->ed_tri_data == NULL) {
+ vbo_data = cache->ed_tri_data =
+ GWN_vertbuf_create_with_format(edit_mesh_overlay_data_format(&attr_id.data));
+ GWN_vertbuf_data_alloc(vbo_data, vbo_len_capacity);
+ }
+
+ for (int i = 0; i < tri_len; i++) {
+ const BMLoop **bm_looptri = (const BMLoop **)rdata->edit_bmesh->looptris[i];
+ if (!BM_elem_flag_test(bm_looptri[0]->f, BM_ELEM_HIDDEN)) {
+ add_overlay_tri(
+ rdata, vbo_pos, vbo_nor, vbo_data,
+ attr_id.pos, attr_id.vnor, attr_id.lnor, attr_id.data,
+ bm_looptri, vbo_len_used);
+
+ vbo_len_used += 3;
+ }
+ }
+
+ /* Finish */
+ if (vbo_len_used != vbo_len_capacity) {
+ if (vbo_pos != NULL) {
+ GWN_vertbuf_data_resize(vbo_pos, vbo_len_used);
+ }
+ if (vbo_nor != NULL) {
+ GWN_vertbuf_data_resize(vbo_nor, vbo_len_used);
+ }
+ if (vbo_data != NULL) {
+ GWN_vertbuf_data_resize(vbo_data, vbo_len_used);
+ }
+ }
+}
+
+static void mesh_batch_cache_create_overlay_ledge_buffers(
+ MeshRenderData *rdata, MeshBatchCache *cache)
+{
+ BLI_assert(rdata->types & (MR_DATATYPE_VERT | MR_DATATYPE_LOOPTRI));
+
+ const int ledge_len = mesh_render_data_loose_edges_len_get(rdata);
+
+ const int vbo_len_capacity = ledge_len * 2;
+ int vbo_len_used = 0;
+
+ /* Positions */
+ Gwn_VertBuf *vbo_pos = NULL;
+ static struct { uint pos, vnor, data; } attr_id;
+ if (cache->ed_ledge_pos == NULL) {
+ vbo_pos = cache->ed_ledge_pos =
+ GWN_vertbuf_create_with_format(edit_mesh_overlay_pos_format(&attr_id.pos));
+ GWN_vertbuf_data_alloc(vbo_pos, vbo_len_capacity);
+ }
+
+ /* Normals */
+ Gwn_VertBuf *vbo_nor = NULL;
+ if (cache->ed_ledge_nor == NULL) {
+ vbo_nor = cache->ed_ledge_nor =
+ GWN_vertbuf_create_with_format(edit_mesh_overlay_nor_format(&attr_id.vnor, NULL));
+ GWN_vertbuf_data_alloc(vbo_nor, vbo_len_capacity);
+ }
+
+ /* Data */
+ Gwn_VertBuf *vbo_data = NULL;
+ if (cache->ed_ledge_data == NULL) {
+ vbo_data = cache->ed_ledge_data =
+ GWN_vertbuf_create_with_format(edit_mesh_overlay_data_format(&attr_id.data));
+ GWN_vertbuf_data_alloc(vbo_data, vbo_len_capacity);
+ }
+
+ if (rdata->edit_bmesh) {
+ BMesh *bm = rdata->edit_bmesh->bm;
+ for (uint i = 0; i < ledge_len; i++) {
+ const BMEdge *eed = BM_edge_at_index(bm, rdata->loose_edges[i]);
+ if (!BM_elem_flag_test(eed, BM_ELEM_HIDDEN)) {
+ add_overlay_loose_edge(
+ rdata, vbo_pos, vbo_nor, vbo_data,
+ attr_id.pos, attr_id.vnor, attr_id.data,
+ eed, vbo_len_used);
+ vbo_len_used += 2;
+ }
+ }
+ }
+
+ /* Finish */
+ if (vbo_len_used != vbo_len_capacity) {
+ if (vbo_pos != NULL) {
+ GWN_vertbuf_data_resize(vbo_pos, vbo_len_used);
+ }
+ if (vbo_nor != NULL) {
+ GWN_vertbuf_data_resize(vbo_nor, vbo_len_used);
+ }
+ if (vbo_data != NULL) {
+ GWN_vertbuf_data_resize(vbo_data, vbo_len_used);
+ }
+ }
+}
+
+static void mesh_batch_cache_create_overlay_lvert_buffers(
+ MeshRenderData *rdata, MeshBatchCache *cache)
+{
+ BLI_assert(rdata->types & (MR_DATATYPE_VERT | MR_DATATYPE_LOOPTRI));
+
+ BMesh *bm = rdata->edit_bmesh->bm;
+ const int lvert_len = mesh_render_data_loose_verts_len_get(rdata);
+
+ const int vbo_len_capacity = lvert_len;
+ int vbo_len_used = 0;
+
+ static struct { uint pos, vnor, data; } attr_id;
+
+ /* Positions */
+ Gwn_VertBuf *vbo_pos = NULL;
+ if (cache->ed_lvert_pos == NULL) {
+ vbo_pos = cache->ed_lvert_pos =
+ GWN_vertbuf_create_with_format(edit_mesh_overlay_pos_format(&attr_id.pos));
+ GWN_vertbuf_data_alloc(vbo_pos, vbo_len_capacity);
+ }
+
+ /* Normals */
+ Gwn_VertBuf *vbo_nor = NULL;
+ if (cache->ed_lvert_nor == NULL) {
+ vbo_nor = cache->ed_lvert_nor =
+ GWN_vertbuf_create_with_format(edit_mesh_overlay_nor_format(&attr_id.vnor, NULL));
+ GWN_vertbuf_data_alloc(vbo_nor, vbo_len_capacity);
+ }
+
+ /* Data */
+ Gwn_VertBuf *vbo_data = NULL;
+ if (cache->ed_lvert_data == NULL) {
+ vbo_data = cache->ed_lvert_data =
+ GWN_vertbuf_create_with_format(edit_mesh_overlay_data_format(&attr_id.data));
+ GWN_vertbuf_data_alloc(vbo_data, vbo_len_capacity);
+ }
+
+ for (uint i = 0; i < lvert_len; i++) {
+ BMVert *eve = BM_vert_at_index(bm, rdata->loose_verts[i]);
+ add_overlay_loose_vert(
+ rdata, vbo_pos, vbo_nor, vbo_data,
+ attr_id.pos, attr_id.vnor, attr_id.data,
+ eve, vbo_len_used);
+ vbo_len_used += 1;
+ }
+
+ /* Finish */
+ if (vbo_len_used != vbo_len_capacity) {
+ if (vbo_pos != NULL) {
+ GWN_vertbuf_data_resize(vbo_pos, vbo_len_used);
+ }
+ if (vbo_nor != NULL) {
+ GWN_vertbuf_data_resize(vbo_nor, vbo_len_used);
+ }
+ if (vbo_data != NULL) {
+ GWN_vertbuf_data_resize(vbo_data, vbo_len_used);
+ }
+ }
+}
+
+/* Position */
+static Gwn_VertBuf *mesh_batch_cache_get_edit_tri_pos(
+ MeshRenderData *rdata, MeshBatchCache *cache)
+{
+ BLI_assert(rdata->types & MR_DATATYPE_VERT);
+
+ if (cache->ed_tri_pos == NULL) {
+ mesh_batch_cache_create_overlay_tri_buffers(rdata, cache);
+ }
+
+ return cache->ed_tri_pos;
+}
+
+static Gwn_VertBuf *mesh_batch_cache_get_edit_ledge_pos(
+ MeshRenderData *rdata, MeshBatchCache *cache)
+{
+ BLI_assert(rdata->types & MR_DATATYPE_VERT);
+
+ if (cache->ed_ledge_pos == NULL) {
+ mesh_batch_cache_create_overlay_ledge_buffers(rdata, cache);
+ }
+
+ return cache->ed_ledge_pos;
+}
+
+static Gwn_VertBuf *mesh_batch_cache_get_edit_lvert_pos(
+ MeshRenderData *rdata, MeshBatchCache *cache)
+{
+ BLI_assert(rdata->types & MR_DATATYPE_VERT);
+
+ if (cache->ed_lvert_pos == NULL) {
+ mesh_batch_cache_create_overlay_lvert_buffers(rdata, cache);
+ }
+
+ return cache->ed_lvert_pos;
+}
+
+/* Normal */
+static Gwn_VertBuf *mesh_batch_cache_get_edit_tri_nor(
+ MeshRenderData *rdata, MeshBatchCache *cache)
+{
+ BLI_assert(rdata->types & MR_DATATYPE_VERT);
+
+ if (cache->ed_tri_nor == NULL) {
+ mesh_batch_cache_create_overlay_tri_buffers(rdata, cache);
+ }
+
+ return cache->ed_tri_nor;
+}
+
+static Gwn_VertBuf *mesh_batch_cache_get_edit_ledge_nor(
+ MeshRenderData *rdata, MeshBatchCache *cache)
+{
+ BLI_assert(rdata->types & MR_DATATYPE_VERT);
+
+ if (cache->ed_ledge_nor == NULL) {
+ mesh_batch_cache_create_overlay_ledge_buffers(rdata, cache);
+ }
+
+ return cache->ed_ledge_nor;
+}
+
+static Gwn_VertBuf *mesh_batch_cache_get_edit_lvert_nor(
+ MeshRenderData *rdata, MeshBatchCache *cache)
+{
+ BLI_assert(rdata->types & MR_DATATYPE_VERT);
+
+ if (cache->ed_lvert_nor == NULL) {
+ mesh_batch_cache_create_overlay_lvert_buffers(rdata, cache);
+ }
+
+ return cache->ed_lvert_nor;
+}
+
+/* Data */
+static Gwn_VertBuf *mesh_batch_cache_get_edit_tri_data(
+ MeshRenderData *rdata, MeshBatchCache *cache)
+{
+ BLI_assert(rdata->types & MR_DATATYPE_VERT);
+
+ if (cache->ed_tri_data == NULL) {
+ mesh_batch_cache_create_overlay_tri_buffers(rdata, cache);
+ }
+
+ return cache->ed_tri_data;
+}
+
+static Gwn_VertBuf *mesh_batch_cache_get_edit_ledge_data(
+ MeshRenderData *rdata, MeshBatchCache *cache)
+{
+ BLI_assert(rdata->types & MR_DATATYPE_VERT);
+
+ if (cache->ed_ledge_data == NULL) {
+ mesh_batch_cache_create_overlay_ledge_buffers(rdata, cache);
+ }
+
+ return cache->ed_ledge_data;
+}
+
+static Gwn_VertBuf *mesh_batch_cache_get_edit_lvert_data(
+ MeshRenderData *rdata, MeshBatchCache *cache)
+{
+ BLI_assert(rdata->types & MR_DATATYPE_VERT);
+
+ if (cache->ed_lvert_data == NULL) {
+ mesh_batch_cache_create_overlay_lvert_buffers(rdata, cache);
+ }
+
+ return cache->ed_lvert_data;
+}
+
+static Gwn_IndexBuf *mesh_batch_cache_get_edges_in_order(MeshRenderData *rdata, MeshBatchCache *cache)
+{
+ BLI_assert(rdata->types & (MR_DATATYPE_VERT | MR_DATATYPE_EDGE));
+
+ if (cache->edges_in_order == NULL) {
+ const int vert_len = mesh_render_data_verts_len_get(rdata);
+ const int edge_len = mesh_render_data_edges_len_get(rdata);
+
+ Gwn_IndexBufBuilder elb;
+ GWN_indexbuf_init(&elb, GWN_PRIM_LINES, edge_len, vert_len);
+
+ BLI_assert(rdata->types & MR_DATATYPE_EDGE);
+
+ if (rdata->edit_bmesh) {
+ BMesh *bm = rdata->edit_bmesh->bm;
+ BMIter eiter;
+ BMEdge *eed;
+ BM_ITER_MESH(eed, &eiter, bm, BM_EDGES_OF_MESH) {
+ if (!BM_elem_flag_test(eed, BM_ELEM_HIDDEN)) {
+ GWN_indexbuf_add_line_verts(&elb, BM_elem_index_get(eed->v1), BM_elem_index_get(eed->v2));
+ }
+ }
+ }
+ else {
+ const MEdge *ed = rdata->medge;
+ for (int i = 0; i < edge_len; i++, ed++) {
+ GWN_indexbuf_add_line_verts(&elb, ed->v1, ed->v2);
+ }
+ }
+ cache->edges_in_order = GWN_indexbuf_build(&elb);
+ }
+
+ return cache->edges_in_order;
+}
+
+static Gwn_IndexBuf *mesh_batch_cache_get_triangles_in_order(MeshRenderData *rdata, MeshBatchCache *cache)
+{
+ BLI_assert(rdata->types & (MR_DATATYPE_VERT | MR_DATATYPE_LOOPTRI));
+
+ if (cache->triangles_in_order == NULL) {
+ const int vert_len = mesh_render_data_verts_len_get(rdata);
+ const int tri_len = mesh_render_data_looptri_len_get(rdata);
+
+ Gwn_IndexBufBuilder elb;
+ GWN_indexbuf_init(&elb, GWN_PRIM_TRIS, tri_len, vert_len);
+
+ if (rdata->edit_bmesh) {
+ for (int i = 0; i < tri_len; ++i) {
+ const BMLoop **ltri = (const BMLoop **)rdata->edit_bmesh->looptris[i];
+ if (!BM_elem_flag_test(ltri[0]->f, BM_ELEM_HIDDEN)) {
+ for (uint tri_corner = 0; tri_corner < 3; tri_corner++) {
+ GWN_indexbuf_add_generic_vert(&elb, BM_elem_index_get(ltri[tri_corner]->v));
+ }
+ }
+ }
+ }
+ else {
+ for (int i = 0; i < tri_len; ++i) {
+ const MLoopTri *mlt = &rdata->mlooptri[i];
+ for (uint tri_corner = 0; tri_corner < 3; tri_corner++) {
+ GWN_indexbuf_add_generic_vert(&elb, mlt->tri[tri_corner]);
+ }
+ }
+ }
+ cache->triangles_in_order = GWN_indexbuf_build(&elb);
+ }
+
+ return cache->triangles_in_order;
+}
+
+static Gwn_IndexBuf **mesh_batch_cache_get_triangles_in_order_split_by_material(
+ MeshRenderData *rdata, MeshBatchCache *cache)
+{
+ BLI_assert(rdata->types & (MR_DATATYPE_VERT | MR_DATATYPE_POLY));
+
+ if (cache->shaded_triangles_in_order == NULL) {
+ const int poly_len = mesh_render_data_polys_len_get(rdata);
+ const int tri_len = mesh_render_data_looptri_len_get(rdata);
+ const int mat_len = mesh_render_data_mat_len_get(rdata);
+
+ int *mat_tri_len = MEM_callocN(sizeof(*mat_tri_len) * mat_len, __func__);
+ cache->shaded_triangles_in_order = MEM_callocN(sizeof(*cache->shaded_triangles) * mat_len, __func__);
+ Gwn_IndexBufBuilder *elb = MEM_callocN(sizeof(*elb) * mat_len, __func__);
+
+		/* Note that polygons (not triangles) are used here.
+		 * This is OK because the result is _guaranteed_ to be the same:
+		 * every polygon with n loops triangulates into (n - 2) looptris
+		 * (a quad contributes 2, for example), so the per-material totals match. */
+ if (rdata->edit_bmesh) {
+ BMesh *bm = rdata->edit_bmesh->bm;
+ BMIter fiter;
+ BMFace *efa;
+
+ BM_ITER_MESH(efa, &fiter, bm, BM_FACES_OF_MESH) {
+ if (!BM_elem_flag_test(efa, BM_ELEM_HIDDEN)) {
+ const short ma_id = efa->mat_nr < mat_len ? efa->mat_nr : 0;
+ mat_tri_len[ma_id] += (efa->len - 2);
+ }
+ }
+ }
+ else {
+ for (uint i = 0; i < poly_len; i++) {
+				const MPoly *mp = &rdata->mpoly[i];
+ const short ma_id = mp->mat_nr < mat_len ? mp->mat_nr : 0;
+ mat_tri_len[ma_id] += (mp->totloop - 2);
+ }
+ }
+
+ /* Init ELBs. */
+ for (int i = 0; i < mat_len; ++i) {
+ GWN_indexbuf_init(&elb[i], GWN_PRIM_TRIS, mat_tri_len[i], tri_len * 3);
+ }
+
+ /* Populate ELBs. */
+ uint nidx = 0;
+ if (rdata->edit_bmesh) {
+ BMesh *bm = rdata->edit_bmesh->bm;
+ BMIter fiter;
+ BMFace *efa;
+
+ BM_ITER_MESH(efa, &fiter, bm, BM_FACES_OF_MESH) {
+ if (!BM_elem_flag_test(efa, BM_ELEM_HIDDEN)) {
+ const short ma_id = efa->mat_nr < mat_len ? efa->mat_nr : 0;
+ for (int j = 2; j < efa->len; j++) {
+ GWN_indexbuf_add_tri_verts(&elb[ma_id], nidx + 0, nidx + 1, nidx + 2);
+ nidx += 3;
+ }
+ }
+ }
+ }
+ else {
+ for (uint i = 0; i < poly_len; i++) {
+				const MPoly *mp = &rdata->mpoly[i];
+ const short ma_id = mp->mat_nr < mat_len ? mp->mat_nr : 0;
+ for (int j = 2; j < mp->totloop; j++) {
+ GWN_indexbuf_add_tri_verts(&elb[ma_id], nidx + 0, nidx + 1, nidx + 2);
+ nidx += 3;
+ }
+ }
+ }
+
+ /* Build ELBs. */
+ for (int i = 0; i < mat_len; ++i) {
+ cache->shaded_triangles_in_order[i] = GWN_indexbuf_build(&elb[i]);
+ }
+
+ MEM_freeN(mat_tri_len);
+ MEM_freeN(elb);
+ }
+
+ return cache->shaded_triangles_in_order;
+}
+
+static Gwn_VertBuf *mesh_create_edge_pos_with_sel(
+ MeshRenderData *rdata, bool use_wire, bool use_select_bool)
+{
+ BLI_assert(rdata->types & (MR_DATATYPE_VERT | MR_DATATYPE_EDGE | MR_DATATYPE_POLY | MR_DATATYPE_LOOP));
+ BLI_assert(rdata->edit_bmesh == NULL);
+
+ Gwn_VertBuf *vbo;
+ {
+ unsigned int vidx = 0, cidx = 0;
+
+ static Gwn_VertFormat format = { 0 };
+ static struct { uint pos, sel; } attr_id;
+ if (format.attrib_ct == 0) {
+ attr_id.pos = GWN_vertformat_attr_add(&format, "pos", GWN_COMP_F32, 3, GWN_FETCH_FLOAT);
+ attr_id.sel = GWN_vertformat_attr_add(&format, "select", GWN_COMP_U8, 1, GWN_FETCH_INT);
+ }
+
+ const int edge_len = mesh_render_data_edges_len_get(rdata);
+
+ vbo = GWN_vertbuf_create_with_format(&format);
+
+ const int vbo_len_capacity = edge_len * 2;
+ int vbo_len_used = 0;
+ GWN_vertbuf_data_alloc(vbo, vbo_len_capacity);
+
+ if (use_select_bool) {
+ mesh_render_data_ensure_edge_select_bool(rdata, use_wire);
+ }
+ bool *edge_select_bool = use_select_bool ? rdata->edge_select_bool : NULL;
+
+ for (int i = 0; i < edge_len; i++) {
+ const MEdge *ed = &rdata->medge[i];
+
+ uchar edge_vert_sel;
+ if (use_select_bool && edge_select_bool[i]) {
+ edge_vert_sel = true;
+ }
+ else if (use_wire) {
+ edge_vert_sel = false;
+ }
+ else {
+ continue;
+ }
+
+ GWN_vertbuf_attr_set(vbo, attr_id.sel, cidx++, &edge_vert_sel);
+ GWN_vertbuf_attr_set(vbo, attr_id.sel, cidx++, &edge_vert_sel);
+
+ GWN_vertbuf_attr_set(vbo, attr_id.pos, vidx++, rdata->mvert[ed->v1].co);
+ GWN_vertbuf_attr_set(vbo, attr_id.pos, vidx++, rdata->mvert[ed->v2].co);
+ }
+ vbo_len_used = vidx;
+
+ if (vbo_len_capacity != vbo_len_used) {
+ GWN_vertbuf_data_resize(vbo, vbo_len_used);
+ }
+ }
+
+ return vbo;
+}
+
+static Gwn_IndexBuf *mesh_create_tri_overlay_weight_faces(
+ MeshRenderData *rdata)
+{
+ BLI_assert(rdata->types & (MR_DATATYPE_VERT | MR_DATATYPE_LOOPTRI));
+
+ {
+ const int vert_len = mesh_render_data_verts_len_get(rdata);
+ const int tri_len = mesh_render_data_looptri_len_get(rdata);
+
+ Gwn_IndexBufBuilder elb;
+ GWN_indexbuf_init(&elb, GWN_PRIM_TRIS, tri_len, vert_len);
+
+ for (int i = 0; i < tri_len; ++i) {
+ const MLoopTri *mlt = &rdata->mlooptri[i];
+ if (!(rdata->mpoly[mlt->poly].flag & (ME_FACE_SEL | ME_HIDE))) {
+ for (uint tri_corner = 0; tri_corner < 3; tri_corner++) {
+ GWN_indexbuf_add_generic_vert(&elb, rdata->mloop[mlt->tri[tri_corner]].v);
+ }
+ }
+ }
+ return GWN_indexbuf_build(&elb);
+ }
+}
+
+/**
+ * Non-edit mode vertices (only used for weight-paint mode).
+ */
+static Gwn_VertBuf *mesh_create_vert_pos_with_overlay_data(
+ MeshRenderData *rdata)
+{
+ BLI_assert(rdata->types & (MR_DATATYPE_VERT));
+ BLI_assert(rdata->edit_bmesh == NULL);
+
+ Gwn_VertBuf *vbo;
+ {
+ unsigned int cidx = 0;
+
+ static Gwn_VertFormat format = { 0 };
+ static struct { uint data; } attr_id;
+ if (format.attrib_ct == 0) {
+ attr_id.data = GWN_vertformat_attr_add(&format, "data", GWN_COMP_I8, 1, GWN_FETCH_INT);
+ }
+
+ const int vert_len = mesh_render_data_verts_len_get(rdata);
+
+ vbo = GWN_vertbuf_create_with_format(&format);
+
+ const int vbo_len_capacity = vert_len;
+ int vbo_len_used = 0;
+ GWN_vertbuf_data_alloc(vbo, vbo_len_capacity);
+
+ for (int i = 0; i < vert_len; i++) {
+ const MVert *mv = &rdata->mvert[i];
+ const char data = mv->flag & (SELECT | ME_HIDE);
+ GWN_vertbuf_attr_set(vbo, attr_id.data, cidx++, &data);
+ }
+ vbo_len_used = cidx;
+
+ if (vbo_len_capacity != vbo_len_used) {
+ GWN_vertbuf_data_resize(vbo, vbo_len_used);
+ }
+ }
+ return vbo;
+}
+
+/** \} */
+
+
+/* ---------------------------------------------------------------------- */
+
+/** \name Public API
+ * \{ */
+
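+/* A minimal usage sketch (not part of this file): a draw engine typically fetches a
+ * batch from the cache once per object and submits it to a shading group. The
+ * DRW_shgroup_call_add() signature is assumed from DRW_render.h in this branch, and
+ * 'shgroup' is assumed to have been created by the caller. */
+#if 0
+static void example_cache_populate(DRWShadingGroup *shgroup, Object *ob)
+{
+	/* Cached: cheap to call every frame, the VBO/IBO are only built once. */
+	Gwn_Batch *geom = DRW_mesh_batch_cache_get_all_edges(ob->data);
+	DRW_shgroup_call_add(shgroup, geom, ob->obmat);
+}
+#endif
+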
+Gwn_Batch *DRW_mesh_batch_cache_get_all_edges(Mesh *me)
+{
+ MeshBatchCache *cache = mesh_batch_cache_get(me);
+
+ if (cache->all_edges == NULL) {
+ /* create batch from Mesh */
+ const int datatype = MR_DATATYPE_VERT | MR_DATATYPE_EDGE;
+ MeshRenderData *rdata = mesh_render_data_create(me, datatype);
+
+ cache->all_edges = GWN_batch_create(
+ GWN_PRIM_LINES, mesh_batch_cache_get_vert_pos_and_nor_in_order(rdata, cache),
+ mesh_batch_cache_get_edges_in_order(rdata, cache));
+
+ mesh_render_data_free(rdata);
+ }
+
+ return cache->all_edges;
+}
+
+Gwn_Batch *DRW_mesh_batch_cache_get_all_triangles(Mesh *me)
+{
+ MeshBatchCache *cache = mesh_batch_cache_get(me);
+
+ if (cache->all_triangles == NULL) {
+		/* create batch from Mesh */
+ const int datatype = MR_DATATYPE_VERT | MR_DATATYPE_LOOPTRI;
+ MeshRenderData *rdata = mesh_render_data_create(me, datatype);
+
+ cache->all_triangles = GWN_batch_create(
+ GWN_PRIM_TRIS, mesh_batch_cache_get_vert_pos_and_nor_in_order(rdata, cache),
+ mesh_batch_cache_get_triangles_in_order(rdata, cache));
+
+ mesh_render_data_free(rdata);
+ }
+
+ return cache->all_triangles;
+}
+
+Gwn_Batch *DRW_mesh_batch_cache_get_triangles_with_normals(Mesh *me)
+{
+ MeshBatchCache *cache = mesh_batch_cache_get(me);
+
+ if (cache->triangles_with_normals == NULL) {
+ const int datatype = MR_DATATYPE_VERT | MR_DATATYPE_LOOPTRI | MR_DATATYPE_LOOP | MR_DATATYPE_POLY;
+ MeshRenderData *rdata = mesh_render_data_create(me, datatype);
+
+ cache->triangles_with_normals = GWN_batch_create(
+ GWN_PRIM_TRIS, mesh_batch_cache_get_tri_pos_and_normals(rdata, cache), NULL);
+
+ mesh_render_data_free(rdata);
+ }
+
+ return cache->triangles_with_normals;
+}
+
+Gwn_Batch *DRW_mesh_batch_cache_get_triangles_with_normals_and_weights(Mesh *me, int defgroup)
+{
+ MeshBatchCache *cache = mesh_batch_cache_get(me);
+
+ if (cache->triangles_with_weights == NULL) {
+ const bool use_hide = (me->editflag & (ME_EDIT_PAINT_VERT_SEL | ME_EDIT_PAINT_FACE_SEL)) != 0;
+ const int datatype =
+ MR_DATATYPE_VERT | MR_DATATYPE_LOOPTRI | MR_DATATYPE_LOOP | MR_DATATYPE_POLY | MR_DATATYPE_DVERT;
+ MeshRenderData *rdata = mesh_render_data_create(me, datatype);
+
+ cache->triangles_with_weights = GWN_batch_create_ex(
+ GWN_PRIM_TRIS, mesh_create_tri_weights(rdata, use_hide, defgroup), NULL, GWN_BATCH_OWNS_VBO);
+
+ Gwn_VertBuf *vbo_tris = use_hide ?
+ mesh_create_tri_pos_and_normals_visible_only(rdata) :
+ mesh_batch_cache_get_tri_pos_and_normals(rdata, cache);
+
+ GWN_batch_vertbuf_add_ex(cache->triangles_with_weights, vbo_tris, use_hide);
+
+ mesh_render_data_free(rdata);
+ }
+
+ return cache->triangles_with_weights;
+}
+
+Gwn_Batch *DRW_mesh_batch_cache_get_triangles_with_normals_and_vert_colors(Mesh *me)
+{
+ MeshBatchCache *cache = mesh_batch_cache_get(me);
+
+ if (cache->triangles_with_vert_colors == NULL) {
+ const bool use_hide = (me->editflag & (ME_EDIT_PAINT_VERT_SEL | ME_EDIT_PAINT_FACE_SEL)) != 0;
+ const int datatype =
+ MR_DATATYPE_VERT | MR_DATATYPE_LOOPTRI | MR_DATATYPE_LOOP | MR_DATATYPE_POLY | MR_DATATYPE_LOOPCOL;
+ MeshRenderData *rdata = mesh_render_data_create(me, datatype);
+
+ cache->triangles_with_vert_colors = GWN_batch_create_ex(
+ GWN_PRIM_TRIS, mesh_create_tri_vert_colors(rdata, use_hide), NULL, GWN_BATCH_OWNS_VBO);
+
+ Gwn_VertBuf *vbo_tris = use_hide ?
+ mesh_create_tri_pos_and_normals_visible_only(rdata) :
+ mesh_batch_cache_get_tri_pos_and_normals(rdata, cache);
+ GWN_batch_vertbuf_add_ex(cache->triangles_with_vert_colors, vbo_tris, use_hide);
+
+ mesh_render_data_free(rdata);
+ }
+
+ return cache->triangles_with_vert_colors;
+}
+
+
+struct Gwn_Batch *DRW_mesh_batch_cache_get_triangles_with_select_id(
+ struct Mesh *me, bool use_hide, uint select_id_offset)
+{
+ MeshBatchCache *cache = mesh_batch_cache_get(me);
+
+ if (cache->triangles_with_select_id_offset != select_id_offset) {
+ cache->triangles_with_select_id_offset = select_id_offset;
+ GWN_BATCH_DISCARD_SAFE(cache->triangles_with_select_id);
+ }
+
+ if (cache->triangles_with_select_id == NULL) {
+ const int datatype =
+ MR_DATATYPE_VERT | MR_DATATYPE_LOOPTRI | MR_DATATYPE_LOOP | MR_DATATYPE_POLY;
+ MeshRenderData *rdata = mesh_render_data_create(me, datatype);
+
+ cache->triangles_with_select_id = GWN_batch_create_ex(
+ GWN_PRIM_TRIS, mesh_create_tri_select_id(rdata, use_hide, select_id_offset), NULL, GWN_BATCH_OWNS_VBO);
+
+ Gwn_VertBuf *vbo_tris = use_hide ?
+ mesh_create_tri_pos_and_normals_visible_only(rdata) :
+ mesh_batch_cache_get_tri_pos_and_normals(rdata, cache);
+ GWN_batch_vertbuf_add_ex(cache->triangles_with_select_id, vbo_tris, use_hide);
+
+ mesh_render_data_free(rdata);
+ }
+
+ return cache->triangles_with_select_id;
+}
+
+/**
+ * Same as #DRW_mesh_batch_cache_get_triangles_with_select_id
+ * without the IDs. Use it to mask out geometry, e.g. don't select face-dots behind other faces.
+ */
+struct Gwn_Batch *DRW_mesh_batch_cache_get_triangles_with_select_mask(struct Mesh *me, bool use_hide)
+{
+ MeshBatchCache *cache = mesh_batch_cache_get(me);
+ if (cache->triangles_with_select_mask == NULL) {
+ const int datatype =
+ MR_DATATYPE_VERT | MR_DATATYPE_LOOPTRI | MR_DATATYPE_LOOP | MR_DATATYPE_POLY;
+ MeshRenderData *rdata = mesh_render_data_create(me, datatype);
+
+ Gwn_VertBuf *vbo_tris = use_hide ?
+ mesh_create_tri_pos_and_normals_visible_only(rdata) :
+ mesh_batch_cache_get_tri_pos_and_normals(rdata, cache);
+
+ cache->triangles_with_select_mask = GWN_batch_create_ex(
+ GWN_PRIM_TRIS, vbo_tris, NULL, use_hide ? GWN_BATCH_OWNS_VBO : 0);
+
+ mesh_render_data_free(rdata);
+ }
+
+ return cache->triangles_with_select_mask;
+}
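+
+/* Sketch of the intended use (an assumption based on the comment above, not code taken
+ * from the selection implementation): draw the mask batch in a depth-only pass first,
+ * then draw the select-id batches so occluded face-dots fail the depth test. */
+#if 0
+	Gwn_Batch *mask = DRW_mesh_batch_cache_get_triangles_with_select_mask(me, true);
+	Gwn_Batch *dots = DRW_mesh_batch_cache_get_facedots_with_select_id(me, select_id_offset);
+	/* 1) submit 'mask' with a depth-only shader, 2) submit 'dots' with the select-id shader. */
+#endif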
+
+Gwn_Batch *DRW_mesh_batch_cache_get_points_with_normals(Mesh *me)
+{
+ MeshBatchCache *cache = mesh_batch_cache_get(me);
+
+ if (cache->points_with_normals == NULL) {
+ const int datatype = MR_DATATYPE_VERT | MR_DATATYPE_LOOPTRI | MR_DATATYPE_LOOP | MR_DATATYPE_POLY;
+ MeshRenderData *rdata = mesh_render_data_create(me, datatype);
+
+ cache->points_with_normals = GWN_batch_create(
+ GWN_PRIM_POINTS, mesh_batch_cache_get_tri_pos_and_normals(rdata, cache), NULL);
+
+ mesh_render_data_free(rdata);
+ }
+
+ return cache->points_with_normals;
+}
+
+Gwn_Batch *DRW_mesh_batch_cache_get_all_verts(Mesh *me)
+{
+ MeshBatchCache *cache = mesh_batch_cache_get(me);
+
+ if (cache->all_verts == NULL) {
+		/* create batch from Mesh */
+ MeshRenderData *rdata = mesh_render_data_create(me, MR_DATATYPE_VERT);
+
+ cache->all_verts = GWN_batch_create(
+ GWN_PRIM_POINTS, mesh_batch_cache_get_vert_pos_and_nor_in_order(rdata, cache), NULL);
+
+ mesh_render_data_free(rdata);
+ }
+
+ return cache->all_verts;
+}
+
+Gwn_Batch *DRW_mesh_batch_cache_get_fancy_edges(Mesh *me)
+{
+ MeshBatchCache *cache = mesh_batch_cache_get(me);
+
+ if (cache->fancy_edges == NULL) {
+		/* create batch from Mesh */
+ static Gwn_VertFormat format = { 0 };
+ static struct { uint pos, n1, n2; } attr_id;
+ if (format.attrib_ct == 0) {
+ attr_id.pos = GWN_vertformat_attr_add(&format, "pos", GWN_COMP_F32, 3, GWN_FETCH_FLOAT);
+
+ attr_id.n1 = GWN_vertformat_attr_add(&format, "N1", GWN_COMP_I10, 3, GWN_FETCH_INT_TO_FLOAT_UNIT);
+ attr_id.n2 = GWN_vertformat_attr_add(&format, "N2", GWN_COMP_I10, 3, GWN_FETCH_INT_TO_FLOAT_UNIT);
+ }
+ Gwn_VertBuf *vbo = GWN_vertbuf_create_with_format(&format);
+
+ MeshRenderData *rdata = mesh_render_data_create(
+ me, MR_DATATYPE_VERT | MR_DATATYPE_EDGE | MR_DATATYPE_LOOP | MR_DATATYPE_POLY);
+
+ const int edge_len = mesh_render_data_edges_len_get(rdata);
+
+ const int vbo_len_capacity = edge_len * 2; /* these are PRIM_LINE verts, not mesh verts */
+ int vbo_len_used = 0;
+ GWN_vertbuf_data_alloc(vbo, vbo_len_capacity);
+ for (int i = 0; i < edge_len; ++i) {
+ float *vcos1, *vcos2;
+ float *pnor1 = NULL, *pnor2 = NULL;
+ bool is_manifold;
+
+ if (mesh_render_data_edge_vcos_manifold_pnors(rdata, i, &vcos1, &vcos2, &pnor1, &pnor2, &is_manifold)) {
+
+ Gwn_PackedNormal n1value = { .x = 0, .y = 0, .z = +511 };
+ Gwn_PackedNormal n2value = { .x = 0, .y = 0, .z = -511 };
+
+ if (is_manifold) {
+ n1value = GWN_normal_convert_i10_v3(pnor1);
+ n2value = GWN_normal_convert_i10_v3(pnor2);
+ }
+
+ const Gwn_PackedNormal *n1 = &n1value;
+ const Gwn_PackedNormal *n2 = &n2value;
+
+ GWN_vertbuf_attr_set(vbo, attr_id.pos, 2 * i, vcos1);
+ GWN_vertbuf_attr_set(vbo, attr_id.n1, 2 * i, n1);
+ GWN_vertbuf_attr_set(vbo, attr_id.n2, 2 * i, n2);
+
+ GWN_vertbuf_attr_set(vbo, attr_id.pos, 2 * i + 1, vcos2);
+ GWN_vertbuf_attr_set(vbo, attr_id.n1, 2 * i + 1, n1);
+ GWN_vertbuf_attr_set(vbo, attr_id.n2, 2 * i + 1, n2);
+
+ vbo_len_used += 2;
+ }
+ }
+ if (vbo_len_used != vbo_len_capacity) {
+ GWN_vertbuf_data_resize(vbo, vbo_len_used);
+ }
+
+ cache->fancy_edges = GWN_batch_create_ex(GWN_PRIM_LINES, vbo, NULL, GWN_BATCH_OWNS_VBO);
+
+ mesh_render_data_free(rdata);
+ }
+
+ return cache->fancy_edges;
+}
+
+static void mesh_batch_cache_create_overlay_batches(Mesh *me)
+{
+ BLI_assert(me->edit_btmesh != NULL);
+
+ /* Since MR_DATATYPE_OVERLAY is slow to generate, generate them all at once */
+ const int options =
+ MR_DATATYPE_VERT | MR_DATATYPE_EDGE | MR_DATATYPE_LOOP | MR_DATATYPE_POLY |
+ MR_DATATYPE_LOOPTRI | MR_DATATYPE_OVERLAY;
+
+ MeshBatchCache *cache = mesh_batch_cache_get(me);
+ MeshRenderData *rdata = mesh_render_data_create(me, options);
+
+ if (cache->overlay_triangles == NULL) {
+ cache->overlay_triangles = GWN_batch_create(
+ GWN_PRIM_TRIS, mesh_batch_cache_get_edit_tri_pos(rdata, cache), NULL);
+ GWN_batch_vertbuf_add(cache->overlay_triangles, mesh_batch_cache_get_edit_tri_nor(rdata, cache));
+ GWN_batch_vertbuf_add(cache->overlay_triangles, mesh_batch_cache_get_edit_tri_data(rdata, cache));
+ }
+
+ if (cache->overlay_loose_edges == NULL) {
+ cache->overlay_loose_edges = GWN_batch_create(
+ GWN_PRIM_LINES, mesh_batch_cache_get_edit_ledge_pos(rdata, cache), NULL);
+ GWN_batch_vertbuf_add(cache->overlay_loose_edges, mesh_batch_cache_get_edit_ledge_nor(rdata, cache));
+ GWN_batch_vertbuf_add(cache->overlay_loose_edges, mesh_batch_cache_get_edit_ledge_data(rdata, cache));
+ }
+
+ if (cache->overlay_loose_verts == NULL) {
+ cache->overlay_loose_verts = GWN_batch_create(
+ GWN_PRIM_POINTS, mesh_batch_cache_get_edit_lvert_pos(rdata, cache), NULL);
+ GWN_batch_vertbuf_add(cache->overlay_loose_verts, mesh_batch_cache_get_edit_lvert_nor(rdata, cache));
+ GWN_batch_vertbuf_add(cache->overlay_loose_verts, mesh_batch_cache_get_edit_lvert_data(rdata, cache));
+ }
+
+ if (cache->overlay_triangles_nor == NULL) {
+ cache->overlay_triangles_nor = GWN_batch_create(
+ GWN_PRIM_POINTS, mesh_batch_cache_get_edit_tri_pos(rdata, cache), NULL);
+ GWN_batch_vertbuf_add(cache->overlay_triangles_nor, mesh_batch_cache_get_edit_tri_nor(rdata, cache));
+ }
+
+ if (cache->overlay_loose_edges_nor == NULL) {
+ cache->overlay_loose_edges_nor = GWN_batch_create(
+ GWN_PRIM_POINTS, mesh_batch_cache_get_edit_ledge_pos(rdata, cache), NULL);
+ GWN_batch_vertbuf_add(cache->overlay_loose_edges_nor, mesh_batch_cache_get_edit_ledge_nor(rdata, cache));
+ }
+
+ mesh_render_data_free(rdata);
+}
+
+Gwn_Batch *DRW_mesh_batch_cache_get_overlay_triangles(Mesh *me)
+{
+ MeshBatchCache *cache = mesh_batch_cache_get(me);
+
+ if (cache->overlay_triangles == NULL) {
+ mesh_batch_cache_create_overlay_batches(me);
+ }
+
+ return cache->overlay_triangles;
+}
+
+Gwn_Batch *DRW_mesh_batch_cache_get_overlay_loose_edges(Mesh *me)
+{
+ MeshBatchCache *cache = mesh_batch_cache_get(me);
+
+ if (cache->overlay_loose_edges == NULL) {
+ mesh_batch_cache_create_overlay_batches(me);
+ }
+
+ return cache->overlay_loose_edges;
+}
+
+Gwn_Batch *DRW_mesh_batch_cache_get_overlay_loose_verts(Mesh *me)
+{
+ MeshBatchCache *cache = mesh_batch_cache_get(me);
+
+ if (cache->overlay_loose_verts == NULL) {
+ mesh_batch_cache_create_overlay_batches(me);
+ }
+
+ return cache->overlay_loose_verts;
+}
+
+Gwn_Batch *DRW_mesh_batch_cache_get_overlay_triangles_nor(Mesh *me)
+{
+ MeshBatchCache *cache = mesh_batch_cache_get(me);
+
+ if (cache->overlay_triangles_nor == NULL) {
+ mesh_batch_cache_create_overlay_batches(me);
+ }
+
+ return cache->overlay_triangles_nor;
+}
+
+Gwn_Batch *DRW_mesh_batch_cache_get_overlay_loose_edges_nor(Mesh *me)
+{
+ MeshBatchCache *cache = mesh_batch_cache_get(me);
+
+ if (cache->overlay_loose_edges_nor == NULL) {
+ mesh_batch_cache_create_overlay_batches(me);
+ }
+
+ return cache->overlay_loose_edges_nor;
+}
+
+Gwn_Batch *DRW_mesh_batch_cache_get_overlay_facedots(Mesh *me)
+{
+ MeshBatchCache *cache = mesh_batch_cache_get(me);
+
+ if (cache->overlay_facedots == NULL) {
+ MeshRenderData *rdata = mesh_render_data_create(me, MR_DATATYPE_VERT | MR_DATATYPE_LOOP | MR_DATATYPE_POLY);
+
+ cache->overlay_facedots = GWN_batch_create(
+ GWN_PRIM_POINTS, mesh_batch_cache_get_facedot_pos_with_normals_and_flag(rdata, cache), NULL);
+
+ mesh_render_data_free(rdata);
+ }
+
+ return cache->overlay_facedots;
+}
+
+Gwn_Batch *DRW_mesh_batch_cache_get_facedots_with_select_id(Mesh *me, uint select_id_offset)
+{
+ MeshBatchCache *cache = mesh_batch_cache_get(me);
+
+ if (cache->facedot_with_select_id_offset != select_id_offset) {
+ cache->facedot_with_select_id_offset = select_id_offset;
+		GWN_BATCH_DISCARD_SAFE(cache->facedot_with_select_id);
+ }
+
+ if (cache->facedot_with_select_id == NULL) {
+ MeshRenderData *rdata = mesh_render_data_create(me, MR_DATATYPE_VERT | MR_DATATYPE_LOOP | MR_DATATYPE_POLY);
+
+		/* We only want the 'pos', not the normals or flag.
+		 * Reuse the facedot VBO since it is almost certainly already created. */
+ cache->facedot_with_select_id = GWN_batch_create(
+ GWN_PRIM_POINTS, mesh_batch_cache_get_facedot_pos_with_normals_and_flag(rdata, cache), NULL);
+
+ GWN_batch_vertbuf_add_ex(
+ cache->facedot_with_select_id,
+ mesh_create_facedot_select_id(rdata, select_id_offset), true);
+
+ mesh_render_data_free(rdata);
+ }
+
+ return cache->facedot_with_select_id;
+}
+
+Gwn_Batch *DRW_mesh_batch_cache_get_edges_with_select_id(Mesh *me, uint select_id_offset)
+{
+ MeshBatchCache *cache = mesh_batch_cache_get(me);
+
+ if (cache->edges_with_select_id_offset != select_id_offset) {
+ cache->edges_with_select_id_offset = select_id_offset;
+ GWN_BATCH_DISCARD_SAFE(cache->edges_with_select_id);
+ }
+
+ if (cache->edges_with_select_id == NULL) {
+ MeshRenderData *rdata = mesh_render_data_create(me, MR_DATATYPE_VERT | MR_DATATYPE_EDGE);
+
+ cache->edges_with_select_id = GWN_batch_create(
+ GWN_PRIM_LINES, mesh_batch_cache_get_edges_visible(rdata, cache), NULL);
+
+ GWN_batch_vertbuf_add_ex(
+ cache->edges_with_select_id,
+ mesh_create_edges_select_id(rdata, select_id_offset), true);
+
+ mesh_render_data_free(rdata);
+ }
+
+ return cache->edges_with_select_id;
+}
+
+Gwn_Batch *DRW_mesh_batch_cache_get_verts_with_select_id(Mesh *me, uint select_id_offset)
+{
+ MeshBatchCache *cache = mesh_batch_cache_get(me);
+
+ if (cache->verts_with_select_id_offset != select_id_offset) {
+ cache->verts_with_select_id_offset = select_id_offset;
+ GWN_BATCH_DISCARD_SAFE(cache->verts_with_select_id);
+ }
+
+ if (cache->verts_with_select_id == NULL) {
+ MeshRenderData *rdata = mesh_render_data_create(me, MR_DATATYPE_VERT);
+
+ cache->verts_with_select_id = GWN_batch_create(
+ GWN_PRIM_POINTS, mesh_batch_cache_get_verts_visible(rdata, cache), NULL);
+
+ GWN_batch_vertbuf_add_ex(
+ cache->verts_with_select_id,
+ mesh_create_verts_select_id(rdata, select_id_offset), true);
+
+ mesh_render_data_free(rdata);
+ }
+
+ return cache->verts_with_select_id;
+}
+
+Gwn_Batch **DRW_mesh_batch_cache_get_surface_shaded(
+ Mesh *me, struct GPUMaterial **gpumat_array, uint gpumat_array_len)
+{
+ MeshBatchCache *cache = mesh_batch_cache_get(me);
+
+ if (cache->shaded_triangles == NULL) {
+		/* create batch from Mesh */
+ const int datatype =
+ MR_DATATYPE_VERT | MR_DATATYPE_LOOP | MR_DATATYPE_LOOPTRI |
+ MR_DATATYPE_POLY | MR_DATATYPE_SHADING;
+ MeshRenderData *rdata = mesh_render_data_create_ex(me, datatype, gpumat_array, gpumat_array_len);
+
+ const int mat_len = mesh_render_data_mat_len_get(rdata);
+
+ cache->shaded_triangles = MEM_callocN(sizeof(*cache->shaded_triangles) * mat_len, __func__);
+
+ Gwn_IndexBuf **el = mesh_batch_cache_get_triangles_in_order_split_by_material(rdata, cache);
+
+ Gwn_VertBuf *vbo = mesh_batch_cache_get_tri_pos_and_normals(rdata, cache);
+ for (int i = 0; i < mat_len; ++i) {
+ cache->shaded_triangles[i] = GWN_batch_create(
+ GWN_PRIM_TRIS, vbo, el[i]);
+ Gwn_VertBuf *vbo_shading = mesh_batch_cache_get_tri_shading_data(rdata, cache);
+ if (vbo_shading) {
+ GWN_batch_vertbuf_add(cache->shaded_triangles[i], vbo_shading);
+ }
+ }
+
+
+ mesh_render_data_free(rdata);
+ }
+
+ return cache->shaded_triangles;
+}
+
+Gwn_Batch **DRW_mesh_batch_cache_get_surface_texpaint(Mesh *me)
+{
+ MeshBatchCache *cache = mesh_batch_cache_get(me);
+
+ if (cache->texpaint_triangles == NULL) {
+		/* create batch from Mesh */
+ const int datatype =
+ MR_DATATYPE_VERT | MR_DATATYPE_LOOP | MR_DATATYPE_POLY | MR_DATATYPE_LOOPTRI | MR_DATATYPE_LOOPUV;
+ MeshRenderData *rdata = mesh_render_data_create(me, datatype);
+
+ const int mat_len = mesh_render_data_mat_len_get(rdata);
+
+ cache->texpaint_triangles = MEM_callocN(sizeof(*cache->texpaint_triangles) * mat_len, __func__);
+
+ Gwn_IndexBuf **el = mesh_batch_cache_get_triangles_in_order_split_by_material(rdata, cache);
+
+ Gwn_VertBuf *vbo = mesh_batch_cache_get_tri_pos_and_normals(rdata, cache);
+ for (int i = 0; i < mat_len; ++i) {
+ cache->texpaint_triangles[i] = GWN_batch_create(
+ GWN_PRIM_TRIS, vbo, el[i]);
+ Gwn_VertBuf *vbo_uv = mesh_batch_cache_get_tri_uv_active(rdata, cache);
+ if (vbo_uv) {
+ GWN_batch_vertbuf_add(cache->texpaint_triangles[i], vbo_uv);
+ }
+ }
+ mesh_render_data_free(rdata);
+ }
+
+ return cache->texpaint_triangles;
+}
+
+Gwn_Batch *DRW_mesh_batch_cache_get_surface_texpaint_single(Mesh *me)
+{
+ MeshBatchCache *cache = mesh_batch_cache_get(me);
+
+ if (cache->texpaint_triangles_single == NULL) {
+		/* create batch from Mesh */
+ const int datatype =
+ MR_DATATYPE_VERT | MR_DATATYPE_LOOP | MR_DATATYPE_POLY | MR_DATATYPE_LOOPTRI | MR_DATATYPE_LOOPUV;
+ MeshRenderData *rdata = mesh_render_data_create(me, datatype);
+
+ Gwn_VertBuf *vbo = mesh_batch_cache_get_tri_pos_and_normals(rdata, cache);
+
+ cache->texpaint_triangles_single = GWN_batch_create(
+ GWN_PRIM_TRIS, vbo, NULL);
+ Gwn_VertBuf *vbo_uv = mesh_batch_cache_get_tri_uv_active(rdata, cache);
+ if (vbo_uv) {
+ GWN_batch_vertbuf_add(cache->texpaint_triangles_single, vbo_uv);
+ }
+ mesh_render_data_free(rdata);
+ }
+ return cache->texpaint_triangles_single;
+}
+
+Gwn_Batch *DRW_mesh_batch_cache_get_weight_overlay_edges(Mesh *me, bool use_wire, bool use_sel)
+{
+ MeshBatchCache *cache = mesh_batch_cache_get(me);
+
+ if (cache->overlay_paint_edges == NULL) {
+ /* create batch from Mesh */
+ const int datatype = MR_DATATYPE_VERT | MR_DATATYPE_EDGE | MR_DATATYPE_POLY | MR_DATATYPE_LOOP;
+ MeshRenderData *rdata = mesh_render_data_create(me, datatype);
+
+ cache->overlay_paint_edges = GWN_batch_create_ex(
+ GWN_PRIM_LINES, mesh_create_edge_pos_with_sel(rdata, use_wire, use_sel), NULL, GWN_BATCH_OWNS_VBO);
+
+ mesh_render_data_free(rdata);
+ }
+
+ return cache->overlay_paint_edges;
+}
+
+Gwn_Batch *DRW_mesh_batch_cache_get_weight_overlay_faces(Mesh *me)
+{
+ MeshBatchCache *cache = mesh_batch_cache_get(me);
+
+ if (cache->overlay_weight_faces == NULL) {
+ /* create batch from Mesh */
+ const int datatype = MR_DATATYPE_VERT | MR_DATATYPE_POLY | MR_DATATYPE_LOOP | MR_DATATYPE_LOOPTRI;
+ MeshRenderData *rdata = mesh_render_data_create(me, datatype);
+
+ cache->overlay_weight_faces = GWN_batch_create_ex(
+ GWN_PRIM_TRIS, mesh_batch_cache_get_vert_pos_and_nor_in_order(rdata, cache),
+ mesh_create_tri_overlay_weight_faces(rdata), GWN_BATCH_OWNS_INDEX);
+
+ mesh_render_data_free(rdata);
+ }
+
+ return cache->overlay_weight_faces;
+}
+
+Gwn_Batch *DRW_mesh_batch_cache_get_weight_overlay_verts(Mesh *me)
+{
+ MeshBatchCache *cache = mesh_batch_cache_get(me);
+
+ if (cache->overlay_weight_verts == NULL) {
+ /* create batch from Mesh */
+ MeshRenderData *rdata = mesh_render_data_create(me, MR_DATATYPE_VERT);
+
+ cache->overlay_weight_verts = GWN_batch_create(
+ GWN_PRIM_POINTS, mesh_batch_cache_get_vert_pos_and_nor_in_order(rdata, cache), NULL);
+
+ GWN_batch_vertbuf_add_ex(
+ cache->overlay_weight_verts,
+ mesh_create_vert_pos_with_overlay_data(rdata), true);
+ mesh_render_data_free(rdata);
+ }
+
+ return cache->overlay_weight_verts;
+}
+
+/**
+ * Needed when we draw with shaded data.
+ */
+void DRW_mesh_cache_sculpt_coords_ensure(Mesh *me)
+{
+ if (me->batch_cache) {
+ MeshBatchCache *cache = mesh_batch_cache_get(me);
+ if (cache && cache->pos_with_normals && cache->is_sculpt_points_tag) {
+			/* XXX Force update of all the batches that contain the pos_with_normals buffer.
+ * TODO(fclem): Ideally, Gawain should provide a way to update a buffer without destroying it. */
+ mesh_batch_cache_clear_selective(me, cache->pos_with_normals);
+ GWN_VERTBUF_DISCARD_SAFE(cache->pos_with_normals);
+ }
+ cache->is_sculpt_points_tag = false;
+ }
+}
+
+/** \} */
+
+#undef MESH_RENDER_FUNCTION
diff --git a/source/blender/draw/intern/draw_cache_impl_metaball.c b/source/blender/draw/intern/draw_cache_impl_metaball.c
new file mode 100644
index 00000000000..f01e7b929f8
--- /dev/null
+++ b/source/blender/draw/intern/draw_cache_impl_metaball.c
@@ -0,0 +1,144 @@
+/*
+ * ***** BEGIN GPL LICENSE BLOCK *****
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * The Original Code is Copyright (C) 2017 by Blender Foundation.
+ * All rights reserved.
+ *
+ * ***** END GPL LICENSE BLOCK *****
+ */
+
+/** \file draw_cache_impl_metaball.c
+ * \ingroup draw
+ *
+ * \brief MetaBall API for render engines
+ */
+
+#include "MEM_guardedalloc.h"
+
+#include "BLI_utildefines.h"
+
+#include "DNA_meta_types.h"
+#include "DNA_object_types.h"
+
+#include "BKE_curve.h"
+#include "BKE_mball.h"
+
+#include "GPU_batch.h"
+
+#include "draw_cache_impl.h" /* own include */
+
+
+static void metaball_batch_cache_clear(MetaBall *mb);
+
+/* ---------------------------------------------------------------------- */
+/* MetaBall Gwn_Batch Cache */
+
+typedef struct MetaBallBatchCache {
+ Gwn_Batch *batch;
+
+ /* settings to determine if cache is invalid */
+ bool is_dirty;
+} MetaBallBatchCache;
+
+/* Gwn_Batch cache management. */
+
+static bool metaball_batch_cache_valid(MetaBall *mb)
+{
+ MetaBallBatchCache *cache = mb->batch_cache;
+
+ if (cache == NULL) {
+ return false;
+ }
+
+ return cache->is_dirty == false;
+}
+
+static void metaball_batch_cache_init(MetaBall *mb)
+{
+ MetaBallBatchCache *cache = mb->batch_cache;
+
+ if (!cache) {
+ cache = mb->batch_cache = MEM_mallocN(sizeof(*cache), __func__);
+ }
+ cache->batch = NULL;
+ cache->is_dirty = false;
+}
+
+static MetaBallBatchCache *metaball_batch_cache_get(MetaBall *mb)
+{
+ if (!metaball_batch_cache_valid(mb)) {
+ metaball_batch_cache_clear(mb);
+ metaball_batch_cache_init(mb);
+ }
+ return mb->batch_cache;
+}
+
+void DRW_mball_batch_cache_dirty(MetaBall *mb, int mode)
+{
+ MetaBallBatchCache *cache = mb->batch_cache;
+ if (cache == NULL) {
+ return;
+ }
+ switch (mode) {
+ case BKE_MBALL_BATCH_DIRTY_ALL:
+ cache->is_dirty = true;
+ break;
+ default:
+ BLI_assert(0);
+ }
+}
+
+static void metaball_batch_cache_clear(MetaBall *mb)
+{
+ MetaBallBatchCache *cache = mb->batch_cache;
+ if (!cache) {
+ return;
+ }
+
+ GWN_BATCH_DISCARD_SAFE(cache->batch);
+}
+
+void DRW_mball_batch_cache_free(MetaBall *mb)
+{
+ metaball_batch_cache_clear(mb);
+ MEM_SAFE_FREE(mb->batch_cache);
+}
+
+/* -------------------------------------------------------------------- */
+
+/** \name Public Object/MetaBall API
+ * \{ */
+
+Gwn_Batch *DRW_metaball_batch_cache_get_triangles_with_normals(Object *ob)
+{
+ if (!BKE_mball_is_basis(ob))
+ return NULL;
+
+ MetaBall *mb = ob->data;
+ MetaBallBatchCache *cache = metaball_batch_cache_get(mb);
+
+ if (cache->batch == NULL) {
+ ListBase *lb = &ob->curve_cache->disp;
+ cache->batch = GWN_batch_create_ex(
+ GWN_PRIM_TRIS,
+ DRW_displist_vertbuf_calc_pos_with_normals(lb),
+ DRW_displist_indexbuf_calc_triangles_in_order(lb),
+ GWN_BATCH_OWNS_VBO | GWN_BATCH_OWNS_INDEX);
+ }
+
+ return cache->batch;
+}
diff --git a/source/blender/draw/intern/draw_cache_impl_particles.c b/source/blender/draw/intern/draw_cache_impl_particles.c
new file mode 100644
index 00000000000..55d528e49b0
--- /dev/null
+++ b/source/blender/draw/intern/draw_cache_impl_particles.c
@@ -0,0 +1,544 @@
+/*
+ * ***** BEGIN GPL LICENSE BLOCK *****
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * The Original Code is Copyright (C) 2017 by Blender Foundation.
+ * All rights reserved.
+ *
+ * Contributor(s): Blender Foundation, Mike Erwin, Dalai Felinto
+ *
+ * ***** END GPL LICENSE BLOCK *****
+ */
+
+/** \file draw_cache_impl_particles.c
+ * \ingroup draw
+ *
+ * \brief Particle API for render engines
+ */
+
+#include "DRW_render.h"
+
+#include "MEM_guardedalloc.h"
+
+#include "BLI_utildefines.h"
+#include "BLI_math_vector.h"
+#include "BLI_string.h"
+#include "BLI_ghash.h"
+
+#include "DNA_modifier_types.h"
+#include "DNA_particle_types.h"
+
+#include "BKE_particle.h"
+#include "BKE_DerivedMesh.h"
+
+#include "GPU_batch.h"
+
+#include "draw_cache_impl.h" /* own include */
+
+static void particle_batch_cache_clear(ParticleSystem *psys);
+
+/* ---------------------------------------------------------------------- */
+/* Particle Gwn_Batch Cache */
+
+typedef struct ParticleBatchCache {
+ Gwn_VertBuf *pos;
+ Gwn_IndexBuf *segments;
+
+ Gwn_Batch *hairs;
+
+ int segment_count;
+ int point_count;
+
+ /* settings to determine if cache is invalid */
+ bool is_dirty;
+} ParticleBatchCache;
+
+/* Gwn_Batch cache management. */
+
+static bool particle_batch_cache_valid(ParticleSystem *psys)
+{
+ ParticleBatchCache *cache = psys->batch_cache;
+
+ if (cache == NULL) {
+ return false;
+ }
+
+	return (cache->is_dirty == false);
+}
+
+static void particle_batch_cache_init(ParticleSystem *psys)
+{
+ ParticleBatchCache *cache = psys->batch_cache;
+
+ if (!cache) {
+ cache = psys->batch_cache = MEM_callocN(sizeof(*cache), __func__);
+ }
+ else {
+ memset(cache, 0, sizeof(*cache));
+ }
+
+ cache->is_dirty = false;
+}
+
+static ParticleBatchCache *particle_batch_cache_get(ParticleSystem *psys)
+{
+ if (!particle_batch_cache_valid(psys)) {
+ particle_batch_cache_clear(psys);
+ particle_batch_cache_init(psys);
+ }
+ return psys->batch_cache;
+}
+
+void DRW_particle_batch_cache_dirty(ParticleSystem *psys, int mode)
+{
+ ParticleBatchCache *cache = psys->batch_cache;
+ if (cache == NULL) {
+ return;
+ }
+ switch (mode) {
+ case BKE_PARTICLE_BATCH_DIRTY_ALL:
+ cache->is_dirty = true;
+ break;
+ default:
+ BLI_assert(0);
+ }
+}
+
+static void particle_batch_cache_clear(ParticleSystem *psys)
+{
+ ParticleBatchCache *cache = psys->batch_cache;
+ if (!cache) {
+ return;
+ }
+
+ GWN_BATCH_DISCARD_SAFE(cache->hairs);
+
+ GWN_VERTBUF_DISCARD_SAFE(cache->pos);
+ GWN_INDEXBUF_DISCARD_SAFE(cache->segments);
+}
+
+void DRW_particle_batch_cache_free(ParticleSystem *psys)
+{
+ particle_batch_cache_clear(psys);
+ MEM_SAFE_FREE(psys->batch_cache);
+}
+
+static void ensure_seg_pt_count(ParticleSystem *psys, ParticleBatchCache *cache)
+{
+ if (cache->pos == NULL || cache->segments == NULL) {
+ cache->segment_count = 0;
+ cache->point_count = 0;
+
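+		/* Parent hairs are only counted when there are no children or when PART_DRAW_PARENT is set. */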
+ if (psys->pathcache && (!psys->childcache || (psys->part->draw & PART_DRAW_PARENT))) {
+ for (int i = 0; i < psys->totpart; i++) {
+ ParticleCacheKey *path = psys->pathcache[i];
+
+ if (path->segments > 0) {
+ cache->segment_count += path->segments;
+ cache->point_count += path->segments + 1;
+ }
+ }
+ }
+
+ if (psys->childcache) {
+ int child_count = psys->totchild * psys->part->disp / 100;
+
+ for (int i = 0; i < child_count; i++) {
+ ParticleCacheKey *path = psys->childcache[i];
+
+ if (path->segments > 0) {
+ cache->segment_count += path->segments;
+ cache->point_count += path->segments + 1;
+ }
+ }
+ }
+ }
+}
+
+/* Gwn_Batch cache usage. */
+static void particle_batch_cache_ensure_pos_and_seg(ParticleSystem *psys, ModifierData *md, ParticleBatchCache *cache)
+{
+ if (cache->pos != NULL && cache->segments != NULL) {
+ return;
+ }
+
+ int curr_point = 0;
+ ParticleSystemModifierData *psmd = (ParticleSystemModifierData *)md;
+
+ GWN_VERTBUF_DISCARD_SAFE(cache->pos);
+ GWN_INDEXBUF_DISCARD_SAFE(cache->segments);
+
+ static Gwn_VertFormat format = { 0 };
+ static struct { uint pos, tan, ind; } attr_id;
+ unsigned int *uv_id = NULL;
+ int uv_layers = 0;
+ MTFace **mtfaces = NULL;
+ float (**parent_uvs)[2] = NULL;
+ bool simple = psys->part->childtype == PART_CHILD_PARTICLES;
+
+ if (psmd) {
+ if (CustomData_has_layer(&psmd->dm_final->loopData, CD_MLOOPUV)) {
+ uv_layers = CustomData_number_of_layers(&psmd->dm_final->loopData, CD_MLOOPUV);
+ }
+ }
+
+ GWN_vertformat_clear(&format);
+
+ /* initialize vertex format */
+ attr_id.pos = GWN_vertformat_attr_add(&format, "pos", GWN_COMP_F32, 3, GWN_FETCH_FLOAT);
+ attr_id.tan = GWN_vertformat_attr_add(&format, "nor", GWN_COMP_F32, 3, GWN_FETCH_FLOAT);
+ attr_id.ind = GWN_vertformat_attr_add(&format, "ind", GWN_COMP_I32, 1, GWN_FETCH_INT);
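+	/* "ind" holds the per-vertex particle index (child hairs are offset by totpart, see below). */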
+
+ if (psmd) {
+ uv_id = MEM_mallocN(sizeof(*uv_id) * uv_layers, "UV attrib format");
+
+ for (int i = 0; i < uv_layers; i++) {
+ const char *name = CustomData_get_layer_name(&psmd->dm_final->loopData, CD_MLOOPUV, i);
+ char uuid[32];
+
+ BLI_snprintf(uuid, sizeof(uuid), "u%u", BLI_ghashutil_strhash_p(name));
+ uv_id[i] = GWN_vertformat_attr_add(&format, uuid, GWN_COMP_F32, 2, GWN_FETCH_FLOAT);
+ }
+ }
+
+ cache->pos = GWN_vertbuf_create_with_format(&format);
+ GWN_vertbuf_data_alloc(cache->pos, cache->point_count);
+
+ Gwn_IndexBufBuilder elb;
+ GWN_indexbuf_init(&elb, GWN_PRIM_LINES, cache->segment_count, cache->point_count);
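+	/* One line primitive per hair segment; adjacent segments of a path share their common point. */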
+
+ if (uv_layers) {
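+		/* UVs are interpolated from tessellated faces below, so make sure tessface data exists. */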
+ DM_ensure_tessface(psmd->dm_final);
+
+ mtfaces = MEM_mallocN(sizeof(*mtfaces) * uv_layers, "Faces UV layers");
+
+ for (int i = 0; i < uv_layers; i++) {
+ mtfaces[i] = (MTFace *)CustomData_get_layer_n(&psmd->dm_final->faceData, CD_MTFACE, i);
+ }
+ }
+
+ if (psys->pathcache && (!psys->childcache || (psys->part->draw & PART_DRAW_PARENT))) {
+ if (simple) {
+ parent_uvs = MEM_callocN(sizeof(*parent_uvs) * psys->totpart, "Parent particle UVs");
+ }
+
+ for (int i = 0; i < psys->totpart; i++) {
+ ParticleCacheKey *path = psys->pathcache[i];
+
+ if (path->segments > 0) {
+ float tangent[3];
+ int from = psmd ? psmd->psys->part->from : 0;
+ float (*uv)[2] = NULL;
+
+ if (psmd) {
+ uv = MEM_callocN(sizeof(*uv) * uv_layers, "Particle UVs");
+
+ if (simple) {
+ parent_uvs[i] = uv;
+ }
+ }
+
+ if (ELEM(from, PART_FROM_FACE, PART_FROM_VOLUME)) {
+ ParticleData *particle = &psys->particles[i];
+ int num = particle->num_dmcache;
+
+ if (num == DMCACHE_NOTFOUND) {
+ if (particle->num < psmd->dm_final->getNumTessFaces(psmd->dm_final)) {
+ num = particle->num;
+ }
+ }
+
+ if (num != DMCACHE_NOTFOUND) {
+ MFace *mface = psmd->dm_final->getTessFaceData(psmd->dm_final, num, CD_MFACE);
+
+ for (int j = 0; j < uv_layers; j++) {
+ psys_interpolate_uvs(mtfaces[j] + num, mface->v4, particle->fuv, uv[j]);
+ }
+ }
+ }
+
+ for (int j = 0; j < path->segments; j++) {
+ if (j == 0) {
+ sub_v3_v3v3(tangent, path[j + 1].co, path[j].co);
+ }
+ else {
+ sub_v3_v3v3(tangent, path[j + 1].co, path[j - 1].co);
+ }
+
+ GWN_vertbuf_attr_set(cache->pos, attr_id.pos, curr_point, path[j].co);
+ GWN_vertbuf_attr_set(cache->pos, attr_id.tan, curr_point, tangent);
+ GWN_vertbuf_attr_set(cache->pos, attr_id.ind, curr_point, &i);
+
+ if (psmd) {
+ for (int k = 0; k < uv_layers; k++) {
+ GWN_vertbuf_attr_set(cache->pos, uv_id[k], curr_point, uv[k]);
+ }
+ }
+
+ GWN_indexbuf_add_line_verts(&elb, curr_point, curr_point + 1);
+
+ curr_point++;
+ }
+
+ sub_v3_v3v3(tangent, path[path->segments].co, path[path->segments - 1].co);
+
+ GWN_vertbuf_attr_set(cache->pos, attr_id.pos, curr_point, path[path->segments].co);
+ GWN_vertbuf_attr_set(cache->pos, attr_id.tan, curr_point, tangent);
+ GWN_vertbuf_attr_set(cache->pos, attr_id.ind, curr_point, &i);
+
+ if (psmd) {
+ for (int k = 0; k < uv_layers; k++) {
+ GWN_vertbuf_attr_set(cache->pos, uv_id[k], curr_point, uv[k]);
+ }
+
+ if (!simple) {
+ MEM_freeN(uv);
+ }
+ }
+
+ curr_point++;
+ }
+ }
+ }
+
+ if (psys->childcache) {
+ int child_count = psys->totchild * psys->part->disp / 100;
+
+ if (simple && !parent_uvs) {
+ parent_uvs = MEM_callocN(sizeof(*parent_uvs) * psys->totpart, "Parent particle UVs");
+ }
+
+ for (int i = 0, x = psys->totpart; i < child_count; i++, x++) {
+ ParticleCacheKey *path = psys->childcache[i];
+ float tangent[3];
+
+ if (path->segments > 0) {
+ int from = psmd ? psmd->psys->part->from : 0;
+ float (*uv)[2] = NULL;
+
+ if (!simple) {
+ if (psmd) {
+ uv = MEM_callocN(sizeof(*uv) * uv_layers, "Particle UVs");
+ }
+
+ if (ELEM(from, PART_FROM_FACE, PART_FROM_VOLUME)) {
+ ChildParticle *particle = &psys->child[i];
+ int num = particle->num;
+
+ if (num != DMCACHE_NOTFOUND) {
+ MFace *mface = psmd->dm_final->getTessFaceData(psmd->dm_final, num, CD_MFACE);
+
+ for (int j = 0; j < uv_layers; j++) {
+ psys_interpolate_uvs(mtfaces[j] + num, mface->v4, particle->fuv, uv[j]);
+ }
+ }
+ }
+ }
+ else if (!parent_uvs[psys->child[i].parent]) {
+ if (psmd) {
+ parent_uvs[psys->child[i].parent] = MEM_callocN(sizeof(*uv) * uv_layers, "Particle UVs");
+ }
+
+ if (ELEM(from, PART_FROM_FACE, PART_FROM_VOLUME)) {
+ ParticleData *particle = &psys->particles[psys->child[i].parent];
+ int num = particle->num_dmcache;
+
+ if (num == DMCACHE_NOTFOUND) {
+ if (particle->num < psmd->dm_final->getNumTessFaces(psmd->dm_final)) {
+ num = particle->num;
+ }
+ }
+
+ if (num != DMCACHE_NOTFOUND) {
+ MFace *mface = psmd->dm_final->getTessFaceData(psmd->dm_final, num, CD_MFACE);
+
+ for (int j = 0; j < uv_layers; j++) {
+ psys_interpolate_uvs(mtfaces[j] + num, mface->v4, particle->fuv, parent_uvs[psys->child[i].parent][j]);
+ }
+ }
+ }
+ }
+
+ for (int j = 0; j < path->segments; j++) {
+ if (j == 0) {
+ sub_v3_v3v3(tangent, path[j + 1].co, path[j].co);
+ }
+ else {
+ sub_v3_v3v3(tangent, path[j + 1].co, path[j - 1].co);
+ }
+
+ GWN_vertbuf_attr_set(cache->pos, attr_id.pos, curr_point, path[j].co);
+ GWN_vertbuf_attr_set(cache->pos, attr_id.tan, curr_point, tangent);
+ GWN_vertbuf_attr_set(cache->pos, attr_id.ind, curr_point, &x);
+
+ if (psmd) {
+ for (int k = 0; k < uv_layers; k++) {
+ GWN_vertbuf_attr_set(cache->pos, uv_id[k], curr_point,
+ simple ? parent_uvs[psys->child[i].parent][k] : uv[k]);
+ }
+ }
+
+ GWN_indexbuf_add_line_verts(&elb, curr_point, curr_point + 1);
+
+ curr_point++;
+ }
+
+ sub_v3_v3v3(tangent, path[path->segments].co, path[path->segments - 1].co);
+
+ GWN_vertbuf_attr_set(cache->pos, attr_id.pos, curr_point, path[path->segments].co);
+ GWN_vertbuf_attr_set(cache->pos, attr_id.tan, curr_point, tangent);
+ GWN_vertbuf_attr_set(cache->pos, attr_id.ind, curr_point, &x);
+
+ if (psmd) {
+ for (int k = 0; k < uv_layers; k++) {
+ GWN_vertbuf_attr_set(cache->pos, uv_id[k], curr_point,
+ simple ? parent_uvs[psys->child[i].parent][k] : uv[k]);
+ }
+
+ if (!simple) {
+ MEM_freeN(uv);
+ }
+ }
+
+ curr_point++;
+ }
+ }
+ }
+
+ if (parent_uvs) {
+ for (int i = 0; i < psys->totpart; i++) {
+ MEM_SAFE_FREE(parent_uvs[i]);
+ }
+
+ MEM_freeN(parent_uvs);
+ }
+
+ if (uv_layers) {
+ MEM_freeN(mtfaces);
+ }
+
+ if (psmd) {
+ MEM_freeN(uv_id);
+ }
+
+ cache->segments = GWN_indexbuf_build(&elb);
+}
+
+static void particle_batch_cache_ensure_pos(Object *object, ParticleSystem *psys, ParticleBatchCache *cache)
+{
+ if (cache->pos != NULL) {
+ return;
+ }
+
+ static Gwn_VertFormat format = { 0 };
+ static unsigned pos_id, rot_id, val_id;
+ int i, curr_point;
+ ParticleData *pa;
+ ParticleKey state;
+ ParticleSimulationData sim = {NULL};
+ const DRWContextState *draw_ctx = DRW_context_state_get();
+
+ sim.eval_ctx = &draw_ctx->eval_ctx;
+ sim.scene = draw_ctx->scene;
+ sim.ob = object;
+ sim.psys = psys;
+ sim.psmd = psys_get_modifier(object, psys);
+
+ if (psys->part->phystype == PART_PHYS_KEYED) {
+ if (psys->flag & PSYS_KEYED) {
+ psys_count_keyed_targets(&sim);
+ if (psys->totkeyed == 0)
+ return;
+ }
+ }
+
+ GWN_VERTBUF_DISCARD_SAFE(cache->pos);
+ GWN_INDEXBUF_DISCARD_SAFE(cache->segments);
+
+ if (format.attrib_ct == 0) {
+ /* initialize vertex format */
+ pos_id = GWN_vertformat_attr_add(&format, "pos", GWN_COMP_F32, 3, GWN_FETCH_FLOAT);
+ rot_id = GWN_vertformat_attr_add(&format, "rot", GWN_COMP_F32, 4, GWN_FETCH_FLOAT);
+ val_id = GWN_vertformat_attr_add(&format, "val", GWN_COMP_F32, 1, GWN_FETCH_FLOAT);
+ }
+
+ cache->pos = GWN_vertbuf_create_with_format(&format);
+ GWN_vertbuf_data_alloc(cache->pos, psys->totpart);
+
+ for (curr_point = 0, i = 0, pa = psys->particles; i < psys->totpart; i++, pa++) {
+ state.time = draw_ctx->eval_ctx.ctime;
+ if (!psys_get_particle_state(&sim, curr_point, &state, 0)) {
+ continue;
+ }
+
+ float val;
+
+ GWN_vertbuf_attr_set(cache->pos, pos_id, curr_point, pa->state.co);
+ GWN_vertbuf_attr_set(cache->pos, rot_id, curr_point, pa->state.rot);
+
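+		/* Display value: velocity or acceleration magnitude depending on draw_col, or -1 when unused. */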
+ switch (psys->part->draw_col) {
+ case PART_DRAW_COL_VEL:
+ val = len_v3(pa->state.vel) / psys->part->color_vec_max;
+ break;
+ case PART_DRAW_COL_ACC:
+ val = len_v3v3(pa->state.vel, pa->prev_state.vel) / ((pa->state.time - pa->prev_state.time) * psys->part->color_vec_max);
+ break;
+ default:
+ val = -1.0f;
+ break;
+ }
+
+ GWN_vertbuf_attr_set(cache->pos, val_id, curr_point, &val);
+
+ curr_point++;
+ }
+
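+	/* Some particles may have been skipped (no valid state at the current time); shrink to what was written. */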
+ if (curr_point != psys->totpart) {
+ GWN_vertbuf_data_resize(cache->pos, curr_point);
+ }
+}
+
+Gwn_Batch *DRW_particles_batch_cache_get_hair(ParticleSystem *psys, ModifierData *md)
+{
+ ParticleBatchCache *cache = particle_batch_cache_get(psys);
+
+ if (cache->hairs == NULL) {
+ ensure_seg_pt_count(psys, cache);
+ particle_batch_cache_ensure_pos_and_seg(psys, md, cache);
+ cache->hairs = GWN_batch_create(GWN_PRIM_LINES, cache->pos, cache->segments);
+ }
+
+ return cache->hairs;
+}
+
+Gwn_Batch *DRW_particles_batch_cache_get_dots(Object *object, ParticleSystem *psys)
+{
+ ParticleBatchCache *cache = particle_batch_cache_get(psys);
+
+ if (cache->hairs == NULL) {
+ particle_batch_cache_ensure_pos(object, psys, cache);
+ cache->hairs = GWN_batch_create(GWN_PRIM_POINTS, cache->pos, NULL);
+ }
+
+ return cache->hairs;
+}
diff --git a/source/blender/draw/intern/draw_common.c b/source/blender/draw/intern/draw_common.c
new file mode 100644
index 00000000000..8d23688959c
--- /dev/null
+++ b/source/blender/draw/intern/draw_common.c
@@ -0,0 +1,537 @@
+/*
+ * Copyright 2016, Blender Foundation.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * Contributor(s): Blender Institute
+ *
+ */
+
+/** \file blender/draw/intern/draw_common.c
+ * \ingroup draw
+ */
+
+#include "DRW_render.h"
+
+#include "GPU_shader.h"
+#include "GPU_texture.h"
+
+#include "UI_resources.h"
+
+#include "BKE_global.h"
+#include "BKE_colorband.h"
+
+#include "draw_common.h"
+
+#if 0
+#define UI_COLOR_RGB_FROM_U8(r, g, b, v4) \
+ ARRAY_SET_ITEMS(v4, (float)r / 255.0f, (float)g / 255.0f, (float)b / 255.0f, 1.0)
+#endif
+#define UI_COLOR_RGBA_FROM_U8(r, g, b, a, v4) \
+ ARRAY_SET_ITEMS(v4, (float)r / 255.0f, (float)g / 255.0f, (float)b / 255.0f, (float)a / 255.0f)
+
+/* Colors & Constant */
+GlobalsUboStorage ts;
+struct GPUUniformBuffer *globals_ubo = NULL;
+struct GPUTexture *globals_ramp = NULL;
+
+void DRW_globals_update(void)
+{
+ UI_GetThemeColor4fv(TH_WIRE, ts.colorWire);
+ UI_GetThemeColor4fv(TH_WIRE_EDIT, ts.colorWireEdit);
+ UI_GetThemeColor4fv(TH_ACTIVE, ts.colorActive);
+ UI_GetThemeColor4fv(TH_SELECT, ts.colorSelect);
+ UI_GetThemeColor4fv(TH_TRANSFORM, ts.colorTransform);
+ UI_GetThemeColor4fv(TH_GROUP_ACTIVE, ts.colorGroupActive);
+ UI_GetThemeColorShade4fv(TH_GROUP_ACTIVE, -25, ts.colorGroupSelect);
+ UI_GetThemeColor4fv(TH_GROUP, ts.colorGroup);
+ UI_COLOR_RGBA_FROM_U8(0x88, 0xFF, 0xFF, 155, ts.colorLibrarySelect);
+ UI_COLOR_RGBA_FROM_U8(0x55, 0xCC, 0xCC, 155, ts.colorLibrary);
+ UI_GetThemeColor4fv(TH_LAMP, ts.colorLamp);
+ UI_GetThemeColor4fv(TH_SPEAKER, ts.colorSpeaker);
+ UI_GetThemeColor4fv(TH_CAMERA, ts.colorCamera);
+ UI_GetThemeColor4fv(TH_EMPTY, ts.colorEmpty);
+ UI_GetThemeColor4fv(TH_VERTEX, ts.colorVertex);
+ UI_GetThemeColor4fv(TH_VERTEX_SELECT, ts.colorVertexSelect);
+ UI_GetThemeColor4fv(TH_EDITMESH_ACTIVE, ts.colorEditMeshActive);
+ UI_GetThemeColor4fv(TH_EDGE_SELECT, ts.colorEdgeSelect);
+ UI_GetThemeColor4fv(TH_EDGE_SEAM, ts.colorEdgeSeam);
+ UI_GetThemeColor4fv(TH_EDGE_SHARP, ts.colorEdgeSharp);
+ UI_GetThemeColor4fv(TH_EDGE_CREASE, ts.colorEdgeCrease);
+ UI_GetThemeColor4fv(TH_EDGE_BEVEL, ts.colorEdgeBWeight);
+ UI_GetThemeColor4fv(TH_EDGE_FACESEL, ts.colorEdgeFaceSelect);
+ UI_GetThemeColor4fv(TH_FACE, ts.colorFace);
+ UI_GetThemeColor4fv(TH_FACE_SELECT, ts.colorFaceSelect);
+ UI_GetThemeColor4fv(TH_NORMAL, ts.colorNormal);
+ UI_GetThemeColor4fv(TH_VNORMAL, ts.colorVNormal);
+ UI_GetThemeColor4fv(TH_LNORMAL, ts.colorLNormal);
+ UI_GetThemeColor4fv(TH_FACE_DOT, ts.colorFaceDot);
+ UI_GetThemeColor4fv(TH_BACK, ts.colorBackground);
+
+ /* Curve */
+ UI_GetThemeColor4fv(TH_HANDLE_FREE, ts.colorHandleFree);
+ UI_GetThemeColor4fv(TH_HANDLE_AUTO, ts.colorHandleAuto);
+ UI_GetThemeColor4fv(TH_HANDLE_VECT, ts.colorHandleVect);
+ UI_GetThemeColor4fv(TH_HANDLE_ALIGN, ts.colorHandleAlign);
+ UI_GetThemeColor4fv(TH_HANDLE_AUTOCLAMP, ts.colorHandleAutoclamp);
+ UI_GetThemeColor4fv(TH_HANDLE_SEL_FREE, ts.colorHandleSelFree);
+ UI_GetThemeColor4fv(TH_HANDLE_SEL_AUTO, ts.colorHandleSelAuto);
+ UI_GetThemeColor4fv(TH_HANDLE_SEL_VECT, ts.colorHandleSelVect);
+ UI_GetThemeColor4fv(TH_HANDLE_SEL_ALIGN, ts.colorHandleSelAlign);
+ UI_GetThemeColor4fv(TH_HANDLE_SEL_AUTOCLAMP, ts.colorHandleSelAutoclamp);
+ UI_GetThemeColor4fv(TH_NURB_ULINE, ts.colorNurbUline);
+ UI_GetThemeColor4fv(TH_NURB_SEL_ULINE, ts.colorNurbSelUline);
+ UI_GetThemeColor4fv(TH_ACTIVE_SPLINE, ts.colorActiveSpline);
+
+ /* Grid */
+ UI_GetThemeColorShade4fv(TH_GRID, 10, ts.colorGrid);
+	/* Emphasise division lines with a lighter shade (instead of darker) when the background is darker than the grid. */
+ UI_GetThemeColorShade4fv(
+ TH_GRID,
+ (ts.colorGrid[0] + ts.colorGrid[1] + ts.colorGrid[2] + 0.12f >
+ ts.colorBackground[0] + ts.colorBackground[1] + ts.colorBackground[2]) ?
+ 20 : -10, ts.colorGridEmphasise);
+ /* Grid Axis */
+ UI_GetThemeColorBlendShade4fv(TH_GRID, TH_AXIS_X, 0.5f, -10, ts.colorGridAxisX);
+ UI_GetThemeColorBlendShade4fv(TH_GRID, TH_AXIS_Y, 0.5f, -10, ts.colorGridAxisY);
+ UI_GetThemeColorBlendShade4fv(TH_GRID, TH_AXIS_Z, 0.5f, -10, ts.colorGridAxisZ);
+
+ UI_GetThemeColorShadeAlpha4fv(TH_TRANSFORM, 0, -80, ts.colorDeselect);
+ UI_GetThemeColorShadeAlpha4fv(TH_WIRE, 0, -30, ts.colorOutline);
+ UI_GetThemeColorShadeAlpha4fv(TH_LAMP, 0, 255, ts.colorLampNoAlpha);
+
+ ts.sizeLampCenter = (U.obcenter_dia + 1.5f) * U.pixelsize;
+ ts.sizeLampCircle = U.pixelsize * 9.0f;
+ ts.sizeLampCircleShadow = ts.sizeLampCircle + U.pixelsize * 3.0f;
+
+	/* M_SQRT2 to be at least the same size as the old square */
+ ts.sizeVertex = ceilf(UI_GetThemeValuef(TH_VERTEX_SIZE) * (float)M_SQRT2 / 2.0f);
+ ts.sizeFaceDot = ceilf(UI_GetThemeValuef(TH_FACEDOT_SIZE) * (float)M_SQRT2);
+ ts.sizeEdge = 1.0f / 2.0f; /* TODO Theme */
+ ts.sizeEdgeFix = 0.5f + 2.0f * (2.0f * (MAX2(ts.sizeVertex, ts.sizeEdge)) * (float)M_SQRT1_2);
+
+ if (globals_ubo == NULL) {
+ globals_ubo = DRW_uniformbuffer_create(sizeof(GlobalsUboStorage), &ts);
+ }
+
+ DRW_uniformbuffer_update(globals_ubo, &ts);
+
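+	/* Build a simple 3-stop blue -> green -> red color ramp texture (positions 0.0, 0.5, 1.0). */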
+ ColorBand ramp = {0};
+ float *colors;
+ int col_size;
+
+ ramp.tot = 3;
+ ramp.data[0].a = 1.0f;
+ ramp.data[0].b = 1.0f;
+ ramp.data[0].pos = 0.0f;
+ ramp.data[1].a = 1.0f;
+ ramp.data[1].g = 1.0f;
+ ramp.data[1].pos = 0.5f;
+ ramp.data[2].a = 1.0f;
+ ramp.data[2].r = 1.0f;
+ ramp.data[2].pos = 1.0f;
+
+ BKE_colorband_evaluate_table_rgba(&ramp, &colors, &col_size);
+
+ if (globals_ramp) {
+ GPU_texture_free(globals_ramp);
+ }
+ globals_ramp = GPU_texture_create_1D(col_size, colors, NULL);
+
+ MEM_freeN(colors);
+}
+
+/* ********************************* SHGROUP ************************************* */
+
+static struct {
+ struct Gwn_VertFormat *instance_screenspace;
+ struct Gwn_VertFormat *instance_color;
+ struct Gwn_VertFormat *instance_screen_aligned;
+ struct Gwn_VertFormat *instance_scaled;
+ struct Gwn_VertFormat *instance_sized;
+ struct Gwn_VertFormat *instance;
+ struct Gwn_VertFormat *instance_camera;
+ struct Gwn_VertFormat *instance_distance_lines;
+ struct Gwn_VertFormat *instance_spot;
+ struct Gwn_VertFormat *instance_bone_envelope_wire;
+ struct Gwn_VertFormat *instance_bone_envelope_solid;
+ struct Gwn_VertFormat *instance_mball_handles;
+} g_formats = {NULL};
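+/* These formats are created on first use by the DRW_shgroup_instance_format() calls below
+ * and released in DRW_globals_free(). */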
+
+void DRW_globals_free(void)
+{
+ struct Gwn_VertFormat **format = &g_formats.instance_screenspace;
+ for (int i = 0; i < sizeof(g_formats) / sizeof(void *); ++i, ++format) {
+ MEM_SAFE_FREE(*format);
+ }
+}
+
+DRWShadingGroup *shgroup_dynlines_uniform_color(DRWPass *pass, float color[4])
+{
+ GPUShader *sh = GPU_shader_get_builtin_shader(GPU_SHADER_3D_UNIFORM_COLOR);
+
+ DRWShadingGroup *grp = DRW_shgroup_line_batch_create(sh, pass);
+ DRW_shgroup_uniform_vec4(grp, "color", color, 1);
+
+ return grp;
+}
+
+DRWShadingGroup *shgroup_dynpoints_uniform_color(DRWPass *pass, float color[4], float *size)
+{
+ GPUShader *sh = GPU_shader_get_builtin_shader(GPU_SHADER_3D_POINT_UNIFORM_SIZE_UNIFORM_COLOR_AA);
+
+ DRWShadingGroup *grp = DRW_shgroup_point_batch_create(sh, pass);
+ DRW_shgroup_uniform_vec4(grp, "color", color, 1);
+ DRW_shgroup_uniform_float(grp, "size", size, 1);
+ DRW_shgroup_state_enable(grp, DRW_STATE_POINT);
+
+ return grp;
+}
+
+DRWShadingGroup *shgroup_groundlines_uniform_color(DRWPass *pass, float color[4])
+{
+ GPUShader *sh = GPU_shader_get_builtin_shader(GPU_SHADER_3D_GROUNDLINE);
+
+ DRWShadingGroup *grp = DRW_shgroup_point_batch_create(sh, pass);
+ DRW_shgroup_uniform_vec4(grp, "color", color, 1);
+
+ return grp;
+}
+
+DRWShadingGroup *shgroup_groundpoints_uniform_color(DRWPass *pass, float color[4])
+{
+ GPUShader *sh = GPU_shader_get_builtin_shader(GPU_SHADER_3D_GROUNDPOINT);
+
+ DRWShadingGroup *grp = DRW_shgroup_point_batch_create(sh, pass);
+ DRW_shgroup_uniform_vec4(grp, "color", color, 1);
+ DRW_shgroup_state_enable(grp, DRW_STATE_POINT);
+
+ return grp;
+}
+
+DRWShadingGroup *shgroup_instance_screenspace(DRWPass *pass, struct Gwn_Batch *geom, float *size)
+{
+ GPUShader *sh = GPU_shader_get_builtin_shader(GPU_SHADER_3D_SCREENSPACE_VARIYING_COLOR);
+
+ DRW_shgroup_instance_format(g_formats.instance_screenspace, {
+ {"world_pos", DRW_ATTRIB_FLOAT, 3},
+ {"color" , DRW_ATTRIB_FLOAT, 3}
+ });
+
+ DRWShadingGroup *grp = DRW_shgroup_instance_create(sh, pass, geom, g_formats.instance_screenspace);
+ DRW_shgroup_uniform_float(grp, "size", size, 1);
+ DRW_shgroup_uniform_float(grp, "pixel_size", DRW_viewport_pixelsize_get(), 1);
+ DRW_shgroup_uniform_vec3(grp, "screen_vecs[0]", DRW_viewport_screenvecs_get(), 2);
+ DRW_shgroup_state_enable(grp, DRW_STATE_STIPPLE_3);
+
+ return grp;
+}
+
+DRWShadingGroup *shgroup_instance_solid(DRWPass *pass, struct Gwn_Batch *geom)
+{
+ static float light[3] = {0.0f, 0.0f, 1.0f};
+ GPUShader *sh = GPU_shader_get_builtin_shader(GPU_SHADER_3D_OBJECTSPACE_SIMPLE_LIGHTING_VARIYING_COLOR);
+
+ DRW_shgroup_instance_format(g_formats.instance_color, {
+ {"InstanceModelMatrix", DRW_ATTRIB_FLOAT, 16},
+ {"color" , DRW_ATTRIB_FLOAT, 4}
+ });
+
+ DRWShadingGroup *grp = DRW_shgroup_instance_create(sh, pass, geom, g_formats.instance_color);
+ DRW_shgroup_uniform_vec3(grp, "light", light, 1);
+
+ return grp;
+}
+
+DRWShadingGroup *shgroup_instance_wire(DRWPass *pass, struct Gwn_Batch *geom)
+{
+ GPUShader *sh = GPU_shader_get_builtin_shader(GPU_SHADER_3D_OBJECTSPACE_VARIYING_COLOR);
+
+ DRW_shgroup_instance_format(g_formats.instance_color, {
+ {"InstanceModelMatrix", DRW_ATTRIB_FLOAT, 16},
+ {"color" , DRW_ATTRIB_FLOAT, 4}
+ });
+
+ DRWShadingGroup *grp = DRW_shgroup_instance_create(sh, pass, geom, g_formats.instance_color);
+
+ return grp;
+}
+
+DRWShadingGroup *shgroup_instance_screen_aligned(DRWPass *pass, struct Gwn_Batch *geom)
+{
+ GPUShader *sh = GPU_shader_get_builtin_shader(GPU_SHADER_3D_INSTANCE_SCREEN_ALIGNED);
+
+ DRW_shgroup_instance_format(g_formats.instance_screen_aligned, {
+ {"color" , DRW_ATTRIB_FLOAT, 3},
+ {"size" , DRW_ATTRIB_FLOAT, 1},
+ {"InstanceModelMatrix", DRW_ATTRIB_FLOAT, 16}
+ });
+
+ DRWShadingGroup *grp = DRW_shgroup_instance_create(sh, pass, geom, g_formats.instance_screen_aligned);
+ DRW_shgroup_uniform_vec3(grp, "screen_vecs[0]", DRW_viewport_screenvecs_get(), 2);
+
+ return grp;
+}
+
+DRWShadingGroup *shgroup_instance_axis_names(DRWPass *pass, struct Gwn_Batch *geom)
+{
+ GPUShader *sh = GPU_shader_get_builtin_shader(GPU_SHADER_3D_INSTANCE_SCREEN_ALIGNED_AXIS);
+
+ DRW_shgroup_instance_format(g_formats.instance_screen_aligned, {
+ {"color" , DRW_ATTRIB_FLOAT, 3},
+ {"size" , DRW_ATTRIB_FLOAT, 1},
+ {"InstanceModelMatrix", DRW_ATTRIB_FLOAT, 16}
+ });
+
+ DRWShadingGroup *grp = DRW_shgroup_instance_create(sh, pass, geom, g_formats.instance_screen_aligned);
+ DRW_shgroup_uniform_vec3(grp, "screen_vecs[0]", DRW_viewport_screenvecs_get(), 2);
+
+ return grp;
+}
+
+DRWShadingGroup *shgroup_instance_scaled(DRWPass *pass, struct Gwn_Batch *geom)
+{
+ GPUShader *sh_inst = GPU_shader_get_builtin_shader(GPU_SHADER_INSTANCE_VARIYING_COLOR_VARIYING_SCALE);
+
+ DRW_shgroup_instance_format(g_formats.instance_scaled, {
+ {"color" , DRW_ATTRIB_FLOAT, 3},
+ {"size" , DRW_ATTRIB_FLOAT, 3},
+ {"InstanceModelMatrix", DRW_ATTRIB_FLOAT, 16}
+ });
+
+ DRWShadingGroup *grp = DRW_shgroup_instance_create(sh_inst, pass, geom, g_formats.instance_scaled);
+
+ return grp;
+}
+
+DRWShadingGroup *shgroup_instance(DRWPass *pass, struct Gwn_Batch *geom)
+{
+ GPUShader *sh_inst = GPU_shader_get_builtin_shader(GPU_SHADER_INSTANCE_VARIYING_COLOR_VARIYING_SIZE);
+
+ DRW_shgroup_instance_format(g_formats.instance_sized, {
+ {"color" , DRW_ATTRIB_FLOAT, 3},
+ {"size" , DRW_ATTRIB_FLOAT, 1},
+ {"InstanceModelMatrix", DRW_ATTRIB_FLOAT, 16}
+ });
+
+ DRWShadingGroup *grp = DRW_shgroup_instance_create(sh_inst, pass, geom, g_formats.instance_sized);
+
+ return grp;
+}
+
+DRWShadingGroup *shgroup_camera_instance(DRWPass *pass, struct Gwn_Batch *geom)
+{
+ GPUShader *sh_inst = GPU_shader_get_builtin_shader(GPU_SHADER_CAMERA);
+
+ DRW_shgroup_instance_format(g_formats.instance_camera, {
+ {"color" , DRW_ATTRIB_FLOAT, 3},
+ {"corners" , DRW_ATTRIB_FLOAT, 8},
+ {"depth" , DRW_ATTRIB_FLOAT, 1},
+ {"tria" , DRW_ATTRIB_FLOAT, 4},
+ {"InstanceModelMatrix", DRW_ATTRIB_FLOAT, 16}
+ });
+
+ DRWShadingGroup *grp = DRW_shgroup_instance_create(sh_inst, pass, geom, g_formats.instance_camera);
+
+ return grp;
+}
+
+DRWShadingGroup *shgroup_distance_lines_instance(DRWPass *pass, struct Gwn_Batch *geom)
+{
+ GPUShader *sh_inst = GPU_shader_get_builtin_shader(GPU_SHADER_DISTANCE_LINES);
+ static float point_size = 4.0f;
+
+ DRW_shgroup_instance_format(g_formats.instance_distance_lines, {
+ {"color" , DRW_ATTRIB_FLOAT, 3},
+ {"start" , DRW_ATTRIB_FLOAT, 1},
+ {"end" , DRW_ATTRIB_FLOAT, 1},
+ {"InstanceModelMatrix", DRW_ATTRIB_FLOAT, 16}
+ });
+
+ DRWShadingGroup *grp = DRW_shgroup_instance_create(sh_inst, pass, geom, g_formats.instance_distance_lines);
+ DRW_shgroup_uniform_float(grp, "size", &point_size, 1);
+
+ return grp;
+}
+
+DRWShadingGroup *shgroup_spot_instance(DRWPass *pass, struct Gwn_Batch *geom)
+{
+ GPUShader *sh_inst = GPU_shader_get_builtin_shader(GPU_SHADER_INSTANCE_EDGES_VARIYING_COLOR);
+ static const int True = true;
+ static const int False = false;
+
+ DRW_shgroup_instance_format(g_formats.instance_spot, {
+ {"color" , DRW_ATTRIB_FLOAT, 3},
+ {"InstanceModelMatrix", DRW_ATTRIB_FLOAT, 16}
+ });
+
+ DRWShadingGroup *grp = DRW_shgroup_instance_create(sh_inst, pass, geom, g_formats.instance_spot);
+ DRW_shgroup_uniform_bool(grp, "drawFront", &False, 1);
+ DRW_shgroup_uniform_bool(grp, "drawBack", &False, 1);
+ DRW_shgroup_uniform_bool(grp, "drawSilhouette", &True, 1);
+
+ return grp;
+}
+
+DRWShadingGroup *shgroup_instance_bone_envelope_wire(DRWPass *pass, struct Gwn_Batch *geom)
+{
+ GPUShader *sh = GPU_shader_get_builtin_shader(GPU_SHADER_3D_INSTANCE_BONE_ENVELOPE_WIRE);
+
+ DRW_shgroup_instance_format(g_formats.instance_bone_envelope_wire, {
+ {"InstanceModelMatrix", DRW_ATTRIB_FLOAT, 16},
+ {"color" , DRW_ATTRIB_FLOAT, 4},
+ {"radius_head" , DRW_ATTRIB_FLOAT, 1},
+ {"radius_tail" , DRW_ATTRIB_FLOAT, 1},
+ {"distance" , DRW_ATTRIB_FLOAT, 1}
+ });
+
+ DRWShadingGroup *grp = DRW_shgroup_instance_create(sh, pass, geom, g_formats.instance_bone_envelope_wire);
+
+ return grp;
+}
+
+DRWShadingGroup *shgroup_instance_bone_envelope_solid(DRWPass *pass, struct Gwn_Batch *geom)
+{
+ static float light[3] = {0.0f, 0.0f, 1.0f};
+ GPUShader *sh = GPU_shader_get_builtin_shader(GPU_SHADER_3D_INSTANCE_BONE_ENVELOPE_SOLID);
+
+ DRW_shgroup_instance_format(g_formats.instance_bone_envelope_solid, {
+ {"InstanceModelMatrix" , DRW_ATTRIB_FLOAT, 16},
+ {"color" , DRW_ATTRIB_FLOAT, 4},
+ {"radius_head" , DRW_ATTRIB_FLOAT, 1},
+ {"radius_tail" , DRW_ATTRIB_FLOAT, 1}
+ });
+
+ DRWShadingGroup *grp = DRW_shgroup_instance_create(sh, pass, geom, g_formats.instance_bone_envelope_solid);
+ DRW_shgroup_uniform_vec3(grp, "light", light, 1);
+
+ return grp;
+}
+
+DRWShadingGroup *shgroup_instance_mball_handles(DRWPass *pass, struct Gwn_Batch *geom)
+{
+ GPUShader *sh = GPU_shader_get_builtin_shader(GPU_SHADER_3D_INSTANCE_MBALL_HANDLES);
+
+ DRW_shgroup_instance_format(g_formats.instance_mball_handles, {
+ {"ScaleTranslationMatrix" , DRW_ATTRIB_FLOAT, 12},
+ {"radius" , DRW_ATTRIB_FLOAT, 1},
+ {"color" , DRW_ATTRIB_FLOAT, 3}
+ });
+
+ DRWShadingGroup *grp = DRW_shgroup_instance_create(sh, pass, geom, g_formats.instance_mball_handles);
+ DRW_shgroup_uniform_vec3(grp, "screen_vecs[0]", DRW_viewport_screenvecs_get(), 2);
+
+ return grp;
+}
+
+
+/* ******************************************** COLOR UTILS *********************************************** */
+
+/* TODO FINISH */
+/**
+ * Get the wire color theme_id of an object based on its state.
+ * \a r_color optionally receives a pointer to the associated static color variable.
+ */
+int DRW_object_wire_theme_get(Object *ob, ViewLayer *view_layer, float **r_color)
+{
+ const DRWContextState *draw_ctx = DRW_context_state_get();
+ const bool is_edit = (draw_ctx->object_mode & OB_MODE_EDIT) != 0;
+ const bool active = (view_layer->basact && view_layer->basact->object == ob);
+ /* confusing logic here, there are 2 methods of setting the color
+ * 'colortab[colindex]' and 'theme_id', colindex overrides theme_id.
+ *
+ * note: no theme yet for 'colindex' */
+ int theme_id = is_edit ? TH_WIRE_EDIT : TH_WIRE;
+
+ if (//(scene->obedit == NULL) &&
+ ((G.moving & G_TRANSFORM_OBJ) != 0) &&
+ ((ob->base_flag & BASE_SELECTED) != 0))
+ {
+ theme_id = TH_TRANSFORM;
+ }
+ else {
+ /* Sets the 'theme_id' or fallback to wire */
+ if ((ob->flag & OB_FROMGROUP) != 0) {
+ if ((ob->base_flag & BASE_SELECTED) != 0) {
+ theme_id = TH_GROUP_ACTIVE;
+ }
+ else {
+ theme_id = TH_GROUP;
+ }
+ }
+ else {
+ if ((ob->base_flag & BASE_SELECTED) != 0) {
+ theme_id = (active) ? TH_ACTIVE : TH_SELECT;
+ }
+ else {
+ if (ob->type == OB_LAMP) theme_id = TH_LAMP;
+ else if (ob->type == OB_SPEAKER) theme_id = TH_SPEAKER;
+ else if (ob->type == OB_CAMERA) theme_id = TH_CAMERA;
+ else if (ob->type == OB_EMPTY) theme_id = TH_EMPTY;
+ else if (ob->type == OB_LIGHTPROBE) theme_id = TH_EMPTY; /* TODO add lightprobe color */
+ /* fallback to TH_WIRE */
+ }
+ }
+ }
+
+ if (r_color != NULL) {
+ switch (theme_id) {
+			case TH_WIRE_EDIT: *r_color = ts.colorWireEdit; break;
+ case TH_ACTIVE: *r_color = ts.colorActive; break;
+ case TH_SELECT: *r_color = ts.colorSelect; break;
+ case TH_GROUP: *r_color = ts.colorGroup; break;
+ case TH_GROUP_ACTIVE: *r_color = ts.colorGroupActive; break;
+ case TH_TRANSFORM: *r_color = ts.colorTransform; break;
+ case OB_SPEAKER: *r_color = ts.colorSpeaker; break;
+ case OB_CAMERA: *r_color = ts.colorCamera; break;
+ case OB_EMPTY: *r_color = ts.colorEmpty; break;
+ case OB_LAMP: *r_color = ts.colorLamp; break;
+ default: *r_color = ts.colorWire; break;
+ }
+
+ /* uses darker active color for non-active + selected */
+ if ((theme_id == TH_GROUP_ACTIVE) && !active) {
+ *r_color = ts.colorGroupSelect;
+ }
+ }
+
+ return theme_id;
+}
+
+/* XXX This is utter shit, better find something more general */
+float *DRW_color_background_blend_get(int theme_id)
+{
+ static float colors[11][4];
+ float *ret;
+
+ switch (theme_id) {
+ case TH_WIRE_EDIT: ret = colors[0]; break;
+ case TH_ACTIVE: ret = colors[1]; break;
+ case TH_SELECT: ret = colors[2]; break;
+ case TH_GROUP: ret = colors[3]; break;
+ case TH_GROUP_ACTIVE: ret = colors[4]; break;
+ case TH_TRANSFORM: ret = colors[5]; break;
+ case OB_SPEAKER: ret = colors[6]; break;
+ case OB_CAMERA: ret = colors[7]; break;
+ case OB_EMPTY: ret = colors[8]; break;
+ case OB_LAMP: ret = colors[9]; break;
+ default: ret = colors[10]; break;
+ }
+
+ UI_GetThemeColorBlendShade4fv(theme_id, TH_BACK, 0.5, 0, ret);
+
+ return ret;
+}
diff --git a/source/blender/draw/intern/draw_common.h b/source/blender/draw/intern/draw_common.h
new file mode 100644
index 00000000000..ec646693207
--- /dev/null
+++ b/source/blender/draw/intern/draw_common.h
@@ -0,0 +1,148 @@
+/*
+ * Copyright 2016, Blender Foundation.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * Contributor(s): Blender Institute
+ *
+ */
+
+/** \file draw_common.h
+ * \ingroup draw
+ */
+
+#ifndef __DRAW_COMMON_H__
+#define __DRAW_COMMON_H__
+
+struct DRWPass;
+struct DRWShadingGroup;
+struct Gwn_Batch;
+struct Object;
+struct ViewLayer;
+
+/* Used as ubo but colors can be directly referenced as well */
+/* Keep in sync with: common_globals_lib.glsl (globalsBlock) */
+typedef struct GlobalsUboStorage {
+ /* UBOs data needs to be 16 byte aligned (size of vec4) */
+ float colorWire[4];
+ float colorWireEdit[4];
+ float colorActive[4];
+ float colorSelect[4];
+ float colorTransform[4];
+ float colorGroupActive[4];
+ float colorGroupSelect[4];
+ float colorGroup[4];
+ float colorLibrarySelect[4];
+ float colorLibrary[4];
+ float colorLamp[4];
+ float colorSpeaker[4];
+ float colorCamera[4];
+ float colorEmpty[4];
+ float colorVertex[4];
+ float colorVertexSelect[4];
+ float colorEditMeshActive[4];
+ float colorEdgeSelect[4];
+ float colorEdgeSeam[4];
+ float colorEdgeSharp[4];
+ float colorEdgeCrease[4];
+ float colorEdgeBWeight[4];
+ float colorEdgeFaceSelect[4];
+ float colorFace[4];
+ float colorFaceSelect[4];
+ float colorNormal[4];
+ float colorVNormal[4];
+ float colorLNormal[4];
+ float colorFaceDot[4];
+
+ float colorDeselect[4];
+ float colorOutline[4];
+ float colorLampNoAlpha[4];
+
+ float colorBackground[4];
+
+ float colorHandleFree[4];
+ float colorHandleAuto[4];
+ float colorHandleVect[4];
+ float colorHandleAlign[4];
+ float colorHandleAutoclamp[4];
+ float colorHandleSelFree[4];
+ float colorHandleSelAuto[4];
+ float colorHandleSelVect[4];
+ float colorHandleSelAlign[4];
+ float colorHandleSelAutoclamp[4];
+ float colorNurbUline[4];
+ float colorNurbSelUline[4];
+ float colorActiveSpline[4];
+
+ float colorGrid[4];
+ float colorGridEmphasise[4];
+ float colorGridAxisX[4];
+ float colorGridAxisY[4];
+ float colorGridAxisZ[4];
+
+	/* Pack individual floats at the end of the buffer to avoid alignment errors */
+ float sizeLampCenter, sizeLampCircle, sizeLampCircleShadow;
+ float sizeVertex, sizeEdge, sizeEdgeFix, sizeFaceDot;
+ float gridDistance, gridResolution, gridSubdivisions, gridScale;
+} GlobalsUboStorage;
+/* Keep in sync with globalsBlock in shaders */
+
+void DRW_globals_update(void);
+void DRW_globals_free(void);
+
+struct DRWShadingGroup *shgroup_dynlines_uniform_color(struct DRWPass *pass, float color[4]);
+struct DRWShadingGroup *shgroup_dynpoints_uniform_color(struct DRWPass *pass, float color[4], float *size);
+struct DRWShadingGroup *shgroup_groundlines_uniform_color(struct DRWPass *pass, float color[4]);
+struct DRWShadingGroup *shgroup_groundpoints_uniform_color(struct DRWPass *pass, float color[4]);
+struct DRWShadingGroup *shgroup_instance_screenspace(struct DRWPass *pass, struct Gwn_Batch *geom, float *size);
+struct DRWShadingGroup *shgroup_instance_solid(struct DRWPass *pass, struct Gwn_Batch *geom);
+struct DRWShadingGroup *shgroup_instance_wire(struct DRWPass *pass, struct Gwn_Batch *geom);
+struct DRWShadingGroup *shgroup_instance_screen_aligned(struct DRWPass *pass, struct Gwn_Batch *geom);
+struct DRWShadingGroup *shgroup_instance_axis_names(struct DRWPass *pass, struct Gwn_Batch *geom);
+struct DRWShadingGroup *shgroup_instance_image_plane(struct DRWPass *pass, struct Gwn_Batch *geom);
+struct DRWShadingGroup *shgroup_instance_scaled(struct DRWPass *pass, struct Gwn_Batch *geom);
+struct DRWShadingGroup *shgroup_instance(struct DRWPass *pass, struct Gwn_Batch *geom);
+struct DRWShadingGroup *shgroup_camera_instance(struct DRWPass *pass, struct Gwn_Batch *geom);
+struct DRWShadingGroup *shgroup_distance_lines_instance(struct DRWPass *pass, struct Gwn_Batch *geom);
+struct DRWShadingGroup *shgroup_spot_instance(struct DRWPass *pass, struct Gwn_Batch *geom);
+struct DRWShadingGroup *shgroup_instance_bone_envelope_wire(struct DRWPass *pass, struct Gwn_Batch *geom);
+struct DRWShadingGroup *shgroup_instance_bone_envelope_solid(struct DRWPass *pass, struct Gwn_Batch *geom);
+struct DRWShadingGroup *shgroup_instance_mball_handles(struct DRWPass *pass, struct Gwn_Batch *geom);
+
+int DRW_object_wire_theme_get(
+ struct Object *ob, struct ViewLayer *view_layer, float **r_color);
+float *DRW_color_background_blend_get(int theme_id);
+
+/* draw_armature.c */
+void DRW_shgroup_armature_object(
+ struct Object *ob, struct ViewLayer *view_layer,
+ struct DRWPass *pass_bone_solid, struct DRWPass *pass_bone_wire, struct DRWPass *pass_bone_envelope,
+ struct DRWShadingGroup *shgrp_relationship_lines);
+
+void DRW_shgroup_armature_pose(
+ struct Object *ob,
+ struct DRWPass *pass_bone_solid, struct DRWPass *pass_bone_wire, struct DRWPass *pass_bone_envelope,
+ struct DRWShadingGroup *shgrp_relationship_lines);
+
+void DRW_shgroup_armature_edit(
+ struct Object *ob,
+ struct DRWPass *pass_bone_solid, struct DRWPass *pass_bone_wire, struct DRWPass *pass_bone_envelope,
+ struct DRWShadingGroup *shgrp_relationship_lines);
+
+/* pose_mode.c */
+bool DRW_pose_mode_armature(
+ struct Object *ob, struct Object *active_ob);
+
+#endif /* __DRAW_COMMON_H__ */
diff --git a/source/blender/draw/intern/draw_instance_data.c b/source/blender/draw/intern/draw_instance_data.c
new file mode 100644
index 00000000000..ee73a2ba2c6
--- /dev/null
+++ b/source/blender/draw/intern/draw_instance_data.c
@@ -0,0 +1,469 @@
+/*
+ * Copyright 2016, Blender Foundation.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * Contributor(s): Blender Institute
+ *
+ */
+
+/** \file blender/draw/intern/draw_instance_data.c
+ * \ingroup draw
+ */
+
+/**
+ * DRW Instance Data Manager
+ * This is a special memory manager that keeps memory blocks ready to send as vbo data in one continuous allocation.
+ * This way we avoid feeding Gawain each instance's data one by one and the unnecessary memcpy that implies.
+ * Since we lose track of which memory block each DRWShadingGroup used, we redistribute them in the same order/size
+ * to avoid reallocating every frame.
+ * This is why DRWInstanceDatas are sorted in a list for each different data size.
+ **/
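+/* Typical usage (sketch): request a per-size data pool, then grab space one instance at a time:
+ *   DRWInstanceData *idata = DRW_instance_data_request(idatalist, 4, 128);
+ *   float *inst = DRW_instance_data_next(idata);  // space for one 4-float instance, valid until the next call
+ *   copy_v4_v4(inst, color);
+ */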
+
+#include "draw_instance_data.h"
+#include "DRW_engine.h"
+#include "DRW_render.h" /* For DRW_shgroup_get_instance_count() */
+
+#include "MEM_guardedalloc.h"
+#include "BLI_utildefines.h"
+
+#define BUFFER_CHUNK_SIZE 32
+#define BUFFER_VERTS_CHUNK 32
+
+typedef struct DRWBatchingBuffer {
+ struct DRWShadingGroup *shgroup; /* Link back to the owning shGroup. Also tells if it's used */
+ Gwn_VertFormat *format; /* Identifier. */
+ Gwn_VertBuf *vert; /* Gwn_VertBuf contained in the Gwn_Batch. */
+ Gwn_Batch *batch; /* Gwn_Batch containing the Gwn_VertBuf. */
+} DRWBatchingBuffer;
+
+typedef struct DRWInstancingBuffer {
+ struct DRWShadingGroup *shgroup; /* Link back to the owning shGroup. Also tells if it's used */
+ Gwn_VertFormat *format; /* Identifier. */
+ Gwn_Batch *instance; /* Identifier. */
+ Gwn_VertBuf *vert; /* Gwn_VertBuf contained in the Gwn_Batch. */
+ Gwn_Batch *batch; /* Gwn_Batch containing the Gwn_VertBuf. */
+} DRWInstancingBuffer;
+
+typedef struct DRWInstanceChunk {
+ size_t cursor; /* Offset to the next instance data. */
+	size_t alloc_size;   /* Number of DRWBatchingBuffer/DRWInstancingBuffer alloc'd in bbufs/ibufs. */
+ union {
+ DRWBatchingBuffer *bbufs;
+ DRWInstancingBuffer *ibufs;
+ };
+} DRWInstanceChunk;
+
+struct DRWInstanceData {
+ struct DRWInstanceData *next;
+ bool used; /* If this data is used or not. */
+ size_t chunk_size; /* Current size of the whole chunk. */
+ size_t data_size; /* Size of one instance data. */
+ size_t instance_group; /* How many instance to allocate at a time. */
+ size_t offset; /* Offset to the next instance data. */
+ float *memchunk; /* Should be float no matter what. */
+};
+
+struct DRWInstanceDataList {
+ struct DRWInstanceDataList *next, *prev;
+ /* Linked lists for all possible data pool size */
+ /* Not entirely sure if we should separate them in the first place.
+ * This is done to minimize the reattribution misses. */
+ DRWInstanceData *idata_head[MAX_INSTANCE_DATA_SIZE];
+ DRWInstanceData *idata_tail[MAX_INSTANCE_DATA_SIZE];
+
+ DRWInstanceChunk instancing;
+ DRWInstanceChunk batching;
+};
+
+static ListBase g_idatalists = {NULL, NULL};
+
+/* -------------------------------------------------------------------- */
+
+/** \name Instance Buffer Management
+ * \{ */
+
+/**
+ * This manager distributes existing batches for instancing
+ * attributes, which reduces the number of batch creations.
+ * Querying a batch is done with a vertex format. This format should
+ * be static so that its pointer never changes (we use this pointer
+ * as the identifier, since checking the full format every time
+ * would be too slow).
+ **/
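+/* Sketch of a request (the format must live in static storage so its pointer stays stable):
+ *   static Gwn_VertFormat format = {0};  // filled once with GWN_vertformat_attr_add()
+ *   Gwn_Batch *batch; Gwn_VertBuf *vert;
+ *   DRW_instancing_buffer_request(idatalist, &format, instance_geom, shgroup, &batch, &vert);
+ */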
+
+static void instance_batch_free(Gwn_Batch *batch, void *UNUSED(user_data))
+{
+ /* Free all batches that have the same key before they are reused. */
+ /* TODO: Make it thread safe! Batch freeing can happen from another thread. */
+ /* XXX we need to iterate over all idatalists unless we make some smart
+ * data structure to store the locations to update. */
+ for (DRWInstanceDataList *idatalist = g_idatalists.first; idatalist; idatalist = idatalist->next) {
+ DRWInstancingBuffer *ibuf = idatalist->instancing.ibufs;
+ for (int i = 0; i < idatalist->instancing.alloc_size; i++, ibuf++) {
+ if (ibuf->instance == batch) {
+ BLI_assert(ibuf->shgroup == NULL); /* Make sure it has no other users. */
+ GWN_VERTBUF_DISCARD_SAFE(ibuf->vert);
+ GWN_BATCH_DISCARD_SAFE(ibuf->batch);
+ /* Tag as non alloced. */
+ ibuf->format = NULL;
+ }
+ }
+ }
+}
+
+void DRW_batching_buffer_request(
+ DRWInstanceDataList *idatalist, Gwn_VertFormat *format, Gwn_PrimType type, struct DRWShadingGroup *shgroup,
+ Gwn_Batch **r_batch, Gwn_VertBuf **r_vert)
+{
+ DRWInstanceChunk *chunk = &idatalist->batching;
+ DRWBatchingBuffer *bbuf = idatalist->batching.bbufs;
+ BLI_assert(format);
+ /* Search for an unused batch. */
+ for (int i = 0; i < idatalist->batching.alloc_size; i++, bbuf++) {
+ if (bbuf->shgroup == NULL) {
+ if (bbuf->format == format) {
+ bbuf->shgroup = shgroup;
+ *r_batch = bbuf->batch;
+ *r_vert = bbuf->vert;
+ return;
+ }
+ }
+ }
+ int new_id = 0; /* Find insertion point. */
+ for (; new_id < chunk->alloc_size; ++new_id) {
+ if (chunk->bbufs[new_id].format == NULL)
+ break;
+ }
+ /* If there is no batch left. Allocate more. */
+ if (new_id == chunk->alloc_size) {
+ new_id = chunk->alloc_size;
+ chunk->alloc_size += BUFFER_CHUNK_SIZE;
+ chunk->bbufs = MEM_reallocN(chunk->bbufs, chunk->alloc_size * sizeof(DRWBatchingBuffer));
+ memset(chunk->bbufs + new_id, 0, sizeof(DRWBatchingBuffer) * BUFFER_CHUNK_SIZE);
+ }
+ /* Create the batch. */
+ bbuf = chunk->bbufs + new_id;
+ bbuf->vert = *r_vert = GWN_vertbuf_create_with_format_ex(format, GWN_USAGE_DYNAMIC);
+ bbuf->batch = *r_batch = GWN_batch_create_ex(type, bbuf->vert, NULL, 0);
+ bbuf->format = format;
+ bbuf->shgroup = shgroup;
+ GWN_vertbuf_data_alloc(*r_vert, BUFFER_VERTS_CHUNK);
+}
+
+void DRW_instancing_buffer_request(
+ DRWInstanceDataList *idatalist, Gwn_VertFormat *format, Gwn_Batch *instance, struct DRWShadingGroup *shgroup,
+ Gwn_Batch **r_batch, Gwn_VertBuf **r_vert)
+{
+ DRWInstanceChunk *chunk = &idatalist->instancing;
+ DRWInstancingBuffer *ibuf = idatalist->instancing.ibufs;
+ BLI_assert(format);
+ /* Search for an unused batch. */
+ for (int i = 0; i < idatalist->instancing.alloc_size; i++, ibuf++) {
+ if (ibuf->shgroup == NULL) {
+ if (ibuf->format == format) {
+ if (ibuf->instance == instance) {
+ ibuf->shgroup = shgroup;
+ *r_batch = ibuf->batch;
+ *r_vert = ibuf->vert;
+ return;
+ }
+ }
+ }
+ }
+ int new_id = 0; /* Find insertion point. */
+ for (; new_id < chunk->alloc_size; ++new_id) {
+ if (chunk->ibufs[new_id].format == NULL)
+ break;
+ }
+ /* If there is no batch left. Allocate more. */
+ if (new_id == chunk->alloc_size) {
+ new_id = chunk->alloc_size;
+ chunk->alloc_size += BUFFER_CHUNK_SIZE;
+ chunk->ibufs = MEM_reallocN(chunk->ibufs, chunk->alloc_size * sizeof(DRWInstancingBuffer));
+ memset(chunk->ibufs + new_id, 0, sizeof(DRWInstancingBuffer) * BUFFER_CHUNK_SIZE);
+ }
+ /* Create the batch. */
+ ibuf = chunk->ibufs + new_id;
+ ibuf->vert = *r_vert = GWN_vertbuf_create_with_format_ex(format, GWN_USAGE_DYNAMIC);
+ ibuf->batch = *r_batch = GWN_batch_duplicate(instance);
+ ibuf->format = format;
+ ibuf->shgroup = shgroup;
+ ibuf->instance = instance;
+ GWN_vertbuf_data_alloc(*r_vert, BUFFER_VERTS_CHUNK);
+ GWN_batch_instbuf_set(ibuf->batch, ibuf->vert, false);
+ /* Make sure to free this ibuf if the instance batch gets free. */
+ GWN_batch_callback_free_set(instance, &instance_batch_free, NULL);
+}
+
+void DRW_instance_buffer_finish(DRWInstanceDataList *idatalist)
+{
+ size_t realloc_size = 1; /* Avoid 0 size realloc. */
+ /* Resize down buffers in use and send data to GPU & free unused buffers. */
+ DRWInstanceChunk *batching = &idatalist->batching;
+ DRWBatchingBuffer *bbuf = batching->bbufs;
+ for (int i = 0; i < batching->alloc_size; i++, bbuf++) {
+ if (bbuf->shgroup != NULL) {
+ realloc_size = i + 1;
+ unsigned int vert_ct = DRW_shgroup_get_instance_count(bbuf->shgroup);
+ vert_ct += (vert_ct == 0) ? 1 : 0; /* Do not realloc to 0 size buffer */
+ if (vert_ct + BUFFER_VERTS_CHUNK <= bbuf->vert->vertex_ct) {
+ unsigned int size = vert_ct + BUFFER_VERTS_CHUNK - 1;
+ size = size - size % BUFFER_VERTS_CHUNK;
+ GWN_vertbuf_data_resize(bbuf->vert, size);
+ }
+ GWN_vertbuf_use(bbuf->vert); /* Send data. */
+ bbuf->shgroup = NULL; /* Set as non used for the next round. */
+ }
+ else {
+ GWN_VERTBUF_DISCARD_SAFE(bbuf->vert);
+ GWN_BATCH_DISCARD_SAFE(bbuf->batch);
+ bbuf->format = NULL; /* Tag as non alloced. */
+ }
+ }
+ /* Rounding up to nearest chunk size. */
+ realloc_size += BUFFER_CHUNK_SIZE - 1;
+ realloc_size -= realloc_size % BUFFER_CHUNK_SIZE;
+ /* Resize down if necessary. */
+ if (realloc_size < batching->alloc_size) {
+ batching->alloc_size = realloc_size;
+		batching->bbufs = MEM_reallocN(batching->bbufs, realloc_size * sizeof(DRWBatchingBuffer));
+ }
+
+ realloc_size = 1;
+ /* Resize down buffers in use and send data to GPU & free unused buffers. */
+ DRWInstanceChunk *instancing = &idatalist->instancing;
+ DRWInstancingBuffer *ibuf = instancing->ibufs;
+ for (int i = 0; i < instancing->alloc_size; i++, ibuf++) {
+ if (ibuf->shgroup != NULL) {
+ realloc_size = i + 1;
+ unsigned int vert_ct = DRW_shgroup_get_instance_count(ibuf->shgroup);
+ vert_ct += (vert_ct == 0) ? 1 : 0; /* Do not realloc to 0 size buffer */
+ if (vert_ct + BUFFER_VERTS_CHUNK <= ibuf->vert->vertex_ct) {
+ unsigned int size = vert_ct + BUFFER_VERTS_CHUNK - 1;
+ size = size - size % BUFFER_VERTS_CHUNK;
+ GWN_vertbuf_data_resize(ibuf->vert, size);
+ }
+ GWN_vertbuf_use(ibuf->vert); /* Send data. */
+ ibuf->shgroup = NULL; /* Set as non used for the next round. */
+ }
+ else {
+ GWN_VERTBUF_DISCARD_SAFE(ibuf->vert);
+ GWN_BATCH_DISCARD_SAFE(ibuf->batch);
+ ibuf->format = NULL; /* Tag as non alloced. */
+ }
+ }
+ /* Rounding up to nearest chunk size. */
+ realloc_size += BUFFER_CHUNK_SIZE - 1;
+ realloc_size -= realloc_size % BUFFER_CHUNK_SIZE;
+ /* Resize down if necessary. */
+ if (realloc_size < instancing->alloc_size) {
+ instancing->alloc_size = realloc_size;
+ instancing->ibufs = MEM_reallocN(instancing->ibufs, realloc_size * sizeof(DRWInstancingBuffer));
+ }
+}
+
+/** \} */
+
+/* -------------------------------------------------------------------- */
+
+/** \name Instance Data (DRWInstanceData)
+ * \{ */
+
+static DRWInstanceData *drw_instance_data_create(
+ DRWInstanceDataList *idatalist, unsigned int attrib_size, unsigned int instance_group)
+{
+ DRWInstanceData *idata = MEM_callocN(sizeof(DRWInstanceData), "DRWInstanceData");
+ idata->next = NULL;
+ idata->used = true;
+ idata->data_size = attrib_size;
+ idata->instance_group = instance_group;
+ idata->chunk_size = idata->data_size * instance_group;
+ idata->offset = 0;
+ idata->memchunk = MEM_mallocN(idata->chunk_size * sizeof(float), "DRWInstanceData memchunk");
+
+ BLI_assert(attrib_size > 0);
+
+ /* Push to linked list. */
+ if (idatalist->idata_head[attrib_size-1] == NULL) {
+ idatalist->idata_head[attrib_size-1] = idata;
+ }
+ else {
+ idatalist->idata_tail[attrib_size-1]->next = idata;
+ }
+ idatalist->idata_tail[attrib_size-1] = idata;
+
+ return idata;
+}
+
+static void DRW_instance_data_free(DRWInstanceData *idata)
+{
+ MEM_freeN(idata->memchunk);
+}
+
+/**
+ * Return a pointer to the next instance data space.
+ * DO NOT SAVE/REUSE THIS POINTER after the next call
+ * to this function since the chunk may have been
+ * reallocated.
+ **/
+void *DRW_instance_data_next(DRWInstanceData *idata)
+{
+ idata->offset += idata->data_size;
+
+ /* Check if chunk is large enough. realloc otherwise. */
+ if (idata->offset > idata->chunk_size) {
+ idata->chunk_size += idata->data_size * idata->instance_group;
+ idata->memchunk = MEM_reallocN(idata->memchunk, idata->chunk_size * sizeof(float));
+ }
+
+ return idata->memchunk + (idata->offset - idata->data_size);
+}
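+
+/* Illustrative sketch (not part of this patch): given the warning above, the expected
+ * pattern is to consume each returned pointer immediately, e.g.
+ *
+ *   DRWInstanceData *idata = DRW_instance_data_request(idatalist, 4, 64);
+ *   float *color = DRW_instance_data_next(idata);
+ *   copy_v4_v4(color, some_color);  // use it right away, never cache the pointer
+ *
+ * where the attrib size of 4 floats and the group size of 64 are arbitrary example
+ * values and `some_color` is a hypothetical source. */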
+
+void *DRW_instance_data_get(DRWInstanceData *idata)
+{
+ return (void *)idata->memchunk;
+}
+
+DRWInstanceData *DRW_instance_data_request(
+ DRWInstanceDataList *idatalist, unsigned int attrib_size, unsigned int instance_group)
+{
+ BLI_assert(attrib_size > 0 && attrib_size <= MAX_INSTANCE_DATA_SIZE);
+
+ DRWInstanceData *idata = idatalist->idata_head[attrib_size - 1];
+
+ /* Search for an unused data chunk. */
+ for (; idata; idata = idata->next) {
+ if (idata->used == false) {
+ idata->used = true;
+ return idata;
+ }
+ }
+
+ return drw_instance_data_create(idatalist, attrib_size, instance_group);
+}
+
+/** \} */
+
+/* -------------------------------------------------------------------- */
+
+/** \name Instance Data List (DRWInstanceDataList)
+ * \{ */
+
+DRWInstanceDataList *DRW_instance_data_list_create(void)
+{
+ DRWInstanceDataList *idatalist = MEM_callocN(sizeof(DRWInstanceDataList), "DRWInstanceDataList");
+ idatalist->batching.bbufs = MEM_callocN(sizeof(DRWBatchingBuffer) * BUFFER_CHUNK_SIZE, "DRWBatchingBuffers");
+ idatalist->batching.alloc_size = BUFFER_CHUNK_SIZE;
+ idatalist->instancing.ibufs = MEM_callocN(sizeof(DRWInstancingBuffer) * BUFFER_CHUNK_SIZE, "DRWInstancingBuffers");
+ idatalist->instancing.alloc_size = BUFFER_CHUNK_SIZE;
+
+ BLI_addtail(&g_idatalists, idatalist);
+
+ return idatalist;
+}
+
+void DRW_instance_data_list_free(DRWInstanceDataList *idatalist)
+{
+ DRWInstanceData *idata, *next_idata;
+
+ for (int i = 0; i < MAX_INSTANCE_DATA_SIZE; ++i) {
+ for (idata = idatalist->idata_head[i]; idata; idata = next_idata) {
+ next_idata = idata->next;
+ DRW_instance_data_free(idata);
+ MEM_freeN(idata);
+ }
+ idatalist->idata_head[i] = NULL;
+ idatalist->idata_tail[i] = NULL;
+ }
+
+ DRWBatchingBuffer *bbuf = idatalist->batching.bbufs;
+ for (int i = 0; i < idatalist->batching.alloc_size; i++, bbuf++) {
+ GWN_VERTBUF_DISCARD_SAFE(bbuf->vert);
+ GWN_BATCH_DISCARD_SAFE(bbuf->batch);
+ }
+ MEM_freeN(idatalist->batching.bbufs);
+
+ DRWInstancingBuffer *ibuf = idatalist->instancing.ibufs;
+ for (int i = 0; i < idatalist->instancing.alloc_size; i++, ibuf++) {
+ GWN_VERTBUF_DISCARD_SAFE(ibuf->vert);
+ GWN_BATCH_DISCARD_SAFE(ibuf->batch);
+ }
+ MEM_freeN(idatalist->instancing.ibufs);
+
+ BLI_remlink(&g_idatalists, idatalist);
+}
+
+void DRW_instance_data_list_reset(DRWInstanceDataList *idatalist)
+{
+ DRWInstanceData *idata;
+
+ for (int i = 0; i < MAX_INSTANCE_DATA_SIZE; ++i) {
+ for (idata = idatalist->idata_head[i]; idata; idata = idata->next) {
+ idata->used = false;
+ idata->offset = 0;
+ }
+ }
+}
+
+void DRW_instance_data_list_free_unused(DRWInstanceDataList *idatalist)
+{
+ DRWInstanceData *idata, *next_idata;
+
+ /* Remove unused data blocks and sanitize each list. */
+ for (int i = 0; i < MAX_INSTANCE_DATA_SIZE; ++i) {
+ idatalist->idata_tail[i] = NULL;
+ for (idata = idatalist->idata_head[i]; idata; idata = next_idata) {
+ next_idata = idata->next;
+ if (idata->used == false) {
+ if (idatalist->idata_head[i] == idata) {
+ idatalist->idata_head[i] = next_idata;
+ }
+ else {
+					/* idatalist->idata_tail[i] is guaranteed not to be NULL in this case. */
+ idatalist->idata_tail[i]->next = next_idata;
+ }
+ DRW_instance_data_free(idata);
+ MEM_freeN(idata);
+ }
+ else {
+ if (idatalist->idata_tail[i] != NULL) {
+ idatalist->idata_tail[i]->next = idata;
+ }
+ idatalist->idata_tail[i] = idata;
+ }
+ }
+ }
+}
+
+void DRW_instance_data_list_resize(DRWInstanceDataList *idatalist)
+{
+ DRWInstanceData *idata;
+
+ for (int i = 0; i < MAX_INSTANCE_DATA_SIZE; ++i) {
+ for (idata = idatalist->idata_head[i]; idata; idata = idata->next) {
+ /* Rounding up to nearest chunk size to compare. */
+ size_t fac = idata->data_size * idata->instance_group;
+ size_t tmp = idata->offset + fac - 1;
+ size_t rounded_offset = tmp - tmp % fac;
+ if (rounded_offset < idata->chunk_size) {
+ idata->chunk_size = rounded_offset;
+ idata->memchunk = MEM_reallocN(idata->memchunk, idata->chunk_size * sizeof(float));
+ }
+ }
+ }
+}
+
+/** \} */
diff --git a/source/blender/draw/intern/draw_instance_data.h b/source/blender/draw/intern/draw_instance_data.h
new file mode 100644
index 00000000000..3b0f7839277
--- /dev/null
+++ b/source/blender/draw/intern/draw_instance_data.h
@@ -0,0 +1,60 @@
+/*
+ * Copyright 2016, Blender Foundation.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * Contributor(s): Blender Institute
+ *
+ */
+
+/** \file draw_instance_data.h
+ * \ingroup draw
+ */
+
+#ifndef __DRAW_INSTANCE_DATA_H__
+#define __DRAW_INSTANCE_DATA_H__
+
+#include "BLI_compiler_attrs.h"
+#include "BLI_sys_types.h"
+
+#include "GPU_batch.h"
+
+#define MAX_INSTANCE_DATA_SIZE 42 /* Can be adjusted for more */
+
+typedef struct DRWInstanceData DRWInstanceData;
+typedef struct DRWInstanceDataList DRWInstanceDataList;
+
+struct DRWShadingGroup;
+
+void *DRW_instance_data_next(DRWInstanceData *idata);
+void *DRW_instance_data_get(DRWInstanceData *idata);
+DRWInstanceData *DRW_instance_data_request(
+ DRWInstanceDataList *idatalist, unsigned int attrib_size, unsigned int instance_group);
+
+void DRW_batching_buffer_request(
+ DRWInstanceDataList *idatalist, Gwn_VertFormat *format, Gwn_PrimType type, struct DRWShadingGroup *shgroup,
+ Gwn_Batch **r_batch, Gwn_VertBuf **r_vert);
+void DRW_instancing_buffer_request(
+ DRWInstanceDataList *idatalist, Gwn_VertFormat *format, Gwn_Batch *instance, struct DRWShadingGroup *shgroup,
+ Gwn_Batch **r_batch, Gwn_VertBuf **r_vert);
+
+/* Upload all instance data to the GPU as soon as possible. */
+void DRW_instance_buffer_finish(DRWInstanceDataList *idatalist);
+
+void DRW_instance_data_list_reset(DRWInstanceDataList *idatalist);
+void DRW_instance_data_list_free_unused(DRWInstanceDataList *idatalist);
+void DRW_instance_data_list_resize(DRWInstanceDataList *idatalist);
+
+#endif /* __DRAW_INSTANCE_DATA_H__ */
diff --git a/source/blender/draw/intern/draw_manager.c b/source/blender/draw/intern/draw_manager.c
new file mode 100644
index 00000000000..96cd096ae27
--- /dev/null
+++ b/source/blender/draw/intern/draw_manager.c
@@ -0,0 +1,2087 @@
+/*
+ * Copyright 2016, Blender Foundation.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * Contributor(s): Blender Institute
+ *
+ */
+
+/** \file blender/draw/intern/draw_manager.c
+ * \ingroup draw
+ */
+
+#include <stdio.h>
+
+#include "BLI_listbase.h"
+#include "BLI_mempool.h"
+#include "BLI_rect.h"
+#include "BLI_string.h"
+#include "BLI_threads.h"
+
+#include "BLF_api.h"
+
+#include "BKE_global.h"
+#include "BKE_mesh.h"
+#include "BKE_object.h"
+#include "BKE_workspace.h"
+
+#include "draw_manager.h"
+#include "DNA_camera_types.h"
+#include "DNA_mesh_types.h"
+#include "DNA_meshdata_types.h"
+
+#include "ED_space_api.h"
+#include "ED_screen.h"
+#include "ED_view3d.h"
+
+#include "GPU_draw.h"
+#include "GPU_extensions.h"
+#include "GPU_framebuffer.h"
+#include "GPU_immediate.h"
+#include "GPU_uniformbuffer.h"
+#include "GPU_viewport.h"
+#include "GPU_matrix.h"
+
+#include "IMB_colormanagement.h"
+
+#include "RE_engine.h"
+#include "RE_pipeline.h"
+
+#include "UI_interface.h"
+#include "UI_resources.h"
+
+#include "WM_api.h"
+#include "wm_window.h"
+
+#include "draw_manager_text.h"
+#include "draw_manager_profiling.h"
+
+/* only for callbacks */
+#include "draw_cache_impl.h"
+
+#include "draw_mode_engines.h"
+#include "engines/clay/clay_engine.h"
+#include "engines/eevee/eevee_engine.h"
+#include "engines/basic/basic_engine.h"
+#include "engines/external/external_engine.h"
+
+#include "../../../intern/gawain/gawain/gwn_context.h"
+
+#include "DEG_depsgraph.h"
+#include "DEG_depsgraph_query.h"
+
+#ifdef USE_GPU_SELECT
+# include "GPU_select.h"
+#endif
+
+/** Render State: No persistent data between draw calls. */
+DRWManager DST = {NULL};
+
+ListBase DRW_engines = {NULL, NULL};
+
+extern struct GPUUniformBuffer *view_ubo; /* draw_manager_exec.c */
+
+static void drw_state_prepare_clean_for_draw(DRWManager *dst)
+{
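+	/* Note: only the transient part of the manager is cleared here; fields from
+	 * `ogl_context` onward (presumably the GL context and its mutex) are meant to
+	 * persist across draw calls. */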
+ memset(dst, 0x0, offsetof(DRWManager, ogl_context));
+}
+
+/* This function is used to reset draw manager to a state
+ * where we don't re-use data by accident across different
+ * draw calls.
+ */
+#ifdef DEBUG
+static void drw_state_ensure_not_reused(DRWManager *dst)
+{
+ memset(dst, 0xff, offsetof(DRWManager, ogl_context));
+}
+#endif
+
+/* -------------------------------------------------------------------- */
+
+void DRW_draw_callbacks_pre_scene(void)
+{
+ RegionView3D *rv3d = DST.draw_ctx.rv3d;
+
+ gpuLoadProjectionMatrix(rv3d->winmat);
+ gpuLoadMatrix(rv3d->viewmat);
+}
+
+void DRW_draw_callbacks_post_scene(void)
+{
+ RegionView3D *rv3d = DST.draw_ctx.rv3d;
+
+ gpuLoadProjectionMatrix(rv3d->winmat);
+ gpuLoadMatrix(rv3d->viewmat);
+}
+
+struct DRWTextStore *DRW_text_cache_ensure(void)
+{
+ BLI_assert(DST.text_store_p);
+ if (*DST.text_store_p == NULL) {
+ *DST.text_store_p = DRW_text_cache_create();
+ }
+ return *DST.text_store_p;
+}
+
+
+/* -------------------------------------------------------------------- */
+
+/** \name Settings
+ * \{ */
+
+bool DRW_object_is_renderable(Object *ob)
+{
+ BLI_assert(BKE_object_is_visible(ob, OB_VISIBILITY_CHECK_UNKNOWN_RENDER_MODE));
+
+ if (ob->type == OB_MESH) {
+ if (ob == DST.draw_ctx.object_edit) {
+ IDProperty *props = BKE_layer_collection_engine_evaluated_get(ob, COLLECTION_MODE_EDIT, "");
+ bool do_show_occlude_wire = BKE_collection_engine_property_value_get_bool(props, "show_occlude_wire");
+ if (do_show_occlude_wire) {
+ return false;
+ }
+ bool do_show_weight = BKE_collection_engine_property_value_get_bool(props, "show_weight");
+ if (do_show_weight) {
+ return false;
+ }
+ }
+ }
+
+ return true;
+}
+
+/**
+ * Return whether this object is visible, depending on whether
+ * we are rendering or drawing in the viewport.
+ */
+bool DRW_check_object_visible_within_active_context(Object *ob)
+{
+ const eObjectVisibilityCheck mode = DRW_state_is_scene_render() ?
+ OB_VISIBILITY_CHECK_FOR_RENDER :
+ OB_VISIBILITY_CHECK_FOR_VIEWPORT;
+ return BKE_object_is_visible(ob, mode);
+}
+
+bool DRW_object_is_flat_normal(const Object *ob)
+{
+ if (ob->type == OB_MESH) {
+ const Mesh *me = ob->data;
+ if (me->mpoly && me->mpoly[0].flag & ME_SMOOTH) {
+ return false;
+ }
+ }
+ return true;
+}
+
+/**
+ * Return whether the object has its own mode-specific shading (true/false),
+ * or -1 when the query does not apply.
+ * Caller must check this is the active object. */
+int DRW_object_is_mode_shade(const Object *ob)
+{
+ BLI_assert(ob == DST.draw_ctx.obact);
+ UNUSED_VARS_NDEBUG(ob);
+ if ((DST.draw_ctx.object_mode & OB_MODE_EDIT) == 0) {
+ if (DST.draw_ctx.object_mode & (OB_MODE_VERTEX_PAINT | OB_MODE_WEIGHT_PAINT | OB_MODE_TEXTURE_PAINT)) {
+ if ((DST.draw_ctx.v3d->flag2 & V3D_SHOW_MODE_SHADE_OVERRIDE) == 0) {
+ return true;
+ }
+ else {
+ return false;
+ }
+ }
+ }
+ return -1;
+}
+
+/** \} */
+
+
+/* -------------------------------------------------------------------- */
+
+/** \name Color Management
+ * \{ */
+
+/* Use color management profile to draw texture to framebuffer */
+void DRW_transform_to_display(GPUTexture *tex)
+{
+ drw_state_set(DRW_STATE_WRITE_COLOR);
+
+ Gwn_VertFormat *vert_format = immVertexFormat();
+ unsigned int pos = GWN_vertformat_attr_add(vert_format, "pos", GWN_COMP_F32, 2, GWN_FETCH_FLOAT);
+ unsigned int texco = GWN_vertformat_attr_add(vert_format, "texCoord", GWN_COMP_F32, 2, GWN_FETCH_FLOAT);
+
+ const float dither = 1.0f;
+
+ bool use_ocio = false;
+
+ /* View transform is already applied for offscreen, don't apply again, see: T52046 */
+ if (!(DST.options.is_image_render && !DST.options.is_scene_render)) {
+ Scene *scene = DST.draw_ctx.scene;
+ use_ocio = IMB_colormanagement_setup_glsl_draw_from_space(
+ &scene->view_settings, &scene->display_settings, NULL, dither, false);
+ }
+
+ if (!use_ocio) {
+ /* View transform is already applied for offscreen, don't apply again, see: T52046 */
+ if (DST.options.is_image_render && !DST.options.is_scene_render) {
+ immBindBuiltinProgram(GPU_SHADER_2D_IMAGE_COLOR);
+ immUniformColor4f(1.0f, 1.0f, 1.0f, 1.0f);
+ }
+ else {
+ immBindBuiltinProgram(GPU_SHADER_2D_IMAGE_LINEAR_TO_SRGB);
+ }
+ immUniform1i("image", 0);
+ }
+
+ GPU_texture_bind(tex, 0); /* OCIO texture bind point is 0 */
+
+ float mat[4][4];
+ unit_m4(mat);
+ immUniformMatrix4fv("ModelViewProjectionMatrix", mat);
+
+ /* Full screen triangle */
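+	/* A single oversized triangle with corners at (-1,-1), (3,-1) and (-1,3) covers
+	 * the whole [-1,1] NDC range after clipping, avoiding the diagonal seam a
+	 * two-triangle quad would have. */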
+ immBegin(GWN_PRIM_TRIS, 3);
+ immAttrib2f(texco, 0.0f, 0.0f);
+ immVertex2f(pos, -1.0f, -1.0f);
+
+ immAttrib2f(texco, 2.0f, 0.0f);
+ immVertex2f(pos, 3.0f, -1.0f);
+
+ immAttrib2f(texco, 0.0f, 2.0f);
+ immVertex2f(pos, -1.0f, 3.0f);
+ immEnd();
+
+ GPU_texture_unbind(tex);
+
+ if (use_ocio) {
+ IMB_colormanagement_finish_glsl_draw();
+ }
+ else {
+ immUnbindProgram();
+ }
+}
+
+/** \} */
+
+
+/* -------------------------------------------------------------------- */
+
+/** \name Viewport (DRW_viewport)
+ * \{ */
+
+void *drw_viewport_engine_data_ensure(void *engine_type)
+{
+ void *data = GPU_viewport_engine_data_get(DST.viewport, engine_type);
+
+ if (data == NULL) {
+ data = GPU_viewport_engine_data_create(DST.viewport, engine_type);
+ }
+ return data;
+}
+
+void DRW_engine_viewport_data_size_get(
+ const void *engine_type_v,
+ int *r_fbl_len, int *r_txl_len, int *r_psl_len, int *r_stl_len)
+{
+ const DrawEngineType *engine_type = engine_type_v;
+
+ if (r_fbl_len) {
+ *r_fbl_len = engine_type->vedata_size->fbl_len;
+ }
+ if (r_txl_len) {
+ *r_txl_len = engine_type->vedata_size->txl_len;
+ }
+ if (r_psl_len) {
+ *r_psl_len = engine_type->vedata_size->psl_len;
+ }
+ if (r_stl_len) {
+ *r_stl_len = engine_type->vedata_size->stl_len;
+ }
+}
+
+const float *DRW_viewport_size_get(void)
+{
+ return DST.size;
+}
+
+const float *DRW_viewport_invert_size_get(void)
+{
+ return DST.inv_size;
+}
+
+const float *DRW_viewport_screenvecs_get(void)
+{
+ return &DST.screenvecs[0][0];
+}
+
+const float *DRW_viewport_pixelsize_get(void)
+{
+ return &DST.pixsize;
+}
+
+static void drw_viewport_cache_resize(void)
+{
+	/* Release the memiter before clearing the mempools that reference them. */
+ GPU_viewport_cache_release(DST.viewport);
+
+ if (DST.vmempool != NULL) {
+ BLI_mempool_clear_ex(DST.vmempool->calls, BLI_mempool_len(DST.vmempool->calls));
+ BLI_mempool_clear_ex(DST.vmempool->states, BLI_mempool_len(DST.vmempool->states));
+ BLI_mempool_clear_ex(DST.vmempool->shgroups, BLI_mempool_len(DST.vmempool->shgroups));
+ BLI_mempool_clear_ex(DST.vmempool->uniforms, BLI_mempool_len(DST.vmempool->uniforms));
+ BLI_mempool_clear_ex(DST.vmempool->passes, BLI_mempool_len(DST.vmempool->passes));
+ }
+
+ DRW_instance_data_list_free_unused(DST.idatalist);
+ DRW_instance_data_list_resize(DST.idatalist);
+}
+
+static void drw_state_eval_ctx_init(DRWManager *dst)
+{
+ DRWContextState *draw_ctx = &dst->draw_ctx;
+ DEG_evaluation_context_init_from_scene(
+ &draw_ctx->eval_ctx,
+ draw_ctx->scene,
+ draw_ctx->view_layer,
+ draw_ctx->engine_type,
+ draw_ctx->object_mode,
+ DST.options.is_scene_render ? DAG_EVAL_RENDER : DAG_EVAL_VIEWPORT);
+}
+
+/* Not a viewport variable, we could split this out. */
+static void drw_context_state_init(void)
+{
+ /* Edit object. */
+ if (DST.draw_ctx.object_mode & OB_MODE_EDIT) {
+ DST.draw_ctx.object_edit = DST.draw_ctx.obact;
+ }
+ else {
+ DST.draw_ctx.object_edit = NULL;
+ }
+
+ /* Pose object. */
+ if (DST.draw_ctx.object_mode & OB_MODE_POSE) {
+ DST.draw_ctx.object_pose = DST.draw_ctx.obact;
+ }
+ else if (DST.draw_ctx.object_mode & OB_MODE_WEIGHT_PAINT) {
+ DST.draw_ctx.object_pose = BKE_object_pose_armature_get(DST.draw_ctx.obact);
+ }
+ else {
+ DST.draw_ctx.object_pose = NULL;
+ }
+
+ drw_state_eval_ctx_init(&DST);
+}
+
+/* It also stores viewport variables in an immutable place: DST.
+ * This is because a cached uniform only stores a reference
+ * to its value, and we don't want to invalidate the cache
+ * if this value changes per viewport. */
+static void drw_viewport_var_init(void)
+{
+ RegionView3D *rv3d = DST.draw_ctx.rv3d;
+ /* Refresh DST.size */
+ if (DST.viewport) {
+ int size[2];
+ GPU_viewport_size_get(DST.viewport, size);
+ DST.size[0] = size[0];
+ DST.size[1] = size[1];
+ DST.inv_size[0] = 1.0f / size[0];
+ DST.inv_size[1] = 1.0f / size[1];
+
+ DefaultFramebufferList *fbl = (DefaultFramebufferList *)GPU_viewport_framebuffer_list_get(DST.viewport);
+ DST.default_framebuffer = fbl->default_fb;
+
+ DST.vmempool = GPU_viewport_mempool_get(DST.viewport);
+
+ if (DST.vmempool->calls == NULL) {
+ DST.vmempool->calls = BLI_mempool_create(sizeof(DRWCall), 0, 512, 0);
+ }
+ if (DST.vmempool->states == NULL) {
+ DST.vmempool->states = BLI_mempool_create(sizeof(DRWCallState), 0, 512, BLI_MEMPOOL_ALLOW_ITER);
+ }
+ if (DST.vmempool->shgroups == NULL) {
+ DST.vmempool->shgroups = BLI_mempool_create(sizeof(DRWShadingGroup), 0, 256, 0);
+ }
+ if (DST.vmempool->uniforms == NULL) {
+ DST.vmempool->uniforms = BLI_mempool_create(sizeof(DRWUniform), 0, 512, 0);
+ }
+ if (DST.vmempool->passes == NULL) {
+ DST.vmempool->passes = BLI_mempool_create(sizeof(DRWPass), 0, 64, 0);
+ }
+
+ DST.idatalist = GPU_viewport_instance_data_list_get(DST.viewport);
+ DRW_instance_data_list_reset(DST.idatalist);
+ }
+ else {
+ DST.size[0] = 0;
+ DST.size[1] = 0;
+
+ DST.inv_size[0] = 0;
+ DST.inv_size[1] = 0;
+
+ DST.default_framebuffer = NULL;
+ DST.vmempool = NULL;
+ }
+
+ if (rv3d != NULL) {
+ /* Refresh DST.screenvecs */
+ copy_v3_v3(DST.screenvecs[0], rv3d->viewinv[0]);
+ copy_v3_v3(DST.screenvecs[1], rv3d->viewinv[1]);
+ normalize_v3(DST.screenvecs[0]);
+ normalize_v3(DST.screenvecs[1]);
+
+ /* Refresh DST.pixelsize */
+ DST.pixsize = rv3d->pixsize;
+
+ copy_m4_m4(DST.original_mat.mat[DRW_MAT_PERS], rv3d->persmat);
+ copy_m4_m4(DST.original_mat.mat[DRW_MAT_PERSINV], rv3d->persinv);
+ copy_m4_m4(DST.original_mat.mat[DRW_MAT_VIEW], rv3d->viewmat);
+ copy_m4_m4(DST.original_mat.mat[DRW_MAT_VIEWINV], rv3d->viewinv);
+ copy_m4_m4(DST.original_mat.mat[DRW_MAT_WIN], rv3d->winmat);
+ invert_m4_m4(DST.original_mat.mat[DRW_MAT_WININV], rv3d->winmat);
+
+ memcpy(DST.view_data.matstate.mat, DST.original_mat.mat, sizeof(DST.original_mat.mat));
+
+ copy_v4_v4(DST.view_data.viewcamtexcofac, rv3d->viewcamtexcofac);
+ }
+ else {
+ copy_v4_fl4(DST.view_data.viewcamtexcofac, 1.0f, 1.0f, 0.0f, 0.0f);
+ }
+
+ /* Reset facing */
+ DST.frontface = GL_CCW;
+ DST.backface = GL_CW;
+ glFrontFace(DST.frontface);
+
+ if (DST.draw_ctx.object_edit) {
+ ED_view3d_init_mats_rv3d(DST.draw_ctx.object_edit, rv3d);
+ }
+
+	/* Alloc array of texture references. */
+ if (DST.RST.bound_texs == NULL) {
+ DST.RST.bound_texs = MEM_callocN(sizeof(GPUTexture *) * GPU_max_textures(), "Bound GPUTexture refs");
+ }
+ if (DST.RST.bound_tex_slots == NULL) {
+ DST.RST.bound_tex_slots = MEM_callocN(sizeof(char) * GPU_max_textures(), "Bound Texture Slots");
+ }
+ if (DST.RST.bound_ubos == NULL) {
+ DST.RST.bound_ubos = MEM_callocN(sizeof(GPUUniformBuffer *) * GPU_max_ubo_binds(), "Bound GPUUniformBuffer refs");
+ }
+ if (DST.RST.bound_ubo_slots == NULL) {
+ DST.RST.bound_ubo_slots = MEM_callocN(sizeof(char) * GPU_max_ubo_binds(), "Bound Ubo Slots");
+ }
+
+ if (view_ubo == NULL) {
+ view_ubo = DRW_uniformbuffer_create(sizeof(ViewUboStorage), NULL);
+ }
+
+ DST.override_mat = 0;
+ DST.dirty_mat = true;
+ DST.state_cache_id = 1;
+
+ DST.clipping.updated = false;
+
+ memset(DST.common_instance_data, 0x0, sizeof(DST.common_instance_data));
+}
+
+void DRW_viewport_matrix_get(float mat[4][4], DRWViewportMatrixType type)
+{
+ BLI_assert(type >= 0 && type < DRW_MAT_COUNT);
+	BLI_assert(((DST.override_mat & (1 << type)) != 0) || DST.draw_ctx.rv3d != NULL); /* Can't use this in render mode. */
+
+ copy_m4_m4(mat, DST.view_data.matstate.mat[type]);
+}
+
+void DRW_viewport_matrix_get_all(DRWMatrixState *state)
+{
+ memcpy(state, DST.view_data.matstate.mat, sizeof(DRWMatrixState));
+}
+
+void DRW_viewport_matrix_override_set(float mat[4][4], DRWViewportMatrixType type)
+{
+ BLI_assert(type < DRW_MAT_COUNT);
+ copy_m4_m4(DST.view_data.matstate.mat[type], mat);
+ DST.override_mat |= (1 << type);
+ DST.dirty_mat = true;
+ DST.clipping.updated = false;
+}
+
+void DRW_viewport_matrix_override_unset(DRWViewportMatrixType type)
+{
+ BLI_assert(type < DRW_MAT_COUNT);
+ copy_m4_m4(DST.view_data.matstate.mat[type], DST.original_mat.mat[type]);
+ DST.override_mat &= ~(1 << type);
+ DST.dirty_mat = true;
+ DST.clipping.updated = false;
+}
+
+void DRW_viewport_matrix_override_set_all(DRWMatrixState *state)
+{
+ memcpy(DST.view_data.matstate.mat, state, sizeof(DRWMatrixState));
+ DST.override_mat = 0xFFFFFF;
+ DST.dirty_mat = true;
+ DST.clipping.updated = false;
+}
+
+void DRW_viewport_matrix_override_unset_all(void)
+{
+ memcpy(DST.view_data.matstate.mat, DST.original_mat.mat, sizeof(DRWMatrixState));
+ DST.override_mat = 0;
+ DST.dirty_mat = true;
+ DST.clipping.updated = false;
+}
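+
+/* Illustrative sketch (not part of this patch): an engine drawing from a custom view,
+ * e.g. a shadow or probe pass, would typically bracket its drawing like this, where
+ * `winmat`, `viewmat`, `persmat` and `psl->my_pass` are hypothetical caller data
+ * (a real pass would usually also override the corresponding inverse matrices):
+ *
+ *   DRW_viewport_matrix_override_set(persmat, DRW_MAT_PERS);
+ *   DRW_viewport_matrix_override_set(viewmat, DRW_MAT_VIEW);
+ *   DRW_viewport_matrix_override_set(winmat, DRW_MAT_WIN);
+ *   DRW_draw_pass(psl->my_pass);
+ *   DRW_viewport_matrix_override_unset_all();
+ */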
+
+bool DRW_viewport_is_persp_get(void)
+{
+ RegionView3D *rv3d = DST.draw_ctx.rv3d;
+ if (rv3d) {
+ return rv3d->is_persp;
+ }
+ else {
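+		/* A standard perspective projection matrix has 0.0 in its last element,
+		 * while an orthographic one has 1.0, hence this test on the window matrix. */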
+ return DST.view_data.matstate.mat[DRW_MAT_WIN][3][3] == 0.0f;
+ }
+ BLI_assert(0);
+ return false;
+}
+
+DefaultFramebufferList *DRW_viewport_framebuffer_list_get(void)
+{
+ return GPU_viewport_framebuffer_list_get(DST.viewport);
+}
+
+DefaultTextureList *DRW_viewport_texture_list_get(void)
+{
+ return GPU_viewport_texture_list_get(DST.viewport);
+}
+
+void DRW_viewport_request_redraw(void)
+{
+ GPU_viewport_tag_update(DST.viewport);
+}
+
+/** \} */
+
+
+/* -------------------------------------------------------------------- */
+/** \name ViewLayers (DRW_scenelayer)
+ * \{ */
+
+void *DRW_view_layer_engine_data_get(DrawEngineType *engine_type)
+{
+ for (ViewLayerEngineData *sled = DST.draw_ctx.view_layer->drawdata.first; sled; sled = sled->next) {
+ if (sled->engine_type == engine_type) {
+ return sled->storage;
+ }
+ }
+ return NULL;
+}
+
+void **DRW_view_layer_engine_data_ensure(DrawEngineType *engine_type, void (*callback)(void *storage))
+{
+ ViewLayerEngineData *sled;
+
+ for (sled = DST.draw_ctx.view_layer->drawdata.first; sled; sled = sled->next) {
+ if (sled->engine_type == engine_type) {
+ return &sled->storage;
+ }
+ }
+
+ sled = MEM_callocN(sizeof(ViewLayerEngineData), "ViewLayerEngineData");
+ sled->engine_type = engine_type;
+ sled->free = callback;
+ BLI_addtail(&DST.draw_ctx.view_layer->drawdata, sled);
+
+ return &sled->storage;
+}
+
+/** \} */
+
+
+/* -------------------------------------------------------------------- */
+
+/** \name Objects (DRW_object)
+ * \{ */
+
+ObjectEngineData *DRW_object_engine_data_get(Object *ob, DrawEngineType *engine_type)
+{
+ for (ObjectEngineData *oed = ob->drawdata.first; oed; oed = oed->next) {
+ if (oed->engine_type == engine_type) {
+ return oed;
+ }
+ }
+ return NULL;
+}
+
+ObjectEngineData *DRW_object_engine_data_ensure(
+ Object *ob,
+ DrawEngineType *engine_type,
+ size_t size,
+ ObjectEngineDataInitCb init_cb,
+ ObjectEngineDataFreeCb free_cb)
+{
+ BLI_assert(size >= sizeof(ObjectEngineData));
+ /* Try to re-use existing data. */
+ ObjectEngineData *oed = DRW_object_engine_data_get(ob, engine_type);
+ if (oed != NULL) {
+ return oed;
+ }
+ /* Allocate new data. */
+ if ((ob->base_flag & BASE_FROMDUPLI) != 0) {
+ /* NOTE: data is not persistent in this case. It is reset each redraw. */
+ BLI_assert(free_cb == NULL); /* No callback allowed. */
+ /* Round to sizeof(float) for DRW_instance_data_request(). */
+ const size_t t = sizeof(float) - 1;
+ size = (size + t) & ~t;
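+		/* e.g. with sizeof(float) == 4: a requested size of 22 bytes rounds up to 24,
+		 * while 20 stays 20 since it is already a multiple of 4. */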
+ size_t fsize = size / sizeof(float);
+ if (DST.common_instance_data[fsize] == NULL) {
+ DST.common_instance_data[fsize] = DRW_instance_data_request(DST.idatalist, fsize, 16);
+ }
+ oed = (ObjectEngineData *)DRW_instance_data_next(DST.common_instance_data[fsize]);
+ memset(oed, 0, size);
+ }
+ else {
+ oed = MEM_callocN(size, "ObjectEngineData");
+ }
+ oed->engine_type = engine_type;
+ oed->free = free_cb;
+ /* Perform user-side initialization, if needed. */
+ if (init_cb != NULL) {
+ init_cb(oed);
+ }
+ /* Register in the list. */
+ BLI_addtail(&ob->drawdata, oed);
+ return oed;
+}
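+
+/* Illustrative sketch (not part of this patch): a draw engine would typically call this
+ * from its cache_populate callback with its own per-object struct, e.g.
+ *
+ *   typedef struct MyObjectData { ObjectEngineData oed; float center[3]; } MyObjectData;
+ *
+ *   MyObjectData *data = (MyObjectData *)DRW_object_engine_data_ensure(
+ *           ob, &my_draw_engine_type, sizeof(MyObjectData), NULL, NULL);
+ *
+ * where `MyObjectData` and `my_draw_engine_type` are hypothetical names; the size assert
+ * above suggests the struct is expected to embed ObjectEngineData as its first member. */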
+
+/** \} */
+
+
+/* -------------------------------------------------------------------- */
+
+/** \name Rendering (DRW_engines)
+ * \{ */
+
+static void drw_engines_init(void)
+{
+ for (LinkData *link = DST.enabled_engines.first; link; link = link->next) {
+ DrawEngineType *engine = link->data;
+ ViewportEngineData *data = drw_viewport_engine_data_ensure(engine);
+ PROFILE_START(stime);
+
+ if (engine->engine_init) {
+ engine->engine_init(data);
+ }
+
+ PROFILE_END_UPDATE(data->init_time, stime);
+ }
+}
+
+static void drw_engines_cache_init(void)
+{
+ for (LinkData *link = DST.enabled_engines.first; link; link = link->next) {
+ DrawEngineType *engine = link->data;
+ ViewportEngineData *data = drw_viewport_engine_data_ensure(engine);
+
+ if (data->text_draw_cache) {
+ DRW_text_cache_destroy(data->text_draw_cache);
+ data->text_draw_cache = NULL;
+ }
+ if (DST.text_store_p == NULL) {
+ DST.text_store_p = &data->text_draw_cache;
+ }
+
+ if (engine->cache_init) {
+ engine->cache_init(data);
+ }
+ }
+}
+
+static void drw_engines_cache_populate(Object *ob)
+{
+ DST.ob_state = NULL;
+
+ for (LinkData *link = DST.enabled_engines.first; link; link = link->next) {
+ DrawEngineType *engine = link->data;
+ ViewportEngineData *data = drw_viewport_engine_data_ensure(engine);
+
+ if (engine->id_update) {
+ engine->id_update(data, &ob->id);
+ }
+
+ if (engine->cache_populate) {
+ engine->cache_populate(data, ob);
+ }
+ }
+}
+
+static void drw_engines_cache_finish(void)
+{
+ for (LinkData *link = DST.enabled_engines.first; link; link = link->next) {
+ DrawEngineType *engine = link->data;
+ ViewportEngineData *data = drw_viewport_engine_data_ensure(engine);
+
+ if (engine->cache_finish) {
+ engine->cache_finish(data);
+ }
+ }
+}
+
+static void drw_engines_draw_background(void)
+{
+ for (LinkData *link = DST.enabled_engines.first; link; link = link->next) {
+ DrawEngineType *engine = link->data;
+ ViewportEngineData *data = drw_viewport_engine_data_ensure(engine);
+
+ if (engine->draw_background) {
+ PROFILE_START(stime);
+
+ DRW_stats_group_start(engine->idname);
+ engine->draw_background(data);
+ DRW_stats_group_end();
+
+ PROFILE_END_UPDATE(data->background_time, stime);
+ return;
+ }
+ }
+
+ /* No draw_background found, doing default background */
+ if (DRW_state_draw_background()) {
+ DRW_draw_background();
+ }
+}
+
+static void drw_engines_draw_scene(void)
+{
+ for (LinkData *link = DST.enabled_engines.first; link; link = link->next) {
+ DrawEngineType *engine = link->data;
+ ViewportEngineData *data = drw_viewport_engine_data_ensure(engine);
+ PROFILE_START(stime);
+
+ if (engine->draw_scene) {
+ DRW_stats_group_start(engine->idname);
+ engine->draw_scene(data);
+ /* Restore for next engine */
+ if (DRW_state_is_fbo()) {
+ GPU_framebuffer_bind(DST.default_framebuffer);
+ }
+ DRW_stats_group_end();
+ }
+
+ PROFILE_END_UPDATE(data->render_time, stime);
+ }
+}
+
+static void drw_engines_draw_text(void)
+{
+ for (LinkData *link = DST.enabled_engines.first; link; link = link->next) {
+ DrawEngineType *engine = link->data;
+ ViewportEngineData *data = drw_viewport_engine_data_ensure(engine);
+ PROFILE_START(stime);
+
+ if (data->text_draw_cache) {
+ DRW_text_cache_draw(data->text_draw_cache, DST.draw_ctx.v3d, DST.draw_ctx.ar, false);
+ }
+
+ PROFILE_END_UPDATE(data->render_time, stime);
+ }
+}
+
+#define MAX_INFO_LINES 10
+
+/**
+ * Returns the offset required for the drawing of engines info.
+ */
+int DRW_draw_region_engine_info_offset(void)
+{
+ int lines = 0;
+ for (LinkData *link = DST.enabled_engines.first; link; link = link->next) {
+ DrawEngineType *engine = link->data;
+ ViewportEngineData *data = drw_viewport_engine_data_ensure(engine);
+
+ /* Count the number of lines. */
+ if (data->info[0] != '\0') {
+ lines++;
+ char *c = data->info;
+ while (*c++ != '\0') {
+ if (*c == '\n') {
+ lines++;
+ }
+ }
+ }
+ }
+ return MIN2(MAX_INFO_LINES, lines) * UI_UNIT_Y;
+}
+
+/**
+ * Actual drawing;
+ */
+void DRW_draw_region_engine_info(void)
+{
+ const char *info_array_final[MAX_INFO_LINES + 1];
+	/* This should be the maximum number of engines running at the same time. */
+ char info_array[MAX_INFO_LINES][GPU_INFO_SIZE];
+ int i = 0;
+
+ const DRWContextState *draw_ctx = DRW_context_state_get();
+ ARegion *ar = draw_ctx->ar;
+ float fill_color[4] = {0.0f, 0.0f, 0.0f, 0.25f};
+
+ UI_GetThemeColor3fv(TH_HIGH_GRAD, fill_color);
+ mul_v3_fl(fill_color, fill_color[3]);
+
+ for (LinkData *link = DST.enabled_engines.first; link; link = link->next) {
+ DrawEngineType *engine = link->data;
+ ViewportEngineData *data = drw_viewport_engine_data_ensure(engine);
+
+ if (data->info[0] != '\0') {
+ char *chr_current = data->info;
+ char *chr_start = chr_current;
+ int line_len = 0;
+
+ while (*chr_current++ != '\0') {
+ line_len++;
+ if (*chr_current == '\n') {
+ BLI_strncpy(info_array[i++], chr_start, line_len + 1);
+ /* Re-start counting. */
+ chr_start = chr_current + 1;
+ line_len = -1;
+ }
+ }
+
+ BLI_strncpy(info_array[i++], chr_start, line_len + 1);
+
+ if (i >= MAX_INFO_LINES) {
+ break;
+ }
+ }
+ }
+
+ for (int j = 0; j < i; j++) {
+ info_array_final[j] = info_array[j];
+ }
+ info_array_final[i] = NULL;
+
+	if (i > 0) {
+ ED_region_info_draw_multiline(ar, info_array_final, fill_color, true);
+ }
+}
+
+#undef MAX_INFO_LINES
+
+static void use_drw_engine(DrawEngineType *engine)
+{
+ LinkData *ld = MEM_callocN(sizeof(LinkData), "enabled engine link data");
+ ld->data = engine;
+ BLI_addtail(&DST.enabled_engines, ld);
+}
+
+/**
+ * Use for external render engines.
+ */
+static void drw_engines_enable_external(void)
+{
+ use_drw_engine(DRW_engine_viewport_external_type.draw_engine);
+}
+
+/* TODO revisit this when proper layering is implemented */
+/* Gather all draw engines needed and store them in DST.enabled_engines
+ * That also defines the rendering order of the engines. */
+static void drw_engines_enable_from_engine(RenderEngineType *engine_type)
+{
+ /* TODO layers */
+ if (engine_type->draw_engine != NULL) {
+ use_drw_engine(engine_type->draw_engine);
+ }
+
+ if ((engine_type->flag & RE_INTERNAL) == 0) {
+ drw_engines_enable_external();
+ }
+}
+
+static void drw_engines_enable_from_object_mode(void)
+{
+ use_drw_engine(&draw_engine_object_type);
+}
+
+static void drw_engines_enable_from_mode(int mode)
+{
+ switch (mode) {
+ case CTX_MODE_EDIT_MESH:
+ use_drw_engine(&draw_engine_edit_mesh_type);
+ break;
+ case CTX_MODE_EDIT_CURVE:
+ use_drw_engine(&draw_engine_edit_curve_type);
+ break;
+ case CTX_MODE_EDIT_SURFACE:
+ use_drw_engine(&draw_engine_edit_surface_type);
+ break;
+ case CTX_MODE_EDIT_TEXT:
+ use_drw_engine(&draw_engine_edit_text_type);
+ break;
+ case CTX_MODE_EDIT_ARMATURE:
+ use_drw_engine(&draw_engine_edit_armature_type);
+ break;
+ case CTX_MODE_EDIT_METABALL:
+ use_drw_engine(&draw_engine_edit_metaball_type);
+ break;
+ case CTX_MODE_EDIT_LATTICE:
+ use_drw_engine(&draw_engine_edit_lattice_type);
+ break;
+ case CTX_MODE_POSE:
+ use_drw_engine(&draw_engine_pose_type);
+ break;
+ case CTX_MODE_SCULPT:
+ use_drw_engine(&draw_engine_sculpt_type);
+ break;
+ case CTX_MODE_PAINT_WEIGHT:
+ use_drw_engine(&draw_engine_pose_type);
+ use_drw_engine(&draw_engine_paint_weight_type);
+ break;
+ case CTX_MODE_PAINT_VERTEX:
+ use_drw_engine(&draw_engine_paint_vertex_type);
+ break;
+ case CTX_MODE_PAINT_TEXTURE:
+ use_drw_engine(&draw_engine_paint_texture_type);
+ break;
+ case CTX_MODE_PARTICLE:
+ use_drw_engine(&draw_engine_particle_type);
+ break;
+ case CTX_MODE_OBJECT:
+ break;
+ default:
+ BLI_assert(!"Draw mode invalid");
+ break;
+ }
+}
+
+/**
+ * Use for select and depth-drawing.
+ */
+static void drw_engines_enable_basic(void)
+{
+ use_drw_engine(DRW_engine_viewport_basic_type.draw_engine);
+}
+
+static void drw_engines_enable(ViewLayer *view_layer, RenderEngineType *engine_type)
+{
+ Object *obact = OBACT(view_layer);
+ const int mode = CTX_data_mode_enum_ex(DST.draw_ctx.object_edit, obact, DST.draw_ctx.object_mode);
+
+ drw_engines_enable_from_engine(engine_type);
+
+ if (DRW_state_draw_support()) {
+ drw_engines_enable_from_object_mode();
+ drw_engines_enable_from_mode(mode);
+ }
+}
+
+static void drw_engines_disable(void)
+{
+ BLI_freelistN(&DST.enabled_engines);
+}
+
+static unsigned int DRW_engines_get_hash(void)
+{
+ unsigned int hash = 0;
+ /* The cache depends on enabled engines */
+ /* FIXME : if collision occurs ... segfault */
+ for (LinkData *link = DST.enabled_engines.first; link; link = link->next) {
+ DrawEngineType *engine = link->data;
+ hash += BLI_ghashutil_strhash_p(engine->idname);
+ }
+
+ return hash;
+}
+
+/* -------------------------------------------------------------------- */
+
+/** \name View Update
+ * \{ */
+
+void DRW_notify_view_update(const DRWUpdateContext *update_ctx)
+{
+ RenderEngineType *engine_type = update_ctx->engine_type;
+ ARegion *ar = update_ctx->ar;
+ View3D *v3d = update_ctx->v3d;
+ RegionView3D *rv3d = ar->regiondata;
+ Depsgraph *depsgraph = update_ctx->depsgraph;
+ Scene *scene = update_ctx->scene;
+ ViewLayer *view_layer = update_ctx->view_layer;
+
+ if (rv3d->viewport == NULL) {
+ return;
+ }
+
+	/* XXX Really nasty locking. But otherwise this could
+	 * be executed by the material preview thread
+ * while rendering a viewport. */
+ BLI_mutex_lock(&DST.ogl_context_mutex);
+
+ /* Reset before using it. */
+ drw_state_prepare_clean_for_draw(&DST);
+
+ DST.viewport = rv3d->viewport;
+ DST.draw_ctx = (DRWContextState){
+ .ar = ar, .rv3d = rv3d, .v3d = v3d,
+ .scene = scene, .view_layer = view_layer, .obact = OBACT(view_layer),
+ .engine_type = engine_type,
+ .depsgraph = depsgraph, .object_mode = OB_MODE_OBJECT,
+ };
+
+ drw_engines_enable(view_layer, engine_type);
+
+ for (LinkData *link = DST.enabled_engines.first; link; link = link->next) {
+ DrawEngineType *draw_engine = link->data;
+ ViewportEngineData *data = drw_viewport_engine_data_ensure(draw_engine);
+
+ if (draw_engine->view_update) {
+ draw_engine->view_update(data);
+ }
+ }
+
+ DST.viewport = NULL;
+
+ drw_engines_disable();
+
+ BLI_mutex_unlock(&DST.ogl_context_mutex);
+}
+
+/** \} */
+
+/** \name ID Update
+ * \{ */
+
+/* TODO(sergey): This code is run for each changed ID (including the ones which
+ * are changed indirectly via update flush). Need to find a way to make this
+ * run really fast, hopefully without any memory allocations on the heap.
+ * One idea could be to run every known engine's id_update() and make them
+ * do nothing if there is no engine-specific data yet.
+ */
+void DRW_notify_id_update(const DRWUpdateContext *update_ctx, ID *id)
+{
+ RenderEngineType *engine_type = update_ctx->engine_type;
+ ARegion *ar = update_ctx->ar;
+ View3D *v3d = update_ctx->v3d;
+ RegionView3D *rv3d = ar->regiondata;
+ Depsgraph *depsgraph = update_ctx->depsgraph;
+ Scene *scene = update_ctx->scene;
+ ViewLayer *view_layer = update_ctx->view_layer;
+ if (rv3d->viewport == NULL) {
+ return;
+ }
+ /* Reset before using it. */
+ drw_state_prepare_clean_for_draw(&DST);
+ DST.viewport = rv3d->viewport;
+ DST.draw_ctx = (DRWContextState){
+ .ar = ar, .rv3d = rv3d, .v3d = v3d,
+ .scene = scene, .view_layer = view_layer, .obact = OBACT(view_layer),
+ .engine_type = engine_type,
+ .depsgraph = depsgraph, .object_mode = OB_MODE_OBJECT,
+ };
+ drw_engines_enable(view_layer, engine_type);
+ for (LinkData *link = DST.enabled_engines.first; link; link = link->next) {
+ DrawEngineType *draw_engine = link->data;
+ ViewportEngineData *data = drw_viewport_engine_data_ensure(draw_engine);
+ if (draw_engine->id_update) {
+ draw_engine->id_update(data, id);
+ }
+ }
+ DST.viewport = NULL;
+ drw_engines_disable();
+}
+
+/** \} */
+
+/* -------------------------------------------------------------------- */
+
+/** \name Main Draw Loops (DRW_draw)
+ * \{ */
+
+/* Everything starts here.
+ * This function takes care of calling all cache and rendering functions
+ * for each relevant engine / mode engine. */
+void DRW_draw_view(const bContext *C)
+{
+ EvaluationContext eval_ctx;
+ CTX_data_eval_ctx(C, &eval_ctx);
+ RenderEngineType *engine_type = CTX_data_engine_type(C);
+ ARegion *ar = CTX_wm_region(C);
+ View3D *v3d = CTX_wm_view3d(C);
+
+ /* Reset before using it. */
+ drw_state_prepare_clean_for_draw(&DST);
+ DRW_draw_render_loop_ex(eval_ctx.depsgraph, engine_type, ar, v3d, eval_ctx.object_mode, C);
+}
+
+/**
+ * Used for both regular and off-screen drawing.
+ * The caller needs to reset DST before calling this function.
+ */
+void DRW_draw_render_loop_ex(
+ struct Depsgraph *depsgraph,
+ RenderEngineType *engine_type,
+ ARegion *ar, View3D *v3d, const eObjectMode object_mode,
+ const bContext *evil_C)
+{
+
+ Scene *scene = DEG_get_evaluated_scene(depsgraph);
+ ViewLayer *view_layer = DEG_get_evaluated_view_layer(depsgraph);
+ RegionView3D *rv3d = ar->regiondata;
+
+ DST.draw_ctx.evil_C = evil_C;
+
+ DST.viewport = rv3d->viewport;
+ v3d->zbuf = true;
+
+ /* Setup viewport */
+ GPU_viewport_engines_data_validate(DST.viewport, DRW_engines_get_hash());
+
+ DST.draw_ctx = (DRWContextState){
+ .ar = ar, .rv3d = rv3d, .v3d = v3d,
+ .scene = scene, .view_layer = view_layer, .obact = OBACT(view_layer),
+ .engine_type = engine_type,
+ .depsgraph = depsgraph, .object_mode = object_mode,
+
+ /* reuse if caller sets */
+ .evil_C = DST.draw_ctx.evil_C,
+ };
+ drw_context_state_init();
+ drw_viewport_var_init();
+
+ /* Get list of enabled engines */
+ drw_engines_enable(view_layer, engine_type);
+
+ /* Update ubos */
+ DRW_globals_update();
+
+ /* No framebuffer allowed before drawing. */
+ BLI_assert(GPU_framebuffer_current_get() == 0);
+
+ /* Init engines */
+ drw_engines_init();
+
+ /* Cache filling */
+ {
+ PROFILE_START(stime);
+ drw_engines_cache_init();
+
+ DEG_OBJECT_ITER_FOR_RENDER_ENGINE_BEGIN(depsgraph, ob, DRW_iterator_mode_get())
+ {
+ drw_engines_cache_populate(ob);
+ }
+ DEG_OBJECT_ITER_FOR_RENDER_ENGINE_END;
+
+ drw_engines_cache_finish();
+
+ DRW_render_instance_buffer_finish();
+
+#ifdef USE_PROFILE
+ double *cache_time = GPU_viewport_cache_time_get(DST.viewport);
+ PROFILE_END_UPDATE(*cache_time, stime);
+#endif
+ }
+
+ DRW_stats_begin();
+
+ GPU_framebuffer_bind(DST.default_framebuffer);
+
+ /* Start Drawing */
+ DRW_state_reset();
+
+ drw_engines_draw_background();
+
+ /* WIP, single image drawn over the camera view (replace) */
+ bool do_bg_image = false;
+ if (rv3d->persp == RV3D_CAMOB) {
+ Object *cam_ob = v3d->camera;
+ if (cam_ob && cam_ob->type == OB_CAMERA) {
+ Camera *cam = cam_ob->data;
+ if (!BLI_listbase_is_empty(&cam->bg_images)) {
+ do_bg_image = true;
+ }
+ }
+ }
+
+ if (do_bg_image) {
+ ED_view3d_draw_bgpic_test(scene, depsgraph, ar, v3d, false, true);
+ }
+
+
+ DRW_draw_callbacks_pre_scene();
+ if (DST.draw_ctx.evil_C) {
+ ED_region_draw_cb_draw(DST.draw_ctx.evil_C, DST.draw_ctx.ar, REGION_DRAW_PRE_VIEW);
+ }
+
+ drw_engines_draw_scene();
+
+ DRW_draw_callbacks_post_scene();
+ if (DST.draw_ctx.evil_C) {
+ ED_region_draw_cb_draw(DST.draw_ctx.evil_C, DST.draw_ctx.ar, REGION_DRAW_POST_VIEW);
+ }
+
+ DRW_state_reset();
+
+ drw_engines_draw_text();
+
+ if (DST.draw_ctx.evil_C) {
+ /* needed so manipulator isn't obscured */
+ glDisable(GL_DEPTH_TEST);
+ DRW_draw_manipulator_3d();
+
+ DRW_draw_region_info();
+
+ /* Draw 2D after region info so we can draw on top of the camera passepartout overlay.
+ * 'DRW_draw_region_info' sets the projection in pixel-space. */
+ DRW_draw_manipulator_2d();
+ glEnable(GL_DEPTH_TEST);
+ }
+
+ DRW_stats_reset();
+
+ if (do_bg_image) {
+ ED_view3d_draw_bgpic_test(scene, depsgraph, ar, v3d, true, true);
+ }
+
+ if (G.debug_value > 20) {
+ glDisable(GL_DEPTH_TEST);
+		rcti rect; /* local coordinate visible rect inside region, to accommodate overlapping ui */
+ ED_region_visible_rect(DST.draw_ctx.ar, &rect);
+ DRW_stats_draw(&rect);
+ glEnable(GL_DEPTH_TEST);
+ }
+
+ GPU_framebuffer_restore();
+
+ DRW_state_reset();
+ drw_engines_disable();
+
+ drw_viewport_cache_resize();
+
+#ifdef DEBUG
+ /* Avoid accidental reuse. */
+ drw_state_ensure_not_reused(&DST);
+#endif
+}
+
+void DRW_draw_render_loop(
+ struct Depsgraph *depsgraph,
+ ARegion *ar, View3D *v3d, const eObjectMode object_mode)
+{
+ /* Reset before using it. */
+ drw_state_prepare_clean_for_draw(&DST);
+
+ Scene *scene = DEG_get_evaluated_scene(depsgraph);
+ RenderEngineType *engine_type = RE_engines_find(scene->view_render.engine_id);
+
+ DRW_draw_render_loop_ex(depsgraph, engine_type, ar, v3d, object_mode, NULL);
+}
+
+/* @viewport CAN be NULL, in which case we create one. */
+void DRW_draw_render_loop_offscreen(
+ struct Depsgraph *depsgraph, RenderEngineType *engine_type,
+ ARegion *ar, View3D *v3d, const eObjectMode object_mode,
+ const bool draw_background, GPUOffScreen *ofs,
+ GPUViewport *viewport)
+{
+ RegionView3D *rv3d = ar->regiondata;
+
+ /* backup */
+ void *backup_viewport = rv3d->viewport;
+ {
+ /* backup (_never_ use rv3d->viewport) */
+ if (viewport == NULL) {
+ rv3d->viewport = GPU_viewport_create_from_offscreen(ofs);
+ }
+ else {
+ rv3d->viewport = viewport;
+ }
+ }
+
+ GPU_framebuffer_restore();
+
+ /* Reset before using it. */
+ drw_state_prepare_clean_for_draw(&DST);
+ DST.options.is_image_render = true;
+ DST.options.draw_background = draw_background;
+ DRW_draw_render_loop_ex(depsgraph, engine_type, ar, v3d, object_mode, NULL);
+
+ /* restore */
+ {
+ if (viewport == NULL) {
+ /* don't free data owned by 'ofs' */
+ GPU_viewport_clear_from_offscreen(rv3d->viewport);
+ GPU_viewport_free(rv3d->viewport);
+ }
+
+ rv3d->viewport = backup_viewport;
+ }
+
+ /* we need to re-bind (annoying!) */
+ GPU_offscreen_bind(ofs, false);
+}
+
+void DRW_render_to_image(RenderEngine *engine, struct Depsgraph *depsgraph)
+{
+ Scene *scene = DEG_get_evaluated_scene(depsgraph);
+ ViewLayer *view_layer = DEG_get_evaluated_view_layer(depsgraph);
+ RenderEngineType *engine_type = engine->type;
+ DrawEngineType *draw_engine_type = engine_type->draw_engine;
+ RenderData *r = &scene->r;
+ Render *render = engine->re;
+ /* Changing Context */
+ DRW_opengl_context_enable();
+	/* IMPORTANT: We don't support immediate mode in render mode!
+ * This shall remain in effect until immediate mode supports
+ * multiple threads. */
+
+ /* Reset before using it. */
+ drw_state_prepare_clean_for_draw(&DST);
+ DST.options.is_image_render = true;
+ DST.options.is_scene_render = true;
+ DST.options.draw_background = scene->r.alphamode == R_ADDSKY;
+
+ DST.draw_ctx = (DRWContextState){
+ .scene = scene, .view_layer = view_layer,
+ .engine_type = engine_type,
+ .depsgraph = depsgraph, .object_mode = OB_MODE_OBJECT,
+ };
+ drw_context_state_init();
+
+ DST.viewport = GPU_viewport_create();
+ const int size[2] = {(r->size * r->xsch) / 100, (r->size * r->ysch) / 100};
+ GPU_viewport_size_set(DST.viewport, size);
+
+ drw_viewport_var_init();
+
+ ViewportEngineData *data = drw_viewport_engine_data_ensure(draw_engine_type);
+
+ /* set default viewport */
+ glViewport(0, 0, size[0], size[1]);
+
+ /* Main rendering. */
+ rctf view_rect;
+ rcti render_rect;
+ RE_GetViewPlane(render, &view_rect, &render_rect);
+ if (BLI_rcti_is_empty(&render_rect)) {
+ BLI_rcti_init(&render_rect, 0, size[0], 0, size[1]);
+ }
+
+ /* Init render result. */
+ RenderResult *render_result = RE_engine_begin_result(
+ engine,
+ 0,
+ 0,
+ (int)size[0],
+ (int)size[1],
+ view_layer->name,
+ /* RR_ALL_VIEWS */ NULL);
+
+ RenderLayer *render_layer = render_result->layers.first;
+ for (RenderView *render_view = render_result->views.first;
+ render_view != NULL;
+ render_view = render_view->next)
+ {
+ RE_SetActiveRenderView(render, render_view->name);
+ engine_type->draw_engine->render_to_image(data, engine, render_layer, &render_rect);
+ DST.buffer_finish_called = false;
+ }
+
+ RE_engine_end_result(engine, render_result, false, false, false);
+
+ /* Force cache to reset. */
+ drw_viewport_cache_resize();
+
+ /* TODO grease pencil */
+
+ GPU_viewport_free(DST.viewport);
+ GPU_framebuffer_restore();
+
+ /* Changing Context */
+ DRW_opengl_context_disable();
+
+#ifdef DEBUG
+ /* Avoid accidental reuse. */
+ drw_state_ensure_not_reused(&DST);
+#endif
+}
+
+void DRW_render_object_iter(
+ void *vedata, RenderEngine *engine, struct Depsgraph *depsgraph,
+ void (*callback)(void *vedata, Object *ob, RenderEngine *engine, struct Depsgraph *depsgraph))
+{
+ DEG_OBJECT_ITER_FOR_RENDER_ENGINE_BEGIN(depsgraph, ob, DRW_iterator_mode_get())
+ {
+ DST.ob_state = NULL;
+ callback(vedata, ob, engine, depsgraph);
+ }
+ DEG_OBJECT_ITER_FOR_RENDER_ENGINE_END
+}
+
+static struct DRWSelectBuffer {
+ struct GPUFrameBuffer *framebuffer;
+ struct GPUTexture *texture_depth;
+} g_select_buffer = {NULL};
+
+static void draw_select_framebuffer_setup(const rcti *rect)
+{
+ if (g_select_buffer.framebuffer == NULL) {
+ g_select_buffer.framebuffer = GPU_framebuffer_create();
+ }
+
+	/* If the size mismatches, recreate the texture. */
+ if ((g_select_buffer.texture_depth != NULL) &&
+ ((GPU_texture_width(g_select_buffer.texture_depth) != BLI_rcti_size_x(rect)) ||
+ (GPU_texture_height(g_select_buffer.texture_depth) != BLI_rcti_size_y(rect))))
+ {
+ GPU_texture_free(g_select_buffer.texture_depth);
+ g_select_buffer.texture_depth = NULL;
+ }
+
+ if (g_select_buffer.texture_depth == NULL) {
+ g_select_buffer.texture_depth = GPU_texture_create_depth(BLI_rcti_size_x(rect), BLI_rcti_size_y(rect), NULL);
+
+ GPU_framebuffer_texture_attach(g_select_buffer.framebuffer, g_select_buffer.texture_depth, 0, 0);
+
+ if (!GPU_framebuffer_check_valid(g_select_buffer.framebuffer, NULL)) {
+ printf("Error invalid selection framebuffer\n");
+ }
+ }
+}
+
+/* Must run after all instance data have been added. */
+void DRW_render_instance_buffer_finish(void)
+{
+ BLI_assert(!DST.buffer_finish_called && "DRW_render_instance_buffer_finish called twice!");
+ DST.buffer_finish_called = true;
+ DRW_instance_buffer_finish(DST.idatalist);
+}
+
+/**
+ * object mode select-loop, see: ED_view3d_draw_select_loop (legacy drawing).
+ */
+void DRW_draw_select_loop(
+ struct Depsgraph *depsgraph,
+ ARegion *ar, View3D *v3d, const eObjectMode object_mode,
+ bool UNUSED(use_obedit_skip), bool UNUSED(use_nearest), const rcti *rect,
+ DRW_SelectPassFn select_pass_fn, void *select_pass_user_data)
+{
+ Scene *scene = DEG_get_evaluated_scene(depsgraph);
+ RenderEngineType *engine_type = RE_engines_find(scene->view_render.engine_id);
+ ViewLayer *view_layer = DEG_get_evaluated_view_layer(depsgraph);
+ Object *obact = OBACT(view_layer);
+#ifndef USE_GPU_SELECT
+	UNUSED_VARS(engine_type, obact, object_mode, v3d, ar, rect, select_pass_fn, select_pass_user_data);
+#else
+ RegionView3D *rv3d = ar->regiondata;
+
+ /* Reset before using it. */
+ drw_state_prepare_clean_for_draw(&DST);
+
+ /* backup (_never_ use rv3d->viewport) */
+ void *backup_viewport = rv3d->viewport;
+ rv3d->viewport = NULL;
+
+ bool use_obedit = false;
+ int obedit_mode = 0;
+ if (object_mode & OB_MODE_EDIT) {
+ if (obact->type == OB_MBALL) {
+ use_obedit = true;
+ obedit_mode = CTX_MODE_EDIT_METABALL;
+ }
+ else if (obact->type == OB_ARMATURE) {
+ use_obedit = true;
+ obedit_mode = CTX_MODE_EDIT_ARMATURE;
+ }
+ }
+
+ struct GPUViewport *viewport = GPU_viewport_create();
+ GPU_viewport_size_set(viewport, (const int[2]){BLI_rcti_size_x(rect), BLI_rcti_size_y(rect)});
+
+ DST.viewport = viewport;
+ v3d->zbuf = true;
+
+ DST.options.is_select = true;
+
+ /* Get list of enabled engines */
+ if (use_obedit) {
+ drw_engines_enable_from_mode(obedit_mode);
+ }
+ else {
+ drw_engines_enable_basic();
+ drw_engines_enable_from_object_mode();
+ }
+
+ /* Setup viewport */
+
+ /* Instead of 'DRW_context_state_init(C, &DST.draw_ctx)', assign from args */
+ DST.draw_ctx = (DRWContextState){
+ .ar = ar, .rv3d = rv3d, .v3d = v3d,
+ .scene = scene, .view_layer = view_layer, .obact = obact,
+ .engine_type = engine_type,
+ .depsgraph = depsgraph, .object_mode = object_mode,
+ };
+ drw_context_state_init();
+ drw_viewport_var_init();
+
+ /* Update ubos */
+ DRW_globals_update();
+
+ /* Init engines */
+ drw_engines_init();
+
+ {
+ drw_engines_cache_init();
+
+ if (use_obedit) {
+ drw_engines_cache_populate(obact);
+ }
+ else {
+ DEG_OBJECT_ITER_BEGIN(
+ depsgraph, ob, DRW_iterator_mode_get(),
+ DEG_ITER_OBJECT_FLAG_LINKED_DIRECTLY |
+ DEG_ITER_OBJECT_FLAG_VISIBLE |
+ DEG_ITER_OBJECT_FLAG_DUPLI)
+ {
+ if ((ob->base_flag & BASE_SELECTABLED) != 0) {
+ DRW_select_load_id(ob->select_color);
+ drw_engines_cache_populate(ob);
+ }
+ }
+ DEG_OBJECT_ITER_END;
+ }
+
+ drw_engines_cache_finish();
+
+ DRW_render_instance_buffer_finish();
+ }
+
+ /* Setup framebuffer */
+ draw_select_framebuffer_setup(rect);
+ GPU_framebuffer_bind(g_select_buffer.framebuffer);
+ GPU_framebuffer_clear_depth(g_select_buffer.framebuffer, 1.0f);
+
+ /* Start Drawing */
+ DRW_state_reset();
+ DRW_draw_callbacks_pre_scene();
+
+ DRW_state_lock(
+ DRW_STATE_WRITE_DEPTH |
+ DRW_STATE_DEPTH_ALWAYS |
+ DRW_STATE_DEPTH_LESS |
+ DRW_STATE_DEPTH_EQUAL |
+		DRW_STATE_DEPTH_GREATER);
+
+ /* Only 1-2 passes. */
+ while (true) {
+ if (!select_pass_fn(DRW_SELECT_PASS_PRE, select_pass_user_data)) {
+ break;
+ }
+
+ drw_engines_draw_scene();
+
+ if (!select_pass_fn(DRW_SELECT_PASS_POST, select_pass_user_data)) {
+ break;
+ }
+ }
+
+ DRW_state_lock(0);
+
+ DRW_draw_callbacks_post_scene();
+
+ DRW_state_reset();
+ drw_engines_disable();
+
+#ifdef DEBUG
+ /* Avoid accidental reuse. */
+ drw_state_ensure_not_reused(&DST);
+#endif
+ GPU_framebuffer_restore();
+
+ /* Cleanup for selection state */
+ GPU_viewport_free(viewport);
+
+ /* restore */
+ rv3d->viewport = backup_viewport;
+#endif /* USE_GPU_SELECT */
+}
+
+static void draw_depth_texture_to_screen(GPUTexture *texture)
+{
+ const float w = (float)GPU_texture_width(texture);
+ const float h = (float)GPU_texture_height(texture);
+
+ Gwn_VertFormat *format = immVertexFormat();
+ unsigned int texcoord = GWN_vertformat_attr_add(format, "texCoord", GWN_COMP_F32, 2, GWN_FETCH_FLOAT);
+ unsigned int pos = GWN_vertformat_attr_add(format, "pos", GWN_COMP_F32, 2, GWN_FETCH_FLOAT);
+
+ immBindBuiltinProgram(GPU_SHADER_3D_IMAGE_DEPTH_COPY);
+
+ GPU_texture_bind(texture, 0);
+
+ immUniform1i("image", 0); /* default GL_TEXTURE0 unit */
+
+ immBegin(GWN_PRIM_TRI_STRIP, 4);
+
+ immAttrib2f(texcoord, 0.0f, 0.0f);
+ immVertex2f(pos, 0.0f, 0.0f);
+
+ immAttrib2f(texcoord, 1.0f, 0.0f);
+ immVertex2f(pos, w, 0.0f);
+
+ immAttrib2f(texcoord, 0.0f, 1.0f);
+ immVertex2f(pos, 0.0f, h);
+
+ immAttrib2f(texcoord, 1.0f, 1.0f);
+ immVertex2f(pos, w, h);
+
+ immEnd();
+
+ GPU_texture_unbind(texture);
+
+ immUnbindProgram();
+}
+
+/**
+ * object mode depth drawing loop, see: ED_view3d_draw_depth_loop (legacy drawing).
+ */
+void DRW_draw_depth_loop(
+ Depsgraph *depsgraph,
+ ARegion *ar, View3D *v3d, const eObjectMode object_mode)
+{
+ Scene *scene = DEG_get_evaluated_scene(depsgraph);
+ RenderEngineType *engine_type = RE_engines_find(scene->view_render.engine_id);
+ ViewLayer *view_layer = DEG_get_evaluated_view_layer(depsgraph);
+ RegionView3D *rv3d = ar->regiondata;
+
+ DRW_opengl_context_enable();
+
+ /* backup (_never_ use rv3d->viewport) */
+ void *backup_viewport = rv3d->viewport;
+ rv3d->viewport = NULL;
+
+ /* Reset before using it. */
+ drw_state_prepare_clean_for_draw(&DST);
+
+ struct GPUViewport *viewport = GPU_viewport_create();
+ GPU_viewport_size_set(viewport, (const int[2]){ar->winx, ar->winy});
+
+ /* Setup framebuffer */
+ draw_select_framebuffer_setup(&ar->winrct);
+ GPU_framebuffer_bind(g_select_buffer.framebuffer);
+ GPU_framebuffer_clear_depth(g_select_buffer.framebuffer, 1.0f);
+
+ bool cache_is_dirty;
+ DST.viewport = viewport;
+ v3d->zbuf = true;
+
+ DST.options.is_depth = true;
+
+ /* Get list of enabled engines */
+ {
+ drw_engines_enable_basic();
+ drw_engines_enable_from_object_mode();
+ }
+
+ /* Setup viewport */
+ cache_is_dirty = true;
+
+ /* Instead of 'DRW_context_state_init(C, &DST.draw_ctx)', assign from args */
+ DST.draw_ctx = (DRWContextState){
+ .ar = ar, .rv3d = rv3d, .v3d = v3d,
+ .scene = scene, .view_layer = view_layer, .obact = OBACT(view_layer),
+ .engine_type = engine_type,
+ .depsgraph = depsgraph, .object_mode = object_mode,
+ };
+ drw_context_state_init();
+ drw_viewport_var_init();
+
+ /* Update ubos */
+ DRW_globals_update();
+
+ /* Init engines */
+ drw_engines_init();
+
+ /* TODO : tag to refresh by the dependency graph */
+ /* ideally only refresh when objects are added/removed */
+ /* or render properties / materials change */
+ if (cache_is_dirty) {
+ drw_engines_cache_init();
+
+ DEG_OBJECT_ITER_FOR_RENDER_ENGINE_BEGIN(depsgraph, ob, DRW_iterator_mode_get())
+ {
+ drw_engines_cache_populate(ob);
+ }
+ DEG_OBJECT_ITER_FOR_RENDER_ENGINE_END;
+
+ drw_engines_cache_finish();
+
+ DRW_render_instance_buffer_finish();
+ }
+
+ /* Start Drawing */
+ DRW_state_reset();
+ DRW_draw_callbacks_pre_scene();
+ drw_engines_draw_scene();
+ DRW_draw_callbacks_post_scene();
+
+ DRW_state_reset();
+ drw_engines_disable();
+
+#ifdef DEBUG
+ /* Avoid accidental reuse. */
+ drw_state_ensure_not_reused(&DST);
+#endif
+
+ /* TODO: Reading depth for operators should be done here. */
+
+ GPU_framebuffer_restore();
+
+	/* Cleanup for the temporary viewport. */
+ GPU_viewport_free(viewport);
+
+	/* Changing context */
+ DRW_opengl_context_disable();
+
+ /* XXX Drawing the resulting buffer to the BACK_BUFFER */
+ gpuPushMatrix();
+ gpuPushProjectionMatrix();
+ wmOrtho2_region_pixelspace(ar);
+ gpuLoadIdentity();
+
+ glEnable(GL_DEPTH_TEST); /* Cannot write to depth buffer without testing */
+ glDepthFunc(GL_ALWAYS);
+ draw_depth_texture_to_screen(g_select_buffer.texture_depth);
+ glDepthFunc(GL_LEQUAL);
+
+ gpuPopMatrix();
+ gpuPopProjectionMatrix();
+
+ /* restore */
+ rv3d->viewport = backup_viewport;
+}
+
+/** \} */
+
+
+/* -------------------------------------------------------------------- */
+
+/** \name Draw Manager State (DRW_state)
+ * \{ */
+
+void DRW_state_dfdy_factors_get(float dfdyfac[2])
+{
+ GPU_get_dfdy_factors(dfdyfac);
+}
+
+/**
+ * When false, drawing doesn't output to a pixel buffer
+ * e.g. occlusion queries, or when we have already set up a context to draw in.
+ */
+bool DRW_state_is_fbo(void)
+{
+ return ((DST.default_framebuffer != NULL) || DST.options.is_image_render);
+}
+
+/**
+ * For when engines need to know if this is drawing for selection or not.
+ */
+bool DRW_state_is_select(void)
+{
+ return DST.options.is_select;
+}
+
+bool DRW_state_is_depth(void)
+{
+ return DST.options.is_depth;
+}
+
+/**
+ * Whether we are rendering for an image
+ */
+bool DRW_state_is_image_render(void)
+{
+ return DST.options.is_image_render;
+}
+
+/**
+ * Whether we are rendering only the render engine,
+ * or if we should also render the mode engines.
+ */
+bool DRW_state_is_scene_render(void)
+{
+ BLI_assert(DST.options.is_scene_render ?
+ DST.options.is_image_render : true);
+ return DST.options.is_scene_render;
+}
+
+/**
+ * Whether we are doing a simple OpenGL render.
+ */
+bool DRW_state_is_opengl_render(void)
+{
+ return DST.options.is_image_render && !DST.options.is_scene_render;
+}
+
+/**
+ * Gives you the iterator mode to use for depsgraph.
+ */
+eDepsObjectIteratorMode DRW_iterator_mode_get(void)
+{
+ return DRW_state_is_scene_render() ? DEG_ITER_OBJECT_MODE_RENDER :
+ DEG_ITER_OBJECT_MODE_VIEWPORT;
+}
+
+/**
+ * Should text draw in this mode?
+ */
+bool DRW_state_show_text(void)
+{
+ return (DST.options.is_select) == 0 &&
+ (DST.options.is_depth) == 0 &&
+ (DST.options.is_scene_render) == 0;
+}
+
+/**
+ * Should we draw support elements?
+ * (object centers, selection outline, probe data, ...)
+ */
+bool DRW_state_draw_support(void)
+{
+ View3D *v3d = DST.draw_ctx.v3d;
+ return (DRW_state_is_scene_render() == false) &&
+ (v3d != NULL) &&
+ ((v3d->flag2 & V3D_RENDER_OVERRIDE) == 0);
+}
+
+/**
+ * Whether we should render the background
+ */
+bool DRW_state_draw_background(void)
+{
+ if (DRW_state_is_image_render() == false) {
+ return true;
+ }
+ return DST.options.draw_background;
+}
+
+/** \} */
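As a hedged illustration of how these queries are typically consumed, an engine's cache population might branch on them like the sketch below; the callback name is hypothetical, only the DRW_state_* calls come from this file:

static void MY_ENGINE_cache_populate(void *vedata, Object *ob)
{
	(void)vedata;
	(void)ob;

	if (DRW_state_is_select()) {
		/* Selection pass: only add geometry that should receive a select id. */
	}
	if (DRW_state_draw_support()) {
		/* Overlays allowed: object centers, selection outline, probe data... */
	}
	if (!DRW_state_draw_background()) {
		/* Transparent film: skip the world/background shading group. */
	}
}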
+
+
+/* -------------------------------------------------------------------- */
+
+/** \name Context State (DRW_context_state)
+ * \{ */
+
+const DRWContextState *DRW_context_state_get(void)
+{
+ return &DST.draw_ctx;
+}
+
+/** \} */
+
+
+/* -------------------------------------------------------------------- */
+
+/** \name Init/Exit (DRW_engines)
+ * \{ */
+
+bool DRW_engine_render_support(DrawEngineType *draw_engine_type)
+{
+ return draw_engine_type->render_to_image;
+}
+
+void DRW_engine_register(DrawEngineType *draw_engine_type)
+{
+ BLI_addtail(&DRW_engines, draw_engine_type);
+}
+
+void DRW_engines_register(void)
+{
+#ifdef WITH_CLAY_ENGINE
+ RE_engines_register(NULL, &DRW_engine_viewport_clay_type);
+#endif
+ RE_engines_register(NULL, &DRW_engine_viewport_eevee_type);
+
+ DRW_engine_register(&draw_engine_object_type);
+ DRW_engine_register(&draw_engine_edit_armature_type);
+ DRW_engine_register(&draw_engine_edit_curve_type);
+ DRW_engine_register(&draw_engine_edit_lattice_type);
+ DRW_engine_register(&draw_engine_edit_mesh_type);
+ DRW_engine_register(&draw_engine_edit_metaball_type);
+ DRW_engine_register(&draw_engine_edit_surface_type);
+ DRW_engine_register(&draw_engine_edit_text_type);
+ DRW_engine_register(&draw_engine_paint_texture_type);
+ DRW_engine_register(&draw_engine_paint_vertex_type);
+ DRW_engine_register(&draw_engine_paint_weight_type);
+ DRW_engine_register(&draw_engine_particle_type);
+ DRW_engine_register(&draw_engine_pose_type);
+ DRW_engine_register(&draw_engine_sculpt_type);
+
+ /* setup callbacks */
+ {
+ /* BKE: mball.c */
+ extern void *BKE_mball_batch_cache_dirty_cb;
+ extern void *BKE_mball_batch_cache_free_cb;
+ /* BKE: curve.c */
+ extern void *BKE_curve_batch_cache_dirty_cb;
+ extern void *BKE_curve_batch_cache_free_cb;
+ /* BKE: mesh.c */
+ extern void *BKE_mesh_batch_cache_dirty_cb;
+ extern void *BKE_mesh_batch_cache_free_cb;
+ /* BKE: lattice.c */
+ extern void *BKE_lattice_batch_cache_dirty_cb;
+ extern void *BKE_lattice_batch_cache_free_cb;
+ /* BKE: particle.c */
+ extern void *BKE_particle_batch_cache_dirty_cb;
+ extern void *BKE_particle_batch_cache_free_cb;
+
+ BKE_mball_batch_cache_dirty_cb = DRW_mball_batch_cache_dirty;
+ BKE_mball_batch_cache_free_cb = DRW_mball_batch_cache_free;
+
+ BKE_curve_batch_cache_dirty_cb = DRW_curve_batch_cache_dirty;
+ BKE_curve_batch_cache_free_cb = DRW_curve_batch_cache_free;
+
+ BKE_mesh_batch_cache_dirty_cb = DRW_mesh_batch_cache_dirty;
+ BKE_mesh_batch_cache_free_cb = DRW_mesh_batch_cache_free;
+
+ BKE_lattice_batch_cache_dirty_cb = DRW_lattice_batch_cache_dirty;
+ BKE_lattice_batch_cache_free_cb = DRW_lattice_batch_cache_free;
+
+ BKE_particle_batch_cache_dirty_cb = DRW_particle_batch_cache_dirty;
+ BKE_particle_batch_cache_free_cb = DRW_particle_batch_cache_free;
+ }
+}
+
+extern struct Gwn_VertFormat *g_pos_format; /* draw_shgroup.c */
+extern struct GPUUniformBuffer *globals_ubo; /* draw_common.c */
+extern struct GPUTexture *globals_ramp; /* draw_common.c */
+void DRW_engines_free(void)
+{
+ DRW_opengl_context_enable();
+
+ DRW_TEXTURE_FREE_SAFE(g_select_buffer.texture_depth);
+ GPU_FRAMEBUFFER_FREE_SAFE(g_select_buffer.framebuffer);
+
+ DRW_shape_cache_free();
+ DRW_stats_free();
+ DRW_globals_free();
+
+ DrawEngineType *next;
+ for (DrawEngineType *type = DRW_engines.first; type; type = next) {
+ next = type->next;
+ BLI_remlink(&DRW_engines, type);
+
+ if (type->engine_free) {
+ type->engine_free();
+ }
+ }
+
+ DRW_UBO_FREE_SAFE(globals_ubo);
+ DRW_UBO_FREE_SAFE(view_ubo);
+ DRW_TEXTURE_FREE_SAFE(globals_ramp);
+ MEM_SAFE_FREE(g_pos_format);
+
+ MEM_SAFE_FREE(DST.RST.bound_texs);
+ MEM_SAFE_FREE(DST.RST.bound_tex_slots);
+ MEM_SAFE_FREE(DST.RST.bound_ubos);
+ MEM_SAFE_FREE(DST.RST.bound_ubo_slots);
+
+ DRW_opengl_context_disable();
+
+#ifdef WITH_CLAY_ENGINE
+ BLI_remlink(&R_engines, &DRW_engine_viewport_clay_type);
+#endif
+}
+
+/** \} */
+
+/** \name Init/Exit (DRW_opengl_ctx)
+ * \{ */
+
+void DRW_opengl_context_create(void)
+{
+ BLI_assert(DST.ogl_context == NULL); /* Ensure it's called once */
+
+ BLI_mutex_init(&DST.ogl_context_mutex);
+
+ immDeactivate();
+ /* This changes the active context. */
+ DST.ogl_context = WM_opengl_context_create();
+ /* Be sure to create the Gawain context too. */
+ DST.gwn_context = GWN_context_create();
+ immActivate();
+ /* Set default Blender OpenGL state */
+ GPU_state_init();
+ /* So we activate the window's one afterwards. */
+ wm_window_reset_drawable();
+}
+
+void DRW_opengl_context_destroy(void)
+{
+ BLI_assert(BLI_thread_is_main());
+ if (DST.ogl_context != NULL) {
+ WM_opengl_context_activate(DST.ogl_context);
+ GWN_context_active_set(DST.gwn_context);
+ GWN_context_discard(DST.gwn_context);
+ WM_opengl_context_dispose(DST.ogl_context);
+ BLI_mutex_end(&DST.ogl_context_mutex);
+ }
+}
+
+void DRW_opengl_context_enable(void)
+{
+ if (DST.ogl_context != NULL) {
+ /* IMPORTANT: We don't support immediate mode in render mode!
+ * This shall remain in effect until immediate mode supports
+ * multiple threads. */
+ BLI_mutex_lock(&DST.ogl_context_mutex);
+ if (BLI_thread_is_main()) {
+ immDeactivate();
+ }
+ WM_opengl_context_activate(DST.ogl_context);
+ GWN_context_active_set(DST.gwn_context);
+ if (BLI_thread_is_main()) {
+ immActivate();
+ BLF_batch_reset();
+ }
+ }
+}
+
+void DRW_opengl_context_disable(void)
+{
+ if (DST.ogl_context != NULL) {
+#ifdef __APPLE__
+ /* Need to flush before disabling draw context, otherwise it does not
+ * always finish drawing and the viewport can be empty or partially drawn. */
+ glFlush();
+#endif
+
+ if (BLI_thread_is_main()) {
+ wm_window_reset_drawable();
+ }
+ else {
+ WM_opengl_context_release(DST.ogl_context);
+ GWN_context_active_set(NULL);
+ }
+
+ BLI_mutex_unlock(&DST.ogl_context_mutex);
+ }
+}
+
+/** \} */
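The enable/disable pair above is the protocol every non-viewport user of the shared context is expected to follow; a minimal sketch of the pairing (the work in the middle is a placeholder):

DRW_opengl_context_enable();   /* locks ogl_context_mutex, activates DST.ogl_context / DST.gwn_context */
/* ... create or free GPU resources, issue offscreen draws ... */
DRW_opengl_context_disable();  /* restores the window drawable on the main thread and unlocks the mutex */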
diff --git a/source/blender/draw/intern/draw_manager.h b/source/blender/draw/intern/draw_manager.h
new file mode 100644
index 00000000000..dd7e84f67d4
--- /dev/null
+++ b/source/blender/draw/intern/draw_manager.h
@@ -0,0 +1,358 @@
+/*
+ * Copyright 2016, Blender Foundation.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * Contributor(s): Blender Institute
+ *
+ */
+
+/** \file draw_manager.h
+ * \ingroup draw
+ */
+
+/* Private functions / structs of the draw manager */
+
+#ifndef __DRAW_MANAGER_H__
+#define __DRAW_MANAGER_H__
+
+#include "DRW_engine.h"
+#include "DRW_render.h"
+
+#include "BLI_linklist.h"
+#include "BLI_threads.h"
+
+#include "GPU_batch.h"
+#include "GPU_framebuffer.h"
+#include "GPU_shader.h"
+#include "GPU_uniformbuffer.h"
+#include "GPU_viewport.h"
+
+#include "draw_instance_data.h"
+
+/* Use draw manager to call GPU_select, see: DRW_draw_select_loop */
+#define USE_GPU_SELECT
+
+/* ------------ Profiling --------------- */
+
+#define USE_PROFILE
+
+#ifdef USE_PROFILE
+# include "PIL_time.h"
+
+# define PROFILE_TIMER_FALLOFF 0.04
+
+# define PROFILE_START(time_start) \
+ double time_start = PIL_check_seconds_timer();
+
+# define PROFILE_END_ACCUM(time_accum, time_start) { \
+ time_accum += (PIL_check_seconds_timer() - time_start) * 1e3; \
+} ((void)0)
+
+/* exp average */
+# define PROFILE_END_UPDATE(time_update, time_start) { \
+ double _time_delta = (PIL_check_seconds_timer() - time_start) * 1e3; \
+ time_update = (time_update * (1.0 - PROFILE_TIMER_FALLOFF)) + \
+ (_time_delta * PROFILE_TIMER_FALLOFF); \
+} ((void)0)
+
+#else /* USE_PROFILE */
+
+# define PROFILE_START(time_start) ((void)0)
+# define PROFILE_END_ACCUM(time_accum, time_start) ((void)0)
+# define PROFILE_END_UPDATE(time_update, time_start) ((void)0)
+
+#endif /* USE_PROFILE */
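For reference, PROFILE_END_UPDATE keeps an exponential moving average (new = old * (1 - k) + delta * k, with k = PROFILE_TIMER_FALLOFF), while PROFILE_END_ACCUM simply accumulates milliseconds. A hedged usage sketch, with a caller-owned accumulator:

static double cache_time_accum = 0.0;  /* hypothetical accumulator owned by the caller */

static void profiled_cache_step(void)
{
	PROFILE_START(time_start);
	/* ... work to be measured ... */
	PROFILE_END_ACCUM(cache_time_accum, time_start);
}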
+
+/* ------------ Data Structure --------------- */
+/**
+ * Data structure containing all drawcalls organized by passes and materials.
+ * DRWPass > DRWShadingGroup > DRWCall > DRWCallState
+ *                           > DRWUniform
+ **/
+
+/* Used by DRWCallState.flag */
+enum {
+ DRW_CALL_CULLED = (1 << 0),
+ DRW_CALL_NEGSCALE = (1 << 1),
+};
+
+/* Used by DRWCallState.matflag */
+enum {
+ DRW_CALL_MODELINVERSE = (1 << 0),
+ DRW_CALL_MODELVIEW = (1 << 1),
+ DRW_CALL_MODELVIEWINVERSE = (1 << 2),
+ DRW_CALL_MODELVIEWPROJECTION = (1 << 3),
+ DRW_CALL_NORMALVIEW = (1 << 4),
+ DRW_CALL_NORMALWORLD = (1 << 5),
+ DRW_CALL_ORCOTEXFAC = (1 << 6),
+ DRW_CALL_EYEVEC = (1 << 7),
+};
+
+typedef struct DRWCallState {
+ unsigned char flag;
+ unsigned char cache_id; /* Compared with DST.state_cache_id to see if matrices are still valid. */
+ uint16_t matflag; /* Which matrices to compute. */
+ /* Culling: Using Bounding Sphere for now for faster culling.
+ * Not ideal for planes. */
+ BoundSphere bsphere;
+ /* Matrices */
+ float model[4][4];
+ float modelinverse[4][4];
+ float modelview[4][4];
+ float modelviewinverse[4][4];
+ float modelviewprojection[4][4];
+ float normalview[3][3];
+ float normalworld[3][3]; /* Not view dependent */
+ float orcotexfac[2][3]; /* Not view dependent */
+ float eyevec[3];
+} DRWCallState;
+
+typedef enum {
+ DRW_CALL_SINGLE, /* A single batch */
+ DRW_CALL_INSTANCES, /* Draw instances without any instancing attribs. */
+ DRW_CALL_GENERATE, /* Uses a callback to draw with any number of batches. */
+} DRWCallType;
+
+typedef struct DRWCall {
+ struct DRWCall *next;
+ DRWCallState *state;
+
+ union {
+ struct { /* type == DRW_CALL_SINGLE */
+ Gwn_Batch *geometry;
+ } single;
+ struct { /* type == DRW_CALL_INSTANCES */
+ Gwn_Batch *geometry;
+ /* Count can be adjusted between redraws. If needed, we can add a fixed count. */
+ unsigned int *count;
+ } instances;
+ struct { /* type == DRW_CALL_GENERATE */
+ DRWCallGenerateFn *geometry_fn;
+ void *user_data;
+ } generate;
+ };
+
+ DRWCallType type;
+#ifdef USE_GPU_SELECT
+ int select_id;
+#endif
+} DRWCall;
+
+/* Used by DRWUniform.type */
+typedef enum {
+ DRW_UNIFORM_BOOL,
+ DRW_UNIFORM_SHORT_TO_INT,
+ DRW_UNIFORM_SHORT_TO_FLOAT,
+ DRW_UNIFORM_INT,
+ DRW_UNIFORM_FLOAT,
+ DRW_UNIFORM_TEXTURE,
+ DRW_UNIFORM_TEXTURE_PERSIST,
+ DRW_UNIFORM_TEXTURE_REF,
+ DRW_UNIFORM_BLOCK,
+ DRW_UNIFORM_BLOCK_PERSIST
+} DRWUniformType;
+
+struct DRWUniform {
+ DRWUniform *next; /* single-linked list */
+ const void *value;
+ int location;
+ char type; /* DRWUniformType */
+ char length; /* cannot be more than 16 */
+ char arraysize; /* cannot be more than 16 either */
+};
+
+typedef enum {
+ DRW_SHG_NORMAL,
+ DRW_SHG_POINT_BATCH,
+ DRW_SHG_LINE_BATCH,
+ DRW_SHG_TRIANGLE_BATCH,
+ DRW_SHG_INSTANCE,
+ DRW_SHG_INSTANCE_EXTERNAL,
+} DRWShadingGroupType;
+
+struct DRWShadingGroup {
+ DRWShadingGroup *next;
+
+ GPUShader *shader; /* Shader to bind */
+ DRWUniform *uniforms; /* Uniforms pointers */
+
+ /* Watch this! Can be nasty for debugging. */
+ union {
+ struct { /* DRW_SHG_NORMAL */
+ DRWCall *first, *last; /* Linked list of DRWCall or DRWCallDynamic depending on type */
+ } calls;
+ struct { /* DRW_SHG_***_BATCH */
+ struct Gwn_Batch *batch_geom; /* Result of call batching */
+ struct Gwn_VertBuf *batch_vbo;
+ unsigned int primitive_count;
+ };
+ struct { /* DRW_SHG_INSTANCE[_EXTERNAL] */
+ struct Gwn_Batch *instance_geom;
+ struct Gwn_VertBuf *instance_vbo;
+ unsigned int instance_count;
+ float instance_orcofac[2][3]; /* TODO find a better place. */
+ };
+ };
+
+ DRWState state_extra; /* State changes for this batch only (or'd with the pass's state) */
+ DRWState state_extra_disable; /* State changes for this batch only (and'd with the pass's state) */
+ unsigned int stencil_mask; /* Stencil mask to use for stencil test / write operations */
+ DRWShadingGroupType type;
+
+ /* Builtin matrices locations */
+ int model;
+ int modelinverse;
+ int modelview;
+ int modelviewinverse;
+ int modelviewprojection;
+ int normalview;
+ int normalworld;
+ int orcotexfac;
+ int eye;
+ uint16_t matflag; /* Matrices needed, same as DRWCall.flag */
+
+#ifndef NDEBUG
+ char attribs_count;
+#endif
+
+#ifdef USE_GPU_SELECT
+ DRWInstanceData *inst_selectid;
+ DRWPass *pass_parent; /* backlink to pass we're in */
+ int override_selectid; /* Override for single object instances. */
+#endif
+};
+
+#define MAX_PASS_NAME 32
+
+struct DRWPass {
+ /* Linked list */
+ struct {
+ DRWShadingGroup *first;
+ DRWShadingGroup *last;
+ } shgroups;
+
+ DRWState state;
+ char name[MAX_PASS_NAME];
+};
+
+typedef struct ViewUboStorage {
+ DRWMatrixState matstate;
+ float viewcamtexcofac[4];
+ float clipplanes[2][4];
+} ViewUboStorage;
+
+/* ------------- DRAW MANAGER ------------ */
+
+#define MAX_CLIP_PLANES 6 /* GL_MAX_CLIP_PLANES is at least 6 */
+
+typedef struct DRWManager {
+ /* TODO clean up this struct a bit */
+ /* Cache generation */
+ ViewportMemoryPool *vmempool;
+ DRWInstanceDataList *idatalist;
+ DRWInstanceData *common_instance_data[MAX_INSTANCE_DATA_SIZE];
+ /* State of the object being evaluated if already allocated. */
+ DRWCallState *ob_state;
+ unsigned char state_cache_id; /* Could be larger but 254 view changes is already a lot! */
+
+ /* Rendering state */
+ GPUShader *shader;
+
+ /* Managed by `DRW_state_set`, `DRW_state_reset` */
+ DRWState state;
+ DRWState state_lock;
+ unsigned int stencil_mask;
+
+ /* Per viewport */
+ GPUViewport *viewport;
+ struct GPUFrameBuffer *default_framebuffer;
+ float size[2];
+ float inv_size[2];
+ float screenvecs[2][3];
+ float pixsize;
+
+ GLenum backface, frontface;
+
+ struct {
+ unsigned int is_select : 1;
+ unsigned int is_depth : 1;
+ unsigned int is_image_render : 1;
+ unsigned int is_scene_render : 1;
+ unsigned int draw_background : 1;
+ } options;
+
+ /* Current rendering context */
+ DRWContextState draw_ctx;
+
+ /* Convenience pointer to text_store owned by the viewport */
+ struct DRWTextStore **text_store_p;
+
+ ListBase enabled_engines; /* RenderEngineType */
+
+ bool buffer_finish_called; /* Avoid bad usage of DRW_render_instance_buffer_finish */
+
+ /* View dependent uniforms. */
+ DRWMatrixState original_mat; /* Original rv3d matrices. */
+ int override_mat; /* Bitflag of which matrices are overridden. */
+ int num_clip_planes; /* Number of active clip planes. */
+ bool dirty_mat;
+
+ /* keep in sync with viewBlock */
+ ViewUboStorage view_data;
+
+ struct {
+ float frustum_planes[6][4];
+ BoundSphere frustum_bsphere;
+ bool updated;
+ } clipping;
+
+#ifdef USE_GPU_SELECT
+ unsigned int select_id;
+#endif
+
+ /* ---------- Nothing after this point is cleared after use ----------- */
+
+ /* ogl_context serves as the offset for clearing only
+ * the top portion of the struct so DO NOT MOVE IT! */
+ void *ogl_context; /* Unique ghost context used by the draw manager. */
+ Gwn_Context *gwn_context;
+ ThreadMutex ogl_context_mutex; /* Mutex to lock the draw manager and avoid concurrent context usage. */
+
+ /** GPU Resource State: Memory storage between drawing. */
+ struct {
+ GPUTexture **bound_texs;
+ char *bound_tex_slots;
+ int bind_tex_inc;
+ GPUUniformBuffer **bound_ubos;
+ char *bound_ubo_slots;
+ int bind_ubo_inc;
+ } RST;
+} DRWManager;
+
+extern DRWManager DST; /* TODO : get rid of this and allow multithreaded rendering */
+
+/* --------------- FUNCTIONS ------------- */
+
+void drw_texture_set_parameters(GPUTexture *tex, DRWTextureFlag flags);
+void drw_texture_get_format(
+ DRWTextureFormat format, bool is_framebuffer,
+ GPUTextureFormat *r_data_type, int *r_channels, bool *r_is_depth);
+
+void *drw_viewport_engine_data_ensure(void *engine_type);
+
+void drw_state_set(DRWState state);
+
+#endif /* __DRAW_MANAGER_H__ */
diff --git a/source/blender/draw/intern/draw_manager_data.c b/source/blender/draw/intern/draw_manager_data.c
new file mode 100644
index 00000000000..ae7854b436c
--- /dev/null
+++ b/source/blender/draw/intern/draw_manager_data.c
@@ -0,0 +1,935 @@
+/*
+ * Copyright 2016, Blender Foundation.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * Contributor(s): Blender Institute
+ *
+ */
+
+/** \file blender/draw/intern/draw_manager_data.c
+ * \ingroup draw
+ */
+
+#include "draw_manager.h"
+
+#include "BKE_curve.h"
+#include "BKE_global.h"
+#include "BKE_mesh.h"
+#include "BKE_object.h"
+#include "BKE_paint.h"
+#include "BKE_pbvh.h"
+
+#include "DNA_curve_types.h"
+#include "DNA_mesh_types.h"
+#include "DNA_meta_types.h"
+
+#include "BLI_link_utils.h"
+#include "BLI_mempool.h"
+
+#include "intern/gpu_codegen.h"
+
+struct Gwn_VertFormat *g_pos_format = NULL;
+
+extern struct GPUUniformBuffer *view_ubo; /* draw_manager_exec.c */
+
+/* -------------------------------------------------------------------- */
+
+/** \name Uniform Buffer Object (DRW_uniformbuffer)
+ * \{ */
+
+GPUUniformBuffer *DRW_uniformbuffer_create(int size, const void *data)
+{
+ return GPU_uniformbuffer_create(size, data, NULL);
+}
+
+void DRW_uniformbuffer_update(GPUUniformBuffer *ubo, const void *data)
+{
+ GPU_uniformbuffer_update(ubo, data);
+}
+
+void DRW_uniformbuffer_free(GPUUniformBuffer *ubo)
+{
+ GPU_uniformbuffer_free(ubo);
+}
+
+/** \} */
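A minimal sketch of the wrapper's intended usage; the struct is a hypothetical engine-side example and must match the std140 layout of the shader's uniform block:

typedef struct MyEngineUboData {
	float color[4];
	float params[4];
} MyEngineUboData;  /* hypothetical, std140-friendly */

static MyEngineUboData ubo_data;
static GPUUniformBuffer *ubo = NULL;

static void my_engine_ubo_init(void)
{
	ubo = DRW_uniformbuffer_create(sizeof(MyEngineUboData), &ubo_data);
}

static void my_engine_ubo_sync(void)
{
	/* Call whenever ubo_data has been changed on the CPU side. */
	DRW_uniformbuffer_update(ubo, &ubo_data);
}

static void my_engine_ubo_free(void)
{
	DRW_uniformbuffer_free(ubo);
	ubo = NULL;
}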
+
+/* -------------------------------------------------------------------- */
+
+/** \name Uniforms (DRW_shgroup_uniform)
+ * \{ */
+
+static void drw_shgroup_uniform_create_ex(DRWShadingGroup *shgroup, int loc,
+ DRWUniformType type, const void *value, int length, int arraysize)
+{
+ DRWUniform *uni = BLI_mempool_alloc(DST.vmempool->uniforms);
+ uni->location = loc;
+ uni->type = type;
+ uni->value = value;
+ uni->length = length;
+ uni->arraysize = arraysize;
+
+ BLI_LINKS_PREPEND(shgroup->uniforms, uni);
+}
+
+static void drw_shgroup_builtin_uniform(
+ DRWShadingGroup *shgroup, int builtin, const void *value, int length, int arraysize)
+{
+ int loc = GPU_shader_get_builtin_uniform(shgroup->shader, builtin);
+
+ if (loc != -1) {
+ drw_shgroup_uniform_create_ex(shgroup, loc, DRW_UNIFORM_FLOAT, value, length, arraysize);
+ }
+}
+
+static void drw_shgroup_uniform(DRWShadingGroup *shgroup, const char *name,
+ DRWUniformType type, const void *value, int length, int arraysize)
+{
+ int location;
+ if (ELEM(type, DRW_UNIFORM_BLOCK, DRW_UNIFORM_BLOCK_PERSIST)) {
+ location = GPU_shader_get_uniform_block(shgroup->shader, name);
+ }
+ else {
+ location = GPU_shader_get_uniform(shgroup->shader, name);
+ }
+
+ if (location == -1) {
+ if (G.debug & G_DEBUG)
+ fprintf(stderr, "Uniform '%s' not found!\n", name);
+ /* Nice to enable eventually, for now eevee uses uniforms that might not exist. */
+ // BLI_assert(0);
+ return;
+ }
+
+ BLI_assert(arraysize > 0 && arraysize <= 16);
+ BLI_assert(length >= 0 && length <= 16);
+
+ drw_shgroup_uniform_create_ex(shgroup, location, type, value, length, arraysize);
+}
+
+void DRW_shgroup_uniform_texture(DRWShadingGroup *shgroup, const char *name, const GPUTexture *tex)
+{
+ BLI_assert(tex != NULL);
+ drw_shgroup_uniform(shgroup, name, DRW_UNIFORM_TEXTURE, tex, 0, 1);
+}
+
+/* Same as DRW_shgroup_uniform_texture but is guaranteed to be bound if the shader does not change between shgroups. */
+void DRW_shgroup_uniform_texture_persistent(DRWShadingGroup *shgroup, const char *name, const GPUTexture *tex)
+{
+ BLI_assert(tex != NULL);
+ drw_shgroup_uniform(shgroup, name, DRW_UNIFORM_TEXTURE_PERSIST, tex, 0, 1);
+}
+
+void DRW_shgroup_uniform_block(DRWShadingGroup *shgroup, const char *name, const GPUUniformBuffer *ubo)
+{
+ BLI_assert(ubo != NULL);
+ drw_shgroup_uniform(shgroup, name, DRW_UNIFORM_BLOCK, ubo, 0, 1);
+}
+
+/* Same as DRW_shgroup_uniform_block but is guaranteed to be bound if the shader does not change between shgroups. */
+void DRW_shgroup_uniform_block_persistent(DRWShadingGroup *shgroup, const char *name, const GPUUniformBuffer *ubo)
+{
+ BLI_assert(ubo != NULL);
+ drw_shgroup_uniform(shgroup, name, DRW_UNIFORM_BLOCK_PERSIST, ubo, 0, 1);
+}
+
+void DRW_shgroup_uniform_texture_ref(DRWShadingGroup *shgroup, const char *name, GPUTexture **tex)
+{
+ drw_shgroup_uniform(shgroup, name, DRW_UNIFORM_TEXTURE_REF, tex, 0, 1);
+}
+
+void DRW_shgroup_uniform_bool(DRWShadingGroup *shgroup, const char *name, const int *value, int arraysize)
+{
+ drw_shgroup_uniform(shgroup, name, DRW_UNIFORM_BOOL, value, 1, arraysize);
+}
+
+void DRW_shgroup_uniform_float(DRWShadingGroup *shgroup, const char *name, const float *value, int arraysize)
+{
+ drw_shgroup_uniform(shgroup, name, DRW_UNIFORM_FLOAT, value, 1, arraysize);
+}
+
+void DRW_shgroup_uniform_vec2(DRWShadingGroup *shgroup, const char *name, const float *value, int arraysize)
+{
+ drw_shgroup_uniform(shgroup, name, DRW_UNIFORM_FLOAT, value, 2, arraysize);
+}
+
+void DRW_shgroup_uniform_vec3(DRWShadingGroup *shgroup, const char *name, const float *value, int arraysize)
+{
+ drw_shgroup_uniform(shgroup, name, DRW_UNIFORM_FLOAT, value, 3, arraysize);
+}
+
+void DRW_shgroup_uniform_vec4(DRWShadingGroup *shgroup, const char *name, const float *value, int arraysize)
+{
+ drw_shgroup_uniform(shgroup, name, DRW_UNIFORM_FLOAT, value, 4, arraysize);
+}
+
+void DRW_shgroup_uniform_short_to_int(DRWShadingGroup *shgroup, const char *name, const short *value, int arraysize)
+{
+ drw_shgroup_uniform(shgroup, name, DRW_UNIFORM_SHORT_TO_INT, value, 1, arraysize);
+}
+
+void DRW_shgroup_uniform_short_to_float(DRWShadingGroup *shgroup, const char *name, const short *value, int arraysize)
+{
+ drw_shgroup_uniform(shgroup, name, DRW_UNIFORM_SHORT_TO_FLOAT, value, 1, arraysize);
+}
+
+void DRW_shgroup_uniform_int(DRWShadingGroup *shgroup, const char *name, const int *value, int arraysize)
+{
+ drw_shgroup_uniform(shgroup, name, DRW_UNIFORM_INT, value, 1, arraysize);
+}
+
+void DRW_shgroup_uniform_ivec2(DRWShadingGroup *shgroup, const char *name, const int *value, int arraysize)
+{
+ drw_shgroup_uniform(shgroup, name, DRW_UNIFORM_INT, value, 2, arraysize);
+}
+
+void DRW_shgroup_uniform_ivec3(DRWShadingGroup *shgroup, const char *name, const int *value, int arraysize)
+{
+ drw_shgroup_uniform(shgroup, name, DRW_UNIFORM_INT, value, 3, arraysize);
+}
+
+void DRW_shgroup_uniform_mat3(DRWShadingGroup *shgroup, const char *name, const float *value)
+{
+ drw_shgroup_uniform(shgroup, name, DRW_UNIFORM_FLOAT, value, 9, 1);
+}
+
+void DRW_shgroup_uniform_mat4(DRWShadingGroup *shgroup, const char *name, const float *value)
+{
+ drw_shgroup_uniform(shgroup, name, DRW_UNIFORM_FLOAT, value, 16, 1);
+}
+
+/** \} */
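One property worth calling out: drw_shgroup_uniform_create_ex stores only the pointer it is given (uni->value), so the data must stay valid until the pass is drawn. Engines therefore point uniforms at long-lived storage, as in this hedged sketch (names are illustrative; `shgroup` is assumed to come from DRW_shgroup_create):

static void my_engine_setup_uniforms(DRWShadingGroup *shgroup)
{
	/* Statics (or viewport-owned storage) so the pointers stay valid until drawing.
	 * Passing the address of a stack local here would read garbage at draw time. */
	static float wire_color[4] = {1.0f, 0.5f, 0.0f, 1.0f};
	static int use_lighting = 1;

	DRW_shgroup_uniform_vec4(shgroup, "color", wire_color, 1);
	DRW_shgroup_uniform_bool(shgroup, "useLighting", &use_lighting, 1);
}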
+
+/* -------------------------------------------------------------------- */
+
+/** \name Draw Call (DRW_calls)
+ * \{ */
+
+static void drw_call_calc_orco(Object *ob, float (*r_orcofacs)[3])
+{
+ ID *ob_data = (ob) ? ob->data : NULL;
+ float *texcoloc = NULL;
+ float *texcosize = NULL;
+ if (ob_data != NULL) {
+ switch (GS(ob_data->name)) {
+ case ID_ME:
+ BKE_mesh_texspace_get_reference((Mesh *)ob_data, NULL, &texcoloc, NULL, &texcosize);
+ break;
+ case ID_CU:
+ {
+ Curve *cu = (Curve *)ob_data;
+ if (cu->bb == NULL || (cu->bb->flag & BOUNDBOX_DIRTY)) {
+ BKE_curve_texspace_calc(cu);
+ }
+ texcoloc = cu->loc;
+ texcosize = cu->size;
+ break;
+ }
+ case ID_MB:
+ {
+ MetaBall *mb = (MetaBall *)ob_data;
+ texcoloc = mb->loc;
+ texcosize = mb->size;
+ break;
+ }
+ default:
+ break;
+ }
+ }
+
+ if ((texcoloc != NULL) && (texcosize != NULL)) {
+ mul_v3_v3fl(r_orcofacs[1], texcosize, 2.0f);
+ invert_v3(r_orcofacs[1]);
+ sub_v3_v3v3(r_orcofacs[0], texcoloc, texcosize);
+ negate_v3(r_orcofacs[0]);
+ mul_v3_v3(r_orcofacs[0], r_orcofacs[1]); /* result in a nice MADD in the shader */
+ }
+ else {
+ copy_v3_fl(r_orcofacs[0], 0.0f);
+ copy_v3_fl(r_orcofacs[1], 1.0f);
+ }
+}
+
+static DRWCallState *drw_call_state_create(DRWShadingGroup *shgroup, float (*obmat)[4], Object *ob)
+{
+ DRWCallState *state = BLI_mempool_alloc(DST.vmempool->states);
+ state->flag = 0;
+ state->cache_id = 0;
+ state->matflag = shgroup->matflag;
+
+ /* Matrices */
+ if (obmat != NULL) {
+ copy_m4_m4(state->model, obmat);
+
+ if (is_negative_m4(state->model)) {
+ state->flag |= DRW_CALL_NEGSCALE;
+ }
+ }
+ else {
+ unit_m4(state->model);
+ }
+
+ if (ob != NULL) {
+ float corner[3];
+ BoundBox *bbox = BKE_object_boundbox_get(ob);
+ /* Get BoundSphere center and radius from the BoundBox. */
+ mid_v3_v3v3(state->bsphere.center, bbox->vec[0], bbox->vec[6]);
+ mul_v3_m4v3(corner, obmat, bbox->vec[0]);
+ mul_m4_v3(obmat, state->bsphere.center);
+ state->bsphere.radius = len_v3v3(state->bsphere.center, corner);
+ }
+ else {
+ /* Bypass test. */
+ state->bsphere.radius = -1.0f;
+ }
+
+ /* Orco factors: We compute this at creation to avoid having to save the ob_data. */
+ if ((state->matflag & DRW_CALL_ORCOTEXFAC) != 0) {
+ drw_call_calc_orco(ob, state->orcotexfac);
+ state->matflag &= ~DRW_CALL_ORCOTEXFAC;
+ }
+
+ return state;
+}
+
+static DRWCallState *drw_call_state_object(DRWShadingGroup *shgroup, float (*obmat)[4], Object *ob)
+{
+ if (DST.ob_state == NULL) {
+ DST.ob_state = drw_call_state_create(shgroup, obmat, ob);
+ }
+ else {
+ /* If the DRWCallState is reused, add necessary matrices. */
+ DST.ob_state->matflag |= shgroup->matflag;
+ }
+
+ return DST.ob_state;
+}
+
+void DRW_shgroup_call_add(DRWShadingGroup *shgroup, Gwn_Batch *geom, float (*obmat)[4])
+{
+ BLI_assert(geom != NULL);
+ BLI_assert(shgroup->type == DRW_SHG_NORMAL);
+
+ DRWCall *call = BLI_mempool_alloc(DST.vmempool->calls);
+ call->state = drw_call_state_create(shgroup, obmat, NULL);
+ call->type = DRW_CALL_SINGLE;
+ call->single.geometry = geom;
+#ifdef USE_GPU_SELECT
+ call->select_id = DST.select_id;
+#endif
+
+ BLI_LINKS_APPEND(&shgroup->calls, call);
+}
+
+/* These calls can be culled and are optimized for redraw */
+void DRW_shgroup_call_object_add(DRWShadingGroup *shgroup, Gwn_Batch *geom, Object *ob)
+{
+ BLI_assert(geom != NULL);
+ BLI_assert(shgroup->type == DRW_SHG_NORMAL);
+
+ DRWCall *call = BLI_mempool_alloc(DST.vmempool->calls);
+ call->state = drw_call_state_object(shgroup, ob->obmat, ob);
+ call->type = DRW_CALL_SINGLE;
+ call->single.geometry = geom;
+#ifdef USE_GPU_SELECT
+ call->select_id = DST.select_id;
+#endif
+
+ BLI_LINKS_APPEND(&shgroup->calls, call);
+}
+
+void DRW_shgroup_call_instances_add(DRWShadingGroup *shgroup, Gwn_Batch *geom, float (*obmat)[4], unsigned int *count)
+{
+ BLI_assert(geom != NULL);
+ BLI_assert(shgroup->type == DRW_SHG_NORMAL);
+
+ DRWCall *call = BLI_mempool_alloc(DST.vmempool->calls);
+ call->state = drw_call_state_create(shgroup, obmat, NULL);
+ call->type = DRW_CALL_INSTANCES;
+ call->instances.geometry = geom;
+ call->instances.count = count;
+#ifdef USE_GPU_SELECT
+ call->select_id = DST.select_id;
+#endif
+
+ BLI_LINKS_APPEND(&shgroup->calls, call);
+}
+
+/* These calls can be culled and are optimized for redraw */
+void DRW_shgroup_call_object_instances_add(DRWShadingGroup *shgroup, Gwn_Batch *geom, Object *ob, unsigned int *count)
+{
+ BLI_assert(geom != NULL);
+ BLI_assert(shgroup->type == DRW_SHG_NORMAL);
+
+ DRWCall *call = BLI_mempool_alloc(DST.vmempool->calls);
+ call->state = drw_call_state_object(shgroup, ob->obmat, ob);
+ call->type = DRW_CALL_INSTANCES;
+ call->instances.geometry = geom;
+ call->instances.count = count;
+#ifdef USE_GPU_SELECT
+ call->select_id = DST.select_id;
+#endif
+
+ BLI_LINKS_APPEND(&shgroup->calls, call);
+}
+
+void DRW_shgroup_call_generate_add(
+ DRWShadingGroup *shgroup,
+ DRWCallGenerateFn *geometry_fn, void *user_data,
+ float (*obmat)[4])
+{
+ BLI_assert(geometry_fn != NULL);
+ BLI_assert(shgroup->type == DRW_SHG_NORMAL);
+
+ DRWCall *call = BLI_mempool_alloc(DST.vmempool->calls);
+ call->state = drw_call_state_create(shgroup, obmat, NULL);
+ call->type = DRW_CALL_GENERATE;
+ call->generate.geometry_fn = geometry_fn;
+ call->generate.user_data = user_data;
+#ifdef USE_GPU_SELECT
+ call->select_id = DST.select_id;
+#endif
+
+ BLI_LINKS_APPEND(&shgroup->calls, call);
+}
+
+static void sculpt_draw_cb(
+ DRWShadingGroup *shgroup,
+ void (*draw_fn)(DRWShadingGroup *shgroup, Gwn_Batch *geom),
+ void *user_data)
+{
+ Object *ob = user_data;
+ PBVH *pbvh = ob->sculpt->pbvh;
+
+ if (pbvh) {
+ BKE_pbvh_draw_cb(
+ pbvh, NULL, NULL, false,
+ (void (*)(void *, Gwn_Batch *))draw_fn, shgroup);
+ }
+}
+
+void DRW_shgroup_call_sculpt_add(DRWShadingGroup *shgroup, Object *ob, float (*obmat)[4])
+{
+ DRW_shgroup_call_generate_add(shgroup, sculpt_draw_cb, ob, obmat);
+}
+
+void DRW_shgroup_call_dynamic_add_array(DRWShadingGroup *shgroup, const void *attr[], unsigned int attr_len)
+{
+#ifdef USE_GPU_SELECT
+ if (G.f & G_PICKSEL) {
+ if (shgroup->inst_selectid == NULL) {
+ shgroup->inst_selectid = DRW_instance_data_request(DST.idatalist, 1, 128);
+ }
+
+ int *select_id = DRW_instance_data_next(shgroup->inst_selectid);
+ *select_id = DST.select_id;
+ }
+#endif
+
+ BLI_assert(attr_len == shgroup->attribs_count);
+ UNUSED_VARS_NDEBUG(attr_len);
+
+ for (int i = 0; i < attr_len; ++i) {
+ if (shgroup->instance_count == shgroup->instance_vbo->vertex_ct) {
+ GWN_vertbuf_data_resize(shgroup->instance_vbo, shgroup->instance_count + 32);
+ }
+ GWN_vertbuf_attr_set(shgroup->instance_vbo, i, shgroup->instance_count, attr[i]);
+ }
+
+ shgroup->instance_count += 1;
+}
+
+/** \} */
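A hedged sketch of how these calls end up being used from an engine's cache_populate step; DRW_cache_object_surface_get is assumed to come from draw_cache.h and is not defined in this file:

static void my_engine_populate_object(DRWShadingGroup *shgroup, Object *ob)
{
	struct Gwn_Batch *geom = DRW_cache_object_surface_get(ob);
	if (geom != NULL) {
		/* Culled, redraw-optimized variant; reuses DST.ob_state for the object. */
		DRW_shgroup_call_object_add(shgroup, geom, ob);
	}
}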
+
+/* -------------------------------------------------------------------- */
+
+/** \name Shading Groups (DRW_shgroup)
+ * \{ */
+
+static void drw_shgroup_init(DRWShadingGroup *shgroup, GPUShader *shader)
+{
+ shgroup->instance_geom = NULL;
+ shgroup->instance_vbo = NULL;
+ shgroup->instance_count = 0;
+ shgroup->uniforms = NULL;
+#ifdef USE_GPU_SELECT
+ shgroup->inst_selectid = NULL;
+ shgroup->override_selectid = -1;
+#endif
+#ifndef NDEBUG
+ shgroup->attribs_count = 0;
+#endif
+
+ int view_ubo_location = GPU_shader_get_uniform_block(shader, "viewBlock");
+
+ if (view_ubo_location != -1) {
+ drw_shgroup_uniform_create_ex(shgroup, view_ubo_location, DRW_UNIFORM_BLOCK_PERSIST, view_ubo, 0, 1);
+ }
+ else {
+ /* Only here to support builtin shaders. This should not be used by engines. */
+ drw_shgroup_builtin_uniform(shgroup, GWN_UNIFORM_VIEW, DST.view_data.matstate.mat[DRW_MAT_VIEW], 16, 1);
+ drw_shgroup_builtin_uniform(shgroup, GWN_UNIFORM_VIEW_INV, DST.view_data.matstate.mat[DRW_MAT_VIEWINV], 16, 1);
+ drw_shgroup_builtin_uniform(shgroup, GWN_UNIFORM_VIEWPROJECTION, DST.view_data.matstate.mat[DRW_MAT_PERS], 16, 1);
+ drw_shgroup_builtin_uniform(shgroup, GWN_UNIFORM_VIEWPROJECTION_INV, DST.view_data.matstate.mat[DRW_MAT_PERSINV], 16, 1);
+ drw_shgroup_builtin_uniform(shgroup, GWN_UNIFORM_PROJECTION, DST.view_data.matstate.mat[DRW_MAT_WIN], 16, 1);
+ drw_shgroup_builtin_uniform(shgroup, GWN_UNIFORM_PROJECTION_INV, DST.view_data.matstate.mat[DRW_MAT_WININV], 16, 1);
+ drw_shgroup_builtin_uniform(shgroup, GWN_UNIFORM_CAMERATEXCO, DST.view_data.viewcamtexcofac, 3, 2);
+ }
+
+ shgroup->model = GPU_shader_get_builtin_uniform(shader, GWN_UNIFORM_MODEL);
+ shgroup->modelinverse = GPU_shader_get_builtin_uniform(shader, GWN_UNIFORM_MODEL_INV);
+ shgroup->modelview = GPU_shader_get_builtin_uniform(shader, GWN_UNIFORM_MODELVIEW);
+ shgroup->modelviewinverse = GPU_shader_get_builtin_uniform(shader, GWN_UNIFORM_MODELVIEW_INV);
+ shgroup->modelviewprojection = GPU_shader_get_builtin_uniform(shader, GWN_UNIFORM_MVP);
+ shgroup->normalview = GPU_shader_get_builtin_uniform(shader, GWN_UNIFORM_NORMAL);
+ shgroup->normalworld = GPU_shader_get_builtin_uniform(shader, GWN_UNIFORM_WORLDNORMAL);
+ shgroup->orcotexfac = GPU_shader_get_builtin_uniform(shader, GWN_UNIFORM_ORCO);
+ shgroup->eye = GPU_shader_get_builtin_uniform(shader, GWN_UNIFORM_EYE);
+
+ shgroup->matflag = 0;
+ if (shgroup->modelinverse > -1)
+ shgroup->matflag |= DRW_CALL_MODELINVERSE;
+ if (shgroup->modelview > -1)
+ shgroup->matflag |= DRW_CALL_MODELVIEW;
+ if (shgroup->modelviewinverse > -1)
+ shgroup->matflag |= DRW_CALL_MODELVIEWINVERSE;
+ if (shgroup->modelviewprojection > -1)
+ shgroup->matflag |= DRW_CALL_MODELVIEWPROJECTION;
+ if (shgroup->normalview > -1)
+ shgroup->matflag |= DRW_CALL_NORMALVIEW;
+ if (shgroup->normalworld > -1)
+ shgroup->matflag |= DRW_CALL_NORMALWORLD;
+ if (shgroup->orcotexfac > -1)
+ shgroup->matflag |= DRW_CALL_ORCOTEXFAC;
+ if (shgroup->eye > -1)
+ shgroup->matflag |= DRW_CALL_EYEVEC;
+}
+
+static void drw_shgroup_instance_init(
+ DRWShadingGroup *shgroup, GPUShader *shader, Gwn_Batch *batch, Gwn_VertFormat *format)
+{
+ BLI_assert(shgroup->type == DRW_SHG_INSTANCE);
+ BLI_assert(batch != NULL);
+ BLI_assert(format != NULL);
+
+ drw_shgroup_init(shgroup, shader);
+
+ shgroup->instance_geom = batch;
+#ifndef NDEBUG
+ shgroup->attribs_count = format->attrib_ct;
+#endif
+
+ DRW_instancing_buffer_request(DST.idatalist, format, batch, shgroup,
+ &shgroup->instance_geom, &shgroup->instance_vbo);
+}
+
+static void drw_shgroup_batching_init(
+ DRWShadingGroup *shgroup, GPUShader *shader, Gwn_VertFormat *format)
+{
+ drw_shgroup_init(shgroup, shader);
+
+#ifndef NDEBUG
+ shgroup->attribs_count = (format != NULL) ? format->attrib_ct : 0;
+#endif
+ BLI_assert(format != NULL);
+
+ Gwn_PrimType type;
+ switch (shgroup->type) {
+ case DRW_SHG_POINT_BATCH: type = GWN_PRIM_POINTS; break;
+ case DRW_SHG_LINE_BATCH: type = GWN_PRIM_LINES; break;
+ case DRW_SHG_TRIANGLE_BATCH: type = GWN_PRIM_TRIS; break;
+ default: type = GWN_PRIM_NONE; BLI_assert(0); break;
+ }
+
+ DRW_batching_buffer_request(DST.idatalist, format, type, shgroup,
+ &shgroup->batch_geom, &shgroup->batch_vbo);
+}
+
+static DRWShadingGroup *drw_shgroup_create_ex(struct GPUShader *shader, DRWPass *pass)
+{
+ DRWShadingGroup *shgroup = BLI_mempool_alloc(DST.vmempool->shgroups);
+
+ BLI_LINKS_APPEND(&pass->shgroups, shgroup);
+
+ shgroup->type = DRW_SHG_NORMAL;
+ shgroup->shader = shader;
+ shgroup->state_extra = 0;
+ shgroup->state_extra_disable = ~0x0;
+ shgroup->stencil_mask = 0;
+ shgroup->calls.first = NULL;
+ shgroup->calls.last = NULL;
+#if 0 /* All the same in the union! */
+ shgroup->batch_geom = NULL;
+ shgroup->batch_vbo = NULL;
+
+ shgroup->instance_geom = NULL;
+ shgroup->instance_vbo = NULL;
+#endif
+
+#ifdef USE_GPU_SELECT
+ shgroup->pass_parent = pass;
+#endif
+
+ return shgroup;
+}
+
+static DRWShadingGroup *drw_shgroup_material_create_ex(GPUPass *gpupass, DRWPass *pass)
+{
+ if (!gpupass) {
+ /* Shader compilation error */
+ return NULL;
+ }
+
+ DRWShadingGroup *grp = drw_shgroup_create_ex(GPU_pass_shader(gpupass), pass);
+ return grp;
+}
+
+static DRWShadingGroup *drw_shgroup_material_inputs(DRWShadingGroup *grp, struct GPUMaterial *material)
+{
+ /* TODO : Ideally we should not convert. But since the whole codegen
+ * is relying on GPUPass we keep it as is for now. */
+
+ ListBase *inputs = GPU_material_get_inputs(material);
+
+ /* Converting dynamic GPUInput to DRWUniform */
+ for (GPUInput *input = inputs->first; input; input = input->next) {
+ /* Textures */
+ if (input->ima) {
+ double time = 0.0; /* TODO make time variable */
+ GPUTexture *tex = GPU_texture_from_blender(
+ input->ima, input->iuser, input->textarget, input->image_isdata, time, 1);
+
+ if (input->bindtex) {
+ DRW_shgroup_uniform_texture(grp, input->shadername, tex);
+ }
+ }
+ /* Color Ramps */
+ else if (input->tex) {
+ DRW_shgroup_uniform_texture(grp, input->shadername, input->tex);
+ }
+ /* Floats */
+ else {
+ switch (input->type) {
+ case GPU_FLOAT:
+ case GPU_VEC2:
+ case GPU_VEC3:
+ case GPU_VEC4:
+ /* Should already be in the material ubo. */
+ break;
+ case GPU_MAT3:
+ DRW_shgroup_uniform_mat3(grp, input->shadername, (float *)input->dynamicvec);
+ break;
+ case GPU_MAT4:
+ DRW_shgroup_uniform_mat4(grp, input->shadername, (float *)input->dynamicvec);
+ break;
+ default:
+ break;
+ }
+ }
+ }
+
+ GPUUniformBuffer *ubo = GPU_material_get_uniform_buffer(material);
+ if (ubo != NULL) {
+ DRW_shgroup_uniform_block(grp, GPU_UBO_BLOCK_NAME, ubo);
+ }
+
+ return grp;
+}
+
+Gwn_VertFormat *DRW_shgroup_instance_format_array(const DRWInstanceAttribFormat attribs[], int arraysize)
+{
+ Gwn_VertFormat *format = MEM_callocN(sizeof(Gwn_VertFormat), "Gwn_VertFormat");
+
+ for (int i = 0; i < arraysize; ++i) {
+ GWN_vertformat_attr_add(format, attribs[i].name,
+ (attribs[i].type == DRW_ATTRIB_INT) ? GWN_COMP_I32 : GWN_COMP_F32,
+ attribs[i].components,
+ (attribs[i].type == DRW_ATTRIB_INT) ? GWN_FETCH_INT : GWN_FETCH_FLOAT);
+ }
+ return format;
+}
+
+DRWShadingGroup *DRW_shgroup_material_create(
+ struct GPUMaterial *material, DRWPass *pass)
+{
+ GPUPass *gpupass = GPU_material_get_pass(material);
+ DRWShadingGroup *shgroup = drw_shgroup_material_create_ex(gpupass, pass);
+
+ if (shgroup) {
+ drw_shgroup_init(shgroup, GPU_pass_shader(gpupass));
+ drw_shgroup_material_inputs(shgroup, material);
+ }
+
+ return shgroup;
+}
+
+DRWShadingGroup *DRW_shgroup_material_instance_create(
+ struct GPUMaterial *material, DRWPass *pass, Gwn_Batch *geom, Object *ob, Gwn_VertFormat *format)
+{
+ GPUPass *gpupass = GPU_material_get_pass(material);
+ DRWShadingGroup *shgroup = drw_shgroup_material_create_ex(gpupass, pass);
+
+ if (shgroup) {
+ shgroup->type = DRW_SHG_INSTANCE;
+ shgroup->instance_geom = geom;
+ drw_call_calc_orco(ob, shgroup->instance_orcofac);
+ drw_shgroup_instance_init(shgroup, GPU_pass_shader(gpupass), geom, format);
+ drw_shgroup_material_inputs(shgroup, material);
+ }
+
+ return shgroup;
+}
+
+DRWShadingGroup *DRW_shgroup_material_empty_tri_batch_create(
+ struct GPUMaterial *material, DRWPass *pass, int tri_count)
+{
+#ifdef USE_GPU_SELECT
+ BLI_assert((G.f & G_PICKSEL) == 0);
+#endif
+ GPUPass *gpupass = GPU_material_get_pass(material);
+ DRWShadingGroup *shgroup = drw_shgroup_material_create_ex(gpupass, pass);
+
+ if (shgroup) {
+ /* Calling drw_shgroup_init will cause it to call GWN_draw_primitive(). */
+ drw_shgroup_init(shgroup, GPU_pass_shader(gpupass));
+ shgroup->type = DRW_SHG_TRIANGLE_BATCH;
+ shgroup->instance_count = tri_count * 3;
+ drw_shgroup_material_inputs(shgroup, material);
+ }
+
+ return shgroup;
+}
+
+DRWShadingGroup *DRW_shgroup_create(struct GPUShader *shader, DRWPass *pass)
+{
+ DRWShadingGroup *shgroup = drw_shgroup_create_ex(shader, pass);
+ drw_shgroup_init(shgroup, shader);
+ return shgroup;
+}
+
+DRWShadingGroup *DRW_shgroup_instance_create(
+ struct GPUShader *shader, DRWPass *pass, Gwn_Batch *geom, Gwn_VertFormat *format)
+{
+ DRWShadingGroup *shgroup = drw_shgroup_create_ex(shader, pass);
+ shgroup->type = DRW_SHG_INSTANCE;
+ shgroup->instance_geom = geom;
+ drw_call_calc_orco(NULL, shgroup->instance_orcofac);
+ drw_shgroup_instance_init(shgroup, shader, geom, format);
+
+ return shgroup;
+}
+
+DRWShadingGroup *DRW_shgroup_point_batch_create(struct GPUShader *shader, DRWPass *pass)
+{
+ DRW_shgroup_instance_format(g_pos_format, {{"pos", DRW_ATTRIB_FLOAT, 3}});
+
+ DRWShadingGroup *shgroup = drw_shgroup_create_ex(shader, pass);
+ shgroup->type = DRW_SHG_POINT_BATCH;
+
+ drw_shgroup_batching_init(shgroup, shader, g_pos_format);
+
+ return shgroup;
+}
+
+DRWShadingGroup *DRW_shgroup_line_batch_create(struct GPUShader *shader, DRWPass *pass)
+{
+ DRW_shgroup_instance_format(g_pos_format, {{"pos", DRW_ATTRIB_FLOAT, 3}});
+
+ DRWShadingGroup *shgroup = drw_shgroup_create_ex(shader, pass);
+ shgroup->type = DRW_SHG_LINE_BATCH;
+
+ drw_shgroup_batching_init(shgroup, shader, g_pos_format);
+
+ return shgroup;
+}
+
+/* Very special batch. Use this if you position
+ * your vertices with the vertex shader
+ * and don't need any VBO attribs. */
+DRWShadingGroup *DRW_shgroup_empty_tri_batch_create(struct GPUShader *shader, DRWPass *pass, int tri_count)
+{
+#ifdef USE_GPU_SELECT
+ BLI_assert((G.f & G_PICKSEL) == 0);
+#endif
+ DRWShadingGroup *shgroup = drw_shgroup_create_ex(shader, pass);
+
+ /* Calling drw_shgroup_init will cause it to call GWN_draw_primitive(). */
+ drw_shgroup_init(shgroup, shader);
+
+ shgroup->type = DRW_SHG_TRIANGLE_BATCH;
+ shgroup->instance_count = tri_count * 3;
+
+ return shgroup;
+}
+
+/* Specify an external batch instead of adding each attrib one by one. */
+void DRW_shgroup_instance_batch(DRWShadingGroup *shgroup, struct Gwn_Batch *batch)
+{
+ BLI_assert(shgroup->type == DRW_SHG_INSTANCE);
+ BLI_assert(shgroup->instance_count == 0);
+ /* You cannot use external instancing batch without a dummy format. */
+ BLI_assert(shgroup->attribs_count != 0);
+
+ shgroup->type = DRW_SHG_INSTANCE_EXTERNAL;
+ drw_call_calc_orco(NULL, shgroup->instance_orcofac);
+ /* PERF: This destroys the VAO cache, so better check if it's really necessary. */
+ /* Note: This WILL break if batch->verts[0] is destroyed and reallocated
+ * at the same address. Bindings/VAOs would remain obsolete. */
+ //if (shgroup->instancing_geom->inst != batch->verts[0])
+ GWN_batch_instbuf_set(shgroup->instance_geom, batch->verts[0], false);
+
+#ifdef USE_GPU_SELECT
+ shgroup->override_selectid = DST.select_id;
+#endif
+}
+
+unsigned int DRW_shgroup_get_instance_count(const DRWShadingGroup *shgroup)
+{
+ return shgroup->instance_count;
+}
+
+/**
+ * State is added to #Pass.state while drawing.
+ * Use to temporarily enable draw options.
+ */
+void DRW_shgroup_state_enable(DRWShadingGroup *shgroup, DRWState state)
+{
+ shgroup->state_extra |= state;
+}
+
+void DRW_shgroup_state_disable(DRWShadingGroup *shgroup, DRWState state)
+{
+ shgroup->state_extra_disable &= ~state;
+}
+
+void DRW_shgroup_stencil_mask(DRWShadingGroup *shgroup, unsigned int mask)
+{
+ BLI_assert(mask <= 255);
+ shgroup->stencil_mask = mask;
+}
+
+/** \} */
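To illustrate the instancing path, a hedged sketch combining DRW_shgroup_instance_format (the macro used the same way earlier in this file for g_pos_format) with the dynamic per-instance attribute API; the attribute layout mirrors the "InstanceModelMatrix" convention of the common engine code, and all names here are illustrative:

static struct Gwn_VertFormat *g_my_instance_format = NULL;

static DRWShadingGroup *my_engine_instance_group(struct GPUShader *sh, DRWPass *pass, Gwn_Batch *geom)
{
	/* Format is created only once, on first use. */
	DRW_shgroup_instance_format(g_my_instance_format, {
		{"color",               DRW_ATTRIB_FLOAT, 4},
		{"InstanceModelMatrix", DRW_ATTRIB_FLOAT, 16},
	});
	return DRW_shgroup_instance_create(sh, pass, geom, g_my_instance_format);
}

static void my_engine_add_instance(DRWShadingGroup *grp, Object *ob, const float color[4])
{
	/* One set of attribs per instance, in the order declared in the format. */
	DRW_shgroup_call_dynamic_add_array(grp, (const void *[]){color, ob->obmat}, 2);
}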
+
+/* -------------------------------------------------------------------- */
+
+/** \name Passes (DRW_pass)
+ * \{ */
+
+DRWPass *DRW_pass_create(const char *name, DRWState state)
+{
+ DRWPass *pass = BLI_mempool_alloc(DST.vmempool->passes);
+ pass->state = state;
+ if (G.debug_value > 20) {
+ BLI_strncpy(pass->name, name, MAX_PASS_NAME);
+ }
+
+ pass->shgroups.first = NULL;
+ pass->shgroups.last = NULL;
+
+ return pass;
+}
+
+void DRW_pass_state_set(DRWPass *pass, DRWState state)
+{
+ pass->state = state;
+}
+
+void DRW_pass_free(DRWPass *pass)
+{
+ pass->shgroups.first = NULL;
+ pass->shgroups.last = NULL;
+}
+
+void DRW_pass_foreach_shgroup(DRWPass *pass, void (*callback)(void *userData, DRWShadingGroup *shgrp), void *userData)
+{
+ for (DRWShadingGroup *shgroup = pass->shgroups.first; shgroup; shgroup = shgroup->next) {
+ callback(userData, shgroup);
+ }
+}
+
+typedef struct ZSortData {
+ float *axis;
+ float *origin;
+} ZSortData;
+
+static int pass_shgroup_dist_sort(void *thunk, const void *a, const void *b)
+{
+ const ZSortData *zsortdata = (ZSortData *)thunk;
+ const DRWShadingGroup *shgrp_a = (const DRWShadingGroup *)a;
+ const DRWShadingGroup *shgrp_b = (const DRWShadingGroup *)b;
+
+ const DRWCall *call_a = (DRWCall *)shgrp_a->calls.first;
+ const DRWCall *call_b = (DRWCall *)shgrp_b->calls.first;
+
+ if (call_a == NULL) return -1;
+ if (call_b == NULL) return -1;
+
+ float tmp[3];
+ sub_v3_v3v3(tmp, zsortdata->origin, call_a->state->model[3]);
+ const float a_sq = dot_v3v3(zsortdata->axis, tmp);
+ sub_v3_v3v3(tmp, zsortdata->origin, call_b->state->model[3]);
+ const float b_sq = dot_v3v3(zsortdata->axis, tmp);
+
+ if (a_sq < b_sq) return 1;
+ else if (a_sq > b_sq) return -1;
+ else {
+ /* If there is a depth prepass put it before */
+ if ((shgrp_a->state_extra & DRW_STATE_WRITE_DEPTH) != 0) {
+ return -1;
+ }
+ else if ((shgrp_b->state_extra & DRW_STATE_WRITE_DEPTH) != 0) {
+ return 1;
+ }
+ else return 0;
+ }
+}
+
+/* ------------------ Shading group sorting --------------------- */
+
+#define SORT_IMPL_LINKTYPE DRWShadingGroup
+
+#define SORT_IMPL_USE_THUNK
+#define SORT_IMPL_FUNC shgroup_sort_fn_r
+#include "../../blenlib/intern/list_sort_impl.h"
+#undef SORT_IMPL_FUNC
+#undef SORT_IMPL_USE_THUNK
+
+#undef SORT_IMPL_LINKTYPE
+
+/**
+ * Sort Shading groups by decreasing Z of their first draw call.
+ * This is useful for order-dependent effects such as transparency.
+ **/
+void DRW_pass_sort_shgroup_z(DRWPass *pass)
+{
+ float (*viewinv)[4];
+ viewinv = DST.view_data.matstate.mat[DRW_MAT_VIEWINV];
+
+ ZSortData zsortdata = {viewinv[2], viewinv[3]};
+
+ if (pass->shgroups.first && pass->shgroups.first->next) {
+ pass->shgroups.first = shgroup_sort_fn_r(pass->shgroups.first, pass_shgroup_dist_sort, &zsortdata);
+
+ /* Find the new last element. */
+ DRWShadingGroup *last = pass->shgroups.first;
+ while ((last = last->next)) {
+ /* Do nothing */
+ }
+ pass->shgroups.last = last;
+ }
+}
+
+/** \} */
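Putting the pieces of this file together, a hedged sketch condensing pass creation, shading group setup and the optional depth sort into one function; the shader, state flags and names are illustrative:

static DRWPass *my_engine_transparent_pass(struct GPUShader *shader, Gwn_Batch *geom, Object *ob)
{
	DRWPass *pass = DRW_pass_create(
	        "My Transparent Pass",
	        DRW_STATE_WRITE_COLOR | DRW_STATE_DEPTH_LESS | DRW_STATE_BLEND);

	DRWShadingGroup *grp = DRW_shgroup_create(shader, pass);
	DRW_shgroup_call_object_add(grp, geom, ob);

	/* Depth-sort the transparent shgroups before drawing. */
	DRW_pass_sort_shgroup_z(pass);
	return pass;
}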
diff --git a/source/blender/draw/intern/draw_manager_exec.c b/source/blender/draw/intern/draw_manager_exec.c
new file mode 100644
index 00000000000..e69a1026815
--- /dev/null
+++ b/source/blender/draw/intern/draw_manager_exec.c
@@ -0,0 +1,1170 @@
+/*
+ * Copyright 2016, Blender Foundation.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * Contributor(s): Blender Institute
+ *
+ */
+
+/** \file blender/draw/intern/draw_manager_exec.c
+ * \ingroup draw
+ */
+
+#include "draw_manager.h"
+
+#include "BLI_mempool.h"
+
+#include "BIF_glutil.h"
+
+#include "BKE_global.h"
+#include "BKE_object.h"
+
+#include "GPU_draw.h"
+#include "GPU_extensions.h"
+
+#ifdef USE_GPU_SELECT
+# include "ED_view3d.h"
+# include "ED_armature.h"
+# include "GPU_select.h"
+#endif
+
+#ifdef USE_GPU_SELECT
+void DRW_select_load_id(unsigned int id)
+{
+ BLI_assert(G.f & G_PICKSEL);
+ DST.select_id = id;
+}
+#endif
+
+struct GPUUniformBuffer *view_ubo;
+
+/* -------------------------------------------------------------------- */
+
+/** \name Draw State (DRW_state)
+ * \{ */
+
+void drw_state_set(DRWState state)
+{
+ if (DST.state == state) {
+ return;
+ }
+
+#define CHANGED_TO(f) \
+ ((DST.state_lock & (f)) ? 0 : \
+ (((DST.state & (f)) ? \
+ ((state & (f)) ? 0 : -1) : \
+ ((state & (f)) ? 1 : 0))))
+
+#define CHANGED_ANY(f) \
+ (((DST.state & (f)) != (state & (f))) && \
+ ((DST.state_lock & (f)) == 0))
+
+#define CHANGED_ANY_STORE_VAR(f, enabled) \
+ (((DST.state & (f)) != (enabled = (state & (f)))) && \
+ (((DST.state_lock & (f)) == 0)))
+
+ /* Depth Write */
+ {
+ int test;
+ if ((test = CHANGED_TO(DRW_STATE_WRITE_DEPTH))) {
+ if (test == 1) {
+ glDepthMask(GL_TRUE);
+ }
+ else {
+ glDepthMask(GL_FALSE);
+ }
+ }
+ }
+
+ /* Color Write */
+ {
+ int test;
+ if ((test = CHANGED_TO(DRW_STATE_WRITE_COLOR))) {
+ if (test == 1) {
+ glColorMask(GL_TRUE, GL_TRUE, GL_TRUE, GL_TRUE);
+ }
+ else {
+ glColorMask(GL_FALSE, GL_FALSE, GL_FALSE, GL_FALSE);
+ }
+ }
+ }
+
+ /* Cull */
+ {
+ DRWState test;
+ if (CHANGED_ANY_STORE_VAR(
+ DRW_STATE_CULL_BACK | DRW_STATE_CULL_FRONT,
+ test))
+ {
+ if (test) {
+ glEnable(GL_CULL_FACE);
+
+ if ((state & DRW_STATE_CULL_BACK) != 0) {
+ glCullFace(GL_BACK);
+ }
+ else if ((state & DRW_STATE_CULL_FRONT) != 0) {
+ glCullFace(GL_FRONT);
+ }
+ else {
+ BLI_assert(0);
+ }
+ }
+ else {
+ glDisable(GL_CULL_FACE);
+ }
+ }
+ }
+
+ /* Depth Test */
+ {
+ DRWState test;
+ if (CHANGED_ANY_STORE_VAR(
+ DRW_STATE_DEPTH_LESS | DRW_STATE_DEPTH_EQUAL | DRW_STATE_DEPTH_GREATER | DRW_STATE_DEPTH_ALWAYS,
+ test))
+ {
+ if (test) {
+ glEnable(GL_DEPTH_TEST);
+
+ if (state & DRW_STATE_DEPTH_LESS) {
+ glDepthFunc(GL_LEQUAL);
+ }
+ else if (state & DRW_STATE_DEPTH_EQUAL) {
+ glDepthFunc(GL_EQUAL);
+ }
+ else if (state & DRW_STATE_DEPTH_GREATER) {
+ glDepthFunc(GL_GREATER);
+ }
+ else if (state & DRW_STATE_DEPTH_ALWAYS) {
+ glDepthFunc(GL_ALWAYS);
+ }
+ else {
+ BLI_assert(0);
+ }
+ }
+ else {
+ glDisable(GL_DEPTH_TEST);
+ }
+ }
+ }
+
+ /* Wire Width */
+ {
+ if (CHANGED_ANY(DRW_STATE_WIRE)) {
+ if ((state & DRW_STATE_WIRE) != 0) {
+ glLineWidth(1.0f);
+ }
+ else {
+ /* do nothing */
+ }
+ }
+ }
+
+ /* Points Size */
+ {
+ int test;
+ if ((test = CHANGED_TO(DRW_STATE_POINT))) {
+ if (test == 1) {
+ GPU_enable_program_point_size();
+ glPointSize(5.0f);
+ }
+ else {
+ GPU_disable_program_point_size();
+ }
+ }
+ }
+
+ /* Blending (all buffer) */
+ {
+ int test;
+ if (CHANGED_ANY_STORE_VAR(
+ DRW_STATE_BLEND | DRW_STATE_ADDITIVE | DRW_STATE_MULTIPLY | DRW_STATE_TRANSMISSION |
+ DRW_STATE_ADDITIVE_FULL,
+ test))
+ {
+ if (test) {
+ glEnable(GL_BLEND);
+
+ if ((state & DRW_STATE_BLEND) != 0) {
+ glBlendFuncSeparate(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA, /* RGB */
+ GL_ONE, GL_ONE_MINUS_SRC_ALPHA); /* Alpha */
+ }
+ else if ((state & DRW_STATE_MULTIPLY) != 0) {
+ glBlendFunc(GL_DST_COLOR, GL_ZERO);
+ }
+ else if ((state & DRW_STATE_TRANSMISSION) != 0) {
+ glBlendFunc(GL_ONE, GL_SRC_ALPHA);
+ }
+ else if ((state & DRW_STATE_ADDITIVE) != 0) {
+ /* Do not let alpha accumulate but premult the source RGB by it. */
+ glBlendFuncSeparate(GL_SRC_ALPHA, GL_ONE, /* RGB */
+ GL_ZERO, GL_ONE); /* Alpha */
+ }
+ else if ((state & DRW_STATE_ADDITIVE_FULL) != 0) {
+ /* Let alpha accumulate. */
+ glBlendFunc(GL_ONE, GL_ONE);
+ }
+ else {
+ BLI_assert(0);
+ }
+ }
+ else {
+ glDisable(GL_BLEND);
+ }
+ }
+ }
+
+ /* Clip Planes */
+ {
+ int test;
+ if ((test = CHANGED_TO(DRW_STATE_CLIP_PLANES))) {
+ if (test == 1) {
+ for (int i = 0; i < DST.num_clip_planes; ++i) {
+ glEnable(GL_CLIP_DISTANCE0 + i);
+ }
+ }
+ else {
+ for (int i = 0; i < MAX_CLIP_PLANES; ++i) {
+ glDisable(GL_CLIP_DISTANCE0 + i);
+ }
+ }
+ }
+ }
+
+ /* Line Stipple */
+ {
+ int test;
+ if (CHANGED_ANY_STORE_VAR(
+ DRW_STATE_STIPPLE_2 | DRW_STATE_STIPPLE_3 | DRW_STATE_STIPPLE_4,
+ test))
+ {
+ if (test) {
+ if ((state & DRW_STATE_STIPPLE_2) != 0) {
+ setlinestyle(2);
+ }
+ else if ((state & DRW_STATE_STIPPLE_3) != 0) {
+ setlinestyle(3);
+ }
+ else if ((state & DRW_STATE_STIPPLE_4) != 0) {
+ setlinestyle(4);
+ }
+ else {
+ BLI_assert(0);
+ }
+ }
+ else {
+ setlinestyle(0);
+ }
+ }
+ }
+
+ /* Stencil */
+ {
+ DRWState test;
+ if (CHANGED_ANY_STORE_VAR(
+ DRW_STATE_WRITE_STENCIL |
+ DRW_STATE_STENCIL_EQUAL,
+ test))
+ {
+ if (test) {
+ glEnable(GL_STENCIL_TEST);
+
+ /* Stencil Write */
+ if ((state & DRW_STATE_WRITE_STENCIL) != 0) {
+ glStencilMask(0xFF);
+ glStencilOp(GL_KEEP, GL_KEEP, GL_REPLACE);
+ }
+ /* Stencil Test */
+ else if ((state & DRW_STATE_STENCIL_EQUAL) != 0) {
+ glStencilMask(0x00); /* disable write */
+ DST.stencil_mask = 0;
+ }
+ else {
+ BLI_assert(0);
+ }
+ }
+ else {
+ /* disable write & test */
+ DST.stencil_mask = 0;
+ glStencilMask(0x00);
+ glStencilFunc(GL_ALWAYS, 1, 0xFF);
+ glDisable(GL_STENCIL_TEST);
+ }
+ }
+ }
+
+#undef CHANGED_TO
+#undef CHANGED_ANY
+#undef CHANGED_ANY_STORE_VAR
+
+ DST.state = state;
+}
+
+static void drw_stencil_set(unsigned int mask)
+{
+ if (DST.stencil_mask != mask) {
+ /* Stencil Write */
+ if ((DST.state & DRW_STATE_WRITE_STENCIL) != 0) {
+ glStencilFunc(GL_ALWAYS, mask, 0xFF);
+ DST.stencil_mask = mask;
+ }
+ /* Stencil Test */
+ else if ((DST.state & DRW_STATE_STENCIL_EQUAL) != 0) {
+ glStencilFunc(GL_EQUAL, mask, 0xFF);
+ DST.stencil_mask = mask;
+ }
+ }
+}
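A hedged sketch of the two halves of this mechanism as seen from an engine: a pass carrying DRW_STATE_WRITE_STENCIL tags pixels with the group's mask, and a later pass carrying DRW_STATE_STENCIL_EQUAL only draws where the mask matches (pass and shader names are illustrative):

static void my_engine_stencil_passes(struct GPUShader *sh, DRWPass **r_mask_pass, DRWPass **r_masked_pass)
{
	*r_mask_pass = DRW_pass_create(
	        "Stencil Write", DRW_STATE_WRITE_COLOR | DRW_STATE_WRITE_STENCIL);
	DRWShadingGroup *grp_write = DRW_shgroup_create(sh, *r_mask_pass);
	DRW_shgroup_stencil_mask(grp_write, 0x01);

	*r_masked_pass = DRW_pass_create(
	        "Stencil Test", DRW_STATE_WRITE_COLOR | DRW_STATE_STENCIL_EQUAL);
	DRWShadingGroup *grp_test = DRW_shgroup_create(sh, *r_masked_pass);
	DRW_shgroup_stencil_mask(grp_test, 0x01);
}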
+
+/* Reset state so it does not interfere with other UI drawcalls. */
+void DRW_state_reset_ex(DRWState state)
+{
+ DST.state = ~state;
+ drw_state_set(state);
+}
+
+/**
+ * Use with care, intended so selection code can override passes depth settings,
+ * which is important for selection to work properly.
+ *
+ * Should be set in main draw loop, cleared afterwards
+ */
+void DRW_state_lock(DRWState state)
+{
+ DST.state_lock = state;
+}
+
+void DRW_state_reset(void)
+{
+ /* Reset blending function */
+ glBlendFuncSeparate(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA, GL_ONE, GL_ONE_MINUS_SRC_ALPHA);
+
+ DRW_state_reset_ex(DRW_STATE_DEFAULT);
+}
+
+/* NOTE : Make sure to reset after use! */
+void DRW_state_invert_facing(void)
+{
+ SWAP(GLenum, DST.backface, DST.frontface);
+ glFrontFace(DST.frontface);
+}
+
+/**
+ * This only works if DRWPasses have been tagged with DRW_STATE_CLIP_PLANES,
+ * and if the shaders have support for it (see usage of gl_ClipDistance).
+ * Be sure to call DRW_state_clip_planes_reset() after you finish drawing.
+ **/
+void DRW_state_clip_planes_count_set(unsigned int plane_ct)
+{
+ BLI_assert(plane_ct <= MAX_CLIP_PLANES);
+ DST.num_clip_planes = plane_ct;
+}
+
+void DRW_state_clip_planes_reset(void)
+{
+ DST.num_clip_planes = 0;
+}
+
+/** \} */
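The clip plane protocol spelled out in the comments above, condensed into a hedged sketch; the passes must have been created with DRW_STATE_CLIP_PLANES and the shaders must write gl_ClipDistance:

DRW_state_clip_planes_count_set(1);  /* before drawing the tagged passes */
/* ... draw the passes created with DRW_STATE_CLIP_PLANES ... */
DRW_state_clip_planes_reset();       /* mandatory once finished */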
+
+/* -------------------------------------------------------------------- */
+
+/** \name Clipping (DRW_clipping)
+ * \{ */
+
+/* Extract the 8 corners (world space).
+ * Although less accurate, this solution can be simplified as follows:
+ *
+ * BKE_boundbox_init_from_minmax(&bbox, (const float[3]){-1.0f, -1.0f, -1.0f}, (const float[3]){1.0f, 1.0f, 1.0f});
+ * for (int i = 0; i < 8; i++) {mul_project_m4_v3(viewprojinv, bbox.vec[i]);}
+ */
+static void draw_frustum_boundbox_calc(const float (*projmat)[4], const float (*viewinv)[4], BoundBox *r_bbox)
+{
+ float screenvecs[3][3], loc[3], near, far, w_half, h_half;
+ bool is_persp = projmat[3][3] == 0.0f;
+ copy_m3_m4(screenvecs, viewinv);
+ copy_v3_v3(loc, viewinv[3]);
+
+ /* Get the minimum and maximum clipping plane distances,
+ * and half the width and height of the near plane rectangle. */
+ if (is_persp) {
+ near = projmat[3][2] / (projmat[2][2] - 1.0f);
+ far = projmat[3][2] / (projmat[2][2] + 1.0f);
+ w_half = near / projmat[0][0];
+ h_half = near / projmat[1][1];
+ }
+ else {
+ near = (projmat[3][2] + 1.0f) / projmat[2][2];
+ far = (projmat[3][2] - 1.0f) / projmat[2][2];
+ w_half = 1.0f / projmat[0][0];
+ h_half = 1.0f / projmat[1][1];
+ }
+
+ /* With vectors aligned to the screen, reconstruct
+ * the near plane from the dimensions obtained earlier. */
+ float mid[3], hor[3], ver[3];
+ mul_v3_v3fl(hor, screenvecs[0], w_half);
+ mul_v3_v3fl(ver, screenvecs[1], h_half);
+ madd_v3_v3v3fl(mid, loc, screenvecs[2], -near);
+
+ /* The case below is for non-symmetric frustum. */
+ if (is_persp) {
+ madd_v3_v3fl(mid, hor, projmat[2][0]);
+ madd_v3_v3fl(mid, ver, projmat[2][1]);
+ }
+ else {
+ madd_v3_v3fl(mid, hor, projmat[3][0]);
+ madd_v3_v3fl(mid, ver, projmat[3][1]);
+ }
+
+ r_bbox->vec[0][0] = mid[0] - ver[0] - hor[0];
+ r_bbox->vec[0][1] = mid[1] - ver[1] - hor[1];
+ r_bbox->vec[0][2] = mid[2] - ver[2] - hor[2];
+
+ r_bbox->vec[3][0] = mid[0] + ver[0] - hor[0];
+ r_bbox->vec[3][1] = mid[1] + ver[1] - hor[1];
+ r_bbox->vec[3][2] = mid[2] + ver[2] - hor[2];
+
+ r_bbox->vec[7][0] = mid[0] + ver[0] + hor[0];
+ r_bbox->vec[7][1] = mid[1] + ver[1] + hor[1];
+ r_bbox->vec[7][2] = mid[2] + ver[2] + hor[2];
+
+ r_bbox->vec[4][0] = mid[0] - ver[0] + hor[0];
+ r_bbox->vec[4][1] = mid[1] - ver[1] + hor[1];
+ r_bbox->vec[4][2] = mid[2] - ver[2] + hor[2];
+
+ /* Get the coordinates of the far plane. */
+ if (is_persp) {
+ float sca_far = far / near;
+ mid[0] = mid[0] + (mid[0] - loc[0]) * sca_far;
+ mid[1] = mid[1] + (mid[1] - loc[1]) * sca_far;
+ mid[2] = mid[2] + (mid[2] - loc[2]) * sca_far;
+
+ mul_v3_fl(hor, sca_far);
+ mul_v3_fl(ver, sca_far);
+ }
+ else {
+ madd_v3_v3v3fl(mid, loc, screenvecs[2], -far);
+
+ /* Non-symmetric frustum. */
+ madd_v3_v3fl(mid, hor, projmat[3][0]);
+ madd_v3_v3fl(mid, ver, projmat[3][1]);
+ }
+
+ r_bbox->vec[1][0] = mid[0] - ver[0] - hor[0];
+ r_bbox->vec[1][1] = mid[1] - ver[1] - hor[1];
+ r_bbox->vec[1][2] = mid[2] - ver[2] - hor[2];
+
+ r_bbox->vec[2][0] = mid[0] + ver[0] - hor[0];
+ r_bbox->vec[2][1] = mid[1] + ver[1] - hor[1];
+ r_bbox->vec[2][2] = mid[2] + ver[2] - hor[2];
+
+ r_bbox->vec[6][0] = mid[0] + ver[0] + hor[0];
+ r_bbox->vec[6][1] = mid[1] + ver[1] + hor[1];
+ r_bbox->vec[6][2] = mid[2] + ver[2] + hor[2];
+
+ r_bbox->vec[5][0] = mid[0] - ver[0] + hor[0];
+ r_bbox->vec[5][1] = mid[1] - ver[1] + hor[1];
+ r_bbox->vec[5][2] = mid[2] - ver[2] + hor[2];
+}
+
+static void draw_clipping_setup_from_view(void)
+{
+ if (DST.clipping.updated)
+ return;
+
+ float (*viewinv)[4] = DST.view_data.matstate.mat[DRW_MAT_VIEWINV];
+ float (*projmat)[4] = DST.view_data.matstate.mat[DRW_MAT_WIN];
+ float (*projinv)[4] = DST.view_data.matstate.mat[DRW_MAT_WININV];
+ BoundSphere *bsphere = &DST.clipping.frustum_bsphere;
+
+ /* Extract Clipping Planes */
+ BoundBox bbox;
+ draw_frustum_boundbox_calc(projmat, viewinv, &bbox);
+
+ /* Compute clip planes using the world space frustum corners. */
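+ /* Each plane is built from corner p and two other corners (q, r) lying on the same frustum face, chosen in the switch below. */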
+ for (int p = 0; p < 6; p++) {
+ int q, r;
+ switch (p) {
+ case 0: q=1; r=2; break;
+ case 1: q=0; r=5; break;
+ case 2: q=1; r=5; break;
+ case 3: q=2; r=6; break;
+ case 4: q=0; r=3; break;
+ default: q=4; r=7; break;
+ }
+ if (DST.frontface == GL_CW) {
+ SWAP(int, q, r);
+ }
+
+ normal_tri_v3(DST.clipping.frustum_planes[p], bbox.vec[p], bbox.vec[q], bbox.vec[r]);
+ DST.clipping.frustum_planes[p][3] = -dot_v3v3(DST.clipping.frustum_planes[p], bbox.vec[p]);
+ }
+
+ /* Extract Bounding Sphere */
+ if (projmat[3][3] != 0.0f) {
+ /* Orthographic */
+ /* The most extreme points on the near and far plane. (normalized device coords). */
+ float *nearpoint = bbox.vec[0];
+ float *farpoint = bbox.vec[6];
+
+ mul_project_m4_v3(projinv, nearpoint);
+ mul_project_m4_v3(projinv, farpoint);
+
+ /* just use median point */
+ mid_v3_v3v3(bsphere->center, farpoint, nearpoint);
+ bsphere->radius = len_v3v3(bsphere->center, farpoint);
+ }
+ else if (projmat[2][0] == 0.0f && projmat[2][1] == 0.0f) {
+ /* Perspective with symmetrical frustum. */
+
+ /* We obtain the center and radius of the circumscribed circle of the
+ * isosceles trapezoid composed by the diagonals of the near and far clipping plane */
+
+ /* center of each clipping plane */
+ float mid_min[3], mid_max[3];
+ mid_v3_v3v3(mid_min, bbox.vec[3], bbox.vec[4]);
+ mid_v3_v3v3(mid_max, bbox.vec[2], bbox.vec[5]);
+
+ /* square length of the diagonals of each clipping plane */
+ float a_sq = len_squared_v3v3(bbox.vec[3], bbox.vec[4]);
+ float b_sq = len_squared_v3v3(bbox.vec[2], bbox.vec[5]);
+
+ /* distance squared between clipping planes */
+ float h_sq = len_squared_v3v3(mid_min, mid_max);
+
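+ /* fac is chosen so the center is equidistant from the near and far corners:
+ * fac^2 * h_sq + a_sq / 4 == (1 - fac)^2 * h_sq + b_sq / 4. */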
+ float fac = (4 * h_sq + b_sq - a_sq) / (8 * h_sq);
+ BLI_assert(fac >= 0.0f);
+
+ /* The goal is to get the smallest sphere,
+ * not the sphere that passes through each corner */
+ CLAMP(fac, 0.0f, 1.0f);
+
+ interp_v3_v3v3(bsphere->center, mid_min, mid_max, fac);
+
+ /* distance from the center to one of the points of the far plane (1, 2, 5, 6) */
+ bsphere->radius = len_v3v3(bsphere->center, bbox.vec[1]);
+ }
+ else {
+ /* Perspective with asymmetrical frustum. */
+
+ /* We put the sphere center on the line that goes from origin
+ * to the center of the far clipping plane. */
+
+ /* Detect which corner of the far clipping plane is farthest from the origin */
+ float nfar[4]; /* most extreme far point in NDC space */
+ float farxy[2]; /* farpoint projection onto the near plane */
+ float farpoint[3] = {0.0f}; /* most extreme far point in camera coordinate */
+ float nearpoint[3]; /* most extreme near point in camera coordinate */
+ float farcenter[3] = {0.0f}; /* center of the far clipping plane in camera coordinates */
+ float F = -1.0f, N; /* square distance of far and near point to origin */
+ float f, n; /* distance of far and near point to z axis. f is always > 0 but n can be < 0 */
+ float e, s; /* far and near clipping distance (<0) */
+ float c; /* slope of center line = distance of far clipping center to z axis / far clipping distance */
+ float z; /* projection of sphere center on z axis (<0) */
+
+ /* Find farthest corner and center of far clip plane. */
+ float corner[3] = {1.0f, 1.0f, 1.0f}; /* in clip space */
+ for (int i = 0; i < 4; i++) {
+ float point[3];
+ mul_v3_project_m4_v3(point, projinv, corner);
+ float len = len_squared_v3(point);
+ if (len > F) {
+ copy_v3_v3(nfar, corner);
+ copy_v3_v3(farpoint, point);
+ F = len;
+ }
+ add_v3_v3(farcenter, point);
+ /* rotate by 90 degrees to walk through the 4 corners of the far clip plane */
+ float tmp = corner[0];
+ corner[0] = -corner[1];
+ corner[1] = tmp;
+ }
+
+ /* the far center is the average of the far clipping points */
+ mul_v3_fl(farcenter, 0.25f);
+ /* the extreme near point is the opposite point on the near clipping plane */
+ copy_v3_fl3(nfar, -nfar[0], -nfar[1], -1.0f);
+ mul_v3_project_m4_v3(nearpoint, projinv, nfar);
+ /* this is a frustum projection */
+ N = len_squared_v3(nearpoint);
+ e = farpoint[2];
+ s = nearpoint[2];
+ /* distance to view Z axis */
+ f = len_v2(farpoint);
+ /* get corresponding point on the near plane */
+ mul_v2_v2fl(farxy, farpoint, s/e);
+ /* this formula preserves the sign of n */
+ sub_v2_v2(nearpoint, farxy);
+ n = f * s / e - len_v2(nearpoint);
+ c = len_v2(farcenter) / e;
+ /* the big formula, it simplifies to (F-N)/(2(e-s)) for the symmetric case */
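+ /* It is the solution of F - N == 2 * z * ((e - s) + c * (f - n)), i.e. the center at depth z
+ * is equidistant from the extreme far and near points. */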
+ z = (F-N) / (2.0f * (e-s + c*(f-n)));
+
+ bsphere->center[0] = farcenter[0] * z/e;
+ bsphere->center[1] = farcenter[1] * z/e;
+ bsphere->center[2] = z;
+ bsphere->radius = len_v3v3(bsphere->center, farpoint);
+
+ /* Transform to world space. */
+ mul_m4_v3(viewinv, bsphere->center);
+ }
+
+ DST.clipping.updated = true;
+}
+
+/* Return true if the given BoundSphere intersects the current view frustum. */
+bool DRW_culling_sphere_test(BoundSphere *bsphere)
+{
+ draw_clipping_setup_from_view();
+
+ /* Bypass test if radius is negative. */
+ if (bsphere->radius < 0.0f)
+ return true;
+
+ /* Do a rough test first: Sphere VS Sphere intersect. */
+ BoundSphere *frustum_bsphere = &DST.clipping.frustum_bsphere;
+ float center_dist = len_squared_v3v3(bsphere->center, frustum_bsphere->center);
+ if (center_dist > SQUARE(bsphere->radius + frustum_bsphere->radius))
+ return false;
+
+ /* Test against the 6 frustum planes. */
+ for (int p = 0; p < 6; p++) {
+ float dist = plane_point_side_v3(DST.clipping.frustum_planes[p], bsphere->center);
+ if (dist < -bsphere->radius) {
+ return false;
+ }
+ }
+
+ return true;
+}
+
+/* Return true if the given BoundBox intersects the current view frustum.
+ * bbox must be in world space. */
+bool DRW_culling_box_test(BoundBox *bbox)
+{
+ draw_clipping_setup_from_view();
+
+ /* 6 view frustum planes */
+ for (int p = 0; p < 6; p++) {
+ /* 8 box vertices. */
+ for (int v = 0; v < 8 ; v++) {
+ float dist = plane_point_side_v3(DST.clipping.frustum_planes[p], bbox->vec[v]);
+ if (dist > 0.0f) {
+ /* At least one point in front of this plane.
+ * Go to next plane. */
+ break;
+ }
+ else if (v == 7) {
+ /* 8 points behind this plane. */
+ return false;
+ }
+ }
+ }
+
+ return true;
+}
+
+/** \} */
+
+/* -------------------------------------------------------------------- */
+
+/** \name Draw (DRW_draw)
+ * \{ */
+
+static void draw_matrices_model_prepare(DRWCallState *st)
+{
+ if (st->cache_id == DST.state_cache_id) {
+ return; /* Values are already updated for this view. */
+ }
+ else {
+ st->cache_id = DST.state_cache_id;
+ }
+
+ if (DRW_culling_sphere_test(&st->bsphere)) {
+ st->flag &= ~DRW_CALL_CULLED;
+ }
+ else {
+ st->flag |= DRW_CALL_CULLED;
+ return; /* No need to go further; the call will not be used. */
+ }
+
+ /* Order matters: the modelview matrix is needed below for its inverse and for the normal matrix. */
+ if (st->matflag & (DRW_CALL_MODELVIEW | DRW_CALL_MODELVIEWINVERSE |
+ DRW_CALL_NORMALVIEW | DRW_CALL_EYEVEC))
+ {
+ mul_m4_m4m4(st->modelview, DST.view_data.matstate.mat[DRW_MAT_VIEW], st->model);
+ }
+ if (st->matflag & DRW_CALL_MODELVIEWINVERSE) {
+ invert_m4_m4(st->modelviewinverse, st->modelview);
+ }
+ if (st->matflag & DRW_CALL_MODELVIEWPROJECTION) {
+ mul_m4_m4m4(st->modelviewprojection, DST.view_data.matstate.mat[DRW_MAT_PERS], st->model);
+ }
+ if (st->matflag & (DRW_CALL_NORMALVIEW | DRW_CALL_EYEVEC)) {
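+ /* Normal matrix: inverse transpose of the upper 3x3 of the modelview matrix. */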
+ copy_m3_m4(st->normalview, st->modelview);
+ invert_m3(st->normalview);
+ transpose_m3(st->normalview);
+ }
+ if (st->matflag & DRW_CALL_EYEVEC) {
+ /* Used by orthographic wires */
+ float tmp[3][3];
+ copy_v3_fl3(st->eyevec, 0.0f, 0.0f, 1.0f);
+ invert_m3_m3(tmp, st->normalview);
+ /* set eye vector, transformed to object coords */
+ mul_m3_v3(tmp, st->eyevec);
+ }
+ /* Non view-dependent matrices. */
+ if (st->matflag & DRW_CALL_MODELINVERSE) {
+ invert_m4_m4(st->modelinverse, st->model);
+ st->matflag &= ~DRW_CALL_MODELINVERSE;
+ }
+ if (st->matflag & DRW_CALL_NORMALWORLD) {
+ copy_m3_m4(st->normalworld, st->model);
+ invert_m3(st->normalworld);
+ transpose_m3(st->normalworld);
+ st->matflag &= ~DRW_CALL_NORMALWORLD;
+ }
+}
+
+static void draw_geometry_prepare(DRWShadingGroup *shgroup, DRWCallState *state)
+{
+ /* step 1 : bind object dependent matrices */
+ if (state != NULL) {
+ GPU_shader_uniform_vector(shgroup->shader, shgroup->model, 16, 1, (float *)state->model);
+ GPU_shader_uniform_vector(shgroup->shader, shgroup->modelinverse, 16, 1, (float *)state->modelinverse);
+ GPU_shader_uniform_vector(shgroup->shader, shgroup->modelview, 16, 1, (float *)state->modelview);
+ GPU_shader_uniform_vector(shgroup->shader, shgroup->modelviewinverse, 16, 1, (float *)state->modelviewinverse);
+ GPU_shader_uniform_vector(shgroup->shader, shgroup->modelviewprojection, 16, 1, (float *)state->modelviewprojection);
+ GPU_shader_uniform_vector(shgroup->shader, shgroup->normalview, 9, 1, (float *)state->normalview);
+ GPU_shader_uniform_vector(shgroup->shader, shgroup->normalworld, 9, 1, (float *)state->normalworld);
+ GPU_shader_uniform_vector(shgroup->shader, shgroup->orcotexfac, 3, 2, (float *)state->orcotexfac);
+ GPU_shader_uniform_vector(shgroup->shader, shgroup->eye, 3, 1, (float *)state->eyevec);
+ }
+ else {
+ BLI_assert((shgroup->normalview == -1) && (shgroup->normalworld == -1) && (shgroup->eye == -1));
+ /* For instancing and batching. */
+ float unitmat[4][4];
+ unit_m4(unitmat);
+ GPU_shader_uniform_vector(shgroup->shader, shgroup->model, 16, 1, (float *)unitmat);
+ GPU_shader_uniform_vector(shgroup->shader, shgroup->modelinverse, 16, 1, (float *)unitmat);
+ GPU_shader_uniform_vector(shgroup->shader, shgroup->modelview, 16, 1, (float *)DST.view_data.matstate.mat[DRW_MAT_VIEW]);
+ GPU_shader_uniform_vector(shgroup->shader, shgroup->modelviewinverse, 16, 1, (float *)DST.view_data.matstate.mat[DRW_MAT_VIEWINV]);
+ GPU_shader_uniform_vector(shgroup->shader, shgroup->modelviewprojection, 16, 1, (float *)DST.view_data.matstate.mat[DRW_MAT_PERS]);
+ GPU_shader_uniform_vector(shgroup->shader, shgroup->orcotexfac, 3, 2, (float *)shgroup->instance_orcofac);
+ }
+}
+
+static void draw_geometry_execute_ex(
+ DRWShadingGroup *shgroup, Gwn_Batch *geom, unsigned int start, unsigned int count, bool draw_instance)
+{
+ /* Special case: empty drawcall, placement is done via shader, don't bind anything. */
+ if (geom == NULL) {
+ BLI_assert(shgroup->type == DRW_SHG_TRIANGLE_BATCH); /* Add other type if needed. */
+ /* Shader is already bound. */
+ GWN_draw_primitive(GWN_PRIM_TRIS, count);
+ return;
+ }
+
+ /* step 2 : bind vertex array & draw */
+ GWN_batch_program_set_no_use(geom, GPU_shader_get_program(shgroup->shader), GPU_shader_get_interface(shgroup->shader));
+ /* XXX hacking gawain. we don't want to call glUseProgram! (huge performance loss) */
+ geom->program_in_use = true;
+
+ GWN_batch_draw_range_ex(geom, start, count, draw_instance);
+
+ geom->program_in_use = false; /* XXX hacking gawain */
+}
+
+static void draw_geometry_execute(DRWShadingGroup *shgroup, Gwn_Batch *geom)
+{
+ draw_geometry_execute_ex(shgroup, geom, 0, 0, false);
+}
+
+enum {
+ BIND_NONE = 0,
+ BIND_TEMP = 1, /* Release slot after this shading group. */
+ BIND_PERSIST = 2, /* Release slot only after the next shader change. */
+};
+
+static void bind_texture(GPUTexture *tex, char bind_type)
+{
+ int index;
+ char *slot_flags = DST.RST.bound_tex_slots;
+ int bind_num = GPU_texture_bound_number(tex);
+ if (bind_num == -1) {
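+ /* Texture is not bound yet: search for a free slot in round-robin order, starting after the last slot used. */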
+ for (int i = 0; i < GPU_max_textures(); ++i) {
+ index = DST.RST.bind_tex_inc = (DST.RST.bind_tex_inc + 1) % GPU_max_textures();
+ if (slot_flags[index] == BIND_NONE) {
+ if (DST.RST.bound_texs[index] != NULL) {
+ GPU_texture_unbind(DST.RST.bound_texs[index]);
+ }
+ GPU_texture_bind(tex, index);
+ DST.RST.bound_texs[index] = tex;
+ slot_flags[index] = bind_type;
+ // printf("Binds Texture %d %p\n", DST.RST.bind_tex_inc, tex);
+ return;
+ }
+ }
+ printf("Not enough texture slots! Reduce number of textures used by your shader.\n");
+ }
+ slot_flags[bind_num] = bind_type;
+}
+
+static void bind_ubo(GPUUniformBuffer *ubo, char bind_type)
+{
+ int index;
+ char *slot_flags = DST.RST.bound_ubo_slots;
+ int bind_num = GPU_uniformbuffer_bindpoint(ubo);
+ if (bind_num == -1) {
+ for (int i = 0; i < GPU_max_ubo_binds(); ++i) {
+ index = DST.RST.bind_ubo_inc = (DST.RST.bind_ubo_inc + 1) % GPU_max_ubo_binds();
+ if (slot_flags[index] == BIND_NONE) {
+ if (DST.RST.bound_ubos[index] != NULL) {
+ GPU_uniformbuffer_unbind(DST.RST.bound_ubos[index]);
+ }
+ GPU_uniformbuffer_bind(ubo, index);
+ DST.RST.bound_ubos[index] = ubo;
+ slot_flags[index] = bind_type;
+ return;
+ }
+ }
+ /* printf so the user can report bad behaviour. */
+ printf("Not enough ubo slots! This should not happen!\n");
+ /* This does not depend on user input.
+ * It is our responsibility to make sure there are enough slots. */
+ BLI_assert(0);
+ return; /* Avoid indexing slot_flags with -1 below. */
+ }
+ slot_flags[bind_num] = bind_type;
+}
+
+static void release_texture_slots(bool with_persist)
+{
+ if (with_persist) {
+ memset(DST.RST.bound_tex_slots, 0x0, sizeof(*DST.RST.bound_tex_slots) * GPU_max_textures());
+ }
+ else {
+ for (int i = 0; i < GPU_max_textures(); ++i) {
+ if (DST.RST.bound_tex_slots[i] != BIND_PERSIST)
+ DST.RST.bound_tex_slots[i] = BIND_NONE;
+ }
+ }
+
+ /* Reset so that slots are consistently assigned for different shader
+ * draw calls, to avoid shader specialization/patching by the driver. */
+ DST.RST.bind_tex_inc = 0;
+}
+
+static void release_ubo_slots(bool with_persist)
+{
+ if (with_persist) {
+ memset(DST.RST.bound_ubo_slots, 0x0, sizeof(*DST.RST.bound_ubo_slots) * GPU_max_ubo_binds());
+ }
+ else {
+ for (int i = 0; i < GPU_max_ubo_binds(); ++i) {
+ if (DST.RST.bound_ubo_slots[i] != BIND_PERSIST)
+ DST.RST.bound_ubo_slots[i] = BIND_NONE;
+ }
+ }
+
+ /* Reset so that slots are consistently assigned for different shader
+ * draw calls, to avoid shader specialization/patching by the driver. */
+ DST.RST.bind_ubo_inc = 0;
+}
+
+static void draw_shgroup(DRWShadingGroup *shgroup, DRWState pass_state)
+{
+ BLI_assert(shgroup->shader);
+
+ GPUTexture *tex;
+ GPUUniformBuffer *ubo;
+ int val;
+ float fval;
+ const bool shader_changed = (DST.shader != shgroup->shader);
+
+ if (shader_changed) {
+ if (DST.shader) GPU_shader_unbind();
+ GPU_shader_bind(shgroup->shader);
+ DST.shader = shgroup->shader;
+ }
+
+ release_ubo_slots(shader_changed);
+ release_texture_slots(shader_changed);
+
+ drw_state_set((pass_state & shgroup->state_extra_disable) | shgroup->state_extra);
+ drw_stencil_set(shgroup->stencil_mask);
+
+ /* Binding Uniform */
+ /* Don't check anything, the interface should already contain as few uniforms as possible. */
+ for (DRWUniform *uni = shgroup->uniforms; uni; uni = uni->next) {
+ switch (uni->type) {
+ case DRW_UNIFORM_SHORT_TO_INT:
+ val = (int)*((short *)uni->value);
+ GPU_shader_uniform_vector_int(
+ shgroup->shader, uni->location, uni->length, uni->arraysize, (int *)&val);
+ break;
+ case DRW_UNIFORM_SHORT_TO_FLOAT:
+ fval = (float)*((short *)uni->value);
+ GPU_shader_uniform_vector(
+ shgroup->shader, uni->location, uni->length, uni->arraysize, (float *)&fval);
+ break;
+ case DRW_UNIFORM_BOOL:
+ case DRW_UNIFORM_INT:
+ GPU_shader_uniform_vector_int(
+ shgroup->shader, uni->location, uni->length, uni->arraysize, (int *)uni->value);
+ break;
+ case DRW_UNIFORM_FLOAT:
+ GPU_shader_uniform_vector(
+ shgroup->shader, uni->location, uni->length, uni->arraysize, (float *)uni->value);
+ break;
+ case DRW_UNIFORM_TEXTURE:
+ tex = (GPUTexture *)uni->value;
+ BLI_assert(tex);
+ bind_texture(tex, BIND_TEMP);
+ GPU_shader_uniform_texture(shgroup->shader, uni->location, tex);
+ break;
+ case DRW_UNIFORM_TEXTURE_PERSIST:
+ tex = (GPUTexture *)uni->value;
+ BLI_assert(tex);
+ bind_texture(tex, BIND_PERSIST);
+ GPU_shader_uniform_texture(shgroup->shader, uni->location, tex);
+ break;
+ case DRW_UNIFORM_TEXTURE_REF:
+ tex = *((GPUTexture **)uni->value);
+ BLI_assert(tex);
+ bind_texture(tex, BIND_TEMP);
+ GPU_shader_uniform_texture(shgroup->shader, uni->location, tex);
+ break;
+ case DRW_UNIFORM_BLOCK:
+ ubo = (GPUUniformBuffer *)uni->value;
+ bind_ubo(ubo, BIND_TEMP);
+ GPU_shader_uniform_buffer(shgroup->shader, uni->location, ubo);
+ break;
+ case DRW_UNIFORM_BLOCK_PERSIST:
+ ubo = (GPUUniformBuffer *)uni->value;
+ bind_ubo(ubo, BIND_PERSIST);
+ GPU_shader_uniform_buffer(shgroup->shader, uni->location, ubo);
+ break;
+ }
+ }
+
+#ifdef USE_GPU_SELECT
+# define GPU_SELECT_LOAD_IF_PICKSEL(_select_id) \
+ if (G.f & G_PICKSEL) { \
+ GPU_select_load_id(_select_id); \
+ } ((void)0)
+
+# define GPU_SELECT_LOAD_IF_PICKSEL_CALL(_call) \
+ if ((G.f & G_PICKSEL) && (_call)) { \
+ GPU_select_load_id((_call)->select_id); \
+ } ((void)0)
+
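+/* Note: the macro below opens a loop over the instances; it must be closed with GPU_SELECT_LOAD_IF_PICKSEL_LIST_END. */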
+# define GPU_SELECT_LOAD_IF_PICKSEL_LIST(_shgroup, _start, _count) \
+ _start = 0; \
+ _count = _shgroup->instance_count; \
+ int *select_id = NULL; \
+ if (G.f & G_PICKSEL) { \
+ if (_shgroup->override_selectid == -1) { \
+ select_id = DRW_instance_data_get(_shgroup->inst_selectid); \
+ switch (_shgroup->type) { \
+ case DRW_SHG_TRIANGLE_BATCH: _count = 3; break; \
+ case DRW_SHG_LINE_BATCH: _count = 2; break; \
+ default: _count = 1; break; \
+ } \
+ } \
+ else { \
+ GPU_select_load_id(_shgroup->override_selectid); \
+ } \
+ } \
+ while (_start < _shgroup->instance_count) { \
+ if (select_id) { \
+ GPU_select_load_id(select_id[_start]); \
+ }
+
+# define GPU_SELECT_LOAD_IF_PICKSEL_LIST_END(_start, _count) \
+ _start += _count; \
+ }
+
+#else
+# define GPU_SELECT_LOAD_IF_PICKSEL(select_id)
+# define GPU_SELECT_LOAD_IF_PICKSEL_CALL(call)
+# define GPU_SELECT_LOAD_IF_PICKSEL_LIST_END(start, count)
+# define GPU_SELECT_LOAD_IF_PICKSEL_LIST(_shgroup, _start, _count) \
+ _start = 0; \
+ _count = _shgroup->instance_count;
+
+#endif
+
+ /* Rendering Calls */
+ if (!ELEM(shgroup->type, DRW_SHG_NORMAL)) {
+ /* Replacing multiple calls with only one */
+ if (ELEM(shgroup->type, DRW_SHG_INSTANCE, DRW_SHG_INSTANCE_EXTERNAL)) {
+ if (shgroup->type == DRW_SHG_INSTANCE_EXTERNAL) {
+ if (shgroup->instance_geom != NULL) {
+ GPU_SELECT_LOAD_IF_PICKSEL(shgroup->override_selectid);
+ draw_geometry_prepare(shgroup, NULL);
+ draw_geometry_execute_ex(shgroup, shgroup->instance_geom, 0, 0, true);
+ }
+ }
+ else {
+ if (shgroup->instance_count > 0) {
+ unsigned int count, start;
+ draw_geometry_prepare(shgroup, NULL);
+ GPU_SELECT_LOAD_IF_PICKSEL_LIST(shgroup, start, count)
+ {
+ draw_geometry_execute_ex(shgroup, shgroup->instance_geom, start, count, true);
+ }
+ GPU_SELECT_LOAD_IF_PICKSEL_LIST_END(start, count)
+ }
+ }
+ }
+ else { /* DRW_SHG_***_BATCH */
+ /* Some dynamic batches can have no geom (no calls to aggregate). */
+ if (shgroup->instance_count > 0) {
+ unsigned int count, start;
+ draw_geometry_prepare(shgroup, NULL);
+ GPU_SELECT_LOAD_IF_PICKSEL_LIST(shgroup, start, count)
+ {
+ draw_geometry_execute_ex(shgroup, shgroup->batch_geom, start, count, false);
+ }
+ GPU_SELECT_LOAD_IF_PICKSEL_LIST_END(start, count)
+ }
+ }
+ }
+ else {
+ bool prev_neg_scale = false;
+ for (DRWCall *call = shgroup->calls.first; call; call = call->next) {
+
+ /* OPTI/IDEA(clem): Do this preparation in another thread. */
+ draw_matrices_model_prepare(call->state);
+
+ if ((call->state->flag & DRW_CALL_CULLED) != 0)
+ continue;
+
+ /* Negative scale objects */
+ bool neg_scale = call->state->flag & DRW_CALL_NEGSCALE;
+ if (neg_scale != prev_neg_scale) {
+ glFrontFace((neg_scale) ? DST.backface : DST.frontface);
+ prev_neg_scale = neg_scale;
+ }
+
+ GPU_SELECT_LOAD_IF_PICKSEL_CALL(call);
+ draw_geometry_prepare(shgroup, call->state);
+
+ switch (call->type) {
+ case DRW_CALL_SINGLE:
+ draw_geometry_execute(shgroup, call->single.geometry);
+ break;
+ case DRW_CALL_INSTANCES:
+ draw_geometry_execute_ex(shgroup, call->instances.geometry, 0, *call->instances.count, true);
+ break;
+ case DRW_CALL_GENERATE:
+ call->generate.geometry_fn(shgroup, draw_geometry_execute, call->generate.user_data);
+ break;
+ default:
+ BLI_assert(0);
+ }
+ }
+ /* Reset state */
+ glFrontFace(DST.frontface);
+ }
+
+ /* TODO: remove this (it currently causes an alpha issue with sculpt, needs investigation). */
+ DRW_state_reset();
+}
+
+static void drw_update_view(void)
+{
+ if (DST.dirty_mat) {
+ DST.state_cache_id++;
+ DST.dirty_mat = false;
+
+ DRW_uniformbuffer_update(view_ubo, &DST.view_data);
+
+ /* Catch integer wrap around. */
+ if (UNLIKELY(DST.state_cache_id == 0)) {
+ DST.state_cache_id = 1;
+ /* We must reset all CallStates so that none of them is left
+ * with a stale cache_id matching the new value (1). */
+ BLI_mempool_iter iter;
+ DRWCallState *state;
+ BLI_mempool_iternew(DST.vmempool->states, &iter);
+ while ((state = BLI_mempool_iterstep(&iter))) {
+ state->cache_id = 0;
+ }
+ }
+
+ /* TODO dispatch threads to compute matrices/culling */
+ }
+
+ draw_clipping_setup_from_view();
+}
+
+static void drw_draw_pass_ex(DRWPass *pass, DRWShadingGroup *start_group, DRWShadingGroup *end_group)
+{
+ DST.shader = NULL;
+
+ BLI_assert(DST.buffer_finish_called && "DRW_render_instance_buffer_finish() was not called before drawing");
+
+ drw_update_view();
+
+ drw_state_set(pass->state);
+
+ DRW_stats_query_start(pass->name);
+
+ for (DRWShadingGroup *shgroup = start_group; shgroup; shgroup = shgroup->next) {
+ draw_shgroup(shgroup, pass->state);
+ /* break if upper limit */
+ if (shgroup == end_group) {
+ break;
+ }
+ }
+
+ /* Clear Bound textures */
+ for (int i = 0; i < GPU_max_textures(); i++) {
+ if (DST.RST.bound_texs[i] != NULL) {
+ GPU_texture_unbind(DST.RST.bound_texs[i]);
+ DST.RST.bound_texs[i] = NULL;
+ }
+ }
+
+ /* Clear Bound Ubos */
+ for (int i = 0; i < GPU_max_ubo_binds(); i++) {
+ if (DST.RST.bound_ubos[i] != NULL) {
+ GPU_uniformbuffer_unbind(DST.RST.bound_ubos[i]);
+ DST.RST.bound_ubos[i] = NULL;
+ }
+ }
+
+ if (DST.shader) {
+ GPU_shader_unbind();
+ DST.shader = NULL;
+ }
+
+ DRW_stats_query_end();
+}
+
+void DRW_draw_pass(DRWPass *pass)
+{
+ drw_draw_pass_ex(pass, pass->shgroups.first, pass->shgroups.last);
+}
+
+/* Draw only a subset of shgroups. Used in special cases such as grease pencil strokes. */
+void DRW_draw_pass_subset(DRWPass *pass, DRWShadingGroup *start_group, DRWShadingGroup *end_group)
+{
+ drw_draw_pass_ex(pass, start_group, end_group);
+}
+
+/** \} */
diff --git a/source/blender/draw/intern/draw_manager_profiling.c b/source/blender/draw/intern/draw_manager_profiling.c
new file mode 100644
index 00000000000..47769b1fb18
--- /dev/null
+++ b/source/blender/draw/intern/draw_manager_profiling.c
@@ -0,0 +1,354 @@
+/*
+ * Copyright 2016, Blender Foundation.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * Contributor(s): Blender Institute
+ *
+ */
+
+/** \file draw_manager_profiling.c
+ * \ingroup draw
+ */
+
+#include "BLI_rect.h"
+#include "BLI_string.h"
+
+#include "BKE_global.h"
+
+#include "BLF_api.h"
+
+#include "MEM_guardedalloc.h"
+
+#include "draw_manager.h"
+
+#include "GPU_glew.h"
+#include "GPU_texture.h"
+
+#include "UI_resources.h"
+
+#include "WM_api.h"
+#include "WM_types.h"
+
+#include "draw_manager_profiling.h"
+
+#define MAX_TIMER_NAME 32
+#define MAX_NESTED_TIMER 8
+#define CHUNK_SIZE 8
+#define GPU_TIMER_FALLOFF 0.1
+
+typedef struct DRWTimer {
+ GLuint query[2];
+ GLuint64 time_average;
+ char name[MAX_TIMER_NAME];
+ int lvl; /* Hierarchy level for nested timer. */
+ bool is_query; /* Does this timer actually perform queries, or is it just a group? */
+} DRWTimer;
+
+static struct DRWTimerPool {
+ DRWTimer *timers;
+ int chunk_count; /* Number of chunk allocated. */
+ int timer_count; /* chunk_count * CHUNK_SIZE */
+ int timer_increment; /* Keep track of where we are in the stack. */
+ int end_increment; /* Keep track of bad usage. */
+ bool is_recording; /* Are we in the render loop? */
+ bool is_querying; /* Keep track of bad usage. */
+} DTP = {NULL};
+
+void DRW_stats_free(void)
+{
+ if (DTP.timers != NULL) {
+ for (int i = 0; i < DTP.timer_count; ++i) {
+ DRWTimer *timer = &DTP.timers[i];
+ glDeleteQueries(2, timer->query);
+ }
+ MEM_freeN(DTP.timers);
+ DTP.timers = NULL;
+ }
+}
+
+void DRW_stats_begin(void)
+{
+ if (G.debug_value > 20) {
+ DTP.is_recording = true;
+ }
+
+ if (DTP.is_recording && DTP.timers == NULL) {
+ DTP.chunk_count = 1;
+ DTP.timer_count = DTP.chunk_count * CHUNK_SIZE;
+ DTP.timers = MEM_callocN(sizeof(DRWTimer) * DTP.timer_count, "DRWTimer stack");
+ }
+ else if (!DTP.is_recording && DTP.timers != NULL) {
+ DRW_stats_free();
+ }
+
+ DTP.is_querying = false;
+ DTP.timer_increment = 0;
+ DTP.end_increment = 0;
+}
+
+static DRWTimer *drw_stats_timer_get(void)
+{
+ if (UNLIKELY(DTP.timer_increment >= DTP.timer_count)) {
+ /* Resize the stack. */
+ DTP.chunk_count++;
+ DTP.timer_count = DTP.chunk_count * CHUNK_SIZE;
+ DTP.timers = MEM_recallocN(DTP.timers, sizeof(DRWTimer) * DTP.timer_count);
+ }
+
+ return &DTP.timers[DTP.timer_increment++];
+}
+
+static void drw_stats_timer_start_ex(const char *name, const bool is_query)
+{
+ if (DTP.is_recording) {
+ DRWTimer *timer = drw_stats_timer_get();
+ BLI_strncpy(timer->name, name, MAX_TIMER_NAME);
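+ /* Nesting level: number of timers started but not yet ended. */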
+ timer->lvl = DTP.timer_increment - DTP.end_increment - 1;
+ timer->is_query = is_query;
+
+ /* Queries cannot be nested or interleaved. */
+ BLI_assert(!DTP.is_querying);
+ if (timer->is_query) {
+ if (timer->query[0] == 0) {
+ glGenQueries(1, timer->query);
+ }
+
+ /* Issue query for the next frame */
+ glBeginQuery(GL_TIME_ELAPSED, timer->query[0]);
+ DTP.is_querying = true;
+ }
+ }
+}
+
+/* Use this to group queries. It does NOT keep track of time itself,
+ * it only sums the queries run inside it. */
+void DRW_stats_group_start(const char *name)
+{
+ drw_stats_timer_start_ex(name, false);
+}
+
+void DRW_stats_group_end(void)
+{
+ if (DTP.is_recording) {
+ BLI_assert(!DTP.is_querying);
+ DTP.end_increment++;
+ }
+}
+
+/* NOTE: Only call this when no sub timer will be called. */
+void DRW_stats_query_start(const char *name)
+{
+ drw_stats_timer_start_ex(name, true);
+}
+
+void DRW_stats_query_end(void)
+{
+ if (DTP.is_recording) {
+ DTP.end_increment++;
+ BLI_assert(DTP.is_querying);
+ glEndQuery(GL_TIME_ELAPSED);
+ DTP.is_querying = false;
+ }
+}
+
+void DRW_stats_reset(void)
+{
+ BLI_assert((DTP.timer_increment - DTP.end_increment) <= 0 && "You forgot a DRW_stats_group/query_end somewhere!");
+ BLI_assert((DTP.timer_increment - DTP.end_increment) >= 0 && "You forgot a DRW_stats_group/query_start somewhere!");
+
+ if (DTP.is_recording) {
+ GLuint64 lvl_time[MAX_NESTED_TIMER] = {0};
+
+ /* Swap queries for the next frame and sum up each lvl time. */
+ for (int i = DTP.timer_increment - 1; i >= 0; --i) {
+ DRWTimer *timer = &DTP.timers[i];
+ SWAP(GLuint, timer->query[0], timer->query[1]);
+
+ BLI_assert(timer->lvl < MAX_NESTED_TIMER);
+
+ if (timer->is_query) {
+ GLuint64 time;
+ if (timer->query[0] != 0) {
+ glGetQueryObjectui64v(timer->query[0], GL_QUERY_RESULT, &time);
+ }
+ else {
+ time = 1000000000; /* Default (1e9 ns = 1 second) when the query has not been issued yet. */
+ }
+
+ timer->time_average = timer->time_average * (1.0 - GPU_TIMER_FALLOFF) + time * GPU_TIMER_FALLOFF;
+ timer->time_average = MIN2(timer->time_average, 1000000000);
+ }
+ else {
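+ /* Group timer: take the time accumulated by its children (already processed, since we iterate backwards). */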
+ timer->time_average = lvl_time[timer->lvl + 1];
+ lvl_time[timer->lvl + 1] = 0;
+ }
+
+ lvl_time[timer->lvl] += timer->time_average;
+ }
+
+ DTP.is_recording = false;
+ }
+}
+
+static void draw_stat_5row(rcti *rect, int u, int v, const char *txt, const int size)
+{
+ BLF_draw_default_ascii(rect->xmin + (1 + u * 5) * U.widget_unit,
+ rect->ymax - (3 + v) * U.widget_unit, 0.0f,
+ txt, size);
+}
+
+static void draw_stat(rcti *rect, int u, int v, const char *txt, const int size)
+{
+ BLF_draw_default_ascii(rect->xmin + (1 + u) * U.widget_unit,
+ rect->ymax - (3 + v) * U.widget_unit, 0.0f,
+ txt, size);
+}
+
+void DRW_stats_draw(rcti *rect)
+{
+ char stat_string[64];
+ int lvl_index[MAX_NESTED_TIMER];
+ int v = 0, u = 0;
+
+ double init_tot_time = 0.0, background_tot_time = 0.0, render_tot_time = 0.0, tot_time = 0.0;
+
+ UI_FontThemeColor(BLF_default(), TH_TEXT_HI);
+
+ /* ------------------------------------------ */
+ /* ---------------- CPU stats --------------- */
+ /* ------------------------------------------ */
+ /* Label row */
+ char col_label[32];
+ sprintf(col_label, "Engine");
+ draw_stat_5row(rect, u++, v, col_label, sizeof(col_label));
+ sprintf(col_label, "Init");
+ draw_stat_5row(rect, u++, v, col_label, sizeof(col_label));
+ sprintf(col_label, "Background");
+ draw_stat_5row(rect, u++, v, col_label, sizeof(col_label));
+ sprintf(col_label, "Render");
+ draw_stat_5row(rect, u++, v, col_label, sizeof(col_label));
+ sprintf(col_label, "Total (w/o cache)");
+ draw_stat_5row(rect, u++, v, col_label, sizeof(col_label));
+ v++;
+
+ /* Engines rows */
+ char time_to_txt[16];
+ for (LinkData *link = DST.enabled_engines.first; link; link = link->next) {
+ u = 0;
+ DrawEngineType *engine = link->data;
+ ViewportEngineData *data = drw_viewport_engine_data_ensure(engine);
+
+ draw_stat_5row(rect, u++, v, engine->idname, sizeof(engine->idname));
+
+ init_tot_time += data->init_time;
+ sprintf(time_to_txt, "%.2fms", data->init_time);
+ draw_stat_5row(rect, u++, v, time_to_txt, sizeof(time_to_txt));
+
+ background_tot_time += data->background_time;
+ sprintf(time_to_txt, "%.2fms", data->background_time);
+ draw_stat_5row(rect, u++, v, time_to_txt, sizeof(time_to_txt));
+
+ render_tot_time += data->render_time;
+ sprintf(time_to_txt, "%.2fms", data->render_time);
+ draw_stat_5row(rect, u++, v, time_to_txt, sizeof(time_to_txt));
+
+ tot_time += data->init_time + data->background_time + data->render_time;
+ sprintf(time_to_txt, "%.2fms", data->init_time + data->background_time + data->render_time);
+ draw_stat_5row(rect, u++, v, time_to_txt, sizeof(time_to_txt));
+ v++;
+ }
+
+ /* Totals row */
+ u = 0;
+ sprintf(col_label, "Sub Total");
+ draw_stat_5row(rect, u++, v, col_label, sizeof(col_label));
+ sprintf(time_to_txt, "%.2fms", init_tot_time);
+ draw_stat_5row(rect, u++, v, time_to_txt, sizeof(time_to_txt));
+ sprintf(time_to_txt, "%.2fms", background_tot_time);
+ draw_stat_5row(rect, u++, v, time_to_txt, sizeof(time_to_txt));
+ sprintf(time_to_txt, "%.2fms", render_tot_time);
+ draw_stat_5row(rect, u++, v, time_to_txt, sizeof(time_to_txt));
+ sprintf(time_to_txt, "%.2fms", tot_time);
+ draw_stat_5row(rect, u++, v, time_to_txt, sizeof(time_to_txt));
+ v += 2;
+
+ u = 0;
+ double *cache_time = GPU_viewport_cache_time_get(DST.viewport);
+ sprintf(col_label, "Cache Time");
+ draw_stat_5row(rect, u++, v, col_label, sizeof(col_label));
+ sprintf(time_to_txt, "%.2fms", *cache_time);
+ draw_stat_5row(rect, u++, v, time_to_txt, sizeof(time_to_txt));
+ v += 2;
+
+ /* ------------------------------------------ */
+ /* ---------------- GPU stats --------------- */
+ /* ------------------------------------------ */
+
+ /* Memory Stats */
+ unsigned int tex_mem = GPU_texture_memory_usage_get();
+ unsigned int vbo_mem = GWN_vertbuf_get_memory_usage();
+
+ sprintf(stat_string, "GPU Memory");
+ draw_stat(rect, 0, v, stat_string, sizeof(stat_string));
+ sprintf(stat_string, "%.2fMB", (double)(tex_mem + vbo_mem) / 1000000.0);
+ draw_stat_5row(rect, 1, v++, stat_string, sizeof(stat_string));
+ sprintf(stat_string, "Textures");
+ draw_stat(rect, 1, v, stat_string, sizeof(stat_string));
+ sprintf(stat_string, "%.2fMB", (double)tex_mem / 1000000.0);
+ draw_stat_5row(rect, 1, v++, stat_string, sizeof(stat_string));
+ sprintf(stat_string, "Meshes");
+ draw_stat(rect, 1, v, stat_string, sizeof(stat_string));
+ sprintf(stat_string, "%.2fMB", (double)vbo_mem / 1000000.0);
+ draw_stat_5row(rect, 1, v++, stat_string, sizeof(stat_string));
+ v += 1;
+
+ /* GPU Timings */
+ BLI_snprintf(stat_string, sizeof(stat_string), "GPU Render Timings");
+ draw_stat(rect, 0, v++, stat_string, sizeof(stat_string));
+
+ for (int i = 0; i < DTP.timer_increment; ++i) {
+ double time_ms, time_percent;
+ DRWTimer *timer = &DTP.timers[i];
+ DRWTimer *timer_parent = (timer->lvl > 0) ? &DTP.timers[lvl_index[timer->lvl - 1]] : NULL;
+
+ /* Only display a number of lvl at a time */
+ if ((G.debug_value - 21) < timer->lvl) continue;
+
+ BLI_assert(timer->lvl < MAX_NESTED_TIMER);
+ lvl_index[timer->lvl] = i;
+
+ time_ms = timer->time_average / 1000000.0;
+
+ if (timer_parent != NULL) {
+ time_percent = ((double)timer->time_average / (double)timer_parent->time_average) * 100.0;
+ }
+ else {
+ time_percent = 100.0;
+ }
+
+ /* avoid very long number */
+ time_ms = MIN2(time_ms, 999.0);
+ time_percent = MIN2(time_percent, 100.0);
+
+ BLI_snprintf(stat_string, sizeof(stat_string), "%s", timer->name);
+ draw_stat(rect, 0 + timer->lvl, v, stat_string, sizeof(stat_string));
+ BLI_snprintf(stat_string, sizeof(stat_string), "%.2fms", time_ms);
+ draw_stat(rect, 12 + timer->lvl, v, stat_string, sizeof(stat_string));
+ BLI_snprintf(stat_string, sizeof(stat_string), "%.0f", time_percent);
+ draw_stat(rect, 16 + timer->lvl, v, stat_string, sizeof(stat_string));
+ v++;
+ }
+}
diff --git a/source/blender/draw/intern/draw_manager_profiling.h b/source/blender/draw/intern/draw_manager_profiling.h
new file mode 100644
index 00000000000..233cd3878d2
--- /dev/null
+++ b/source/blender/draw/intern/draw_manager_profiling.h
@@ -0,0 +1,43 @@
+/*
+ * Copyright 2016, Blender Foundation.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * Contributor(s): Blender Institute
+ *
+ */
+
+/** \file draw_manager_profiling.h
+ * \ingroup draw
+ */
+
+#ifndef __DRAW_MANAGER_PROFILING_H__
+#define __DRAW_MANAGER_PROFILING_H__
+
+struct rcti;
+
+void DRW_stats_free(void);
+void DRW_stats_begin(void);
+void DRW_stats_reset(void);
+
+void DRW_stats_group_start(const char *name);
+void DRW_stats_group_end(void);
+
+void DRW_stats_query_start(const char *name);
+void DRW_stats_query_end(void);
+
+void DRW_stats_draw(rcti *rect);
+
+#endif /* __DRAW_MANAGER_PROFILING_H__ */
diff --git a/source/blender/draw/intern/draw_manager_shader.c b/source/blender/draw/intern/draw_manager_shader.c
new file mode 100644
index 00000000000..5b4971f0730
--- /dev/null
+++ b/source/blender/draw/intern/draw_manager_shader.c
@@ -0,0 +1,377 @@
+/*
+ * Copyright 2016, Blender Foundation.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * Contributor(s): Blender Institute
+ *
+ */
+
+/** \file blender/draw/intern/draw_manager_shader.c
+ * \ingroup draw
+ */
+
+#include "draw_manager.h"
+
+#include "DNA_world_types.h"
+#include "DNA_material_types.h"
+
+#include "BLI_listbase.h"
+#include "BLI_string.h"
+#include "BLI_string_utils.h"
+#include "BLI_threads.h"
+#include "BLI_task.h"
+
+#include "BKE_global.h"
+#include "BKE_main.h"
+
+#include "GPU_shader.h"
+#include "GPU_material.h"
+
+#include "WM_api.h"
+#include "WM_types.h"
+
+extern char datatoc_gpu_shader_2D_vert_glsl[];
+extern char datatoc_gpu_shader_3D_vert_glsl[];
+extern char datatoc_common_fullscreen_vert_glsl[];
+
+
+/* -------------------------------------------------------------------- */
+
+/** \name Deferred Compilation (DRW_deferred)
+ *
+ * Since compiling shaders can take a long time, we do it in a non-blocking
+ * manner in another thread.
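+ * Queued shaders are compiled by a background wmJob that uses its own OpenGL context.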
+ *
+ * \{ */
+
+typedef struct DRWDeferredShader {
+ struct DRWDeferredShader *prev, *next;
+
+ GPUMaterial *mat;
+ char *vert, *geom, *frag, *defs;
+} DRWDeferredShader;
+
+typedef struct DRWShaderCompiler {
+ ListBase queue; /* DRWDeferredShader */
+ SpinLock list_lock;
+
+ DRWDeferredShader *mat_compiling;
+ ThreadMutex compilation_lock;
+
+ void *ogl_context;
+
+ int shaders_done; /* To compute progress. */
+} DRWShaderCompiler;
+
+static void drw_deferred_shader_free(DRWDeferredShader *dsh)
+{
+ /* Make sure it is not queued before freeing. */
+ MEM_SAFE_FREE(dsh->vert);
+ MEM_SAFE_FREE(dsh->geom);
+ MEM_SAFE_FREE(dsh->frag);
+ MEM_SAFE_FREE(dsh->defs);
+
+ MEM_freeN(dsh);
+}
+
+static void drw_deferred_shader_queue_free(ListBase *queue)
+{
+ DRWDeferredShader *dsh;
+ while ((dsh = BLI_pophead(queue))) {
+ drw_deferred_shader_free(dsh);
+ }
+}
+
+static void drw_deferred_shader_compilation_exec(void *custom_data, short *stop, short *do_update, float *progress)
+{
+ DRWShaderCompiler *comp = (DRWShaderCompiler *)custom_data;
+
+ WM_opengl_context_activate(comp->ogl_context);
+
+ while (true) {
+ BLI_spin_lock(&comp->list_lock);
+
+ if (*stop != 0) {
+ /* We don't want the user to be able to cancel the compilation,
+ * but the WM can kill the task if we are closing Blender. */
+ BLI_spin_unlock(&comp->list_lock);
+ break;
+ }
+
+ /* Pop tail because it will be less likely to lock the main thread
+ * if all GPUMaterials are to be freed (see DRW_deferred_shader_remove()). */
+ comp->mat_compiling = BLI_poptail(&comp->queue);
+ if (comp->mat_compiling == NULL) {
+ /* No more Shader to compile. */
+ BLI_spin_unlock(&comp->list_lock);
+ break;
+ }
+
+ comp->shaders_done++;
+ int total = BLI_listbase_count(&comp->queue) + comp->shaders_done;
+
+ BLI_mutex_lock(&comp->compilation_lock);
+ BLI_spin_unlock(&comp->list_lock);
+
+ /* Do the compilation. */
+ GPU_material_generate_pass(
+ comp->mat_compiling->mat,
+ comp->mat_compiling->vert,
+ comp->mat_compiling->geom,
+ comp->mat_compiling->frag,
+ comp->mat_compiling->defs);
+
+ *progress = (float)comp->shaders_done / (float)total;
+ *do_update = true;
+
+ BLI_mutex_unlock(&comp->compilation_lock);
+
+ drw_deferred_shader_free(comp->mat_compiling);
+ }
+
+ WM_opengl_context_release(comp->ogl_context);
+}
+
+static void drw_deferred_shader_compilation_free(void *custom_data)
+{
+ DRWShaderCompiler *comp = (DRWShaderCompiler *)custom_data;
+
+ drw_deferred_shader_queue_free(&comp->queue);
+
+ BLI_spin_end(&comp->list_lock);
+ BLI_mutex_end(&comp->compilation_lock);
+
+ WM_opengl_context_dispose(comp->ogl_context);
+
+ MEM_freeN(comp);
+}
+
+static void drw_deferred_shader_add(
+ GPUMaterial *mat, const char *vert, const char *geom, const char *frag_lib, const char *defines)
+{
+ /* Do not defer the compilation if we are rendering an image. */
+ if (DRW_state_is_image_render()) {
+ /* Double checking that this GPUMaterial is not going to be
+ * compiled by another thread. */
+ DRW_deferred_shader_remove(mat);
+ GPU_material_generate_pass(mat, vert, geom, frag_lib, defines);
+ return;
+ }
+
+ DRWDeferredShader *dsh = MEM_callocN(sizeof(DRWDeferredShader), "Deferred Shader");
+
+ dsh->mat = mat;
+ if (vert) dsh->vert = BLI_strdup(vert);
+ if (geom) dsh->geom = BLI_strdup(geom);
+ if (frag_lib) dsh->frag = BLI_strdup(frag_lib);
+ if (defines) dsh->defs = BLI_strdup(defines);
+
+ BLI_assert(DST.draw_ctx.evil_C);
+ wmWindowManager *wm = CTX_wm_manager(DST.draw_ctx.evil_C);
+ wmWindow *win = CTX_wm_window(DST.draw_ctx.evil_C);
+ Scene *scene = DST.draw_ctx.scene;
+
+ /* Get the running job or a new one if none is running. Can only have one job per type & owner. */
+ wmJob *wm_job = WM_jobs_get(wm, win, scene, "Shaders Compilation",
+ WM_JOB_PROGRESS | WM_JOB_SUSPEND, WM_JOB_TYPE_SHADER_COMPILATION);
+
+ DRWShaderCompiler *old_comp = (DRWShaderCompiler *)WM_jobs_customdata_get(wm_job);
+
+ DRWShaderCompiler *comp = MEM_callocN(sizeof(DRWShaderCompiler), "DRWShaderCompiler");
+ BLI_spin_init(&comp->list_lock);
+ BLI_mutex_init(&comp->compilation_lock);
+
+ if (old_comp) {
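+ /* Take over the previous job's queue so already queued shaders are not lost. */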
+ BLI_spin_lock(&old_comp->list_lock);
+ BLI_movelisttolist(&comp->queue, &old_comp->queue);
+ BLI_spin_unlock(&old_comp->list_lock);
+ }
+
+ BLI_addtail(&comp->queue, dsh);
+
+ /* Create one context per task. */
+ comp->ogl_context = WM_opengl_context_create();
+ WM_opengl_context_activate(DST.ogl_context);
+
+ WM_jobs_customdata_set(wm_job, comp, drw_deferred_shader_compilation_free);
+ WM_jobs_timer(wm_job, 0.1, NC_MATERIAL | ND_SHADING_DRAW, 0);
+ WM_jobs_callbacks(wm_job, drw_deferred_shader_compilation_exec, NULL, NULL, NULL);
+ WM_jobs_start(wm, wm_job);
+}
+
+void DRW_deferred_shader_remove(GPUMaterial *mat)
+{
+ Scene *scene = GPU_material_scene(mat);
+
+ for (wmWindowManager *wm = G.main->wm.first; wm; wm = wm->id.next) {
+ if (WM_jobs_test(wm, scene, WM_JOB_TYPE_SHADER_COMPILATION) == false) {
+ /* No job running, do not create a new one by calling WM_jobs_get. */
+ continue;
+ }
+ for (wmWindow *win = wm->windows.first; win; win = win->next) {
+ wmJob *wm_job = WM_jobs_get(wm, win, scene, "Shaders Compilation",
+ WM_JOB_PROGRESS | WM_JOB_SUSPEND, WM_JOB_TYPE_SHADER_COMPILATION);
+
+ DRWShaderCompiler *comp = (DRWShaderCompiler *)WM_jobs_customdata_get(wm_job);
+ if (comp != NULL) {
+ BLI_spin_lock(&comp->list_lock);
+ DRWDeferredShader *dsh;
+ dsh = (DRWDeferredShader *)BLI_findptr(&comp->queue, mat, offsetof(DRWDeferredShader, mat));
+ if (dsh) {
+ BLI_remlink(&comp->queue, dsh);
+ }
+
+ /* Wait for compilation to finish */
+ if (comp->mat_compiling != NULL) {
+ if (comp->mat_compiling->mat == mat) {
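+ /* Lock then unlock: this blocks until the worker thread finishes compiling this material. */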
+ BLI_mutex_lock(&comp->compilation_lock);
+ BLI_mutex_unlock(&comp->compilation_lock);
+ }
+ }
+ BLI_spin_unlock(&comp->list_lock);
+
+ if (dsh) {
+ drw_deferred_shader_free(dsh);
+ }
+ }
+ }
+ }
+}
+
+/** \} */
+
+/* -------------------------------------------------------------------- */
+
+GPUShader *DRW_shader_create(const char *vert, const char *geom, const char *frag, const char *defines)
+{
+ return GPU_shader_create(vert, frag, geom, NULL, defines);
+}
+
+GPUShader *DRW_shader_create_with_lib(
+ const char *vert, const char *geom, const char *frag, const char *lib, const char *defines)
+{
+ GPUShader *sh;
+ char *vert_with_lib = NULL;
+ char *frag_with_lib = NULL;
+ char *geom_with_lib = NULL;
+
+ vert_with_lib = BLI_string_joinN(lib, vert);
+ frag_with_lib = BLI_string_joinN(lib, frag);
+ if (geom) {
+ geom_with_lib = BLI_string_joinN(lib, geom);
+ }
+
+ sh = GPU_shader_create(vert_with_lib, frag_with_lib, geom_with_lib, NULL, defines);
+
+ MEM_freeN(vert_with_lib);
+ MEM_freeN(frag_with_lib);
+ if (geom) {
+ MEM_freeN(geom_with_lib);
+ }
+
+ return sh;
+}
+
+GPUShader *DRW_shader_create_2D(const char *frag, const char *defines)
+{
+ return GPU_shader_create(datatoc_gpu_shader_2D_vert_glsl, frag, NULL, NULL, defines);
+}
+
+GPUShader *DRW_shader_create_3D(const char *frag, const char *defines)
+{
+ return GPU_shader_create(datatoc_gpu_shader_3D_vert_glsl, frag, NULL, NULL, defines);
+}
+
+GPUShader *DRW_shader_create_fullscreen(const char *frag, const char *defines)
+{
+ return GPU_shader_create(datatoc_common_fullscreen_vert_glsl, frag, NULL, NULL, defines);
+}
+
+GPUShader *DRW_shader_create_3D_depth_only(void)
+{
+ return GPU_shader_get_builtin_shader(GPU_SHADER_3D_DEPTH_ONLY);
+}
+
+GPUMaterial *DRW_shader_find_from_world(World *wo, const void *engine_type, int options)
+{
+ GPUMaterial *mat = GPU_material_from_nodetree_find(&wo->gpumaterial, engine_type, options);
+ if (DRW_state_is_image_render()) {
+ if (mat != NULL && GPU_material_status(mat) == GPU_MAT_QUEUED) {
+ /* XXX Hack : we return NULL so that the engine will call DRW_shader_create_from_XXX
+ * with the shader code and we will resume the compilation from there. */
+ return NULL;
+ }
+ }
+ return mat;
+}
+
+GPUMaterial *DRW_shader_find_from_material(Material *ma, const void *engine_type, int options)
+{
+ GPUMaterial *mat = GPU_material_from_nodetree_find(&ma->gpumaterial, engine_type, options);
+ if (DRW_state_is_image_render()) {
+ if (mat != NULL && GPU_material_status(mat) == GPU_MAT_QUEUED) {
+ /* XXX Hack : we return NULL so that the engine will call DRW_shader_create_from_XXX
+ * with the shader code and we will resume the compilation from there. */
+ return NULL;
+ }
+ }
+ return mat;
+}
+
+GPUMaterial *DRW_shader_create_from_world(
+ struct Scene *scene, World *wo, const void *engine_type, int options,
+ const char *vert, const char *geom, const char *frag_lib, const char *defines)
+{
+ GPUMaterial *mat = NULL;
+ if (DRW_state_is_image_render()) {
+ mat = GPU_material_from_nodetree_find(&wo->gpumaterial, engine_type, options);
+ }
+
+ if (mat == NULL) {
+ mat = GPU_material_from_nodetree(
+ scene, wo->nodetree, &wo->gpumaterial, engine_type, options,
+ vert, geom, frag_lib, defines, true);
+ }
+
+ drw_deferred_shader_add(mat, vert, geom, frag_lib, defines);
+
+ return mat;
+}
+
+GPUMaterial *DRW_shader_create_from_material(
+ struct Scene *scene, Material *ma, const void *engine_type, int options,
+ const char *vert, const char *geom, const char *frag_lib, const char *defines)
+{
+ GPUMaterial *mat = NULL;
+ if (DRW_state_is_image_render()) {
+ mat = GPU_material_from_nodetree_find(&ma->gpumaterial, engine_type, options);
+ }
+
+ if (mat == NULL) {
+ mat = GPU_material_from_nodetree(
+ scene, ma->nodetree, &ma->gpumaterial, engine_type, options,
+ vert, geom, frag_lib, defines, true);
+ }
+
+ drw_deferred_shader_add(mat, vert, geom, frag_lib, defines);
+
+ return mat;
+}
+
+void DRW_shader_free(GPUShader *shader)
+{
+ GPU_shader_free(shader);
+}
diff --git a/source/blender/draw/intern/draw_manager_text.c b/source/blender/draw/intern/draw_manager_text.c
new file mode 100644
index 00000000000..56255af98ce
--- /dev/null
+++ b/source/blender/draw/intern/draw_manager_text.c
@@ -0,0 +1,199 @@
+/*
+ * Copyright 2016, Blender Foundation.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ */
+
+/** \file blender/draw/intern/draw_manager_text.c
+ * \ingroup draw
+ */
+
+#include "MEM_guardedalloc.h"
+
+#include "BLI_listbase.h"
+#include "BLI_string.h"
+#include "BLI_math.h"
+
+#include "BIF_gl.h"
+
+#include "GPU_matrix.h"
+
+#include "ED_screen.h"
+#include "ED_view3d.h"
+
+#include "UI_resources.h"
+#include "UI_interface.h"
+
+#include "WM_api.h"
+#include "BLF_api.h"
+
+#include "draw_manager_text.h"
+
+typedef struct ViewCachedString {
+ struct ViewCachedString *next, *prev;
+ float vec[3];
+ union {
+ unsigned char ub[4];
+ int pack;
+ } col;
+ short sco[2];
+ short xoffs;
+ short flag;
+ int str_len;
+
+ /* str is allocated past the end */
+ char str[0];
+} ViewCachedString;
+
+typedef struct DRWTextStore {
+ ListBase list;
+} DRWTextStore;
+
+DRWTextStore *DRW_text_cache_create(void)
+{
+ DRWTextStore *dt = MEM_callocN(sizeof(*dt), __func__);
+ return dt;
+}
+
+void DRW_text_cache_destroy(struct DRWTextStore *dt)
+{
+ BLI_freelistN(&dt->list);
+ MEM_freeN(dt);
+}
+
+void DRW_text_cache_add(
+ DRWTextStore *dt,
+ const float co[3],
+ const char *str, const int str_len,
+ short xoffs, short flag,
+ const unsigned char col[4])
+{
+ int alloc_len;
+ ViewCachedString *vos;
+
+ if (flag & DRW_TEXT_CACHE_STRING_PTR) {
+ BLI_assert(str_len == strlen(str));
+ alloc_len = sizeof(void *);
+ }
+ else {
+ alloc_len = str_len + 1;
+ }
+
+ vos = MEM_mallocN(sizeof(ViewCachedString) + alloc_len, __func__);
+
+ BLI_addtail(&dt->list, vos);
+
+ copy_v3_v3(vos->vec, co);
+ copy_v4_v4_uchar(vos->col.ub, col);
+ vos->xoffs = xoffs;
+ vos->flag = flag;
+ vos->str_len = str_len;
+
+ /* allocate past the end */
+ if (flag & DRW_TEXT_CACHE_STRING_PTR) {
+ memcpy(vos->str, &str, alloc_len);
+ }
+ else {
+ memcpy(vos->str, str, alloc_len);
+ }
+}
+
+void DRW_text_cache_draw(
+ DRWTextStore *dt,
+ View3D *v3d, ARegion *ar, bool depth_write)
+{
+ RegionView3D *rv3d = ar->regiondata;
+ ViewCachedString *vos;
+ int tot = 0;
+
+ /* project first and test */
+ for (vos = dt->list.first; vos; vos = vos->next) {
+ if (ED_view3d_project_short_ex(
+ ar,
+ (vos->flag & DRW_TEXT_CACHE_GLOBALSPACE) ? rv3d->persmat : rv3d->persmatob,
+ (vos->flag & DRW_TEXT_CACHE_LOCALCLIP) != 0,
+ vos->vec, vos->sco,
+ V3D_PROJ_TEST_CLIP_BB | V3D_PROJ_TEST_CLIP_WIN | V3D_PROJ_TEST_CLIP_NEAR) == V3D_PROJ_RET_OK)
+ {
+ tot++;
+ }
+ else {
+ vos->sco[0] = IS_CLIPPED;
+ }
+ }
+
+ if (tot) {
+ int col_pack_prev = 0;
+
+ if (rv3d->rflag & RV3D_CLIPPING) {
+ ED_view3d_clipping_disable();
+ }
+
+ float original_proj[4][4];
+ gpuGetProjectionMatrix(original_proj);
+ wmOrtho2_region_pixelspace(ar);
+
+ gpuPushMatrix();
+ gpuLoadIdentity();
+
+ if (depth_write) {
+ if (v3d->zbuf) glDisable(GL_DEPTH_TEST);
+ }
+ else {
+ glDepthMask(GL_FALSE);
+ }
+
+ const int font_id = BLF_default();
+
+ const uiStyle *style = UI_style_get();
+
+ BLF_size(font_id, style->widget.points * U.pixelsize, U.dpi);
+
+ for (vos = dt->list.first; vos; vos = vos->next) {
+ if (vos->sco[0] != IS_CLIPPED) {
+ if (col_pack_prev != vos->col.pack) {
+ BLF_color4ubv(font_id, vos->col.ub);
+ col_pack_prev = vos->col.pack;
+ }
+
+ BLF_position(
+ font_id,
+ (float)(vos->sco[0] + vos->xoffs), (float)(vos->sco[1]), (depth_write) ? 0.0f : 2.0f);
+
+ ((vos->flag & DRW_TEXT_CACHE_ASCII) ?
+ BLF_draw_ascii :
+ BLF_draw
+ )(font_id,
+ (vos->flag & DRW_TEXT_CACHE_STRING_PTR) ? *((const char **)vos->str) : vos->str,
+ vos->str_len);
+ }
+ }
+
+ if (depth_write) {
+ if (v3d->zbuf) glEnable(GL_DEPTH_TEST);
+ }
+ else {
+ glDepthMask(GL_TRUE);
+ }
+
+ gpuPopMatrix();
+ gpuLoadProjectionMatrix(original_proj);
+
+ if (rv3d->rflag & RV3D_CLIPPING) {
+ ED_view3d_clipping_enable();
+ }
+ }
+}
diff --git a/source/blender/draw/intern/draw_manager_text.h b/source/blender/draw/intern/draw_manager_text.h
new file mode 100644
index 00000000000..a58e167be0d
--- /dev/null
+++ b/source/blender/draw/intern/draw_manager_text.h
@@ -0,0 +1,54 @@
+/*
+ * Copyright 2016, Blender Foundation.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ */
+
+/** \file blender/draw/intern/draw_manager_text.h
+ * \ingroup draw
+ */
+
+#ifndef __DRAW_MANAGER_TEXT_H__
+#define __DRAW_MANAGER_TEXT_H__
+
+struct DRWTextStore;
+
+struct DRWTextStore *DRW_text_cache_create(void);
+void DRW_text_cache_destroy(struct DRWTextStore *dt);
+
+void DRW_text_cache_add(
+ struct DRWTextStore *dt,
+ const float co[3],
+ const char *str, const int str_len,
+ short xoffs, short flag,
+ const unsigned char col[4]);
+
+void DRW_text_cache_draw(
+ struct DRWTextStore *dt,
+ struct View3D *v3d, struct ARegion *ar, bool depth_write);
+
+enum {
+ DRW_TEXT_CACHE_ASCII = (1 << 0),
+ DRW_TEXT_CACHE_GLOBALSPACE = (1 << 1),
+ DRW_TEXT_CACHE_LOCALCLIP = (1 << 2),
+ /* reference the string by pointer */
+ DRW_TEXT_CACHE_STRING_PTR = (1 << 3),
+};
+
+/* draw_manager.c */
+struct DRWTextStore *DRW_text_cache_ensure(void);
+
+#endif /* __DRAW_MANAGER_TEXT_H__ */
diff --git a/source/blender/draw/intern/draw_manager_texture.c b/source/blender/draw/intern/draw_manager_texture.c
new file mode 100644
index 00000000000..65856a6bf5c
--- /dev/null
+++ b/source/blender/draw/intern/draw_manager_texture.c
@@ -0,0 +1,237 @@
+/*
+ * Copyright 2016, Blender Foundation.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * Contributor(s): Blender Institute
+ *
+ */
+
+/** \file blender/draw/intern/draw_manager_texture.c
+ * \ingroup draw
+ */
+
+#include "draw_manager.h"
+
+void drw_texture_get_format(
+ DRWTextureFormat format, bool is_framebuffer,
+ GPUTextureFormat *r_data_type, int *r_channels, bool *r_is_depth)
+{
+ /* Some formats do not work with framebuffers. */
+ if (is_framebuffer) {
+ switch (format) {
+ /* Only add formats that are COMPATIBLE with FB.
+ * Generally they are multiples of 16 bits. */
+ case DRW_TEX_R_16:
+ case DRW_TEX_R_16I:
+ case DRW_TEX_R_32:
+ case DRW_TEX_RG_8:
+ case DRW_TEX_RG_16:
+ case DRW_TEX_RG_16I:
+ case DRW_TEX_RG_32:
+ case DRW_TEX_RGBA_8:
+ case DRW_TEX_RGBA_16:
+ case DRW_TEX_RGBA_32:
+ case DRW_TEX_DEPTH_16:
+ case DRW_TEX_DEPTH_24:
+ case DRW_TEX_DEPTH_24_STENCIL_8:
+ case DRW_TEX_DEPTH_32:
+ case DRW_TEX_RGB_11_11_10:
+ break;
+ default:
+ BLI_assert(false && "Texture format unsupported as render target!");
+ *r_channels = 4;
+ *r_data_type = GPU_RGBA8;
+ if (r_is_depth) {
+ *r_is_depth = false;
+ }
+ return;
+ }
+ }
+
+ switch (format) {
+ case DRW_TEX_RGBA_8: *r_data_type = GPU_RGBA8; break;
+ case DRW_TEX_RGBA_16: *r_data_type = GPU_RGBA16F; break;
+ case DRW_TEX_RGBA_32: *r_data_type = GPU_RGBA32F; break;
+ case DRW_TEX_RGB_16: *r_data_type = GPU_RGB16F; break;
+ case DRW_TEX_RGB_11_11_10: *r_data_type = GPU_R11F_G11F_B10F; break;
+ case DRW_TEX_RG_8: *r_data_type = GPU_RG8; break;
+ case DRW_TEX_RG_16: *r_data_type = GPU_RG16F; break;
+ case DRW_TEX_RG_16I: *r_data_type = GPU_RG16I; break;
+ case DRW_TEX_RG_32: *r_data_type = GPU_RG32F; break;
+ case DRW_TEX_R_8: *r_data_type = GPU_R8; break;
+ case DRW_TEX_R_16: *r_data_type = GPU_R16F; break;
+ case DRW_TEX_R_16I: *r_data_type = GPU_R16I; break;
+ case DRW_TEX_R_32: *r_data_type = GPU_R32F; break;
+#if 0
+ case DRW_TEX_RGB_8: *r_data_type = GPU_RGB8; break;
+ case DRW_TEX_RGB_32: *r_data_type = GPU_RGB32F; break;
+#endif
+ case DRW_TEX_DEPTH_16: *r_data_type = GPU_DEPTH_COMPONENT16; break;
+ case DRW_TEX_DEPTH_24: *r_data_type = GPU_DEPTH_COMPONENT24; break;
+ case DRW_TEX_DEPTH_24_STENCIL_8: *r_data_type = GPU_DEPTH24_STENCIL8; break;
+ case DRW_TEX_DEPTH_32: *r_data_type = GPU_DEPTH_COMPONENT32F; break;
+ default:
+ /* Texture format not supported; you must uncomment it in the block above. */
+ BLI_assert(false);
+ break;
+ }
+
+ switch (format) {
+ case DRW_TEX_RGBA_8:
+ case DRW_TEX_RGBA_16:
+ case DRW_TEX_RGBA_32:
+ *r_channels = 4;
+ break;
+ case DRW_TEX_RGB_8:
+ case DRW_TEX_RGB_16:
+ case DRW_TEX_RGB_32:
+ case DRW_TEX_RGB_11_11_10:
+ *r_channels = 3;
+ break;
+ case DRW_TEX_RG_8:
+ case DRW_TEX_RG_16:
+ case DRW_TEX_RG_16I:
+ case DRW_TEX_RG_32:
+ *r_channels = 2;
+ break;
+ default:
+ *r_channels = 1;
+ break;
+ }
+
+ if (r_is_depth) {
+ *r_is_depth = ELEM(format, DRW_TEX_DEPTH_16, DRW_TEX_DEPTH_24, DRW_TEX_DEPTH_24_STENCIL_8, DRW_TEX_DEPTH_32);
+ }
+}
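+
+/* Illustrative sketch, kept disabled: how a caller would typically use
+ * drw_texture_get_format(). The function name and format choice below are
+ * made up for the example; the query/create pattern mirrors the
+ * DRW_texture_create_* helpers further down in this file. */
+#if 0
+static GPUTexture *example_create_color_target(int w, int h)
+{
+ GPUTextureFormat data_type;
+ int channels;
+
+ /* Ask for a framebuffer-compatible format; unsupported formats assert
+ * and fall back to GPU_RGBA8. */
+ drw_texture_get_format(DRW_TEX_RGBA_16, true, &data_type, &channels, NULL);
+
+ return GPU_texture_create_2D_custom(w, h, channels, data_type, NULL, NULL);
+}
+#endif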
+
+void drw_texture_set_parameters(GPUTexture *tex, DRWTextureFlag flags)
+{
+ GPU_texture_bind(tex, 0);
+ if (flags & DRW_TEX_MIPMAP) {
+ GPU_texture_mipmap_mode(tex, true, flags & DRW_TEX_FILTER);
+ GPU_texture_generate_mipmap(tex);
+ }
+ else {
+ GPU_texture_filter_mode(tex, flags & DRW_TEX_FILTER);
+ }
+ GPU_texture_wrap_mode(tex, flags & DRW_TEX_WRAP);
+ GPU_texture_compare_mode(tex, flags & DRW_TEX_COMPARE);
+ GPU_texture_unbind(tex);
+}
+
+GPUTexture *DRW_texture_create_1D(int w, DRWTextureFormat format, DRWTextureFlag flags, const float *fpixels)
+{
+ GPUTexture *tex;
+ GPUTextureFormat data_type;
+ int channels;
+
+ drw_texture_get_format(format, false, &data_type, &channels, NULL);
+ tex = GPU_texture_create_1D_custom(w, channels, data_type, fpixels, NULL);
+ drw_texture_set_parameters(tex, flags);
+
+ return tex;
+}
+
+GPUTexture *DRW_texture_create_2D(int w, int h, DRWTextureFormat format, DRWTextureFlag flags, const float *fpixels)
+{
+ GPUTexture *tex;
+ GPUTextureFormat data_type;
+ int channels;
+
+ drw_texture_get_format(format, false, &data_type, &channels, NULL);
+ tex = GPU_texture_create_2D_custom(w, h, channels, data_type, fpixels, NULL);
+ drw_texture_set_parameters(tex, flags);
+
+ return tex;
+}
+
+GPUTexture *DRW_texture_create_2D_array(
+ int w, int h, int d, DRWTextureFormat format, DRWTextureFlag flags, const float *fpixels)
+{
+ GPUTexture *tex;
+ GPUTextureFormat data_type;
+ int channels;
+
+ drw_texture_get_format(format, false, &data_type, &channels, NULL);
+ tex = GPU_texture_create_2D_array_custom(w, h, d, channels, data_type, fpixels, NULL);
+ drw_texture_set_parameters(tex, flags);
+
+ return tex;
+}
+
+GPUTexture *DRW_texture_create_3D(
+ int w, int h, int d, DRWTextureFormat format, DRWTextureFlag flags, const float *fpixels)
+{
+ GPUTexture *tex;
+ GPUTextureFormat data_type;
+ int channels;
+
+ drw_texture_get_format(format, false, &data_type, &channels, NULL);
+ tex = GPU_texture_create_3D_custom(w, h, d, channels, data_type, fpixels, NULL);
+ drw_texture_set_parameters(tex, flags);
+
+ return tex;
+}
+
+GPUTexture *DRW_texture_create_cube(int w, DRWTextureFormat format, DRWTextureFlag flags, const float *fpixels)
+{
+ GPUTexture *tex;
+ GPUTextureFormat data_type;
+ int channels;
+
+ drw_texture_get_format(format, false, &data_type, &channels, NULL);
+ tex = GPU_texture_create_cube_custom(w, channels, data_type, fpixels, NULL);
+ drw_texture_set_parameters(tex, flags);
+
+ return tex;
+}
+
+GPUTexture *DRW_texture_pool_query_2D(int w, int h, DRWTextureFormat format, DrawEngineType *engine_type)
+{
+ GPUTexture *tex;
+ GPUTextureFormat data_type;
+ int channels;
+
+ drw_texture_get_format(format, true, &data_type, &channels, NULL);
+ tex = GPU_viewport_texture_pool_query(DST.viewport, engine_type, w, h, channels, data_type);
+
+ return tex;
+}
+
+void DRW_texture_ensure_fullscreen_2D(GPUTexture **tex, DRWTextureFormat format, DRWTextureFlag flags)
+{
+ if (*(tex) == NULL) {
+ const float *size = DRW_viewport_size_get();
+ *(tex) = DRW_texture_create_2D((int)size[0], (int)size[1], format, flags, NULL);
+ }
+}
+
+void DRW_texture_ensure_2D(GPUTexture **tex, int w, int h, DRWTextureFormat format, DRWTextureFlag flags)
+{
+ if (*(tex) == NULL) {
+ *(tex) = DRW_texture_create_2D(w, h, format, flags, NULL);
+ }
+}
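+
+/* Illustrative sketch, kept disabled: the two ensure functions above implement
+ * a lazy-creation pattern, so an engine can call them every frame and the
+ * texture is only allocated on the first call. The struct and function names
+ * below are hypothetical. */
+#if 0
+typedef struct ExampleEngineTextures {
+ GPUTexture *color_tx; /* full viewport size */
+ GPUTexture *lut_tx; /* fixed-size lookup table */
+} ExampleEngineTextures;
+
+static void example_engine_init_textures(ExampleEngineTextures *txl)
+{
+ /* No-ops after the first frame, once the pointers are non-NULL. */
+ DRW_texture_ensure_fullscreen_2D(&txl->color_tx, DRW_TEX_RGBA_16, DRW_TEX_FILTER);
+ DRW_texture_ensure_2D(&txl->lut_tx, 64, 64, DRW_TEX_RG_16, DRW_TEX_FILTER);
+}
+#endif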
+
+void DRW_texture_generate_mipmaps(GPUTexture *tex)
+{
+ GPU_texture_bind(tex, 0);
+ GPU_texture_generate_mipmap(tex);
+ GPU_texture_unbind(tex);
+}
+
+void DRW_texture_free(GPUTexture *tex)
+{
+ GPU_texture_free(tex);
+}
diff --git a/source/blender/draw/intern/draw_view.c b/source/blender/draw/intern/draw_view.c
new file mode 100644
index 00000000000..f38a7689c06
--- /dev/null
+++ b/source/blender/draw/intern/draw_view.c
@@ -0,0 +1,703 @@
+/*
+ * Copyright 2016, Blender Foundation.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * Contributor(s): Blender Institute
+ *
+ */
+
+/** \file blender/draw/intern/draw_view.c
+ * \ingroup draw
+ *
+ * Contains dynamic drawing using immediate mode
+ */
+
+#include "DNA_brush_types.h"
+#include "DNA_screen_types.h"
+#include "DNA_userdef_types.h"
+#include "DNA_view3d_types.h"
+
+#include "ED_screen.h"
+#include "ED_transform.h"
+#include "ED_view3d.h"
+
+#include "GPU_draw.h"
+#include "GPU_shader.h"
+#include "GPU_immediate.h"
+#include "GPU_matrix.h"
+
+#include "UI_resources.h"
+
+#include "WM_api.h"
+#include "WM_types.h"
+
+#include "BKE_global.h"
+#include "BKE_object.h"
+#include "BKE_paint.h"
+#include "BKE_unit.h"
+
+#include "DRW_render.h"
+
+#include "view3d_intern.h"
+
+#include "draw_view.h"
+
+/* ******************** region info ***************** */
+
+void DRW_draw_region_info(void)
+{
+ const DRWContextState *draw_ctx = DRW_context_state_get();
+ ARegion *ar = draw_ctx->ar;
+ int offset;
+
+ DRW_draw_cursor();
+
+ offset = DRW_draw_region_engine_info_offset();
+
+ view3d_draw_region_info(draw_ctx->evil_C, ar, offset);
+
+ if (offset > 0) {
+ DRW_draw_region_engine_info();
+ }
+}
+
+/* ************************* Grid ************************** */
+
+static void gridline_range(double x0, double dx, double max, int *r_first, int *r_count)
+{
+ /* determine range of gridlines that appear in this Area -- similar calc but separate ranges for x & y
+ * x0 is gridline 0, the axis in screen space
+ * Area covers [0 .. max) pixels */
+
+ int first = (int)ceil(-x0 / dx);
+ int last = (int)floor((max - x0) / dx);
+
+ if (first <= last) {
+ *r_first = first;
+ *r_count = last - first + 1;
+ }
+ else {
+ *r_first = 0;
+ *r_count = 0;
+ }
+}
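+
+/* Worked example (illustrative numbers): with the axis at x0 = 100 px, a line
+ * spacing of dx = 40 px and a region max = 200 px wide, gridlines fall at
+ * 100 + i * 40 px, i.e. at 20, 60, 100, 140 and 180 px. So
+ * first = ceil(-100 / 40) = -2, last = floor((200 - 100) / 40) = 2 and
+ * count = 2 - (-2) + 1 = 5. */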
+
+static int gridline_count(ARegion *ar, double x0, double y0, double dx)
+{
+ /* x0 & y0 establish the "phase" of the grid within this 2D region
+ * dx is the frequency, shared by x & y directions
+ * pass in dx of smallest (highest precision) grid we want to draw */
+
+ int first, x_ct, y_ct;
+
+ gridline_range(x0, dx, ar->winx, &first, &x_ct);
+ gridline_range(y0, dx, ar->winy, &first, &y_ct);
+
+ int total_ct = x_ct + y_ct;
+
+ return total_ct;
+}
+
+static bool drawgrid_draw(
+ ARegion *ar, double x0, double y0, double dx, int skip_mod,
+ unsigned pos, unsigned col, GLubyte col_value[3])
+{
+ /* Skip every skip_mod-th line relative to each axis; those are overlaid by another drawgrid_draw() pass.
+ * Always skip the exact x0 & y0 axes; they are drawn later in their own colors.
+ *
+ * Set the grid color once, just before the first line is drawn.
+ * It is harmless to set the same color for every line, or for every vertex,
+ * but if no lines are drawn, the color must not be set! */
+
+ const float x_max = (float)ar->winx;
+ const float y_max = (float)ar->winy;
+
+ int first, ct;
+ int x_ct = 0, y_ct = 0; /* count of lines actually drawn */
+ int lines_skipped_for_next_unit = 0;
+
+ /* draw vertical lines */
+ gridline_range(x0, dx, x_max, &first, &ct);
+
+ for (int i = first; i < first + ct; ++i) {
+ if (i == 0)
+ continue;
+ else if (skip_mod && (i % skip_mod) == 0) {
+ ++lines_skipped_for_next_unit;
+ continue;
+ }
+
+ if (x_ct == 0)
+ immAttrib3ub(col, col_value[0], col_value[1], col_value[2]);
+
+ float x = (float)(x0 + i * dx);
+ immVertex2f(pos, x, 0.0f);
+ immVertex2f(pos, x, y_max);
+ ++x_ct;
+ }
+
+ /* draw horizontal lines */
+ gridline_range(y0, dx, y_max, &first, &ct);
+
+ for (int i = first; i < first + ct; ++i) {
+ if (i == 0)
+ continue;
+ else if (skip_mod && (i % skip_mod) == 0) {
+ ++lines_skipped_for_next_unit;
+ continue;
+ }
+
+ if (x_ct + y_ct == 0)
+ immAttrib3ub(col, col_value[0], col_value[1], col_value[2]);
+
+ float y = (float)(y0 + i * dx);
+ immVertex2f(pos, 0.0f, y);
+ immVertex2f(pos, x_max, y);
+ ++y_ct;
+ }
+
+ return lines_skipped_for_next_unit > 0;
+}
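+
+/* Worked example for skip_mod (illustrative): with skip_mod = 10, every 10th
+ * line relative to the axis (i = ..., -10, 10, 20, ...) is skipped here and
+ * drawn again by a following, coarser drawgrid_draw() pass in a stronger
+ * color; i == 0 is always skipped because the axes are drawn separately at
+ * the end of drawgrid(). The return value tells the caller whether such a
+ * coarser pass is actually needed. */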
+
+#define GRID_MIN_PX_D 6.0
+#define GRID_MIN_PX_F 6.0f
+
+static void drawgrid(UnitSettings *unit, ARegion *ar, View3D *v3d, const char **grid_unit)
+{
+ RegionView3D *rv3d = ar->regiondata;
+
+ double fx = rv3d->persmat[3][0];
+ double fy = rv3d->persmat[3][1];
+ double fw = rv3d->persmat[3][3];
+
+ const double wx = 0.5 * ar->winx; /* use double precision to avoid rounding errors */
+ const double wy = 0.5 * ar->winy;
+
+ double x = wx * fx / fw;
+ double y = wy * fy / fw;
+
+ double vec4[4] = { v3d->grid, v3d->grid, 0.0, 1.0 };
+ mul_m4_v4d(rv3d->persmat, vec4);
+ fx = vec4[0];
+ fy = vec4[1];
+ fw = vec4[3];
+
+ double dx = fabs(x - wx * fx / fw);
+ if (dx == 0) dx = fabs(y - wy * fy / fw);
+
+ x += wx;
+ y += wy;
+
+ /* now x, y, and dx have their final values
+ * (x,y) is the world origin (0,0,0) mapped to Area-relative screen space
+ * dx is the distance in pixels between grid lines -- same for horiz or vert grid lines */
+
+ glLineWidth(1.0f);
+
+#if 0 /* TODO: write to UI/widget depth buffer, not scene depth */
+ glDepthMask(GL_FALSE); /* disable write in zbuffer */
+#endif
+
+ Gwn_VertFormat *format = immVertexFormat();
+ unsigned int pos = GWN_vertformat_attr_add(format, "pos", GWN_COMP_F32, 2, GWN_FETCH_FLOAT);
+ unsigned int color = GWN_vertformat_attr_add(format, "color", GWN_COMP_U8, 3, GWN_FETCH_INT_TO_FLOAT_UNIT);
+
+ immBindBuiltinProgram(GPU_SHADER_2D_FLAT_COLOR);
+
+ unsigned char col[3], col2[3];
+ UI_GetThemeColor3ubv(TH_GRID, col);
+
+ if (unit->system) {
+ const void *usys;
+ int len;
+
+ bUnit_GetSystem(unit->system, B_UNIT_LENGTH, &usys, &len);
+
+ bool first = true;
+
+ if (usys) {
+ int i = len;
+ while (i--) {
+ double scalar = bUnit_GetScaler(usys, i);
+
+ double dx_scalar = dx * scalar / (double)unit->scale_length;
+ if (dx_scalar < (GRID_MIN_PX_D * 2.0)) {
+ /* very very small grid items are less useful when dealing with units */
+ continue;
+ }
+
+ if (first) {
+ first = false;
+
+ /* Store the name of the smallest drawn grid unit so users know how big each grid cell is. */
+ *grid_unit = bUnit_GetNameDisplay(usys, i);
+ rv3d->gridview = (float)((scalar * (double)v3d->grid) / (double)unit->scale_length);
+
+ int gridline_ct = gridline_count(ar, x, y, dx_scalar);
+ if (gridline_ct == 0)
+ goto drawgrid_cleanup; /* nothing to draw */
+
+ immBegin(GWN_PRIM_LINES, gridline_ct * 2);
+ }
+
+ float blend_fac = 1.0f - ((GRID_MIN_PX_F * 2.0f) / (float)dx_scalar);
+ /* tweak to have the fade a bit nicer */
+ blend_fac = (blend_fac * blend_fac) * 2.0f;
+ CLAMP(blend_fac, 0.3f, 1.0f);
+
+ UI_GetThemeColorBlend3ubv(TH_HIGH_GRAD, TH_GRID, blend_fac, col2);
+
+ const int skip_mod = (i == 0) ? 0 : (int)round(bUnit_GetScaler(usys, i - 1) / scalar);
+ if (!drawgrid_draw(ar, x, y, dx_scalar, skip_mod, pos, color, col2))
+ break;
+ }
+ }
+ }
+ else {
+ const double sublines = v3d->gridsubdiv;
+ const float sublines_fl = v3d->gridsubdiv;
+
+ int grids_to_draw = 2; /* first the faint fine grid, then the bold coarse grid */
+
+ if (dx < GRID_MIN_PX_D) {
+ rv3d->gridview *= sublines_fl;
+ dx *= sublines;
+ if (dx < GRID_MIN_PX_D) {
+ rv3d->gridview *= sublines_fl;
+ dx *= sublines;
+ if (dx < GRID_MIN_PX_D) {
+ rv3d->gridview *= sublines_fl;
+ dx *= sublines;
+ grids_to_draw = (dx < GRID_MIN_PX_D) ? 0 : 1;
+ }
+ }
+ }
+ else {
+ if (dx > (GRID_MIN_PX_D * 10.0)) { /* start blending in */
+ rv3d->gridview /= sublines_fl;
+ dx /= sublines;
+ if (dx > (GRID_MIN_PX_D * 10.0)) { /* start blending in */
+ rv3d->gridview /= sublines_fl;
+ dx /= sublines;
+ if (dx > (GRID_MIN_PX_D * 10.0)) {
+ grids_to_draw = 1;
+ }
+ }
+ }
+ }
+
+ int gridline_ct = gridline_count(ar, x, y, dx);
+ if (gridline_ct == 0)
+ goto drawgrid_cleanup; /* nothing to draw */
+
+ immBegin(GWN_PRIM_LINES, gridline_ct * 2);
+
+ if (grids_to_draw == 2) {
+ UI_GetThemeColorBlend3ubv(TH_HIGH_GRAD, TH_GRID, dx / (GRID_MIN_PX_D * 6.0), col2);
+ if (drawgrid_draw(ar, x, y, dx, v3d->gridsubdiv, pos, color, col2)) {
+ drawgrid_draw(ar, x, y, dx * sublines, 0, pos, color, col);
+ }
+ }
+ else if (grids_to_draw == 1) {
+ drawgrid_draw(ar, x, y, dx, 0, pos, color, col);
+ }
+ }
+
+ /* draw visible axes */
+ /* horizontal line */
+ if (0 <= y && y < ar->winy) {
+ UI_make_axis_color(col, col2, ELEM(rv3d->view, RV3D_VIEW_RIGHT, RV3D_VIEW_LEFT) ? 'Y' : 'X');
+ immAttrib3ub(color, col2[0], col2[1], col2[2]);
+ immVertex2f(pos, 0.0f, y);
+ immVertex2f(pos, (float)ar->winx, y);
+ }
+
+ /* vertical line */
+ if (0 <= x && x < ar->winx) {
+ UI_make_axis_color(col, col2, ELEM(rv3d->view, RV3D_VIEW_TOP, RV3D_VIEW_BOTTOM) ? 'Y' : 'Z');
+ immAttrib3ub(color, col2[0], col2[1], col2[2]);
+ immVertex2f(pos, x, 0.0f);
+ immVertex2f(pos, x, (float)ar->winy);
+ }
+
+ immEnd();
+
+drawgrid_cleanup:
+ immUnbindProgram();
+
+#if 0 /* depth write is left enabled above */
+ glDepthMask(GL_TRUE); /* enable write in zbuffer */
+#endif
+}
+
+#undef DEBUG_GRID
+#undef GRID_MIN_PX_D
+#undef GRID_MIN_PX_F
+
+static void drawfloor(Scene *scene, View3D *v3d, const char **grid_unit)
+{
+ /* draw only if there is something to draw */
+ if (v3d->gridflag & (V3D_SHOW_FLOOR | V3D_SHOW_X | V3D_SHOW_Y | V3D_SHOW_Z)) {
+ /* draw how many lines?
+ * trunc(v3d->gridlines / 2) * 4
+ * + 2 for xy axes (possibly with special colors)
+ * + 1 for z axis (the only line not in xy plane)
+ * even values of v3d->gridlines are honored, odd values are rounded down */
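+ /* Worked example (illustrative): with v3d->gridlines = 16 the code below uses
+ * gridlines = 8, giving 8 * 4 = 32 grid lines plus the X and Y axis lines,
+ * i.e. vertex_ct = 2 * (8 * 4 + 2) = 68 vertices for the XY plane; the Z axis
+ * is drawn by the separate axis pass near the end of this function. */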
+ const int gridlines = v3d->gridlines / 2;
+ const float grid_scale = ED_view3d_grid_scale(scene, v3d, grid_unit);
+ const float grid = gridlines * grid_scale;
+
+ const bool show_floor = (v3d->gridflag & V3D_SHOW_FLOOR) && gridlines >= 1;
+
+ bool show_axis_x = (v3d->gridflag & V3D_SHOW_X) != 0;
+ bool show_axis_y = (v3d->gridflag & V3D_SHOW_Y) != 0;
+ bool show_axis_z = (v3d->gridflag & V3D_SHOW_Z) != 0;
+
+ unsigned char col_grid[3], col_axis[3];
+
+ glLineWidth(1.0f);
+
+ UI_GetThemeColor3ubv(TH_GRID, col_grid);
+
+ if (show_floor) {
+ const unsigned vertex_ct = 2 * (gridlines * 4 + 2);
+ const int sublines = v3d->gridsubdiv;
+
+ unsigned char col_bg[3], col_grid_emphasise[3], col_grid_light[3];
+
+ Gwn_VertFormat *format = immVertexFormat();
+ unsigned int pos = GWN_vertformat_attr_add(format, "pos", GWN_COMP_F32, 2, GWN_FETCH_FLOAT);
+ unsigned int color = GWN_vertformat_attr_add(format, "color", GWN_COMP_U8, 3, GWN_FETCH_INT_TO_FLOAT_UNIT);
+
+ immBindBuiltinProgram(GPU_SHADER_3D_FLAT_COLOR);
+
+ immBegin(GWN_PRIM_LINES, vertex_ct);
+
+ /* draw normal grid lines */
+ UI_GetColorPtrShade3ubv(col_grid, col_grid_light, 10);
+
+ for (int a = 1; a <= gridlines; a++) {
+ /* skip emphasised divider lines */
+ if (a % sublines != 0) {
+ const float line = a * grid_scale;
+
+ immAttrib3ubv(color, col_grid_light);
+
+ immVertex2f(pos, -grid, -line);
+ immVertex2f(pos, +grid, -line);
+ immVertex2f(pos, -grid, +line);
+ immVertex2f(pos, +grid, +line);
+
+ immVertex2f(pos, -line, -grid);
+ immVertex2f(pos, -line, +grid);
+ immVertex2f(pos, +line, -grid);
+ immVertex2f(pos, +line, +grid);
+ }
+ }
+
+ /* draw emphasised grid lines */
+ UI_GetThemeColor3ubv(TH_BACK, col_bg);
+ /* emphasise division lines lighter instead of darker, if background is darker than grid */
+ UI_GetColorPtrShade3ubv(col_grid, col_grid_emphasise,
+ (col_grid[0] + col_grid[1] + col_grid[2] + 30 >
+ col_bg[0] + col_bg[1] + col_bg[2]) ? 20 : -10);
+
+ if (sublines <= gridlines) {
+ immAttrib3ubv(color, col_grid_emphasise);
+
+ for (int a = sublines; a <= gridlines; a += sublines) {
+ const float line = a * grid_scale;
+
+ immVertex2f(pos, -grid, -line);
+ immVertex2f(pos, +grid, -line);
+ immVertex2f(pos, -grid, +line);
+ immVertex2f(pos, +grid, +line);
+
+ immVertex2f(pos, -line, -grid);
+ immVertex2f(pos, -line, +grid);
+ immVertex2f(pos, +line, -grid);
+ immVertex2f(pos, +line, +grid);
+ }
+ }
+
+ /* draw X axis */
+ if (show_axis_x) {
+ show_axis_x = false; /* drawing now, won't need to draw later */
+ UI_make_axis_color(col_grid, col_axis, 'X');
+ immAttrib3ubv(color, col_axis);
+ }
+ else
+ immAttrib3ubv(color, col_grid_emphasise);
+
+ immVertex2f(pos, -grid, 0.0f);
+ immVertex2f(pos, +grid, 0.0f);
+
+ /* draw Y axis */
+ if (show_axis_y) {
+ show_axis_y = false; /* drawing now, won't need to draw later */
+ UI_make_axis_color(col_grid, col_axis, 'Y');
+ immAttrib3ubv(color, col_axis);
+ }
+ else
+ immAttrib3ubv(color, col_grid_emphasise);
+
+ immVertex2f(pos, 0.0f, -grid);
+ immVertex2f(pos, 0.0f, +grid);
+
+ immEnd();
+ immUnbindProgram();
+
+ /* done with XY plane */
+ }
+
+ if (show_axis_x || show_axis_y || show_axis_z) {
+ /* draw axis lines -- sometimes grid floor is off, other times we still need to draw the Z axis */
+
+ Gwn_VertFormat *format = immVertexFormat();
+ unsigned int pos = GWN_vertformat_attr_add(format, "pos", GWN_COMP_F32, 3, GWN_FETCH_FLOAT);
+ unsigned int color = GWN_vertformat_attr_add(format, "color", GWN_COMP_U8, 3, GWN_FETCH_INT_TO_FLOAT_UNIT);
+
+ immBindBuiltinProgram(GPU_SHADER_3D_FLAT_COLOR);
+ immBegin(GWN_PRIM_LINES, (show_axis_x + show_axis_y + show_axis_z) * 2);
+
+ if (show_axis_x) {
+ UI_make_axis_color(col_grid, col_axis, 'X');
+ immAttrib3ubv(color, col_axis);
+ immVertex3f(pos, -grid, 0.0f, 0.0f);
+ immVertex3f(pos, +grid, 0.0f, 0.0f);
+ }
+
+ if (show_axis_y) {
+ UI_make_axis_color(col_grid, col_axis, 'Y');
+ immAttrib3ubv(color, col_axis);
+ immVertex3f(pos, 0.0f, -grid, 0.0f);
+ immVertex3f(pos, 0.0f, +grid, 0.0f);
+ }
+
+ if (show_axis_z) {
+ UI_make_axis_color(col_grid, col_axis, 'Z');
+ immAttrib3ubv(color, col_axis);
+ immVertex3f(pos, 0.0f, 0.0f, -grid);
+ immVertex3f(pos, 0.0f, 0.0f, +grid);
+ }
+
+ immEnd();
+ immUnbindProgram();
+ }
+ }
+}
+
+void DRW_draw_grid(void)
+{
+ /* TODO viewport:
+ * the flags to check whether to draw the grid are still missing,
+ * so for now we use the flags in v3d itself.
+ *
+ * Also, for now, always assume a depth buffer is there, so we
+ * draw on top of it.
+ */
+ /**
+ * Calculate the pixel-size factor once; it is used for lamps and object centers.
+ * Used by #ED_view3d_pixel_size and typically not accessed directly.
+ *
+ * \note #BKE_camera_params_compute_viewplane also calculates a pixel-size value,
+ * passed to #RE_SetPixelSize; in ortho mode this is compatible with this value,
+ * but in perspective mode it is offset by the near-clip.
+ *
+ * 'RegionView3D.pixsize' is used for viewport drawing, not rendering.
+ */
+ const DRWContextState *draw_ctx = DRW_context_state_get();
+ Scene *scene = draw_ctx->scene;
+ View3D *v3d = draw_ctx->v3d;
+ ARegion *ar = draw_ctx->ar;
+ RegionView3D *rv3d = draw_ctx->rv3d;
+
+ const bool draw_floor = (rv3d->view == RV3D_VIEW_USER) || (rv3d->persp != RV3D_ORTHO);
+ const char *grid_unit = NULL;
+
+ /* The ortho grid goes first: it does not write to the depth buffer and does not need the depth test,
+ * so it would override objects if drawn last.
+ * This always needs to run, since gridview is adjusted in drawgrid(), but only for ortho views.
+ */
+ rv3d->gridview = ED_view3d_grid_scale(scene, v3d, &grid_unit);
+
+ glEnable(GL_DEPTH_TEST);
+ glDepthFunc(GL_LESS);
+
+ if (!draw_floor) {
+ /* Do not get in front of overlays */
+ glDepthMask(GL_FALSE);
+
+ ED_region_pixelspace(ar);
+ grid_unit = NULL; /* drawgrid() needs this to detect/affect the smallest valid unit. */
+ drawgrid(&scene->unit, ar, v3d, &grid_unit);
+
+ gpuLoadProjectionMatrix(rv3d->winmat);
+ gpuLoadMatrix(rv3d->viewmat);
+ }
+ else {
+ glDepthMask(GL_TRUE);
+ drawfloor(scene, v3d, &grid_unit);
+ }
+}
+
+/* ************************* Background ************************** */
+
+void DRW_draw_background(void)
+{
+ /* Just to make sure */
+ glDepthMask(GL_TRUE);
+ glColorMask(GL_TRUE, GL_TRUE, GL_TRUE, GL_TRUE);
+ glStencilMask(0xFF);
+
+ if (UI_GetThemeValue(TH_SHOW_BACK_GRAD)) {
+ float m[4][4];
+ unit_m4(m);
+
+ /* Gradient background Color */
+ glDisable(GL_DEPTH_TEST);
+
+ Gwn_VertFormat *format = immVertexFormat();
+ unsigned pos = GWN_vertformat_attr_add(format, "pos", GWN_COMP_F32, 2, GWN_FETCH_FLOAT);
+ unsigned color = GWN_vertformat_attr_add(format, "color", GWN_COMP_U8, 3, GWN_FETCH_INT_TO_FLOAT_UNIT);
+ unsigned char col_hi[3], col_lo[3];
+
+ gpuPushMatrix();
+ gpuLoadIdentity();
+ gpuLoadProjectionMatrix(m);
+
+ immBindBuiltinProgram(GPU_SHADER_2D_SMOOTH_COLOR);
+
+ UI_GetThemeColor3ubv(TH_LOW_GRAD, col_lo);
+ UI_GetThemeColor3ubv(TH_HIGH_GRAD, col_hi);
+
+ immBegin(GWN_PRIM_TRI_FAN, 4);
+ immAttrib3ubv(color, col_lo);
+ immVertex2f(pos, -1.0f, -1.0f);
+ immVertex2f(pos, 1.0f, -1.0f);
+
+ immAttrib3ubv(color, col_hi);
+ immVertex2f(pos, 1.0f, 1.0f);
+ immVertex2f(pos, -1.0f, 1.0f);
+ immEnd();
+
+ immUnbindProgram();
+
+ gpuPopMatrix();
+
+ glClear(GL_DEPTH_BUFFER_BIT | GL_STENCIL_BUFFER_BIT);
+
+ glEnable(GL_DEPTH_TEST);
+ }
+ else {
+ /* Solid background Color */
+ UI_ThemeClearColorAlpha(TH_HIGH_GRAD, 1.0f);
+ glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT | GL_STENCIL_BUFFER_BIT);
+ }
+}
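+
+/* Worked note for the gradient path above (illustrative): both the projection
+ * and model-view matrices are loaded with identity, so the submitted positions
+ * are already normalized device coordinates and the quad (-1,-1)..(+1,+1)
+ * covers the whole region regardless of its pixel size. After the viewport
+ * transform, a vertex at x_ndc = +1 lands at
+ * x_win = (x_ndc * 0.5 + 0.5) * winx = winx. */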
+
+/* **************************** 3D Cursor ******************************** */
+
+static bool is_cursor_visible(const DRWContextState *draw_ctx, Scene *scene, ViewLayer *view_layer)
+{
+ Object *ob = OBACT(view_layer);
+
+ /* don't draw cursor in paint modes, but with a few exceptions */
+ if (ob && draw_ctx->object_mode & OB_MODE_ALL_PAINT) {
+ /* exception: object is in weight paint and has deforming armature in pose mode */
+ if (draw_ctx->object_mode & OB_MODE_WEIGHT_PAINT) {
+ if (BKE_object_pose_armature_get(ob) != NULL) {
+ return true;
+ }
+ }
+ /* exception: object in texture paint mode, clone brush, use_clone_layer disabled */
+ else if (draw_ctx->object_mode & OB_MODE_TEXTURE_PAINT) {
+ const Paint *p = BKE_paint_get_active(scene, view_layer, draw_ctx->object_mode);
+
+ if (p && p->brush && p->brush->imagepaint_tool == PAINT_TOOL_CLONE) {
+ if ((scene->toolsettings->imapaint.flag & IMAGEPAINT_PROJECT_LAYER_CLONE) == 0) {
+ return true;
+ }
+ }
+ }
+
+ /* no exception met? then don't draw cursor! */
+ return false;
+ }
+
+ return true;
+}
+
+void DRW_draw_cursor(void)
+{
+ const DRWContextState *draw_ctx = DRW_context_state_get();
+ View3D *v3d = draw_ctx->v3d;
+ ARegion *ar = draw_ctx->ar;
+ Scene *scene = draw_ctx->scene;
+ ViewLayer *view_layer = draw_ctx->view_layer;
+
+ glColorMask(GL_TRUE, GL_TRUE, GL_TRUE, GL_TRUE);
+ glDepthMask(GL_FALSE);
+ glDisable(GL_DEPTH_TEST);
+ glLineWidth(1.0f);
+
+ if (is_cursor_visible(draw_ctx, scene, view_layer)) {
+ int co[2];
+ if (ED_view3d_project_int_global(ar, ED_view3d_cursor3d_get(scene, v3d), co, V3D_PROJ_TEST_NOP) == V3D_PROJ_RET_OK) {
+
+ ED_region_pixelspace(ar);
+ gpuTranslate2f(co[0], co[1]);
+ gpuScale2f(U.widget_unit, U.widget_unit);
+
+ Gwn_Batch *cursor_batch = DRW_cache_cursor_get();
+ GPUShader *shader = GPU_shader_get_builtin_shader(GPU_SHADER_2D_FLAT_COLOR);
+ GWN_batch_program_set(cursor_batch, GPU_shader_get_program(shader), GPU_shader_get_interface(shader));
+ GWN_batch_draw(cursor_batch);
+ }
+ }
+}
+
+/* **************************** 3D Manipulator ******************************** */
+
+void DRW_draw_manipulator_3d(void)
+{
+ const DRWContextState *draw_ctx = DRW_context_state_get();
+ View3D *v3d = draw_ctx->v3d;
+ v3d->zbuf = false;
+ ARegion *ar = draw_ctx->ar;
+
+ /* draw depth culled manipulators - manipulators need to be updated *after* view matrix was set up */
+ /* TODO depth culling manipulators is not yet supported, just drawing _3D here, should
+ * later become _IN_SCENE (and draw _3D separate) */
+ WM_manipulatormap_draw(
+ ar->manipulator_map, draw_ctx->evil_C,
+ WM_MANIPULATORMAP_DRAWSTEP_3D);
+
+}
+
+void DRW_draw_manipulator_2d(void)
+{
+ const DRWContextState *draw_ctx = DRW_context_state_get();
+ View3D *v3d = draw_ctx->v3d;
+ v3d->zbuf = false;
+ ARegion *ar = draw_ctx->ar;
+
+ WM_manipulatormap_draw(
+ ar->manipulator_map, draw_ctx->evil_C,
+ WM_MANIPULATORMAP_DRAWSTEP_2D);
+
+ glDepthMask(GL_TRUE);
+}
diff --git a/source/blender/draw/intern/draw_view.h b/source/blender/draw/intern/draw_view.h
new file mode 100644
index 00000000000..203420483a7
--- /dev/null
+++ b/source/blender/draw/intern/draw_view.h
@@ -0,0 +1,36 @@
+/*
+ * Copyright 2016, Blender Foundation.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * Contributor(s): Blender Institute
+ *
+ */
+
+/** \file blender/draw/intern/draw_view.h
+ * \ingroup draw
+ */
+
+#ifndef __DRAW_VIEW_H__
+#define __DRAW_VIEW_H__
+
+void DRW_draw_grid(void);
+void DRW_draw_region_info(void);
+void DRW_draw_background(void);
+void DRW_draw_cursor(void);
+void DRW_draw_manipulator_3d(void);
+void DRW_draw_manipulator_2d(void);
+
+#endif /* __DRAW_VIEW_H__ */