git.blender.org/blender.git
author    Campbell Barton <ideasman42@gmail.com>  2019-04-17 07:17:24 +0300
committer Campbell Barton <ideasman42@gmail.com>  2019-04-17 07:21:24 +0300
commit    e12c08e8d170b7ca40f204a5b0423c23a9fbc2c1
tree      8cf3453d12edb177a218ef8009357518ec6cab6a /source/blender/draw/intern
parent    b3dabc200a4b0399ec6b81f2ff2730d07b44fcaa
ClangFormat: apply to source, most of intern
Apply clang format as proposed in T53211.

For details on usage and instructions for migrating branches without conflicts, see:
https://wiki.blender.org/wiki/Tools/ClangFormat
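As an illustration of what the reformatting does, here is one declaration from DRW_render.h as it reads before and after this commit (both forms are taken from the diff below; the rule summaries in the comments are editorial, not a restatement of the project's .clang-format file):

/* Before: parameters packed onto an indented continuation line. */
struct GPUShader *DRW_shader_create(
    const char *vert, const char *geom, const char *frag, const char *defines);

/* After: a declaration that exceeds the column limit gets one parameter
 * per line, aligned with the opening parenthesis. */
struct GPUShader *DRW_shader_create(const char *vert,
                                    const char *geom,
                                    const char *frag,
                                    const char *defines);

Unpacking declarations this way is also why the diffstat below shows more insertions (20445) than deletions (19329): the same code simply occupies more lines after formatting.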
Diffstat (limited to 'source/blender/draw/intern')
-rw-r--r--  source/blender/draw/intern/DRW_render.h | 710
-rw-r--r--  source/blender/draw/intern/draw_anim_viz.c | 406
-rw-r--r--  source/blender/draw/intern/draw_armature.c | 3356
-rw-r--r--  source/blender/draw/intern/draw_cache.c | 5779
-rw-r--r--  source/blender/draw/intern/draw_cache.h | 61
-rw-r--r--  source/blender/draw/intern/draw_cache_impl.h | 118
-rw-r--r--  source/blender/draw/intern/draw_cache_impl_curve.c | 1636
-rw-r--r--  source/blender/draw/intern/draw_cache_impl_displist.c | 1145
-rw-r--r--  source/blender/draw/intern/draw_cache_impl_lattice.c | 752
-rw-r--r--  source/blender/draw/intern/draw_cache_impl_mesh.c | 8639
-rw-r--r--  source/blender/draw/intern/draw_cache_impl_metaball.c | 304
-rw-r--r--  source/blender/draw/intern/draw_cache_impl_particles.c | 2882
-rw-r--r--  source/blender/draw/intern/draw_common.c | 1889
-rw-r--r--  source/blender/draw/intern/draw_common.h | 301
-rw-r--r--  source/blender/draw/intern/draw_debug.c | 227
-rw-r--r--  source/blender/draw/intern/draw_hair.c | 421
-rw-r--r--  source/blender/draw/intern/draw_hair_private.h | 79
-rw-r--r--  source/blender/draw/intern/draw_instance_data.c | 600
-rw-r--r--  source/blender/draw/intern/draw_instance_data.h | 23
-rw-r--r--  source/blender/draw/intern/draw_manager.c | 4066
-rw-r--r--  source/blender/draw/intern/draw_manager.h | 573
-rw-r--r--  source/blender/draw/intern/draw_manager_data.c | 1593
-rw-r--r--  source/blender/draw/intern/draw_manager_exec.c | 2301
-rw-r--r--  source/blender/draw/intern/draw_manager_profiling.c | 523
-rw-r--r--  source/blender/draw/intern/draw_manager_shader.c | 544
-rw-r--r--  source/blender/draw/intern/draw_manager_text.c | 242
-rw-r--r--  source/blender/draw/intern/draw_manager_text.h | 24
-rw-r--r--  source/blender/draw/intern/draw_manager_texture.c | 167
-rw-r--r--  source/blender/draw/intern/draw_view.c | 413
29 files changed, 20445 insertions, 19329 deletions
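One device used throughout the first file below is worth a note up front: DRW_VIEWPORT_LIST_SIZE (first hunk) computes, at compile time, how many void-pointer members a pass/texture list struct has, treating the one-byte DRWViewportEmptyList as a count of zero. A self-contained sketch of the same trick, with illustrative names (EmptyList, PassList, LIST_SIZE are stand-ins, not DRW API):

#include <stdio.h>

typedef char EmptyList; /* stand-in for DRWViewportEmptyList */

typedef struct PassList { /* stand-in for an engine's pass list */
  void *lines;
  void *points;
} PassList;

/* A one-byte list means "unused": report zero members. Otherwise divide
 * the struct size by the pointer size to get the member count. */
#define LIST_SIZE(list) \
  (sizeof(list) == sizeof(EmptyList) ? 0 : (sizeof(list) / sizeof(void *)))

int main(void)
{
  PassList psl;
  EmptyList fbl;
  printf("%zu %zu\n", (size_t)LIST_SIZE(psl), (size_t)LIST_SIZE(fbl)); /* prints "2 0" */
  return 0;
}

The real macro applies this to members reached through a casted NULL pointer, ((ty *)NULL)->fbl, which is safe because sizeof never evaluates its operand.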
diff --git a/source/blender/draw/intern/DRW_render.h b/source/blender/draw/intern/DRW_render.h
index 98fd3712b97..b0320a522f8 100644
--- a/source/blender/draw/intern/DRW_render.h
+++ b/source/blender/draw/intern/DRW_render.h
@@ -85,309 +85,386 @@ typedef struct DRWUniform DRWUniform;
/* TODO Put it somewhere else? */
typedef struct BoundSphere {
- float center[3], radius;
+ float center[3], radius;
} BoundSphere;
/* declare members as empty (unused) */
typedef char DRWViewportEmptyList;
#define DRW_VIEWPORT_LIST_SIZE(list) \
- (sizeof(list) == sizeof(DRWViewportEmptyList) ? 0 : ((sizeof(list)) / sizeof(void *)))
+ (sizeof(list) == sizeof(DRWViewportEmptyList) ? 0 : ((sizeof(list)) / sizeof(void *)))
/* Unused members must be either pass list or 'char *' when not usd. */
-#define DRW_VIEWPORT_DATA_SIZE(ty) { \
- DRW_VIEWPORT_LIST_SIZE(*(((ty *)NULL)->fbl)), \
- DRW_VIEWPORT_LIST_SIZE(*(((ty *)NULL)->txl)), \
- DRW_VIEWPORT_LIST_SIZE(*(((ty *)NULL)->psl)), \
- DRW_VIEWPORT_LIST_SIZE(*(((ty *)NULL)->stl)), \
-}
+#define DRW_VIEWPORT_DATA_SIZE(ty) \
+ { \
+ DRW_VIEWPORT_LIST_SIZE(*(((ty *)NULL)->fbl)), DRW_VIEWPORT_LIST_SIZE(*(((ty *)NULL)->txl)), \
+ DRW_VIEWPORT_LIST_SIZE(*(((ty *)NULL)->psl)), \
+ DRW_VIEWPORT_LIST_SIZE(*(((ty *)NULL)->stl)), \
+ }
/* Use of multisample framebuffers. */
-#define MULTISAMPLE_SYNC_ENABLE(dfbl, dtxl) { \
- if (dfbl->multisample_fb != NULL) { \
- DRW_stats_query_start("Multisample Blit"); \
- GPU_framebuffer_bind(dfbl->multisample_fb); \
- /* TODO clear only depth but need to do alpha to coverage for transparencies. */ \
- GPU_framebuffer_clear_color_depth(dfbl->multisample_fb, (const float[4]){0.0f}, 1.0f); \
- DRW_stats_query_end(); \
- } \
-} ((void)0)
-
-#define MULTISAMPLE_SYNC_DISABLE(dfbl, dtxl) { \
- if (dfbl->multisample_fb != NULL) { \
- DRW_stats_query_start("Multisample Resolve"); \
- GPU_framebuffer_bind(dfbl->default_fb); \
- DRW_multisamples_resolve(dtxl->multisample_depth, dtxl->multisample_color, true); \
- DRW_stats_query_end(); \
- } \
-} ((void)0)
-
-#define MULTISAMPLE_SYNC_DISABLE_NO_DEPTH(dfbl, dtxl) { \
- if (dfbl->multisample_fb != NULL) { \
- DRW_stats_query_start("Multisample Resolve"); \
- GPU_framebuffer_bind(dfbl->default_fb); \
- DRW_multisamples_resolve(dtxl->multisample_depth, dtxl->multisample_color, false); \
- DRW_stats_query_end(); \
- } \
-} ((void)0)
-
-
-
+#define MULTISAMPLE_SYNC_ENABLE(dfbl, dtxl) \
+ { \
+ if (dfbl->multisample_fb != NULL) { \
+ DRW_stats_query_start("Multisample Blit"); \
+ GPU_framebuffer_bind(dfbl->multisample_fb); \
+ /* TODO clear only depth but need to do alpha to coverage for transparencies. */ \
+ GPU_framebuffer_clear_color_depth(dfbl->multisample_fb, (const float[4]){0.0f}, 1.0f); \
+ DRW_stats_query_end(); \
+ } \
+ } \
+ ((void)0)
+
+#define MULTISAMPLE_SYNC_DISABLE(dfbl, dtxl) \
+ { \
+ if (dfbl->multisample_fb != NULL) { \
+ DRW_stats_query_start("Multisample Resolve"); \
+ GPU_framebuffer_bind(dfbl->default_fb); \
+ DRW_multisamples_resolve(dtxl->multisample_depth, dtxl->multisample_color, true); \
+ DRW_stats_query_end(); \
+ } \
+ } \
+ ((void)0)
+
+#define MULTISAMPLE_SYNC_DISABLE_NO_DEPTH(dfbl, dtxl) \
+ { \
+ if (dfbl->multisample_fb != NULL) { \
+ DRW_stats_query_start("Multisample Resolve"); \
+ GPU_framebuffer_bind(dfbl->default_fb); \
+ DRW_multisamples_resolve(dtxl->multisample_depth, dtxl->multisample_color, false); \
+ DRW_stats_query_end(); \
+ } \
+ } \
+ ((void)0)
typedef struct DrawEngineDataSize {
- int fbl_len;
- int txl_len;
- int psl_len;
- int stl_len;
+ int fbl_len;
+ int txl_len;
+ int psl_len;
+ int stl_len;
} DrawEngineDataSize;
typedef struct DrawEngineType {
- struct DrawEngineType *next, *prev;
+ struct DrawEngineType *next, *prev;
- char idname[32];
+ char idname[32];
- const DrawEngineDataSize *vedata_size;
+ const DrawEngineDataSize *vedata_size;
- void (*engine_init)(void *vedata);
- void (*engine_free)(void);
+ void (*engine_init)(void *vedata);
+ void (*engine_free)(void);
- void (*cache_init)(void *vedata);
- void (*cache_populate)(void *vedata, struct Object *ob);
- void (*cache_finish)(void *vedata);
+ void (*cache_init)(void *vedata);
+ void (*cache_populate)(void *vedata, struct Object *ob);
+ void (*cache_finish)(void *vedata);
- void (*draw_background)(void *vedata);
- void (*draw_scene)(void *vedata);
+ void (*draw_background)(void *vedata);
+ void (*draw_scene)(void *vedata);
- void (*view_update)(void *vedata);
- void (*id_update)(void *vedata, struct ID *id);
+ void (*view_update)(void *vedata);
+ void (*id_update)(void *vedata, struct ID *id);
- void (*render_to_image)(
- void *vedata, struct RenderEngine *engine,
- struct RenderLayer *layer, const struct rcti *rect);
+ void (*render_to_image)(void *vedata,
+ struct RenderEngine *engine,
+ struct RenderLayer *layer,
+ const struct rcti *rect);
} DrawEngineType;
#ifndef __DRW_ENGINE_H__
/* Buffer and textures used by the viewport by default */
typedef struct DefaultFramebufferList {
- struct GPUFrameBuffer *default_fb;
- struct GPUFrameBuffer *color_only_fb;
- struct GPUFrameBuffer *depth_only_fb;
- struct GPUFrameBuffer *multisample_fb;
+ struct GPUFrameBuffer *default_fb;
+ struct GPUFrameBuffer *color_only_fb;
+ struct GPUFrameBuffer *depth_only_fb;
+ struct GPUFrameBuffer *multisample_fb;
} DefaultFramebufferList;
typedef struct DefaultTextureList {
- struct GPUTexture *color;
- struct GPUTexture *depth;
- struct GPUTexture *multisample_color;
- struct GPUTexture *multisample_depth;
+ struct GPUTexture *color;
+ struct GPUTexture *depth;
+ struct GPUTexture *multisample_color;
+ struct GPUTexture *multisample_depth;
} DefaultTextureList;
#endif
/* Textures */
typedef enum {
- DRW_TEX_FILTER = (1 << 0),
- DRW_TEX_WRAP = (1 << 1),
- DRW_TEX_COMPARE = (1 << 2),
- DRW_TEX_MIPMAP = (1 << 3),
+ DRW_TEX_FILTER = (1 << 0),
+ DRW_TEX_WRAP = (1 << 1),
+ DRW_TEX_COMPARE = (1 << 2),
+ DRW_TEX_MIPMAP = (1 << 3),
} DRWTextureFlag;
/* Textures from DRW_texture_pool_query_* have the options
* DRW_TEX_FILTER for color float textures, and no options
* for depth textures and integer textures. */
-struct GPUTexture *DRW_texture_pool_query_2d(int w, int h, eGPUTextureFormat format, DrawEngineType *engine_type);
-
-struct GPUTexture *DRW_texture_create_1d(
- int w, eGPUTextureFormat format, DRWTextureFlag flags, const float *fpixels);
+struct GPUTexture *DRW_texture_pool_query_2d(int w,
+ int h,
+ eGPUTextureFormat format,
+ DrawEngineType *engine_type);
+
+struct GPUTexture *DRW_texture_create_1d(int w,
+ eGPUTextureFormat format,
+ DRWTextureFlag flags,
+ const float *fpixels);
struct GPUTexture *DRW_texture_create_2d(
- int w, int h, eGPUTextureFormat format, DRWTextureFlag flags, const float *fpixels);
+ int w, int h, eGPUTextureFormat format, DRWTextureFlag flags, const float *fpixels);
struct GPUTexture *DRW_texture_create_2d_array(
- int w, int h, int d, eGPUTextureFormat format, DRWTextureFlag flags, const float *fpixels);
+ int w, int h, int d, eGPUTextureFormat format, DRWTextureFlag flags, const float *fpixels);
struct GPUTexture *DRW_texture_create_3d(
- int w, int h, int d, eGPUTextureFormat format, DRWTextureFlag flags, const float *fpixels);
-struct GPUTexture *DRW_texture_create_cube(
- int w, eGPUTextureFormat format, DRWTextureFlag flags, const float *fpixels);
-
-void DRW_texture_ensure_fullscreen_2d(
- struct GPUTexture **tex, eGPUTextureFormat format, DRWTextureFlag flags);
+ int w, int h, int d, eGPUTextureFormat format, DRWTextureFlag flags, const float *fpixels);
+struct GPUTexture *DRW_texture_create_cube(int w,
+ eGPUTextureFormat format,
+ DRWTextureFlag flags,
+ const float *fpixels);
+
+void DRW_texture_ensure_fullscreen_2d(struct GPUTexture **tex,
+ eGPUTextureFormat format,
+ DRWTextureFlag flags);
void DRW_texture_ensure_2d(
- struct GPUTexture **tex, int w, int h, eGPUTextureFormat format, DRWTextureFlag flags);
+ struct GPUTexture **tex, int w, int h, eGPUTextureFormat format, DRWTextureFlag flags);
void DRW_texture_generate_mipmaps(struct GPUTexture *tex);
void DRW_texture_free(struct GPUTexture *tex);
-#define DRW_TEXTURE_FREE_SAFE(tex) do { \
- if (tex != NULL) { \
- DRW_texture_free(tex); \
- tex = NULL; \
- } \
-} while (0)
+#define DRW_TEXTURE_FREE_SAFE(tex) \
+ do { \
+ if (tex != NULL) { \
+ DRW_texture_free(tex); \
+ tex = NULL; \
+ } \
+ } while (0)
/* UBOs */
struct GPUUniformBuffer *DRW_uniformbuffer_create(int size, const void *data);
void DRW_uniformbuffer_update(struct GPUUniformBuffer *ubo, const void *data);
void DRW_uniformbuffer_free(struct GPUUniformBuffer *ubo);
-#define DRW_UBO_FREE_SAFE(ubo) do { \
- if (ubo != NULL) { \
- DRW_uniformbuffer_free(ubo); \
- ubo = NULL; \
- } \
-} while (0)
-
-void DRW_transform_to_display(struct GPUTexture *tex, bool use_view_transform, bool use_render_settings);
+#define DRW_UBO_FREE_SAFE(ubo) \
+ do { \
+ if (ubo != NULL) { \
+ DRW_uniformbuffer_free(ubo); \
+ ubo = NULL; \
+ } \
+ } while (0)
+
+void DRW_transform_to_display(struct GPUTexture *tex,
+ bool use_view_transform,
+ bool use_render_settings);
void DRW_transform_none(struct GPUTexture *tex);
-void DRW_multisamples_resolve(
- struct GPUTexture *src_depth, struct GPUTexture *src_color, bool use_depth);
+void DRW_multisamples_resolve(struct GPUTexture *src_depth,
+ struct GPUTexture *src_color,
+ bool use_depth);
/* Shaders */
-struct GPUShader *DRW_shader_create(
- const char *vert, const char *geom, const char *frag, const char *defines);
+struct GPUShader *DRW_shader_create(const char *vert,
+ const char *geom,
+ const char *frag,
+ const char *defines);
struct GPUShader *DRW_shader_create_with_lib(
- const char *vert, const char *geom, const char *frag, const char *lib, const char *defines);
-struct GPUShader *DRW_shader_create_with_transform_feedback(
- const char *vert, const char *geom, const char *defines,
- const eGPUShaderTFBType prim_type, const char **varying_names, const int varying_count);
+ const char *vert, const char *geom, const char *frag, const char *lib, const char *defines);
+struct GPUShader *DRW_shader_create_with_transform_feedback(const char *vert,
+ const char *geom,
+ const char *defines,
+ const eGPUShaderTFBType prim_type,
+ const char **varying_names,
+ const int varying_count);
struct GPUShader *DRW_shader_create_2d(const char *frag, const char *defines);
struct GPUShader *DRW_shader_create_3d(const char *frag, const char *defines);
struct GPUShader *DRW_shader_create_fullscreen(const char *frag, const char *defines);
struct GPUShader *DRW_shader_create_3d_depth_only(eGPUShaderConfig slot);
-struct GPUMaterial *DRW_shader_find_from_world(struct World *wo, const void *engine_type, int options, bool deferred);
-struct GPUMaterial *DRW_shader_find_from_material(struct Material *ma, const void *engine_type, int options, bool deferred);
-struct GPUMaterial *DRW_shader_create_from_world(
- struct Scene *scene, struct World *wo, const void *engine_type, int options,
- const char *vert, const char *geom, const char *frag_lib, const char *defines, bool deferred);
-struct GPUMaterial *DRW_shader_create_from_material(
- struct Scene *scene, struct Material *ma, const void *engine_type, int options,
- const char *vert, const char *geom, const char *frag_lib, const char *defines, bool deferred);
+struct GPUMaterial *DRW_shader_find_from_world(struct World *wo,
+ const void *engine_type,
+ int options,
+ bool deferred);
+struct GPUMaterial *DRW_shader_find_from_material(struct Material *ma,
+ const void *engine_type,
+ int options,
+ bool deferred);
+struct GPUMaterial *DRW_shader_create_from_world(struct Scene *scene,
+ struct World *wo,
+ const void *engine_type,
+ int options,
+ const char *vert,
+ const char *geom,
+ const char *frag_lib,
+ const char *defines,
+ bool deferred);
+struct GPUMaterial *DRW_shader_create_from_material(struct Scene *scene,
+ struct Material *ma,
+ const void *engine_type,
+ int options,
+ const char *vert,
+ const char *geom,
+ const char *frag_lib,
+ const char *defines,
+ bool deferred);
void DRW_shader_free(struct GPUShader *shader);
-#define DRW_SHADER_FREE_SAFE(shader) do { \
- if (shader != NULL) { \
- DRW_shader_free(shader); \
- shader = NULL; \
- } \
-} while (0)
+#define DRW_SHADER_FREE_SAFE(shader) \
+ do { \
+ if (shader != NULL) { \
+ DRW_shader_free(shader); \
+ shader = NULL; \
+ } \
+ } while (0)
/* Batches */
typedef enum {
- DRW_STATE_WRITE_DEPTH = (1 << 0),
- DRW_STATE_WRITE_COLOR = (1 << 1),
- DRW_STATE_DEPTH_ALWAYS = (1 << 2),
- DRW_STATE_DEPTH_LESS = (1 << 3),
- DRW_STATE_DEPTH_LESS_EQUAL = (1 << 4),
- DRW_STATE_DEPTH_EQUAL = (1 << 5),
- DRW_STATE_DEPTH_GREATER = (1 << 6),
- DRW_STATE_DEPTH_GREATER_EQUAL = (1 << 7),
- DRW_STATE_CULL_BACK = (1 << 8),
- DRW_STATE_CULL_FRONT = (1 << 9),
- DRW_STATE_WIRE = (1 << 10),
- DRW_STATE_POINT = (1 << 11),
- /** Polygon offset. Does not work with lines and points. */
- DRW_STATE_OFFSET_POSITIVE = (1 << 12),
- /** Polygon offset. Does not work with lines and points. */
- DRW_STATE_OFFSET_NEGATIVE = (1 << 13),
- DRW_STATE_WIRE_WIDE = (1 << 14),
- DRW_STATE_BLEND = (1 << 15),
- DRW_STATE_ADDITIVE = (1 << 16),
- DRW_STATE_MULTIPLY = (1 << 17),
- /* DRW_STATE_TRANSMISSION = (1 << 18), */ /* Not used */
- DRW_STATE_CLIP_PLANES = (1 << 19),
- /** Same as DRW_STATE_ADDITIVE but let alpha accumulate without premult. */
- DRW_STATE_ADDITIVE_FULL = (1 << 20),
- /** Use that if color is already premult by alpha. */
- DRW_STATE_BLEND_PREMUL = (1 << 21),
- DRW_STATE_WIRE_SMOOTH = (1 << 22),
- DRW_STATE_TRANS_FEEDBACK = (1 << 23),
- DRW_STATE_BLEND_OIT = (1 << 24),
- DRW_STATE_FIRST_VERTEX_CONVENTION = (1 << 25),
-
- DRW_STATE_WRITE_STENCIL = (1 << 27),
- DRW_STATE_WRITE_STENCIL_SHADOW_PASS = (1 << 28),
- DRW_STATE_WRITE_STENCIL_SHADOW_FAIL = (1 << 29),
- DRW_STATE_STENCIL_EQUAL = (1 << 30),
- DRW_STATE_STENCIL_NEQUAL = (1 << 31),
+ DRW_STATE_WRITE_DEPTH = (1 << 0),
+ DRW_STATE_WRITE_COLOR = (1 << 1),
+ DRW_STATE_DEPTH_ALWAYS = (1 << 2),
+ DRW_STATE_DEPTH_LESS = (1 << 3),
+ DRW_STATE_DEPTH_LESS_EQUAL = (1 << 4),
+ DRW_STATE_DEPTH_EQUAL = (1 << 5),
+ DRW_STATE_DEPTH_GREATER = (1 << 6),
+ DRW_STATE_DEPTH_GREATER_EQUAL = (1 << 7),
+ DRW_STATE_CULL_BACK = (1 << 8),
+ DRW_STATE_CULL_FRONT = (1 << 9),
+ DRW_STATE_WIRE = (1 << 10),
+ DRW_STATE_POINT = (1 << 11),
+ /** Polygon offset. Does not work with lines and points. */
+ DRW_STATE_OFFSET_POSITIVE = (1 << 12),
+ /** Polygon offset. Does not work with lines and points. */
+ DRW_STATE_OFFSET_NEGATIVE = (1 << 13),
+ DRW_STATE_WIRE_WIDE = (1 << 14),
+ DRW_STATE_BLEND = (1 << 15),
+ DRW_STATE_ADDITIVE = (1 << 16),
+ DRW_STATE_MULTIPLY = (1 << 17),
+ /* DRW_STATE_TRANSMISSION = (1 << 18), */ /* Not used */
+ DRW_STATE_CLIP_PLANES = (1 << 19),
+ /** Same as DRW_STATE_ADDITIVE but let alpha accumulate without premult. */
+ DRW_STATE_ADDITIVE_FULL = (1 << 20),
+ /** Use that if color is already premult by alpha. */
+ DRW_STATE_BLEND_PREMUL = (1 << 21),
+ DRW_STATE_WIRE_SMOOTH = (1 << 22),
+ DRW_STATE_TRANS_FEEDBACK = (1 << 23),
+ DRW_STATE_BLEND_OIT = (1 << 24),
+ DRW_STATE_FIRST_VERTEX_CONVENTION = (1 << 25),
+
+ DRW_STATE_WRITE_STENCIL = (1 << 27),
+ DRW_STATE_WRITE_STENCIL_SHADOW_PASS = (1 << 28),
+ DRW_STATE_WRITE_STENCIL_SHADOW_FAIL = (1 << 29),
+ DRW_STATE_STENCIL_EQUAL = (1 << 30),
+ DRW_STATE_STENCIL_NEQUAL = (1 << 31),
} DRWState;
-#define DRW_STATE_DEFAULT (DRW_STATE_WRITE_DEPTH | DRW_STATE_WRITE_COLOR | DRW_STATE_DEPTH_LESS_EQUAL)
-#define DRW_STATE_RASTERIZER_ENABLED (DRW_STATE_WRITE_DEPTH | DRW_STATE_WRITE_COLOR | DRW_STATE_WRITE_STENCIL | \
- DRW_STATE_WRITE_STENCIL_SHADOW_PASS | DRW_STATE_WRITE_STENCIL_SHADOW_FAIL)
+#define DRW_STATE_DEFAULT \
+ (DRW_STATE_WRITE_DEPTH | DRW_STATE_WRITE_COLOR | DRW_STATE_DEPTH_LESS_EQUAL)
+#define DRW_STATE_RASTERIZER_ENABLED \
+ (DRW_STATE_WRITE_DEPTH | DRW_STATE_WRITE_COLOR | DRW_STATE_WRITE_STENCIL | \
+ DRW_STATE_WRITE_STENCIL_SHADOW_PASS | DRW_STATE_WRITE_STENCIL_SHADOW_FAIL)
typedef enum {
- DRW_ATTR_INT,
- DRW_ATTR_FLOAT,
+ DRW_ATTR_INT,
+ DRW_ATTR_FLOAT,
} eDRWAttrType;
typedef struct DRWInstanceAttrFormat {
- char name[32];
- eDRWAttrType type;
- int components;
+ char name[32];
+ eDRWAttrType type;
+ int components;
} DRWInstanceAttrFormat;
-struct GPUVertFormat *DRW_shgroup_instance_format_array(const DRWInstanceAttrFormat attrs[], int arraysize);
-#define DRW_shgroup_instance_format(format, ...) do { \
- if (format == NULL) { \
- DRWInstanceAttrFormat drw_format[] = __VA_ARGS__;\
- format = DRW_shgroup_instance_format_array(drw_format, (sizeof(drw_format) / sizeof(DRWInstanceAttrFormat))); \
- } \
-} while (0)
+struct GPUVertFormat *DRW_shgroup_instance_format_array(const DRWInstanceAttrFormat attrs[],
+ int arraysize);
+#define DRW_shgroup_instance_format(format, ...) \
+ do { \
+ if (format == NULL) { \
+ DRWInstanceAttrFormat drw_format[] = __VA_ARGS__; \
+ format = DRW_shgroup_instance_format_array( \
+ drw_format, (sizeof(drw_format) / sizeof(DRWInstanceAttrFormat))); \
+ } \
+ } while (0)
DRWShadingGroup *DRW_shgroup_create(struct GPUShader *shader, DRWPass *pass);
DRWShadingGroup *DRW_shgroup_create_sub(DRWShadingGroup *shgroup);
DRWShadingGroup *DRW_shgroup_material_create(struct GPUMaterial *material, DRWPass *pass);
-DRWShadingGroup *DRW_shgroup_material_instance_create(
- struct GPUMaterial *material, DRWPass *pass, struct GPUBatch *geom, struct Object *ob,
- struct GPUVertFormat *format);
-DRWShadingGroup *DRW_shgroup_material_empty_tri_batch_create(struct GPUMaterial *material, DRWPass *pass, int size);
-DRWShadingGroup *DRW_shgroup_instance_create(
- struct GPUShader *shader, DRWPass *pass, struct GPUBatch *geom, struct GPUVertFormat *format);
+DRWShadingGroup *DRW_shgroup_material_instance_create(struct GPUMaterial *material,
+ DRWPass *pass,
+ struct GPUBatch *geom,
+ struct Object *ob,
+ struct GPUVertFormat *format);
+DRWShadingGroup *DRW_shgroup_material_empty_tri_batch_create(struct GPUMaterial *material,
+ DRWPass *pass,
+ int size);
+DRWShadingGroup *DRW_shgroup_instance_create(struct GPUShader *shader,
+ DRWPass *pass,
+ struct GPUBatch *geom,
+ struct GPUVertFormat *format);
DRWShadingGroup *DRW_shgroup_point_batch_create(struct GPUShader *shader, DRWPass *pass);
-DRWShadingGroup *DRW_shgroup_line_batch_create_with_format(
- struct GPUShader *shader, DRWPass *pass, struct GPUVertFormat *format);
-DRWShadingGroup *DRW_shgroup_line_batch_create(
- struct GPUShader *shader, DRWPass *pass);
-DRWShadingGroup *DRW_shgroup_empty_tri_batch_create(
- struct GPUShader *shader, DRWPass *pass, int size);
-DRWShadingGroup *DRW_shgroup_transform_feedback_create(
- struct GPUShader *shader, DRWPass *pass, struct GPUVertBuf *tf_target);
-
-
-typedef void (DRWCallGenerateFn)(
- DRWShadingGroup *shgroup,
- void (*draw_fn)(DRWShadingGroup *shgroup, struct GPUBatch *geom),
- void *user_data);
+DRWShadingGroup *DRW_shgroup_line_batch_create_with_format(struct GPUShader *shader,
+ DRWPass *pass,
+ struct GPUVertFormat *format);
+DRWShadingGroup *DRW_shgroup_line_batch_create(struct GPUShader *shader, DRWPass *pass);
+DRWShadingGroup *DRW_shgroup_empty_tri_batch_create(struct GPUShader *shader,
+ DRWPass *pass,
+ int size);
+DRWShadingGroup *DRW_shgroup_transform_feedback_create(struct GPUShader *shader,
+ DRWPass *pass,
+ struct GPUVertBuf *tf_target);
+
+typedef void(DRWCallGenerateFn)(DRWShadingGroup *shgroup,
+ void (*draw_fn)(DRWShadingGroup *shgroup, struct GPUBatch *geom),
+ void *user_data);
/* return final visibility */
-typedef bool (DRWCallVisibilityFn)(
- bool vis_in,
- void *user_data);
+typedef bool(DRWCallVisibilityFn)(bool vis_in, void *user_data);
void DRW_shgroup_instance_batch(DRWShadingGroup *shgroup, struct GPUBatch *batch);
void DRW_shgroup_call_add(DRWShadingGroup *shgroup, struct GPUBatch *geom, float (*obmat)[4]);
void DRW_shgroup_call_range_add(
- DRWShadingGroup *shgroup, struct GPUBatch *geom, float (*obmat)[4], uint v_sta, uint v_count);
-void DRW_shgroup_call_procedural_points_add(DRWShadingGroup *shgroup, uint point_len, float (*obmat)[4]);
-void DRW_shgroup_call_procedural_lines_add(DRWShadingGroup *shgroup, uint line_count, float (*obmat)[4]);
-void DRW_shgroup_call_procedural_triangles_add(DRWShadingGroup *shgroup, uint tria_count, float (*obmat)[4]);
-void DRW_shgroup_call_object_procedural_triangles_culled_add(DRWShadingGroup *shgroup, uint tria_count, struct Object *ob);
-void DRW_shgroup_call_object_add_ex(
- DRWShadingGroup *shgroup, struct GPUBatch *geom, struct Object *ob, struct Material *ma, bool bypass_culling);
-#define DRW_shgroup_call_object_add(shgroup, geom, ob) DRW_shgroup_call_object_add_ex(shgroup, geom, ob, NULL, false)
-#define DRW_shgroup_call_object_add_no_cull(shgroup, geom, ob) DRW_shgroup_call_object_add_ex(shgroup, geom, ob, NULL, true)
-void DRW_shgroup_call_object_add_with_callback(
- DRWShadingGroup *shgroup, struct GPUBatch *geom, struct Object *ob, struct Material *ma,
- DRWCallVisibilityFn *callback, void *user_data);
+ DRWShadingGroup *shgroup, struct GPUBatch *geom, float (*obmat)[4], uint v_sta, uint v_count);
+void DRW_shgroup_call_procedural_points_add(DRWShadingGroup *shgroup,
+ uint point_len,
+ float (*obmat)[4]);
+void DRW_shgroup_call_procedural_lines_add(DRWShadingGroup *shgroup,
+ uint line_count,
+ float (*obmat)[4]);
+void DRW_shgroup_call_procedural_triangles_add(DRWShadingGroup *shgroup,
+ uint tria_count,
+ float (*obmat)[4]);
+void DRW_shgroup_call_object_procedural_triangles_culled_add(DRWShadingGroup *shgroup,
+ uint tria_count,
+ struct Object *ob);
+void DRW_shgroup_call_object_add_ex(DRWShadingGroup *shgroup,
+ struct GPUBatch *geom,
+ struct Object *ob,
+ struct Material *ma,
+ bool bypass_culling);
+#define DRW_shgroup_call_object_add(shgroup, geom, ob) \
+ DRW_shgroup_call_object_add_ex(shgroup, geom, ob, NULL, false)
+#define DRW_shgroup_call_object_add_no_cull(shgroup, geom, ob) \
+ DRW_shgroup_call_object_add_ex(shgroup, geom, ob, NULL, true)
+void DRW_shgroup_call_object_add_with_callback(DRWShadingGroup *shgroup,
+ struct GPUBatch *geom,
+ struct Object *ob,
+ struct Material *ma,
+ DRWCallVisibilityFn *callback,
+ void *user_data);
/* Used for drawing a batch with instancing without instance attributes. */
-void DRW_shgroup_call_instances_add(
- DRWShadingGroup *shgroup, struct GPUBatch *geom, float (*obmat)[4], uint *count);
-void DRW_shgroup_call_object_instances_add(
- DRWShadingGroup *shgroup, struct GPUBatch *geom, struct Object *ob, uint *count);
+void DRW_shgroup_call_instances_add(DRWShadingGroup *shgroup,
+ struct GPUBatch *geom,
+ float (*obmat)[4],
+ uint *count);
+void DRW_shgroup_call_object_instances_add(DRWShadingGroup *shgroup,
+ struct GPUBatch *geom,
+ struct Object *ob,
+ uint *count);
void DRW_shgroup_call_sculpt_add(DRWShadingGroup *shgroup, struct Object *ob, float (*obmat)[4]);
-void DRW_shgroup_call_sculpt_wires_add(DRWShadingGroup *shgroup, struct Object *ob, float (*obmat)[4]);
-void DRW_shgroup_call_generate_add(
- DRWShadingGroup *shgroup, DRWCallGenerateFn *geometry_fn, void *user_data, float (*obmat)[4]);
-void DRW_shgroup_call_dynamic_add_array(DRWShadingGroup *shgroup, const void *attr[], uint attr_len);
-#define DRW_shgroup_call_dynamic_add(shgroup, ...) do { \
- const void *array[] = {__VA_ARGS__}; \
- DRW_shgroup_call_dynamic_add_array(shgroup, array, (sizeof(array) / sizeof(*array))); \
-} while (0)
+void DRW_shgroup_call_sculpt_wires_add(DRWShadingGroup *shgroup,
+ struct Object *ob,
+ float (*obmat)[4]);
+void DRW_shgroup_call_generate_add(DRWShadingGroup *shgroup,
+ DRWCallGenerateFn *geometry_fn,
+ void *user_data,
+ float (*obmat)[4]);
+void DRW_shgroup_call_dynamic_add_array(DRWShadingGroup *shgroup,
+ const void *attr[],
+ uint attr_len);
+#define DRW_shgroup_call_dynamic_add(shgroup, ...) \
+ do { \
+ const void *array[] = {__VA_ARGS__}; \
+ DRW_shgroup_call_dynamic_add_array(shgroup, array, (sizeof(array) / sizeof(*array))); \
+ } while (0)
uint DRW_shgroup_get_instance_count(const DRWShadingGroup *shgroup);
@@ -395,23 +472,66 @@ void DRW_shgroup_state_enable(DRWShadingGroup *shgroup, DRWState state);
void DRW_shgroup_state_disable(DRWShadingGroup *shgroup, DRWState state);
void DRW_shgroup_stencil_mask(DRWShadingGroup *shgroup, uint mask);
-void DRW_shgroup_uniform_texture(DRWShadingGroup *shgroup, const char *name, const struct GPUTexture *tex);
-void DRW_shgroup_uniform_texture_persistent(DRWShadingGroup *shgroup, const char *name, const struct GPUTexture *tex);
-void DRW_shgroup_uniform_block(DRWShadingGroup *shgroup, const char *name, const struct GPUUniformBuffer *ubo);
-void DRW_shgroup_uniform_block_persistent(DRWShadingGroup *shgroup, const char *name, const struct GPUUniformBuffer *ubo);
-void DRW_shgroup_uniform_texture_ref(DRWShadingGroup *shgroup, const char *name, struct GPUTexture **tex);
-void DRW_shgroup_uniform_float(DRWShadingGroup *shgroup, const char *name, const float *value, int arraysize);
-void DRW_shgroup_uniform_vec2(DRWShadingGroup *shgroup, const char *name, const float *value, int arraysize);
-void DRW_shgroup_uniform_vec3(DRWShadingGroup *shgroup, const char *name, const float *value, int arraysize);
-void DRW_shgroup_uniform_vec4(DRWShadingGroup *shgroup, const char *name, const float *value, int arraysize);
-void DRW_shgroup_uniform_short_to_int(DRWShadingGroup *shgroup, const char *name, const short *value, int arraysize);
-void DRW_shgroup_uniform_short_to_float(DRWShadingGroup *shgroup, const char *name, const short *value, int arraysize);
+void DRW_shgroup_uniform_texture(DRWShadingGroup *shgroup,
+ const char *name,
+ const struct GPUTexture *tex);
+void DRW_shgroup_uniform_texture_persistent(DRWShadingGroup *shgroup,
+ const char *name,
+ const struct GPUTexture *tex);
+void DRW_shgroup_uniform_block(DRWShadingGroup *shgroup,
+ const char *name,
+ const struct GPUUniformBuffer *ubo);
+void DRW_shgroup_uniform_block_persistent(DRWShadingGroup *shgroup,
+ const char *name,
+ const struct GPUUniformBuffer *ubo);
+void DRW_shgroup_uniform_texture_ref(DRWShadingGroup *shgroup,
+ const char *name,
+ struct GPUTexture **tex);
+void DRW_shgroup_uniform_float(DRWShadingGroup *shgroup,
+ const char *name,
+ const float *value,
+ int arraysize);
+void DRW_shgroup_uniform_vec2(DRWShadingGroup *shgroup,
+ const char *name,
+ const float *value,
+ int arraysize);
+void DRW_shgroup_uniform_vec3(DRWShadingGroup *shgroup,
+ const char *name,
+ const float *value,
+ int arraysize);
+void DRW_shgroup_uniform_vec4(DRWShadingGroup *shgroup,
+ const char *name,
+ const float *value,
+ int arraysize);
+void DRW_shgroup_uniform_short_to_int(DRWShadingGroup *shgroup,
+ const char *name,
+ const short *value,
+ int arraysize);
+void DRW_shgroup_uniform_short_to_float(DRWShadingGroup *shgroup,
+ const char *name,
+ const short *value,
+ int arraysize);
/* Boolean are expected to be 4bytes longs for opengl! */
-void DRW_shgroup_uniform_bool(DRWShadingGroup *shgroup, const char *name, const int *value, int arraysize);
-void DRW_shgroup_uniform_int(DRWShadingGroup *shgroup, const char *name, const int *value, int arraysize);
-void DRW_shgroup_uniform_ivec2(DRWShadingGroup *shgroup, const char *name, const int *value, int arraysize);
-void DRW_shgroup_uniform_ivec3(DRWShadingGroup *shgroup, const char *name, const int *value, int arraysize);
-void DRW_shgroup_uniform_ivec4(DRWShadingGroup *shgroup, const char *name, const int *value, int arraysize);
+void DRW_shgroup_uniform_bool(DRWShadingGroup *shgroup,
+ const char *name,
+ const int *value,
+ int arraysize);
+void DRW_shgroup_uniform_int(DRWShadingGroup *shgroup,
+ const char *name,
+ const int *value,
+ int arraysize);
+void DRW_shgroup_uniform_ivec2(DRWShadingGroup *shgroup,
+ const char *name,
+ const int *value,
+ int arraysize);
+void DRW_shgroup_uniform_ivec3(DRWShadingGroup *shgroup,
+ const char *name,
+ const int *value,
+ int arraysize);
+void DRW_shgroup_uniform_ivec4(DRWShadingGroup *shgroup,
+ const char *name,
+ const int *value,
+ int arraysize);
void DRW_shgroup_uniform_mat3(DRWShadingGroup *shgroup, const char *name, const float (*value)[3]);
void DRW_shgroup_uniform_mat4(DRWShadingGroup *shgroup, const char *name, const float (*value)[4]);
/* Store value instead of referencing it. */
@@ -426,37 +546,39 @@ DRWPass *DRW_pass_create(const char *name, DRWState state);
void DRW_pass_state_set(DRWPass *pass, DRWState state);
void DRW_pass_state_add(DRWPass *pass, DRWState state);
void DRW_pass_state_remove(DRWPass *pass, DRWState state);
-void DRW_pass_foreach_shgroup(DRWPass *pass, void (*callback)(void *userData, DRWShadingGroup *shgrp), void *userData);
+void DRW_pass_foreach_shgroup(DRWPass *pass,
+ void (*callback)(void *userData, DRWShadingGroup *shgrp),
+ void *userData);
void DRW_pass_sort_shgroup_z(DRWPass *pass);
bool DRW_pass_is_empty(DRWPass *pass);
/* Viewport */
typedef enum {
- /* keep in sync with the union struct DRWMatrixState. */
- DRW_MAT_PERS = 0,
- DRW_MAT_PERSINV,
- DRW_MAT_VIEW,
- DRW_MAT_VIEWINV,
- DRW_MAT_WIN,
- DRW_MAT_WININV,
-
- DRW_MAT_COUNT, // Don't use this.
+ /* keep in sync with the union struct DRWMatrixState. */
+ DRW_MAT_PERS = 0,
+ DRW_MAT_PERSINV,
+ DRW_MAT_VIEW,
+ DRW_MAT_VIEWINV,
+ DRW_MAT_WIN,
+ DRW_MAT_WININV,
+
+ DRW_MAT_COUNT, // Don't use this.
} DRWViewportMatrixType;
typedef struct DRWMatrixState {
- union {
- float mat[DRW_MAT_COUNT][4][4];
- struct {
- /* keep in sync with the enum DRWViewportMatrixType. */
- float persmat[4][4];
- float persinv[4][4];
- float viewmat[4][4];
- float viewinv[4][4];
- float winmat[4][4];
- float wininv[4][4];
- };
- };
+ union {
+ float mat[DRW_MAT_COUNT][4][4];
+ struct {
+ /* keep in sync with the enum DRWViewportMatrixType. */
+ float persmat[4][4];
+ float persinv[4][4];
+ float viewmat[4][4];
+ float viewinv[4][4];
+ float winmat[4][4];
+ float wininv[4][4];
+ };
+ };
} DRWMatrixState;
void DRW_viewport_matrix_get(float mat[4][4], DRWViewportMatrixType type);
@@ -478,38 +600,41 @@ const float *DRW_viewport_pixelsize_get(void);
bool DRW_viewport_is_persp_get(void);
struct DefaultFramebufferList *DRW_viewport_framebuffer_list_get(void);
-struct DefaultTextureList *DRW_viewport_texture_list_get(void);
+struct DefaultTextureList *DRW_viewport_texture_list_get(void);
void DRW_viewport_request_redraw(void);
void DRW_render_to_image(struct RenderEngine *engine, struct Depsgraph *depsgraph);
-void DRW_render_object_iter(
- void *vedata, struct RenderEngine *engine, struct Depsgraph *depsgraph,
- void (*callback)(void *vedata, struct Object *ob, struct RenderEngine *engine, struct Depsgraph *depsgraph));
+void DRW_render_object_iter(void *vedata,
+ struct RenderEngine *engine,
+ struct Depsgraph *depsgraph,
+ void (*callback)(void *vedata,
+ struct Object *ob,
+ struct RenderEngine *engine,
+ struct Depsgraph *depsgraph));
void DRW_render_instance_buffer_finish(void);
void DRW_render_viewport_size_set(int size[2]);
-void DRW_custom_pipeline(
- DrawEngineType *draw_engine_type,
- struct Depsgraph *depsgraph,
- void (*callback)(void *vedata, void *user_data),
- void *user_data);
+void DRW_custom_pipeline(DrawEngineType *draw_engine_type,
+ struct Depsgraph *depsgraph,
+ void (*callback)(void *vedata, void *user_data),
+ void *user_data);
/* ViewLayers */
void *DRW_view_layer_engine_data_get(DrawEngineType *engine_type);
-void **DRW_view_layer_engine_data_ensure_ex(
- struct ViewLayer *view_layer, DrawEngineType *engine_type, void (*callback)(void *storage));
-void **DRW_view_layer_engine_data_ensure(
- DrawEngineType *engine_type, void (*callback)(void *storage));
+void **DRW_view_layer_engine_data_ensure_ex(struct ViewLayer *view_layer,
+ DrawEngineType *engine_type,
+ void (*callback)(void *storage));
+void **DRW_view_layer_engine_data_ensure(DrawEngineType *engine_type,
+ void (*callback)(void *storage));
/* DrawData */
DrawData *DRW_drawdata_get(ID *ib, DrawEngineType *engine_type);
-DrawData *DRW_drawdata_ensure(
- ID *id,
- DrawEngineType *engine_type,
- size_t size,
- DrawDataInitCb init_cb,
- DrawDataFreeCb free_cb);
+DrawData *DRW_drawdata_ensure(ID *id,
+ DrawEngineType *engine_type,
+ size_t size,
+ DrawDataInitCb init_cb,
+ DrawDataFreeCb free_cb);
/* Settings */
bool DRW_object_is_renderable(const struct Object *ob);
@@ -517,7 +642,8 @@ int DRW_object_visibility_in_active_context(const struct Object *ob);
bool DRW_object_is_flat_normal(const struct Object *ob);
bool DRW_object_use_hide_faces(const struct Object *ob);
-bool DRW_object_is_visible_psys_in_active_context(const struct Object *object, const struct ParticleSystem *psys);
+bool DRW_object_is_visible_psys_in_active_context(const struct Object *object,
+ const struct ParticleSystem *psys);
struct Object *DRW_object_get_dupli_parent(const struct Object *ob);
struct DupliObject *DRW_object_get_dupli(const struct Object *ob);
@@ -566,33 +692,33 @@ bool DRW_state_draw_background(void);
/* Avoid too many lookups while drawing */
typedef struct DRWContextState {
- struct ARegion *ar; /* 'CTX_wm_region(C)' */
- struct RegionView3D *rv3d; /* 'CTX_wm_region_view3d(C)' */
- struct View3D *v3d; /* 'CTX_wm_view3d(C)' */
+ struct ARegion *ar; /* 'CTX_wm_region(C)' */
+ struct RegionView3D *rv3d; /* 'CTX_wm_region_view3d(C)' */
+ struct View3D *v3d; /* 'CTX_wm_view3d(C)' */
- struct Scene *scene; /* 'CTX_data_scene(C)' */
- struct ViewLayer *view_layer; /* 'CTX_data_view_layer(C)' */
+ struct Scene *scene; /* 'CTX_data_scene(C)' */
+ struct ViewLayer *view_layer; /* 'CTX_data_view_layer(C)' */
- /* Use 'object_edit' for edit-mode */
- struct Object *obact; /* 'OBACT' */
+ /* Use 'object_edit' for edit-mode */
+ struct Object *obact; /* 'OBACT' */
- struct RenderEngineType *engine_type;
+ struct RenderEngineType *engine_type;
- struct Depsgraph *depsgraph;
+ struct Depsgraph *depsgraph;
- eObjectMode object_mode;
+ eObjectMode object_mode;
- eGPUShaderConfig sh_cfg;
+ eGPUShaderConfig sh_cfg;
- /** Last resort (some functions take this as an arg so we can't easily avoid).
- * May be NULL when used for selection or depth buffer. */
- const struct bContext *evil_C;
+ /** Last resort (some functions take this as an arg so we can't easily avoid).
+ * May be NULL when used for selection or depth buffer. */
+ const struct bContext *evil_C;
- /* ---- */
+ /* ---- */
- /* Cache: initialized by 'drw_context_state_init'. */
- struct Object *object_pose;
- struct Object *object_edit;
+ /* Cache: initialized by 'drw_context_state_init'. */
+ struct Object *object_pose;
+ struct Object *object_edit;
} DRWContextState;
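Before moving to the next file, one remark on the DRW_TEXTURE_FREE_SAFE, DRW_UBO_FREE_SAFE, and DRW_SHADER_FREE_SAFE macros above: clang-format re-indents them but preserves their do { ... } while (0) wrapper, which is what makes them callable like ordinary statements. A minimal standalone sketch of why (FREE_SAFE and demo are illustrative names, not DRW API):

#include <stdlib.h>

/* Wrapping a multi-statement macro in do { ... } while (0) turns the whole
 * expansion into a single statement, so `FREE_SAFE(p);` parses correctly in
 * every statement position. */
#define FREE_SAFE(p) \
  do { \
    if ((p) != NULL) { \
      free(p); \
      (p) = NULL; \
    } \
  } while (0)

void demo(int *p, int cond)
{
  /* With a bare { ... } block instead of do/while, the semicolon after
   * FREE_SAFE(p) would terminate the `if`, and the `else` would not parse. */
  if (cond)
    FREE_SAFE(p);
  else
    (void)p;
}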
diff --git a/source/blender/draw/intern/draw_anim_viz.c b/source/blender/draw/intern/draw_anim_viz.c
index 03566de01d2..8ff2916b040 100644
--- a/source/blender/draw/intern/draw_anim_viz.c
+++ b/source/blender/draw/intern/draw_anim_viz.c
@@ -20,7 +20,6 @@
* \ingroup draw
*/
-
#include <stdlib.h>
#include <string.h>
#include <math.h>
@@ -57,26 +56,26 @@
/* XXX: How to show frame numbers, etc.? Currently only doing the dots and lines */
typedef struct MPATH_PassList {
- struct DRWPass *lines;
- struct DRWPass *points;
+ struct DRWPass *lines;
+ struct DRWPass *points;
} MPATH_PassList;
typedef struct MPATH_StorageList {
- struct MPATH_PrivateData *g_data;
+ struct MPATH_PrivateData *g_data;
} MPATH_StorageList;
typedef struct MPATH_Data {
- void *engine_type;
- DRWViewportEmptyList *fbl;
- DRWViewportEmptyList *txl;
- MPATH_PassList *psl;
- MPATH_StorageList *stl;
+ void *engine_type;
+ DRWViewportEmptyList *fbl;
+ DRWViewportEmptyList *txl;
+ MPATH_PassList *psl;
+ MPATH_StorageList *stl;
} MPATH_Data;
#if 0
static struct {
- GPUShader *mpath_line_sh;
- GPUShader *mpath_points_sh;
+ GPUShader *mpath_line_sh;
+ GPUShader *mpath_points_sh;
} e_data = {0};
#endif
@@ -85,36 +84,37 @@ static struct {
/* Just convert the CPU cache to GPU cache. */
static GPUVertBuf *mpath_vbo_get(bMotionPath *mpath)
{
- if (!mpath->points_vbo) {
- GPUVertFormat format = {0};
- /* Match structure of bMotionPathVert. */
- uint pos = GPU_vertformat_attr_add(&format, "pos", GPU_COMP_F32, 3, GPU_FETCH_FLOAT);
- GPU_vertformat_attr_add(&format, "flag", GPU_COMP_I32, 1, GPU_FETCH_INT);
- mpath->points_vbo = GPU_vertbuf_create_with_format(&format);
- GPU_vertbuf_data_alloc(mpath->points_vbo, mpath->length);
-
- /* meh... a useless memcpy. */
- GPUVertBufRaw raw_data;
- GPU_vertbuf_attr_get_raw_data(mpath->points_vbo, pos, &raw_data);
- memcpy(GPU_vertbuf_raw_step(&raw_data), mpath->points, sizeof(bMotionPathVert) * mpath->length);
- }
- return mpath->points_vbo;
+ if (!mpath->points_vbo) {
+ GPUVertFormat format = {0};
+ /* Match structure of bMotionPathVert. */
+ uint pos = GPU_vertformat_attr_add(&format, "pos", GPU_COMP_F32, 3, GPU_FETCH_FLOAT);
+ GPU_vertformat_attr_add(&format, "flag", GPU_COMP_I32, 1, GPU_FETCH_INT);
+ mpath->points_vbo = GPU_vertbuf_create_with_format(&format);
+ GPU_vertbuf_data_alloc(mpath->points_vbo, mpath->length);
+
+ /* meh... a useless memcpy. */
+ GPUVertBufRaw raw_data;
+ GPU_vertbuf_attr_get_raw_data(mpath->points_vbo, pos, &raw_data);
+ memcpy(
+ GPU_vertbuf_raw_step(&raw_data), mpath->points, sizeof(bMotionPathVert) * mpath->length);
+ }
+ return mpath->points_vbo;
}
static GPUBatch *mpath_batch_line_get(bMotionPath *mpath)
{
- if (!mpath->batch_line) {
- mpath->batch_line = GPU_batch_create(GPU_PRIM_LINE_STRIP, mpath_vbo_get(mpath), NULL);
- }
- return mpath->batch_line;
+ if (!mpath->batch_line) {
+ mpath->batch_line = GPU_batch_create(GPU_PRIM_LINE_STRIP, mpath_vbo_get(mpath), NULL);
+ }
+ return mpath->batch_line;
}
static GPUBatch *mpath_batch_points_get(bMotionPath *mpath)
{
- if (!mpath->batch_points) {
- mpath->batch_points = GPU_batch_create(GPU_PRIM_POINTS, mpath_vbo_get(mpath), NULL);
- }
- return mpath->batch_points;
+ if (!mpath->batch_points) {
+ mpath->batch_points = GPU_batch_create(GPU_PRIM_POINTS, mpath_vbo_get(mpath), NULL);
+ }
+ return mpath->batch_points;
}
/* *************************** Draw Engine Entrypoints ************************** */
@@ -131,185 +131,188 @@ static void MPATH_engine_free(void)
* Assume that all Passes are NULL */
static void MPATH_cache_init(void *vedata)
{
- MPATH_PassList *psl = ((MPATH_Data *)vedata)->psl;
+ MPATH_PassList *psl = ((MPATH_Data *)vedata)->psl;
- {
- DRWState state = DRW_STATE_WRITE_COLOR;
- psl->lines = DRW_pass_create("Motionpath Line Pass", state);
- }
+ {
+ DRWState state = DRW_STATE_WRITE_COLOR;
+ psl->lines = DRW_pass_create("Motionpath Line Pass", state);
+ }
- {
- DRWState state = DRW_STATE_WRITE_COLOR | DRW_STATE_POINT;
- psl->points = DRW_pass_create("Motionpath Point Pass", state);
- }
+ {
+ DRWState state = DRW_STATE_WRITE_COLOR | DRW_STATE_POINT;
+ psl->points = DRW_pass_create("Motionpath Point Pass", state);
+ }
}
-static void MPATH_get_frame_range_to_draw(
- bAnimVizSettings *avs, bMotionPath *mpath, int current_frame,
- int *r_start, int *r_end, int *r_step)
+static void MPATH_get_frame_range_to_draw(bAnimVizSettings *avs,
+ bMotionPath *mpath,
+ int current_frame,
+ int *r_start,
+ int *r_end,
+ int *r_step)
{
- int start, end;
-
- if (avs->path_type == MOTIONPATH_TYPE_ACFRA) {
- start = current_frame - avs->path_bc;
- end = current_frame + avs->path_ac + 1;
- }
- else {
- start = avs->path_sf;
- end = avs->path_ef;
- }
-
- if (start > end) {
- SWAP(int, start, end);
- }
-
- CLAMP(start, mpath->start_frame, mpath->end_frame);
- CLAMP(end, mpath->start_frame, mpath->end_frame);
-
- *r_start = start;
- *r_end = end;
- *r_step = max_ii(avs->path_step, 1);
+ int start, end;
+
+ if (avs->path_type == MOTIONPATH_TYPE_ACFRA) {
+ start = current_frame - avs->path_bc;
+ end = current_frame + avs->path_ac + 1;
+ }
+ else {
+ start = avs->path_sf;
+ end = avs->path_ef;
+ }
+
+ if (start > end) {
+ SWAP(int, start, end);
+ }
+
+ CLAMP(start, mpath->start_frame, mpath->end_frame);
+ CLAMP(end, mpath->start_frame, mpath->end_frame);
+
+ *r_start = start;
+ *r_end = end;
+ *r_step = max_ii(avs->path_step, 1);
}
static void MPATH_cache_motion_path(MPATH_PassList *psl,
- Object *ob, bPoseChannel *pchan,
- bAnimVizSettings *avs, bMotionPath *mpath)
+ Object *ob,
+ bPoseChannel *pchan,
+ bAnimVizSettings *avs,
+ bMotionPath *mpath)
{
- const DRWContextState *draw_ctx = DRW_context_state_get();
- struct DRWTextStore *dt = DRW_text_cache_ensure();
- int txt_flag = DRW_TEXT_CACHE_GLOBALSPACE | DRW_TEXT_CACHE_ASCII;
- int cfra = (int)DEG_get_ctime(draw_ctx->depsgraph);
- bool sel = (pchan) ? (pchan->bone->flag & BONE_SELECTED) : (ob->flag & SELECT);
- bool show_keyframes = (avs->path_viewflag & MOTIONPATH_VIEW_KFRAS) != 0;
-
- int sfra, efra, stepsize;
- MPATH_get_frame_range_to_draw(avs, mpath, cfra, &sfra, &efra, &stepsize);
-
- int len = efra - sfra;
- if (len == 0) {
- return;
- }
- int start_index = sfra - mpath->start_frame;
-
- bool use_custom_col = (mpath->flag & MOTIONPATH_FLAG_CUSTOM) != 0;
-
- /* draw curve-line of path */
- /* Draw lines only if line drawing option is enabled */
- if (mpath->flag & MOTIONPATH_FLAG_LINES) {
- DRWShadingGroup *shgrp = DRW_shgroup_create(mpath_line_shader_get(), psl->lines);
- DRW_shgroup_uniform_int_copy(shgrp, "frameCurrent", cfra);
- DRW_shgroup_uniform_int_copy(shgrp, "frameStart", sfra);
- DRW_shgroup_uniform_int_copy(shgrp, "frameEnd", efra);
- DRW_shgroup_uniform_int_copy(shgrp, "cacheStart", mpath->start_frame);
- DRW_shgroup_uniform_int_copy(shgrp, "lineThickness", mpath->line_thickness);
- DRW_shgroup_uniform_bool_copy(shgrp, "selected", sel);
- DRW_shgroup_uniform_bool_copy(shgrp, "useCustomColor", use_custom_col);
- DRW_shgroup_uniform_vec2(shgrp, "viewportSize", DRW_viewport_size_get(), 1);
- DRW_shgroup_uniform_block(shgrp, "globalsBlock", G_draw.block_ubo);
- if (use_custom_col) {
- DRW_shgroup_uniform_vec3(shgrp, "customColor", mpath->color, 1);
- }
- /* Only draw the required range. */
- DRW_shgroup_call_range_add(shgrp, mpath_batch_line_get(mpath), NULL, start_index, len);
- }
-
- /* Draw points. */
- DRWShadingGroup *shgrp = DRW_shgroup_create(mpath_points_shader_get(), psl->points);
- DRW_shgroup_uniform_int_copy(shgrp, "frameCurrent", cfra);
- DRW_shgroup_uniform_int_copy(shgrp, "cacheStart", mpath->start_frame);
- DRW_shgroup_uniform_int_copy(shgrp, "pointSize", max_ii(mpath->line_thickness - 1, 1));
- DRW_shgroup_uniform_int_copy(shgrp, "stepSize", stepsize);
- DRW_shgroup_uniform_bool_copy(shgrp, "showKeyFrames", show_keyframes);
- DRW_shgroup_uniform_bool_copy(shgrp, "useCustomColor", use_custom_col);
- DRW_shgroup_uniform_block(shgrp, "globalsBlock", G_draw.block_ubo);
- if (use_custom_col) {
- DRW_shgroup_uniform_vec3(shgrp, "customColor", mpath->color, 1);
- }
- /* Only draw the required range. */
- DRW_shgroup_call_range_add(shgrp, mpath_batch_points_get(mpath), NULL, start_index, len);
-
- /* Draw frame numbers at each framestep value */
- bool show_kf_no = (avs->path_viewflag & MOTIONPATH_VIEW_KFNOS) != 0;
- if ((avs->path_viewflag & (MOTIONPATH_VIEW_FNUMS)) || (show_kf_no && show_keyframes)) {
- int i;
- uchar col[4], col_kf[4];
- UI_GetThemeColor3ubv(TH_TEXT_HI, col);
- UI_GetThemeColor3ubv(TH_VERTEX_SELECT, col_kf);
- col[3] = col_kf[3] = 255;
-
- bMotionPathVert *mpv;
- bMotionPathVert *mpv_start = mpath->points + start_index;
- for (i = 0, mpv = mpv_start; i < len; i += stepsize, mpv += stepsize) {
- int frame = sfra + i;
- char numstr[32];
- size_t numstr_len;
- bool is_keyframe = (mpv->flag & MOTIONPATH_VERT_KEY) != 0;
-
- if ((show_keyframes && show_kf_no && is_keyframe) ||
- ((avs->path_viewflag & MOTIONPATH_VIEW_FNUMS) && (i == 0)))
- {
- numstr_len = sprintf(numstr, " %d", frame);
- DRW_text_cache_add(dt, mpv->co, numstr, numstr_len, 0, 0, txt_flag, (is_keyframe) ? col_kf : col);
- }
- else if (avs->path_viewflag & MOTIONPATH_VIEW_FNUMS) {
- bMotionPathVert *mpvP = (mpv - stepsize);
- bMotionPathVert *mpvN = (mpv + stepsize);
- /* only draw framenum if several consecutive highlighted points don't occur on same point */
- if ((equals_v3v3(mpv->co, mpvP->co) == 0) || (equals_v3v3(mpv->co, mpvN->co) == 0)) {
- numstr_len = sprintf(numstr, " %d", frame);
- DRW_text_cache_add(dt, mpv->co, numstr, numstr_len, 0, 0, txt_flag, col);
- }
- }
- }
- }
+ const DRWContextState *draw_ctx = DRW_context_state_get();
+ struct DRWTextStore *dt = DRW_text_cache_ensure();
+ int txt_flag = DRW_TEXT_CACHE_GLOBALSPACE | DRW_TEXT_CACHE_ASCII;
+ int cfra = (int)DEG_get_ctime(draw_ctx->depsgraph);
+ bool sel = (pchan) ? (pchan->bone->flag & BONE_SELECTED) : (ob->flag & SELECT);
+ bool show_keyframes = (avs->path_viewflag & MOTIONPATH_VIEW_KFRAS) != 0;
+
+ int sfra, efra, stepsize;
+ MPATH_get_frame_range_to_draw(avs, mpath, cfra, &sfra, &efra, &stepsize);
+
+ int len = efra - sfra;
+ if (len == 0) {
+ return;
+ }
+ int start_index = sfra - mpath->start_frame;
+
+ bool use_custom_col = (mpath->flag & MOTIONPATH_FLAG_CUSTOM) != 0;
+
+ /* draw curve-line of path */
+ /* Draw lines only if line drawing option is enabled */
+ if (mpath->flag & MOTIONPATH_FLAG_LINES) {
+ DRWShadingGroup *shgrp = DRW_shgroup_create(mpath_line_shader_get(), psl->lines);
+ DRW_shgroup_uniform_int_copy(shgrp, "frameCurrent", cfra);
+ DRW_shgroup_uniform_int_copy(shgrp, "frameStart", sfra);
+ DRW_shgroup_uniform_int_copy(shgrp, "frameEnd", efra);
+ DRW_shgroup_uniform_int_copy(shgrp, "cacheStart", mpath->start_frame);
+ DRW_shgroup_uniform_int_copy(shgrp, "lineThickness", mpath->line_thickness);
+ DRW_shgroup_uniform_bool_copy(shgrp, "selected", sel);
+ DRW_shgroup_uniform_bool_copy(shgrp, "useCustomColor", use_custom_col);
+ DRW_shgroup_uniform_vec2(shgrp, "viewportSize", DRW_viewport_size_get(), 1);
+ DRW_shgroup_uniform_block(shgrp, "globalsBlock", G_draw.block_ubo);
+ if (use_custom_col) {
+ DRW_shgroup_uniform_vec3(shgrp, "customColor", mpath->color, 1);
+ }
+ /* Only draw the required range. */
+ DRW_shgroup_call_range_add(shgrp, mpath_batch_line_get(mpath), NULL, start_index, len);
+ }
+
+ /* Draw points. */
+ DRWShadingGroup *shgrp = DRW_shgroup_create(mpath_points_shader_get(), psl->points);
+ DRW_shgroup_uniform_int_copy(shgrp, "frameCurrent", cfra);
+ DRW_shgroup_uniform_int_copy(shgrp, "cacheStart", mpath->start_frame);
+ DRW_shgroup_uniform_int_copy(shgrp, "pointSize", max_ii(mpath->line_thickness - 1, 1));
+ DRW_shgroup_uniform_int_copy(shgrp, "stepSize", stepsize);
+ DRW_shgroup_uniform_bool_copy(shgrp, "showKeyFrames", show_keyframes);
+ DRW_shgroup_uniform_bool_copy(shgrp, "useCustomColor", use_custom_col);
+ DRW_shgroup_uniform_block(shgrp, "globalsBlock", G_draw.block_ubo);
+ if (use_custom_col) {
+ DRW_shgroup_uniform_vec3(shgrp, "customColor", mpath->color, 1);
+ }
+ /* Only draw the required range. */
+ DRW_shgroup_call_range_add(shgrp, mpath_batch_points_get(mpath), NULL, start_index, len);
+
+ /* Draw frame numbers at each framestep value */
+ bool show_kf_no = (avs->path_viewflag & MOTIONPATH_VIEW_KFNOS) != 0;
+ if ((avs->path_viewflag & (MOTIONPATH_VIEW_FNUMS)) || (show_kf_no && show_keyframes)) {
+ int i;
+ uchar col[4], col_kf[4];
+ UI_GetThemeColor3ubv(TH_TEXT_HI, col);
+ UI_GetThemeColor3ubv(TH_VERTEX_SELECT, col_kf);
+ col[3] = col_kf[3] = 255;
+
+ bMotionPathVert *mpv;
+ bMotionPathVert *mpv_start = mpath->points + start_index;
+ for (i = 0, mpv = mpv_start; i < len; i += stepsize, mpv += stepsize) {
+ int frame = sfra + i;
+ char numstr[32];
+ size_t numstr_len;
+ bool is_keyframe = (mpv->flag & MOTIONPATH_VERT_KEY) != 0;
+
+ if ((show_keyframes && show_kf_no && is_keyframe) ||
+ ((avs->path_viewflag & MOTIONPATH_VIEW_FNUMS) && (i == 0))) {
+ numstr_len = sprintf(numstr, " %d", frame);
+ DRW_text_cache_add(
+ dt, mpv->co, numstr, numstr_len, 0, 0, txt_flag, (is_keyframe) ? col_kf : col);
+ }
+ else if (avs->path_viewflag & MOTIONPATH_VIEW_FNUMS) {
+ bMotionPathVert *mpvP = (mpv - stepsize);
+ bMotionPathVert *mpvN = (mpv + stepsize);
+ /* only draw framenum if several consecutive highlighted points don't occur on same point */
+ if ((equals_v3v3(mpv->co, mpvP->co) == 0) || (equals_v3v3(mpv->co, mpvN->co) == 0)) {
+ numstr_len = sprintf(numstr, " %d", frame);
+ DRW_text_cache_add(dt, mpv->co, numstr, numstr_len, 0, 0, txt_flag, col);
+ }
+ }
+ }
+ }
}
/* Add geometry to shading groups. Execute for each objects */
static void MPATH_cache_populate(void *vedata, Object *ob)
{
- MPATH_PassList *psl = ((MPATH_Data *)vedata)->psl;
- const DRWContextState *draw_ctx = DRW_context_state_get();
-
- if (draw_ctx->v3d->overlay.flag & V3D_OVERLAY_HIDE_MOTION_PATHS) {
- return;
- }
-
- if (ob->type == OB_ARMATURE) {
- if (DRW_pose_mode_armature(ob, draw_ctx->obact)) {
- for (bPoseChannel *pchan = ob->pose->chanbase.first; pchan; pchan = pchan->next) {
- if (pchan->mpath) {
- MPATH_cache_motion_path(psl, ob, pchan, &ob->pose->avs, pchan->mpath);
- }
- }
- }
- }
- else {
- if (ob->mpath) {
- MPATH_cache_motion_path(psl, ob, NULL, &ob->avs, ob->mpath);
- }
- }
+ MPATH_PassList *psl = ((MPATH_Data *)vedata)->psl;
+ const DRWContextState *draw_ctx = DRW_context_state_get();
+
+ if (draw_ctx->v3d->overlay.flag & V3D_OVERLAY_HIDE_MOTION_PATHS) {
+ return;
+ }
+
+ if (ob->type == OB_ARMATURE) {
+ if (DRW_pose_mode_armature(ob, draw_ctx->obact)) {
+ for (bPoseChannel *pchan = ob->pose->chanbase.first; pchan; pchan = pchan->next) {
+ if (pchan->mpath) {
+ MPATH_cache_motion_path(psl, ob, pchan, &ob->pose->avs, pchan->mpath);
+ }
+ }
+ }
+ }
+ else {
+ if (ob->mpath) {
+ MPATH_cache_motion_path(psl, ob, NULL, &ob->avs, ob->mpath);
+ }
+ }
}
/* Draw time! Control rendering pipeline from here */
static void MPATH_draw_scene(void *vedata)
{
- MPATH_PassList *psl = ((MPATH_Data *)vedata)->psl;
- DefaultFramebufferList *dfbl = DRW_viewport_framebuffer_list_get();
- DefaultTextureList *dtxl = DRW_viewport_texture_list_get();
+ MPATH_PassList *psl = ((MPATH_Data *)vedata)->psl;
+ DefaultFramebufferList *dfbl = DRW_viewport_framebuffer_list_get();
+ DefaultTextureList *dtxl = DRW_viewport_texture_list_get();
- if (DRW_pass_is_empty(psl->lines) &&
- DRW_pass_is_empty(psl->points))
- {
- /* Nothing to draw. */
- return;
- }
+ if (DRW_pass_is_empty(psl->lines) && DRW_pass_is_empty(psl->points)) {
+ /* Nothing to draw. */
+ return;
+ }
- MULTISAMPLE_SYNC_ENABLE(dfbl, dtxl);
+ MULTISAMPLE_SYNC_ENABLE(dfbl, dtxl);
- DRW_draw_pass(psl->lines);
- DRW_draw_pass(psl->points);
+ DRW_draw_pass(psl->lines);
+ DRW_draw_pass(psl->points);
- MULTISAMPLE_SYNC_DISABLE_NO_DEPTH(dfbl, dtxl);
+ MULTISAMPLE_SYNC_DISABLE_NO_DEPTH(dfbl, dtxl);
}
/* *************************** Draw Engine Defines ****************************** */
@@ -317,16 +320,17 @@ static void MPATH_draw_scene(void *vedata)
static const DrawEngineDataSize MPATH_data_size = DRW_VIEWPORT_DATA_SIZE(MPATH_Data);
DrawEngineType draw_engine_motion_path_type = {
- NULL, NULL,
- N_("MotionPath"),
- &MPATH_data_size,
- &MPATH_engine_init,
- &MPATH_engine_free,
- &MPATH_cache_init,
- &MPATH_cache_populate,
- NULL,
- NULL,
- &MPATH_draw_scene,
- NULL,
- NULL,
+ NULL,
+ NULL,
+ N_("MotionPath"),
+ &MPATH_data_size,
+ &MPATH_engine_init,
+ &MPATH_engine_free,
+ &MPATH_cache_init,
+ &MPATH_cache_populate,
+ NULL,
+ NULL,
+ &MPATH_draw_scene,
+ NULL,
+ NULL,
};
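The draw_engine_motion_path_type table above is a positional initializer; the field order is fixed by the DrawEngineType definition reformatted earlier in this diff. Purely as a reading aid (the source itself does not use designated initializers here, and this snippet assumes the declarations from this diff are in scope), the same table with each slot labeled:

/* Editorial annotation only: each positional entry above, labeled with its
 * DrawEngineType field name. The omitted trailing field (render_to_image)
 * is implicitly zero-initialized. */
DrawEngineType draw_engine_motion_path_type_annotated = {
    .next = NULL,
    .prev = NULL,
    .idname = N_("MotionPath"),
    .vedata_size = &MPATH_data_size,
    .engine_init = &MPATH_engine_init,
    .engine_free = &MPATH_engine_free,
    .cache_init = &MPATH_cache_init,
    .cache_populate = &MPATH_cache_populate,
    .cache_finish = NULL,
    .draw_background = NULL,
    .draw_scene = &MPATH_draw_scene,
    .view_update = NULL,
    .id_update = NULL,
};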
diff --git a/source/blender/draw/intern/draw_armature.c b/source/blender/draw/intern/draw_armature.c
index a32b4089fcf..12d4ca95a39 100644
--- a/source/blender/draw/intern/draw_armature.c
+++ b/source/blender/draw/intern/draw_armature.c
@@ -37,10 +37,8 @@
#include "BKE_armature.h"
-
#include "ED_armature.h"
-
#include "UI_resources.h"
#include "draw_common.h"
@@ -56,439 +54,458 @@
/* Reset for drawing each armature object */
static struct {
- /* Current armature object */
- Object *ob;
- /* Reset when changing current_armature */
- DRWShadingGroup *bone_octahedral_solid;
- DRWShadingGroup *bone_octahedral_wire;
- DRWShadingGroup *bone_octahedral_outline;
- DRWShadingGroup *bone_box_solid;
- DRWShadingGroup *bone_box_wire;
- DRWShadingGroup *bone_box_outline;
- DRWShadingGroup *bone_wire;
- DRWShadingGroup *bone_stick;
- DRWShadingGroup *bone_dof_sphere;
- DRWShadingGroup *bone_dof_lines;
- DRWShadingGroup *bone_envelope_solid;
- DRWShadingGroup *bone_envelope_distance;
- DRWShadingGroup *bone_envelope_wire;
- DRWShadingGroup *bone_point_solid;
- DRWShadingGroup *bone_point_wire;
- DRWShadingGroup *bone_axes;
- DRWShadingGroup *lines_relationship;
- DRWShadingGroup *lines_ik;
- DRWShadingGroup *lines_ik_no_target;
- DRWShadingGroup *lines_ik_spline;
-
- DRWArmaturePasses passes;
-
- bool transparent;
+ /* Current armature object */
+ Object *ob;
+ /* Reset when changing current_armature */
+ DRWShadingGroup *bone_octahedral_solid;
+ DRWShadingGroup *bone_octahedral_wire;
+ DRWShadingGroup *bone_octahedral_outline;
+ DRWShadingGroup *bone_box_solid;
+ DRWShadingGroup *bone_box_wire;
+ DRWShadingGroup *bone_box_outline;
+ DRWShadingGroup *bone_wire;
+ DRWShadingGroup *bone_stick;
+ DRWShadingGroup *bone_dof_sphere;
+ DRWShadingGroup *bone_dof_lines;
+ DRWShadingGroup *bone_envelope_solid;
+ DRWShadingGroup *bone_envelope_distance;
+ DRWShadingGroup *bone_envelope_wire;
+ DRWShadingGroup *bone_point_solid;
+ DRWShadingGroup *bone_point_wire;
+ DRWShadingGroup *bone_axes;
+ DRWShadingGroup *lines_relationship;
+ DRWShadingGroup *lines_ik;
+ DRWShadingGroup *lines_ik_no_target;
+ DRWShadingGroup *lines_ik_spline;
+
+ DRWArmaturePasses passes;
+
+ bool transparent;
} g_data = {NULL};
-
/**
* Follow `TH_*` naming except for mixed colors.
*/
static struct {
- float select_color[4];
- float edge_select_color[4];
- float bone_select_color[4]; /* tint */
- float wire_color[4];
- float wire_edit_color[4];
- float bone_solid_color[4];
- float bone_active_unselect_color[4]; /* mix */
- float bone_pose_color[4];
- float bone_pose_active_color[4];
- float bone_pose_active_unselect_color[4]; /* mix */
- float text_hi_color[4];
- float text_color[4];
- float vertex_select_color[4];
- float vertex_color[4];
-
- /* not a theme, this is an override */
- const float *const_color;
- float const_wire;
+ float select_color[4];
+ float edge_select_color[4];
+ float bone_select_color[4]; /* tint */
+ float wire_color[4];
+ float wire_edit_color[4];
+ float bone_solid_color[4];
+ float bone_active_unselect_color[4]; /* mix */
+ float bone_pose_color[4];
+ float bone_pose_active_color[4];
+ float bone_pose_active_unselect_color[4]; /* mix */
+ float text_hi_color[4];
+ float text_color[4];
+ float vertex_select_color[4];
+ float vertex_color[4];
+
+ /* not a theme, this is an override */
+ const float *const_color;
+ float const_wire;
} g_theme;
-
/* -------------------------------------------------------------------- */
/** \name Shader Groups (DRW_shgroup)
* \{ */
/* Octahedral */
-static void drw_shgroup_bone_octahedral(
- const float (*bone_mat)[4],
- const float bone_color[4], const float hint_color[4], const float outline_color[4],
- const eGPUShaderConfig sh_cfg)
+static void drw_shgroup_bone_octahedral(const float (*bone_mat)[4],
+ const float bone_color[4],
+ const float hint_color[4],
+ const float outline_color[4],
+ const eGPUShaderConfig sh_cfg)
{
- if (g_data.bone_octahedral_outline == NULL) {
- struct GPUBatch *geom = DRW_cache_bone_octahedral_wire_get();
- g_data.bone_octahedral_outline = shgroup_instance_bone_shape_outline(
- g_data.passes.bone_outline, geom, sh_cfg);
- }
- if (g_data.bone_octahedral_solid == NULL &&
- g_data.passes.bone_solid != NULL)
- {
- struct GPUBatch *geom = DRW_cache_bone_octahedral_get();
- g_data.bone_octahedral_solid = shgroup_instance_bone_shape_solid(
- g_data.passes.bone_solid, geom, g_data.transparent, sh_cfg);
- }
- float final_bonemat[4][4];
- mul_m4_m4m4(final_bonemat, g_data.ob->obmat, bone_mat);
- if (g_data.bone_octahedral_solid != NULL) {
- DRW_shgroup_call_dynamic_add(g_data.bone_octahedral_solid, final_bonemat, bone_color, hint_color);
- }
- if (outline_color[3] > 0.0f) {
- DRW_shgroup_call_dynamic_add(g_data.bone_octahedral_outline, final_bonemat, outline_color);
- }
+ if (g_data.bone_octahedral_outline == NULL) {
+ struct GPUBatch *geom = DRW_cache_bone_octahedral_wire_get();
+ g_data.bone_octahedral_outline = shgroup_instance_bone_shape_outline(
+ g_data.passes.bone_outline, geom, sh_cfg);
+ }
+ if (g_data.bone_octahedral_solid == NULL && g_data.passes.bone_solid != NULL) {
+ struct GPUBatch *geom = DRW_cache_bone_octahedral_get();
+ g_data.bone_octahedral_solid = shgroup_instance_bone_shape_solid(
+ g_data.passes.bone_solid, geom, g_data.transparent, sh_cfg);
+ }
+ float final_bonemat[4][4];
+ mul_m4_m4m4(final_bonemat, g_data.ob->obmat, bone_mat);
+ if (g_data.bone_octahedral_solid != NULL) {
+ DRW_shgroup_call_dynamic_add(
+ g_data.bone_octahedral_solid, final_bonemat, bone_color, hint_color);
+ }
+ if (outline_color[3] > 0.0f) {
+ DRW_shgroup_call_dynamic_add(g_data.bone_octahedral_outline, final_bonemat, outline_color);
+ }
}
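
Both sides of this hunk keep the same create-on-first-use structure that repeats for every bone shape in this file: the shading group is built at most once per cache rebuild, after which each bone only appends cheap per-instance data (matrix plus colors) via DRW_shgroup_call_dynamic_add(). A toy, self-contained sketch of that pattern, with placeholder names standing in for the DRW API:

#include <stdio.h>

/* Toy stand-ins for DRWShadingGroup creation and per-instance submission. */
typedef struct Group { int num_instances; } Group;

static Group *make_group(void) { static Group g; g.num_instances = 0; return &g; }
static void append_instance(Group *grp) { grp->num_instances++; }

/* Lazily create the group once, then append per-instance records,
 * mirroring the shape of drw_shgroup_bone_octahedral() above. */
static Group *g_group = NULL;

static void draw_bone(void)
{
  if (g_group == NULL) {
    g_group = make_group(); /* expensive setup happens at most once per rebuild */
  }
  append_instance(g_group); /* cheap: one instance record per bone */
}

int main(void)
{
  for (int i = 0; i < 3; i++) { draw_bone(); }
  printf("%d instances in one group\n", g_group->num_instances); /* prints 3 */
  return 0;
}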
/* Box / B-Bone */
-static void drw_shgroup_bone_box(
- const float (*bone_mat)[4],
- const float bone_color[4], const float hint_color[4], const float outline_color[4],
- const eGPUShaderConfig sh_cfg)
+static void drw_shgroup_bone_box(const float (*bone_mat)[4],
+ const float bone_color[4],
+ const float hint_color[4],
+ const float outline_color[4],
+ const eGPUShaderConfig sh_cfg)
{
- if (g_data.bone_box_wire == NULL) {
- struct GPUBatch *geom = DRW_cache_bone_box_wire_get();
- g_data.bone_box_outline = shgroup_instance_bone_shape_outline(
- g_data.passes.bone_outline, geom, sh_cfg);
- }
- if (g_data.bone_box_solid == NULL &&
- g_data.passes.bone_solid != NULL)
- {
- struct GPUBatch *geom = DRW_cache_bone_box_get();
- g_data.bone_box_solid = shgroup_instance_bone_shape_solid(
- g_data.passes.bone_solid, geom, g_data.transparent, sh_cfg);
- }
- float final_bonemat[4][4];
- mul_m4_m4m4(final_bonemat, g_data.ob->obmat, bone_mat);
- if (g_data.bone_box_solid != NULL) {
- DRW_shgroup_call_dynamic_add(g_data.bone_box_solid, final_bonemat, bone_color, hint_color);
- }
- if (outline_color[3] > 0.0f) {
- DRW_shgroup_call_dynamic_add(g_data.bone_box_outline, final_bonemat, outline_color);
- }
+ if (g_data.bone_box_wire == NULL) {
+ struct GPUBatch *geom = DRW_cache_bone_box_wire_get();
+ g_data.bone_box_outline = shgroup_instance_bone_shape_outline(
+ g_data.passes.bone_outline, geom, sh_cfg);
+ }
+ if (g_data.bone_box_solid == NULL && g_data.passes.bone_solid != NULL) {
+ struct GPUBatch *geom = DRW_cache_bone_box_get();
+ g_data.bone_box_solid = shgroup_instance_bone_shape_solid(
+ g_data.passes.bone_solid, geom, g_data.transparent, sh_cfg);
+ }
+ float final_bonemat[4][4];
+ mul_m4_m4m4(final_bonemat, g_data.ob->obmat, bone_mat);
+ if (g_data.bone_box_solid != NULL) {
+ DRW_shgroup_call_dynamic_add(g_data.bone_box_solid, final_bonemat, bone_color, hint_color);
+ }
+ if (outline_color[3] > 0.0f) {
+ DRW_shgroup_call_dynamic_add(g_data.bone_box_outline, final_bonemat, outline_color);
+ }
}
/* Wire */
-static void drw_shgroup_bone_wire(
- const float (*bone_mat)[4], const float color[4],
- const eGPUShaderConfig sh_cfg)
+static void drw_shgroup_bone_wire(const float (*bone_mat)[4],
+ const float color[4],
+ const eGPUShaderConfig sh_cfg)
{
- if (g_data.bone_wire == NULL) {
- g_data.bone_wire = shgroup_dynlines_flat_color(g_data.passes.bone_wire, sh_cfg);
- }
- float head[3], tail[3];
- mul_v3_m4v3(head, g_data.ob->obmat, bone_mat[3]);
- DRW_shgroup_call_dynamic_add(g_data.bone_wire, head, color);
-
- add_v3_v3v3(tail, bone_mat[3], bone_mat[1]);
- mul_m4_v3(g_data.ob->obmat, tail);
- DRW_shgroup_call_dynamic_add(g_data.bone_wire, tail, color);
+ if (g_data.bone_wire == NULL) {
+ g_data.bone_wire = shgroup_dynlines_flat_color(g_data.passes.bone_wire, sh_cfg);
+ }
+ float head[3], tail[3];
+ mul_v3_m4v3(head, g_data.ob->obmat, bone_mat[3]);
+ DRW_shgroup_call_dynamic_add(g_data.bone_wire, head, color);
+
+ add_v3_v3v3(tail, bone_mat[3], bone_mat[1]);
+ mul_m4_v3(g_data.ob->obmat, tail);
+ DRW_shgroup_call_dynamic_add(g_data.bone_wire, tail, color);
}
/* Stick */
-static void drw_shgroup_bone_stick(
- const float (*bone_mat)[4],
- const float col_wire[4], const float col_bone[4], const float col_head[4], const float col_tail[4],
- const eGPUShaderConfig sh_cfg)
+static void drw_shgroup_bone_stick(const float (*bone_mat)[4],
+ const float col_wire[4],
+ const float col_bone[4],
+ const float col_head[4],
+ const float col_tail[4],
+ const eGPUShaderConfig sh_cfg)
{
- if (g_data.bone_stick == NULL) {
- g_data.bone_stick = shgroup_instance_bone_stick(g_data.passes.bone_wire, sh_cfg);
- }
- float final_bonemat[4][4], tail[4];
- mul_m4_m4m4(final_bonemat, g_data.ob->obmat, bone_mat);
- add_v3_v3v3(tail, final_bonemat[3], final_bonemat[1]);
- DRW_shgroup_call_dynamic_add(g_data.bone_stick, final_bonemat[3], tail, col_wire, col_bone, col_head, col_tail);
+ if (g_data.bone_stick == NULL) {
+ g_data.bone_stick = shgroup_instance_bone_stick(g_data.passes.bone_wire, sh_cfg);
+ }
+ float final_bonemat[4][4], tail[4];
+ mul_m4_m4m4(final_bonemat, g_data.ob->obmat, bone_mat);
+ add_v3_v3v3(tail, final_bonemat[3], final_bonemat[1]);
+ DRW_shgroup_call_dynamic_add(
+ g_data.bone_stick, final_bonemat[3], tail, col_wire, col_bone, col_head, col_tail);
}
-
/* Envelope */
-static void drw_shgroup_bone_envelope_distance(
- const float (*bone_mat)[4],
- const float *radius_head, const float *radius_tail, const float *distance,
- const eGPUShaderConfig sh_cfg)
+static void drw_shgroup_bone_envelope_distance(const float (*bone_mat)[4],
+ const float *radius_head,
+ const float *radius_tail,
+ const float *distance,
+ const eGPUShaderConfig sh_cfg)
{
- if (g_data.passes.bone_envelope != NULL) {
- if (g_data.bone_envelope_distance == NULL) {
- g_data.bone_envelope_distance = shgroup_instance_bone_envelope_distance(g_data.passes.bone_envelope, sh_cfg);
- /* passes.bone_envelope should have the DRW_STATE_CULL_FRONT state enabled. */
- }
- float head_sphere[4] = {0.0f, 0.0f, 0.0f, 1.0f}, tail_sphere[4] = {0.0f, 1.0f, 0.0f, 1.0f};
- float final_bonemat[4][4];
- mul_m4_m4m4(final_bonemat, g_data.ob->obmat, bone_mat);
- /* We need matrix mul because we need shear applied. */
- /* NOTE: could be done in shader if that becomes a bottleneck. */
- mul_m4_v4(final_bonemat, head_sphere);
- mul_m4_v4(final_bonemat, tail_sphere);
- head_sphere[3] = *radius_head;
- head_sphere[3] += *distance;
- tail_sphere[3] = *radius_tail;
- tail_sphere[3] += *distance;
- DRW_shgroup_call_dynamic_add(g_data.bone_envelope_distance, head_sphere, tail_sphere, final_bonemat[0]);
- }
+ if (g_data.passes.bone_envelope != NULL) {
+ if (g_data.bone_envelope_distance == NULL) {
+ g_data.bone_envelope_distance = shgroup_instance_bone_envelope_distance(
+ g_data.passes.bone_envelope, sh_cfg);
+ /* passes.bone_envelope should have the DRW_STATE_CULL_FRONT state enabled. */
+ }
+ float head_sphere[4] = {0.0f, 0.0f, 0.0f, 1.0f}, tail_sphere[4] = {0.0f, 1.0f, 0.0f, 1.0f};
+ float final_bonemat[4][4];
+ mul_m4_m4m4(final_bonemat, g_data.ob->obmat, bone_mat);
+ /* We need matrix mul because we need shear applied. */
+ /* NOTE: could be done in shader if that becomes a bottleneck. */
+ mul_m4_v4(final_bonemat, head_sphere);
+ mul_m4_v4(final_bonemat, tail_sphere);
+ head_sphere[3] = *radius_head;
+ head_sphere[3] += *distance;
+ tail_sphere[3] = *radius_tail;
+ tail_sphere[3] += *distance;
+ DRW_shgroup_call_dynamic_add(
+ g_data.bone_envelope_distance, head_sphere, tail_sphere, final_bonemat[0]);
+ }
}
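
The vec4 packing above is worth spelling out: head and tail start as bone-space positions (head at the origin, tail at unit +Y) with w = 1 so the matrix multiply applies translation and shear, and only afterwards is w overwritten with radius + distance for the shader. A standalone sketch of that transform, using a reference multiply that follows Blender's convention of keeping the translation in mat[3]:

#include <stdio.h>

/* Reference 4x4 * vec4 multiply; translation lives in m[3][0..2]. */
static void mul_m4_v4_ref(const float m[4][4], float v[4])
{
  float r[4];
  for (int i = 0; i < 4; i++) {
    r[i] = m[0][i] * v[0] + m[1][i] * v[1] + m[2][i] * v[2] + m[3][i] * v[3];
  }
  for (int i = 0; i < 4; i++) {
    v[i] = r[i];
  }
}

int main(void)
{
  /* Identity plus a translation of +2 along X, toy stand-in for final_bonemat. */
  float mat[4][4] = {{1, 0, 0, 0}, {0, 1, 0, 0}, {0, 0, 1, 0}, {2, 0, 0, 1}};
  float tail_sphere[4] = {0.0f, 1.0f, 0.0f, 1.0f}; /* bone-space tail, w = 1 */
  mul_m4_v4_ref(mat, tail_sphere);
  tail_sphere[3] = 0.1f + 0.25f; /* radius_tail + distance overwrites w */
  printf("tail: (%g, %g, %g) w=%g\n",
         tail_sphere[0], tail_sphere[1], tail_sphere[2], tail_sphere[3]); /* (2, 1, 0) w=0.35 */
  return 0;
}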
-static void drw_shgroup_bone_envelope(
- const float (*bone_mat)[4],
- const float bone_color[4], const float hint_color[4], const float outline_color[4],
- const float *radius_head, const float *radius_tail,
- const eGPUShaderConfig sh_cfg)
+static void drw_shgroup_bone_envelope(const float (*bone_mat)[4],
+ const float bone_color[4],
+ const float hint_color[4],
+ const float outline_color[4],
+ const float *radius_head,
+ const float *radius_tail,
+ const eGPUShaderConfig sh_cfg)
{
- if (g_data.bone_point_wire == NULL) {
- g_data.bone_point_wire = shgroup_instance_bone_sphere_outline(g_data.passes.bone_wire, sh_cfg);
- }
- if (g_data.bone_point_solid == NULL &&
- g_data.passes.bone_solid != NULL)
- {
- g_data.bone_point_solid = shgroup_instance_bone_sphere_solid(g_data.passes.bone_solid, g_data.transparent, sh_cfg);
- }
- if (g_data.bone_envelope_wire == NULL) {
- g_data.bone_envelope_wire = shgroup_instance_bone_envelope_outline(g_data.passes.bone_wire, sh_cfg);
- }
- if (g_data.bone_envelope_solid == NULL &&
- g_data.passes.bone_solid != NULL)
- {
- g_data.bone_envelope_solid = shgroup_instance_bone_envelope_solid(g_data.passes.bone_solid, g_data.transparent, sh_cfg);
-    /* We can have a lot of overdraw if we don't do this. Also envelopes are not subject to
-     * the inverted matrix. */
- DRW_shgroup_state_enable(g_data.bone_envelope_solid, DRW_STATE_CULL_BACK);
- }
-
- float head_sphere[4] = {0.0f, 0.0f, 0.0f, 1.0f}, tail_sphere[4] = {0.0f, 1.0f, 0.0f, 1.0f};
- float final_bonemat[4][4];
- mul_m4_m4m4(final_bonemat, g_data.ob->obmat, bone_mat);
- mul_m4_v4(final_bonemat, head_sphere);
- mul_m4_v4(final_bonemat, tail_sphere);
- head_sphere[3] = *radius_head;
- tail_sphere[3] = *radius_tail;
-
- if (head_sphere[3] < 0.0f) {
- /* Draw Tail only */
- float tmp[4][4] = {{0.0f}};
- tmp[0][0] = tmp[1][1] = tmp[2][2] = tail_sphere[3] / PT_DEFAULT_RAD;
- tmp[3][3] = 1.0f;
- copy_v3_v3(tmp[3], tail_sphere);
- if (g_data.bone_point_solid != NULL) {
- DRW_shgroup_call_dynamic_add(g_data.bone_point_solid, tmp, bone_color, hint_color);
- }
- if (outline_color[3] > 0.0f) {
- DRW_shgroup_call_dynamic_add(g_data.bone_point_wire, tmp, outline_color);
- }
- }
- else if (tail_sphere[3] < 0.0f) {
- /* Draw Head only */
- float tmp[4][4] = {{0.0f}};
- tmp[0][0] = tmp[1][1] = tmp[2][2] = head_sphere[3] / PT_DEFAULT_RAD;
- tmp[3][3] = 1.0f;
- copy_v3_v3(tmp[3], head_sphere);
- if (g_data.bone_point_solid != NULL) {
- DRW_shgroup_call_dynamic_add(g_data.bone_point_solid, tmp, bone_color, hint_color);
- }
- if (outline_color[3] > 0.0f) {
- DRW_shgroup_call_dynamic_add(g_data.bone_point_wire, tmp, outline_color);
- }
- }
- else {
- /* Draw Body */
- float tmp_sphere[4];
- float len = len_v3v3(tail_sphere, head_sphere);
- float fac_head = (len - head_sphere[3]) / len;
- float fac_tail = (len - tail_sphere[3]) / len;
-
- /* Small epsilon to avoid problem with float precision in shader. */
- if (len > (tail_sphere[3] + head_sphere[3]) + 1e-8f) {
-
- copy_v4_v4(tmp_sphere, head_sphere);
- interp_v4_v4v4(head_sphere, tail_sphere, head_sphere, fac_head);
- interp_v4_v4v4(tail_sphere, tmp_sphere, tail_sphere, fac_tail);
- if (g_data.bone_envelope_solid != NULL) {
- DRW_shgroup_call_dynamic_add(
- g_data.bone_envelope_solid, head_sphere, tail_sphere, bone_color, hint_color, final_bonemat[0]);
- }
- if (outline_color[3] > 0.0f) {
- DRW_shgroup_call_dynamic_add(
- g_data.bone_envelope_wire, head_sphere, tail_sphere, outline_color, final_bonemat[0]);
- }
- }
- else {
- float tmp[4][4] = {{0.0f}};
- float fac = max_ff(fac_head, 1.0f - fac_tail);
- interp_v4_v4v4(tmp_sphere, tail_sphere, head_sphere, clamp_f(fac, 0.0f, 1.0f));
- tmp[0][0] = tmp[1][1] = tmp[2][2] = tmp_sphere[3] / PT_DEFAULT_RAD;
- tmp[3][3] = 1.0f;
- copy_v3_v3(tmp[3], tmp_sphere);
- if (g_data.bone_point_solid != NULL) {
- DRW_shgroup_call_dynamic_add(g_data.bone_point_solid, tmp, bone_color, hint_color);
- }
- if (outline_color[3] > 0.0f) {
- DRW_shgroup_call_dynamic_add(g_data.bone_point_wire, tmp, outline_color);
- }
- }
- }
+ if (g_data.bone_point_wire == NULL) {
+ g_data.bone_point_wire = shgroup_instance_bone_sphere_outline(g_data.passes.bone_wire, sh_cfg);
+ }
+ if (g_data.bone_point_solid == NULL && g_data.passes.bone_solid != NULL) {
+ g_data.bone_point_solid = shgroup_instance_bone_sphere_solid(
+ g_data.passes.bone_solid, g_data.transparent, sh_cfg);
+ }
+ if (g_data.bone_envelope_wire == NULL) {
+ g_data.bone_envelope_wire = shgroup_instance_bone_envelope_outline(g_data.passes.bone_wire,
+ sh_cfg);
+ }
+ if (g_data.bone_envelope_solid == NULL && g_data.passes.bone_solid != NULL) {
+ g_data.bone_envelope_solid = shgroup_instance_bone_envelope_solid(
+ g_data.passes.bone_solid, g_data.transparent, sh_cfg);
+    /* We can have a lot of overdraw if we don't do this. Also envelopes are not subject to
+     * the inverted matrix. */
+ DRW_shgroup_state_enable(g_data.bone_envelope_solid, DRW_STATE_CULL_BACK);
+ }
+
+ float head_sphere[4] = {0.0f, 0.0f, 0.0f, 1.0f}, tail_sphere[4] = {0.0f, 1.0f, 0.0f, 1.0f};
+ float final_bonemat[4][4];
+ mul_m4_m4m4(final_bonemat, g_data.ob->obmat, bone_mat);
+ mul_m4_v4(final_bonemat, head_sphere);
+ mul_m4_v4(final_bonemat, tail_sphere);
+ head_sphere[3] = *radius_head;
+ tail_sphere[3] = *radius_tail;
+
+ if (head_sphere[3] < 0.0f) {
+ /* Draw Tail only */
+ float tmp[4][4] = {{0.0f}};
+ tmp[0][0] = tmp[1][1] = tmp[2][2] = tail_sphere[3] / PT_DEFAULT_RAD;
+ tmp[3][3] = 1.0f;
+ copy_v3_v3(tmp[3], tail_sphere);
+ if (g_data.bone_point_solid != NULL) {
+ DRW_shgroup_call_dynamic_add(g_data.bone_point_solid, tmp, bone_color, hint_color);
+ }
+ if (outline_color[3] > 0.0f) {
+ DRW_shgroup_call_dynamic_add(g_data.bone_point_wire, tmp, outline_color);
+ }
+ }
+ else if (tail_sphere[3] < 0.0f) {
+ /* Draw Head only */
+ float tmp[4][4] = {{0.0f}};
+ tmp[0][0] = tmp[1][1] = tmp[2][2] = head_sphere[3] / PT_DEFAULT_RAD;
+ tmp[3][3] = 1.0f;
+ copy_v3_v3(tmp[3], head_sphere);
+ if (g_data.bone_point_solid != NULL) {
+ DRW_shgroup_call_dynamic_add(g_data.bone_point_solid, tmp, bone_color, hint_color);
+ }
+ if (outline_color[3] > 0.0f) {
+ DRW_shgroup_call_dynamic_add(g_data.bone_point_wire, tmp, outline_color);
+ }
+ }
+ else {
+ /* Draw Body */
+ float tmp_sphere[4];
+ float len = len_v3v3(tail_sphere, head_sphere);
+ float fac_head = (len - head_sphere[3]) / len;
+ float fac_tail = (len - tail_sphere[3]) / len;
+
+ /* Small epsilon to avoid problem with float precision in shader. */
+ if (len > (tail_sphere[3] + head_sphere[3]) + 1e-8f) {
+
+ copy_v4_v4(tmp_sphere, head_sphere);
+ interp_v4_v4v4(head_sphere, tail_sphere, head_sphere, fac_head);
+ interp_v4_v4v4(tail_sphere, tmp_sphere, tail_sphere, fac_tail);
+ if (g_data.bone_envelope_solid != NULL) {
+ DRW_shgroup_call_dynamic_add(g_data.bone_envelope_solid,
+ head_sphere,
+ tail_sphere,
+ bone_color,
+ hint_color,
+ final_bonemat[0]);
+ }
+ if (outline_color[3] > 0.0f) {
+ DRW_shgroup_call_dynamic_add(
+ g_data.bone_envelope_wire, head_sphere, tail_sphere, outline_color, final_bonemat[0]);
+ }
+ }
+ else {
+ float tmp[4][4] = {{0.0f}};
+ float fac = max_ff(fac_head, 1.0f - fac_tail);
+ interp_v4_v4v4(tmp_sphere, tail_sphere, head_sphere, clamp_f(fac, 0.0f, 1.0f));
+ tmp[0][0] = tmp[1][1] = tmp[2][2] = tmp_sphere[3] / PT_DEFAULT_RAD;
+ tmp[3][3] = 1.0f;
+ copy_v3_v3(tmp[3], tmp_sphere);
+ if (g_data.bone_point_solid != NULL) {
+ DRW_shgroup_call_dynamic_add(g_data.bone_point_solid, tmp, bone_color, hint_color);
+ }
+ if (outline_color[3] > 0.0f) {
+ DRW_shgroup_call_dynamic_add(g_data.bone_point_wire, tmp, outline_color);
+ }
+ }
+ }
}
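
The body branch above shrinks the segment so the envelope starts at each sphere's surface: interpolating head toward tail by fac_head = (len - r_head) / len moves it exactly r_head along the bone, and the epsilon test catches the case where one sphere swallows the other. A numeric sketch with toy values:

#include <stdio.h>

int main(void)
{
  float len = 1.0f; /* distance between head and tail sphere centers */
  float r_head = 0.2f, r_tail = 0.3f;
  float fac_head = (len - r_head) / len; /* 0.8: head point moves r_head toward tail */
  float fac_tail = (len - r_tail) / len; /* 0.7: tail point moves r_tail toward head */

  if (len > (r_head + r_tail) + 1e-8f) {
    /* Spheres are disjoint: draw the envelope body between the shrunken points. */
    printf("body: fac_head=%g fac_tail=%g\n", fac_head, fac_tail);
  }
  else {
    /* One sphere contains the other: collapse to a single interpolated point. */
    float fac = fac_head > (1.0f - fac_tail) ? fac_head : (1.0f - fac_tail);
    printf("degenerate: single sphere at fac=%g\n", fac);
  }
  return 0;
}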
/* Custom (geometry) */
extern void drw_batch_cache_generate_requested(Object *custom);
-static void drw_shgroup_bone_custom_solid(
- const float (*bone_mat)[4],
- const float bone_color[4], const float hint_color[4], const float outline_color[4],
- const eGPUShaderConfig sh_cfg, Object *custom)
+static void drw_shgroup_bone_custom_solid(const float (*bone_mat)[4],
+ const float bone_color[4],
+ const float hint_color[4],
+ const float outline_color[4],
+ const eGPUShaderConfig sh_cfg,
+ Object *custom)
{
- /* grr, not re-using instances! */
- struct GPUBatch *surf = DRW_cache_object_surface_get(custom);
- struct GPUBatch *edges = DRW_cache_object_edge_detection_get(custom, NULL);
- struct GPUBatch *ledges = DRW_cache_object_loose_edges_get(custom);
- float final_bonemat[4][4];
-
- /* XXXXXXX needs to be moved elsewhere. */
- drw_batch_cache_generate_requested(custom);
-
- if (surf || edges || ledges) {
- mul_m4_m4m4(final_bonemat, g_data.ob->obmat, bone_mat);
- }
-
- if (surf && g_data.passes.bone_solid != NULL) {
- DRWShadingGroup *shgrp_geom_solid = shgroup_instance_bone_shape_solid(
- g_data.passes.bone_solid, surf, g_data.transparent, sh_cfg);
- DRW_shgroup_call_dynamic_add(shgrp_geom_solid, final_bonemat, bone_color, hint_color);
- }
-
- if (edges && outline_color[3] > 0.0f) {
- DRWShadingGroup *shgrp_geom_wire = shgroup_instance_bone_shape_outline(
- g_data.passes.bone_outline, edges, sh_cfg);
- DRW_shgroup_call_dynamic_add(shgrp_geom_wire, final_bonemat, outline_color);
- }
-
- if (ledges) {
- DRWShadingGroup *shgrp_geom_ledges = shgroup_instance_wire(g_data.passes.bone_wire, ledges);
- float final_color[4];
- copy_v3_v3(final_color, outline_color);
- final_color[3] = 1.0f; /* hack */
- DRW_shgroup_call_dynamic_add(shgrp_geom_ledges, final_bonemat, final_color);
- }
+ /* grr, not re-using instances! */
+ struct GPUBatch *surf = DRW_cache_object_surface_get(custom);
+ struct GPUBatch *edges = DRW_cache_object_edge_detection_get(custom, NULL);
+ struct GPUBatch *ledges = DRW_cache_object_loose_edges_get(custom);
+ float final_bonemat[4][4];
+
+ /* XXXXXXX needs to be moved elsewhere. */
+ drw_batch_cache_generate_requested(custom);
+
+ if (surf || edges || ledges) {
+ mul_m4_m4m4(final_bonemat, g_data.ob->obmat, bone_mat);
+ }
+
+ if (surf && g_data.passes.bone_solid != NULL) {
+ DRWShadingGroup *shgrp_geom_solid = shgroup_instance_bone_shape_solid(
+ g_data.passes.bone_solid, surf, g_data.transparent, sh_cfg);
+ DRW_shgroup_call_dynamic_add(shgrp_geom_solid, final_bonemat, bone_color, hint_color);
+ }
+
+ if (edges && outline_color[3] > 0.0f) {
+ DRWShadingGroup *shgrp_geom_wire = shgroup_instance_bone_shape_outline(
+ g_data.passes.bone_outline, edges, sh_cfg);
+ DRW_shgroup_call_dynamic_add(shgrp_geom_wire, final_bonemat, outline_color);
+ }
+
+ if (ledges) {
+ DRWShadingGroup *shgrp_geom_ledges = shgroup_instance_wire(g_data.passes.bone_wire, ledges);
+ float final_color[4];
+ copy_v3_v3(final_color, outline_color);
+ final_color[3] = 1.0f; /* hack */
+ DRW_shgroup_call_dynamic_add(shgrp_geom_ledges, final_bonemat, final_color);
+ }
}
-static void drw_shgroup_bone_custom_wire(
- const float (*bone_mat)[4],
- const float color[4], Object *custom)
+static void drw_shgroup_bone_custom_wire(const float (*bone_mat)[4],
+ const float color[4],
+ Object *custom)
{
- /* grr, not re-using instances! */
- struct GPUBatch *geom = DRW_cache_object_all_edges_get(custom);
-
- /* XXXXXXX needs to be moved elsewhere. */
- drw_batch_cache_generate_requested(custom);
-
- if (geom) {
- DRWShadingGroup *shgrp_geom_wire = shgroup_instance_wire(g_data.passes.bone_wire, geom);
- float final_bonemat[4][4], final_color[4];
- mul_m4_m4m4(final_bonemat, g_data.ob->obmat, bone_mat);
- copy_v3_v3(final_color, color);
- final_color[3] = 1.0f; /* hack */
- DRW_shgroup_call_dynamic_add(shgrp_geom_wire, final_bonemat, final_color);
- }
+ /* grr, not re-using instances! */
+ struct GPUBatch *geom = DRW_cache_object_all_edges_get(custom);
+
+ /* XXXXXXX needs to be moved elsewhere. */
+ drw_batch_cache_generate_requested(custom);
+
+ if (geom) {
+ DRWShadingGroup *shgrp_geom_wire = shgroup_instance_wire(g_data.passes.bone_wire, geom);
+ float final_bonemat[4][4], final_color[4];
+ mul_m4_m4m4(final_bonemat, g_data.ob->obmat, bone_mat);
+ copy_v3_v3(final_color, color);
+ final_color[3] = 1.0f; /* hack */
+ DRW_shgroup_call_dynamic_add(shgrp_geom_wire, final_bonemat, final_color);
+ }
}
/* Head and tail sphere */
-static void drw_shgroup_bone_point(
- const float (*bone_mat)[4],
- const float bone_color[4], const float hint_color[4], const float outline_color[4],
- const eGPUShaderConfig sh_cfg)
+static void drw_shgroup_bone_point(const float (*bone_mat)[4],
+ const float bone_color[4],
+ const float hint_color[4],
+ const float outline_color[4],
+ const eGPUShaderConfig sh_cfg)
{
- if (g_data.bone_point_wire == NULL) {
- g_data.bone_point_wire = shgroup_instance_bone_sphere_outline(g_data.passes.bone_wire, sh_cfg);
- }
- if (g_data.bone_point_solid == NULL &&
- g_data.passes.bone_solid != NULL)
- {
- g_data.bone_point_solid = shgroup_instance_bone_sphere_solid(g_data.passes.bone_solid, g_data.transparent, sh_cfg);
- }
- float final_bonemat[4][4];
- mul_m4_m4m4(final_bonemat, g_data.ob->obmat, bone_mat);
- if (g_data.bone_point_solid != NULL) {
- DRW_shgroup_call_dynamic_add(g_data.bone_point_solid, final_bonemat, bone_color, hint_color);
- }
- if (outline_color[3] > 0.0f) {
- DRW_shgroup_call_dynamic_add(g_data.bone_point_wire, final_bonemat, outline_color);
- }
+ if (g_data.bone_point_wire == NULL) {
+ g_data.bone_point_wire = shgroup_instance_bone_sphere_outline(g_data.passes.bone_wire, sh_cfg);
+ }
+ if (g_data.bone_point_solid == NULL && g_data.passes.bone_solid != NULL) {
+ g_data.bone_point_solid = shgroup_instance_bone_sphere_solid(
+ g_data.passes.bone_solid, g_data.transparent, sh_cfg);
+ }
+ float final_bonemat[4][4];
+ mul_m4_m4m4(final_bonemat, g_data.ob->obmat, bone_mat);
+ if (g_data.bone_point_solid != NULL) {
+ DRW_shgroup_call_dynamic_add(g_data.bone_point_solid, final_bonemat, bone_color, hint_color);
+ }
+ if (outline_color[3] > 0.0f) {
+ DRW_shgroup_call_dynamic_add(g_data.bone_point_wire, final_bonemat, outline_color);
+ }
}
/* Axes */
-static void drw_shgroup_bone_axes(
- const float (*bone_mat)[4], const float color[4],
- const eGPUShaderConfig sh_cfg)
+static void drw_shgroup_bone_axes(const float (*bone_mat)[4],
+ const float color[4],
+ const eGPUShaderConfig sh_cfg)
{
- if (g_data.bone_axes == NULL) {
- g_data.bone_axes = shgroup_instance_bone_axes(g_data.passes.bone_axes, sh_cfg);
- }
- float final_bonemat[4][4];
- mul_m4_m4m4(final_bonemat, g_data.ob->obmat, bone_mat);
- DRW_shgroup_call_dynamic_add(g_data.bone_axes, final_bonemat, color);
+ if (g_data.bone_axes == NULL) {
+ g_data.bone_axes = shgroup_instance_bone_axes(g_data.passes.bone_axes, sh_cfg);
+ }
+ float final_bonemat[4][4];
+ mul_m4_m4m4(final_bonemat, g_data.ob->obmat, bone_mat);
+ DRW_shgroup_call_dynamic_add(g_data.bone_axes, final_bonemat, color);
}
/* Relationship lines */
-static void drw_shgroup_bone_relationship_lines(const float start[3], const float end[3], const eGPUShaderConfig sh_cfg)
+static void drw_shgroup_bone_relationship_lines(const float start[3],
+ const float end[3],
+ const eGPUShaderConfig sh_cfg)
{
- if (g_data.lines_relationship == NULL) {
- g_data.lines_relationship = shgroup_dynlines_dashed_uniform_color(
- g_data.passes.relationship_lines, g_theme.wire_color, sh_cfg);
- }
- /* reverse order to have less stipple overlap */
- float v[3];
- mul_v3_m4v3(v, g_data.ob->obmat, end);
- DRW_shgroup_call_dynamic_add(g_data.lines_relationship, v);
- mul_v3_m4v3(v, g_data.ob->obmat, start);
- DRW_shgroup_call_dynamic_add(g_data.lines_relationship, v);
+ if (g_data.lines_relationship == NULL) {
+ g_data.lines_relationship = shgroup_dynlines_dashed_uniform_color(
+ g_data.passes.relationship_lines, g_theme.wire_color, sh_cfg);
+ }
+ /* reverse order to have less stipple overlap */
+ float v[3];
+ mul_v3_m4v3(v, g_data.ob->obmat, end);
+ DRW_shgroup_call_dynamic_add(g_data.lines_relationship, v);
+ mul_v3_m4v3(v, g_data.ob->obmat, start);
+ DRW_shgroup_call_dynamic_add(g_data.lines_relationship, v);
}
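
One plausible reading of the "reverse order" comment, which also applies to the IK variants below (an assumption, since the dashed-line shader is not in this diff): the dash pattern starts at the first emitted vertex, so pushing end before start makes stacked child-to-parent lines begin their dashes at opposite ends and coincide less. Schematically:

#include <stdio.h>

/* Toy sketch: emitting (end, start) instead of (start, end) flips where the
 * dash phase begins, so neighboring lines stipple out of phase. */
static void emit_line(const float a[3], const float b[3])
{
  printf("dash phase 0 at (%g, %g, %g), heading to (%g, %g, %g)\n",
         a[0], a[1], a[2], b[0], b[1], b[2]);
}

int main(void)
{
  const float start[3] = {0, 0, 0}, end[3] = {0, 1, 0};
  emit_line(end, start); /* reversed, as in the helpers above */
  return 0;
}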
-static void drw_shgroup_bone_ik_lines(const float start[3], const float end[3], const eGPUShaderConfig sh_cfg)
+static void drw_shgroup_bone_ik_lines(const float start[3],
+ const float end[3],
+ const eGPUShaderConfig sh_cfg)
{
- if (g_data.lines_ik == NULL) {
- static float fcolor[4] = {0.8f, 0.5f, 0.0f, 1.0f}; /* add theme! */
- g_data.lines_ik = shgroup_dynlines_dashed_uniform_color(g_data.passes.relationship_lines, fcolor, sh_cfg);
- }
- /* reverse order to have less stipple overlap */
- float v[3];
- mul_v3_m4v3(v, g_data.ob->obmat, end);
- DRW_shgroup_call_dynamic_add(g_data.lines_ik, v);
- mul_v3_m4v3(v, g_data.ob->obmat, start);
- DRW_shgroup_call_dynamic_add(g_data.lines_ik, v);
+ if (g_data.lines_ik == NULL) {
+ static float fcolor[4] = {0.8f, 0.5f, 0.0f, 1.0f}; /* add theme! */
+ g_data.lines_ik = shgroup_dynlines_dashed_uniform_color(
+ g_data.passes.relationship_lines, fcolor, sh_cfg);
+ }
+ /* reverse order to have less stipple overlap */
+ float v[3];
+ mul_v3_m4v3(v, g_data.ob->obmat, end);
+ DRW_shgroup_call_dynamic_add(g_data.lines_ik, v);
+ mul_v3_m4v3(v, g_data.ob->obmat, start);
+ DRW_shgroup_call_dynamic_add(g_data.lines_ik, v);
}
-static void drw_shgroup_bone_ik_no_target_lines(const float start[3], const float end[3], const eGPUShaderConfig sh_cfg)
+static void drw_shgroup_bone_ik_no_target_lines(const float start[3],
+ const float end[3],
+ const eGPUShaderConfig sh_cfg)
{
- if (g_data.lines_ik_no_target == NULL) {
- static float fcolor[4] = {0.8f, 0.8f, 0.2f, 1.0f}; /* add theme! */
- g_data.lines_ik_no_target = shgroup_dynlines_dashed_uniform_color(g_data.passes.relationship_lines, fcolor, sh_cfg);
- }
- /* reverse order to have less stipple overlap */
- float v[3];
- mul_v3_m4v3(v, g_data.ob->obmat, end);
- DRW_shgroup_call_dynamic_add(g_data.lines_ik_no_target, v);
- mul_v3_m4v3(v, g_data.ob->obmat, start);
- DRW_shgroup_call_dynamic_add(g_data.lines_ik_no_target, v);
+ if (g_data.lines_ik_no_target == NULL) {
+ static float fcolor[4] = {0.8f, 0.8f, 0.2f, 1.0f}; /* add theme! */
+ g_data.lines_ik_no_target = shgroup_dynlines_dashed_uniform_color(
+ g_data.passes.relationship_lines, fcolor, sh_cfg);
+ }
+ /* reverse order to have less stipple overlap */
+ float v[3];
+ mul_v3_m4v3(v, g_data.ob->obmat, end);
+ DRW_shgroup_call_dynamic_add(g_data.lines_ik_no_target, v);
+ mul_v3_m4v3(v, g_data.ob->obmat, start);
+ DRW_shgroup_call_dynamic_add(g_data.lines_ik_no_target, v);
}
-static void drw_shgroup_bone_ik_spline_lines(const float start[3], const float end[3], const eGPUShaderConfig sh_cfg)
+static void drw_shgroup_bone_ik_spline_lines(const float start[3],
+ const float end[3],
+ const eGPUShaderConfig sh_cfg)
{
- if (g_data.lines_ik_spline == NULL) {
- static float fcolor[4] = {0.8f, 0.8f, 0.2f, 1.0f}; /* add theme! */
- g_data.lines_ik_spline = shgroup_dynlines_dashed_uniform_color(g_data.passes.relationship_lines, fcolor, sh_cfg);
- }
- /* reverse order to have less stipple overlap */
- float v[3];
- mul_v3_m4v3(v, g_data.ob->obmat, end);
- DRW_shgroup_call_dynamic_add(g_data.lines_ik_spline, v);
- mul_v3_m4v3(v, g_data.ob->obmat, start);
- DRW_shgroup_call_dynamic_add(g_data.lines_ik_spline, v);
+ if (g_data.lines_ik_spline == NULL) {
+ static float fcolor[4] = {0.8f, 0.8f, 0.2f, 1.0f}; /* add theme! */
+ g_data.lines_ik_spline = shgroup_dynlines_dashed_uniform_color(
+ g_data.passes.relationship_lines, fcolor, sh_cfg);
+ }
+ /* reverse order to have less stipple overlap */
+ float v[3];
+ mul_v3_m4v3(v, g_data.ob->obmat, end);
+ DRW_shgroup_call_dynamic_add(g_data.lines_ik_spline, v);
+ mul_v3_m4v3(v, g_data.ob->obmat, start);
+ DRW_shgroup_call_dynamic_add(g_data.lines_ik_spline, v);
}
/** \} */
-
/* -------------------------------------------------------------------- */
/** \name Drawing Theme Helpers
*
@@ -498,263 +515,276 @@ static void drw_shgroup_bone_ik_spline_lines(const float start[3], const float e
/* global here is reset before drawing each bone */
static struct {
- const ThemeWireColor *bcolor;
+ const ThemeWireColor *bcolor;
} g_color;
/* values of colCode for set_pchan_color */
enum {
- PCHAN_COLOR_NORMAL = 0, /* normal drawing */
- PCHAN_COLOR_SOLID, /* specific case where "solid" color is needed */
- PCHAN_COLOR_CONSTS, /* "constraint" colors (which may/may-not be suppressed) */
+ PCHAN_COLOR_NORMAL = 0, /* normal drawing */
+ PCHAN_COLOR_SOLID, /* specific case where "solid" color is needed */
+ PCHAN_COLOR_CONSTS, /* "constraint" colors (which may/may-not be suppressed) */
- PCHAN_COLOR_SPHEREBONE_BASE, /* for the 'stick' of sphere (envelope) bones */
- PCHAN_COLOR_SPHEREBONE_END, /* for the ends of sphere (envelope) bones */
- PCHAN_COLOR_LINEBONE, /* for the middle of line-bones */
+ PCHAN_COLOR_SPHEREBONE_BASE, /* for the 'stick' of sphere (envelope) bones */
+ PCHAN_COLOR_SPHEREBONE_END, /* for the ends of sphere (envelope) bones */
+ PCHAN_COLOR_LINEBONE, /* for the middle of line-bones */
};
/* This function sets the color-set for coloring a certain bone */
static void set_pchan_colorset(Object *ob, bPoseChannel *pchan)
{
- bPose *pose = (ob) ? ob->pose : NULL;
- bArmature *arm = (ob) ? ob->data : NULL;
- bActionGroup *grp = NULL;
- short color_index = 0;
-
- /* sanity check */
- if (ELEM(NULL, ob, arm, pose, pchan)) {
- g_color.bcolor = NULL;
- return;
- }
-
- /* only try to set custom color if enabled for armature */
- if (arm->flag & ARM_COL_CUSTOM) {
-    /* currently, a bone can only use a custom color set if its group (if it has one)
-     * has been set to use one.
-     */
- if (pchan->agrp_index) {
- grp = (bActionGroup *)BLI_findlink(&pose->agroups, (pchan->agrp_index - 1));
- if (grp) {
- color_index = grp->customCol;
- }
- }
- }
-
- /* bcolor is a pointer to the color set to use. If NULL, then the default
- * color set (based on the theme colors for 3d-view) is used.
- */
- if (color_index > 0) {
- bTheme *btheme = UI_GetTheme();
- g_color.bcolor = &btheme->tarm[(color_index - 1)];
- }
- else if (color_index == -1) {
- /* use the group's own custom color set (grp is always != NULL here) */
- g_color.bcolor = &grp->cs;
- }
- else {
- g_color.bcolor = NULL;
- }
+ bPose *pose = (ob) ? ob->pose : NULL;
+ bArmature *arm = (ob) ? ob->data : NULL;
+ bActionGroup *grp = NULL;
+ short color_index = 0;
+
+ /* sanity check */
+ if (ELEM(NULL, ob, arm, pose, pchan)) {
+ g_color.bcolor = NULL;
+ return;
+ }
+
+ /* only try to set custom color if enabled for armature */
+ if (arm->flag & ARM_COL_CUSTOM) {
+    /* currently, a bone can only use a custom color set if its group (if it has one)
+     * has been set to use one.
+     */
+ if (pchan->agrp_index) {
+ grp = (bActionGroup *)BLI_findlink(&pose->agroups, (pchan->agrp_index - 1));
+ if (grp) {
+ color_index = grp->customCol;
+ }
+ }
+ }
+
+ /* bcolor is a pointer to the color set to use. If NULL, then the default
+ * color set (based on the theme colors for 3d-view) is used.
+ */
+ if (color_index > 0) {
+ bTheme *btheme = UI_GetTheme();
+ g_color.bcolor = &btheme->tarm[(color_index - 1)];
+ }
+ else if (color_index == -1) {
+ /* use the group's own custom color set (grp is always != NULL here) */
+ g_color.bcolor = &grp->cs;
+ }
+ else {
+ g_color.bcolor = NULL;
+ }
}
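
The decision above boils down to a three-way switch on color_index: positive picks one of the theme's armature color sets, -1 uses the group's own custom set, and 0 falls back to the default 3D view theme colors. A standalone restatement:

#include <stdio.h>

int main(void)
{
  for (int color_index = -1; color_index <= 1; color_index++) {
    if (color_index > 0) {
      printf("%d: theme armature color set tarm[%d]\n", color_index, color_index - 1);
    }
    else if (color_index == -1) {
      printf("%d: bone group's own custom color set\n", color_index);
    }
    else {
      printf("%d: default theme colors (bcolor = NULL)\n", color_index);
    }
  }
  return 0;
}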
/* This function is for brightening/darkening a given color (like UI_GetThemeColorShade3ubv()) */
static void cp_shade_color3ub(uchar cp[3], const int offset)
{
- int r, g, b;
-
- r = offset + (int) cp[0];
- CLAMP(r, 0, 255);
- g = offset + (int) cp[1];
- CLAMP(g, 0, 255);
- b = offset + (int) cp[2];
- CLAMP(b, 0, 255);
-
- cp[0] = r;
- cp[1] = g;
- cp[2] = b;
+ int r, g, b;
+
+ r = offset + (int)cp[0];
+ CLAMP(r, 0, 255);
+ g = offset + (int)cp[1];
+ CLAMP(g, 0, 255);
+ b = offset + (int)cp[2];
+ CLAMP(b, 0, 255);
+
+ cp[0] = r;
+ cp[1] = g;
+ cp[2] = b;
}
static void cp_shade_color3f(float cp[3], const float offset)
{
- add_v3_fl(cp, offset);
- CLAMP(cp[0], 0, 255);
- CLAMP(cp[1], 0, 255);
- CLAMP(cp[2], 0, 255);
+ add_v3_fl(cp, offset);
+ CLAMP(cp[0], 0, 255);
+ CLAMP(cp[1], 0, 255);
+ CLAMP(cp[2], 0, 255);
}
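
Both shade helpers add a signed per-channel offset and clamp; the byte version is the one doing the real work in the color logic below. A self-contained equivalent of cp_shade_color3ub() with a worked value:

#include <stdio.h>

typedef unsigned char uchar;

/* Add a signed offset to each channel and clamp to the byte range. */
static void shade3ub(uchar cp[3], int offset)
{
  for (int i = 0; i < 3; i++) {
    int c = offset + (int)cp[i];
    cp[i] = (uchar)(c < 0 ? 0 : (c > 255 ? 255 : c));
  }
}

int main(void)
{
  uchar col[3] = {200, 30, 128};
  shade3ub(col, -50); /* darken, like the "-50" solid-color tweak above */
  printf("%u %u %u\n", col[0], col[1], col[2]); /* 150 0 78 */
  return 0;
}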
-
/* This function sets the gl-color for coloring a certain bone (based on bcolor) */
-static bool set_pchan_color(short colCode, const int boneflag, const short constflag, float r_color[4])
+static bool set_pchan_color(short colCode,
+ const int boneflag,
+ const short constflag,
+ float r_color[4])
{
- float *fcolor = r_color;
- const ThemeWireColor *bcolor = g_color.bcolor;
-
- switch (colCode) {
- case PCHAN_COLOR_NORMAL:
- {
- if (bcolor) {
- uchar cp[4] = {255};
-
- if (boneflag & BONE_DRAW_ACTIVE) {
- copy_v3_v3_char((char *)cp, bcolor->active);
- if (!(boneflag & BONE_SELECTED)) {
- cp_shade_color3ub(cp, -80);
- }
- }
- else if (boneflag & BONE_SELECTED) {
- copy_v3_v3_char((char *)cp, bcolor->select);
- }
- else {
- /* a bit darker than solid */
- copy_v3_v3_char((char *)cp, bcolor->solid);
- cp_shade_color3ub(cp, -50);
- }
-
- rgb_uchar_to_float(fcolor, cp);
- }
- else {
- if ((boneflag & BONE_DRAW_ACTIVE) && (boneflag & BONE_SELECTED)) {
- UI_GetThemeColor4fv(TH_BONE_POSE_ACTIVE, fcolor);
- }
- else if (boneflag & BONE_DRAW_ACTIVE) {
- UI_GetThemeColorBlendShade4fv(TH_WIRE, TH_BONE_POSE, 0.15f, 0, fcolor);
- }
- else if (boneflag & BONE_SELECTED) {
- UI_GetThemeColor4fv(TH_BONE_POSE, fcolor);
- }
- else {
- UI_GetThemeColor4fv(TH_WIRE, fcolor);
- }
- }
-
- return true;
- }
- case PCHAN_COLOR_SOLID:
- {
- UI_GetThemeColor4fv(TH_BONE_SOLID, fcolor);
-
- if (bcolor) {
- float solid_bcolor[3];
- rgb_uchar_to_float(solid_bcolor, (uchar *)bcolor->solid);
- interp_v3_v3v3(fcolor, fcolor, solid_bcolor, 1.0f);
- }
-
- return true;
- }
- case PCHAN_COLOR_CONSTS:
- {
- if ((bcolor == NULL) || (bcolor->flag & TH_WIRECOLOR_CONSTCOLS)) {
- uchar cp[4];
- if (constflag & PCHAN_HAS_TARGET) { rgba_char_args_set((char *)cp, 255, 150, 0, 80); }
- else if (constflag & PCHAN_HAS_IK) { rgba_char_args_set((char *)cp, 255, 255, 0, 80); }
- else if (constflag & PCHAN_HAS_SPLINEIK) { rgba_char_args_set((char *)cp, 200, 255, 0, 80); }
- else if (constflag & PCHAN_HAS_CONST) { rgba_char_args_set((char *)cp, 0, 255, 120, 80); }
- else {
- return false;
- }
-
- rgba_uchar_to_float(fcolor, cp);
-
- return true;
- }
- return false;
- }
- case PCHAN_COLOR_SPHEREBONE_BASE:
- {
- if (bcolor) {
- uchar cp[4] = {255};
-
- if (boneflag & BONE_DRAW_ACTIVE) {
- copy_v3_v3_char((char *)cp, bcolor->active);
- }
- else if (boneflag & BONE_SELECTED) {
- copy_v3_v3_char((char *)cp, bcolor->select);
- }
- else {
- copy_v3_v3_char((char *)cp, bcolor->solid);
- }
-
- rgb_uchar_to_float(fcolor, cp);
- }
- else {
- if (boneflag & BONE_DRAW_ACTIVE) {
- UI_GetThemeColorShade4fv(TH_BONE_POSE, 40, fcolor);
- }
- else if (boneflag & BONE_SELECTED) {
- UI_GetThemeColor4fv(TH_BONE_POSE, fcolor);
- }
- else {
- UI_GetThemeColor4fv(TH_BONE_SOLID, fcolor);
- }
- }
-
- return true;
- }
- case PCHAN_COLOR_SPHEREBONE_END:
- {
- if (bcolor) {
- uchar cp[4] = {255};
-
- if (boneflag & BONE_DRAW_ACTIVE) {
- copy_v3_v3_char((char *)cp, bcolor->active);
- cp_shade_color3ub(cp, 10);
- }
- else if (boneflag & BONE_SELECTED) {
- copy_v3_v3_char((char *)cp, bcolor->select);
- cp_shade_color3ub(cp, -30);
- }
- else {
- copy_v3_v3_char((char *)cp, bcolor->solid);
- cp_shade_color3ub(cp, -30);
- }
-
- rgb_uchar_to_float(fcolor, cp);
- }
- else {
- if (boneflag & BONE_DRAW_ACTIVE) {
- UI_GetThemeColorShade4fv(TH_BONE_POSE, 10, fcolor);
- }
- else if (boneflag & BONE_SELECTED) {
- UI_GetThemeColorShade4fv(TH_BONE_POSE, -30, fcolor);
- }
- else {
- UI_GetThemeColorShade4fv(TH_BONE_SOLID, -30, fcolor);
- }
- }
- break;
- }
- case PCHAN_COLOR_LINEBONE:
- {
- /* inner part in background color or constraint */
- if ((constflag) && ((bcolor == NULL) || (bcolor->flag & TH_WIRECOLOR_CONSTCOLS))) {
- uchar cp[4];
- if (constflag & PCHAN_HAS_TARGET) { rgba_char_args_set((char *)cp, 255, 150, 0, 255); }
- else if (constflag & PCHAN_HAS_IK) { rgba_char_args_set((char *)cp, 255, 255, 0, 255); }
- else if (constflag & PCHAN_HAS_SPLINEIK) { rgba_char_args_set((char *)cp, 200, 255, 0, 255); }
- else if (constflag & PCHAN_HAS_CONST) { rgba_char_args_set((char *)cp, 0, 255, 120, 255); }
- else if (constflag) { UI_GetThemeColor4ubv(TH_BONE_POSE, cp); } /* PCHAN_HAS_ACTION */
-
- rgb_uchar_to_float(fcolor, cp);
- }
- else {
- if (bcolor) {
- const char *cp = bcolor->solid;
- rgb_uchar_to_float(fcolor, (uchar *)cp);
- fcolor[3] = 204.f / 255.f;
- }
- else {
- UI_GetThemeColorShade4fv(TH_BACK, -30, fcolor);
- }
- }
-
- return true;
- }
- }
-
- return false;
+ float *fcolor = r_color;
+ const ThemeWireColor *bcolor = g_color.bcolor;
+
+ switch (colCode) {
+ case PCHAN_COLOR_NORMAL: {
+ if (bcolor) {
+ uchar cp[4] = {255};
+
+ if (boneflag & BONE_DRAW_ACTIVE) {
+ copy_v3_v3_char((char *)cp, bcolor->active);
+ if (!(boneflag & BONE_SELECTED)) {
+ cp_shade_color3ub(cp, -80);
+ }
+ }
+ else if (boneflag & BONE_SELECTED) {
+ copy_v3_v3_char((char *)cp, bcolor->select);
+ }
+ else {
+ /* a bit darker than solid */
+ copy_v3_v3_char((char *)cp, bcolor->solid);
+ cp_shade_color3ub(cp, -50);
+ }
+
+ rgb_uchar_to_float(fcolor, cp);
+ }
+ else {
+ if ((boneflag & BONE_DRAW_ACTIVE) && (boneflag & BONE_SELECTED)) {
+ UI_GetThemeColor4fv(TH_BONE_POSE_ACTIVE, fcolor);
+ }
+ else if (boneflag & BONE_DRAW_ACTIVE) {
+ UI_GetThemeColorBlendShade4fv(TH_WIRE, TH_BONE_POSE, 0.15f, 0, fcolor);
+ }
+ else if (boneflag & BONE_SELECTED) {
+ UI_GetThemeColor4fv(TH_BONE_POSE, fcolor);
+ }
+ else {
+ UI_GetThemeColor4fv(TH_WIRE, fcolor);
+ }
+ }
+
+ return true;
+ }
+ case PCHAN_COLOR_SOLID: {
+ UI_GetThemeColor4fv(TH_BONE_SOLID, fcolor);
+
+ if (bcolor) {
+ float solid_bcolor[3];
+ rgb_uchar_to_float(solid_bcolor, (uchar *)bcolor->solid);
+ interp_v3_v3v3(fcolor, fcolor, solid_bcolor, 1.0f);
+ }
+
+ return true;
+ }
+ case PCHAN_COLOR_CONSTS: {
+ if ((bcolor == NULL) || (bcolor->flag & TH_WIRECOLOR_CONSTCOLS)) {
+ uchar cp[4];
+ if (constflag & PCHAN_HAS_TARGET) {
+ rgba_char_args_set((char *)cp, 255, 150, 0, 80);
+ }
+ else if (constflag & PCHAN_HAS_IK) {
+ rgba_char_args_set((char *)cp, 255, 255, 0, 80);
+ }
+ else if (constflag & PCHAN_HAS_SPLINEIK) {
+ rgba_char_args_set((char *)cp, 200, 255, 0, 80);
+ }
+ else if (constflag & PCHAN_HAS_CONST) {
+ rgba_char_args_set((char *)cp, 0, 255, 120, 80);
+ }
+ else {
+ return false;
+ }
+
+ rgba_uchar_to_float(fcolor, cp);
+
+ return true;
+ }
+ return false;
+ }
+ case PCHAN_COLOR_SPHEREBONE_BASE: {
+ if (bcolor) {
+ uchar cp[4] = {255};
+
+ if (boneflag & BONE_DRAW_ACTIVE) {
+ copy_v3_v3_char((char *)cp, bcolor->active);
+ }
+ else if (boneflag & BONE_SELECTED) {
+ copy_v3_v3_char((char *)cp, bcolor->select);
+ }
+ else {
+ copy_v3_v3_char((char *)cp, bcolor->solid);
+ }
+
+ rgb_uchar_to_float(fcolor, cp);
+ }
+ else {
+ if (boneflag & BONE_DRAW_ACTIVE) {
+ UI_GetThemeColorShade4fv(TH_BONE_POSE, 40, fcolor);
+ }
+ else if (boneflag & BONE_SELECTED) {
+ UI_GetThemeColor4fv(TH_BONE_POSE, fcolor);
+ }
+ else {
+ UI_GetThemeColor4fv(TH_BONE_SOLID, fcolor);
+ }
+ }
+
+ return true;
+ }
+ case PCHAN_COLOR_SPHEREBONE_END: {
+ if (bcolor) {
+ uchar cp[4] = {255};
+
+ if (boneflag & BONE_DRAW_ACTIVE) {
+ copy_v3_v3_char((char *)cp, bcolor->active);
+ cp_shade_color3ub(cp, 10);
+ }
+ else if (boneflag & BONE_SELECTED) {
+ copy_v3_v3_char((char *)cp, bcolor->select);
+ cp_shade_color3ub(cp, -30);
+ }
+ else {
+ copy_v3_v3_char((char *)cp, bcolor->solid);
+ cp_shade_color3ub(cp, -30);
+ }
+
+ rgb_uchar_to_float(fcolor, cp);
+ }
+ else {
+ if (boneflag & BONE_DRAW_ACTIVE) {
+ UI_GetThemeColorShade4fv(TH_BONE_POSE, 10, fcolor);
+ }
+ else if (boneflag & BONE_SELECTED) {
+ UI_GetThemeColorShade4fv(TH_BONE_POSE, -30, fcolor);
+ }
+ else {
+ UI_GetThemeColorShade4fv(TH_BONE_SOLID, -30, fcolor);
+ }
+ }
+ break;
+ }
+ case PCHAN_COLOR_LINEBONE: {
+ /* inner part in background color or constraint */
+ if ((constflag) && ((bcolor == NULL) || (bcolor->flag & TH_WIRECOLOR_CONSTCOLS))) {
+ uchar cp[4];
+ if (constflag & PCHAN_HAS_TARGET) {
+ rgba_char_args_set((char *)cp, 255, 150, 0, 255);
+ }
+ else if (constflag & PCHAN_HAS_IK) {
+ rgba_char_args_set((char *)cp, 255, 255, 0, 255);
+ }
+ else if (constflag & PCHAN_HAS_SPLINEIK) {
+ rgba_char_args_set((char *)cp, 200, 255, 0, 255);
+ }
+ else if (constflag & PCHAN_HAS_CONST) {
+ rgba_char_args_set((char *)cp, 0, 255, 120, 255);
+ }
+ else if (constflag) {
+ UI_GetThemeColor4ubv(TH_BONE_POSE, cp);
+ } /* PCHAN_HAS_ACTION */
+
+ rgb_uchar_to_float(fcolor, cp);
+ }
+ else {
+ if (bcolor) {
+ const char *cp = bcolor->solid;
+ rgb_uchar_to_float(fcolor, (uchar *)cp);
+ fcolor[3] = 204.f / 255.f;
+ }
+ else {
+ UI_GetThemeColorShade4fv(TH_BACK, -30, fcolor);
+ }
+ }
+
+ return true;
+ }
+ }
+
+ return false;
}
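
A detail that is easy to miss in the reflowed switch: the constraint colors are tested in a fixed priority order, so a bone carrying several constraint types shows the highest-priority one (target, then IK, then spline IK, then generic). A toy restatement of that ladder:

#include <stdio.h>

enum { HAS_TARGET = 1 << 0, HAS_IK = 1 << 1, HAS_SPLINEIK = 1 << 2, HAS_CONST = 1 << 3 };

static const char *const_color_name(int constflag)
{
  if (constflag & HAS_TARGET) return "orange (target)";
  if (constflag & HAS_IK) return "yellow (IK)";
  if (constflag & HAS_SPLINEIK) return "yellow-green (spline IK)";
  if (constflag & HAS_CONST) return "green (other constraint)";
  return "none";
}

int main(void)
{
  /* A bone with both IK and a generic constraint shows the IK color. */
  printf("%s\n", const_color_name(HAS_IK | HAS_CONST));
  return 0;
}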
/** \} */
-
/* -------------------------------------------------------------------- */
/** \name Drawing Color Helpers
* \{ */
@@ -762,121 +792,129 @@ static bool set_pchan_color(short colCode, const int boneflag, const short const
/** See: 'set_pchan_color' */
static void update_color(const Object *ob, const float const_color[4])
{
- const bArmature *arm = ob->data;
- g_theme.const_color = const_color;
- g_theme.const_wire = (
- ((ob->base_flag & BASE_SELECTED) ||
- (arm->drawtype == ARM_WIRE)) ? 1.5f : ((g_data.transparent) ? 1.0f : 0.0f));
+ const bArmature *arm = ob->data;
+ g_theme.const_color = const_color;
+ g_theme.const_wire = (((ob->base_flag & BASE_SELECTED) || (arm->drawtype == ARM_WIRE)) ?
+ 1.5f :
+ ((g_data.transparent) ? 1.0f : 0.0f));
#define NO_ALPHA(c) (((c)[3] = 1.0f), (c))
- UI_GetThemeColor3fv(TH_SELECT, NO_ALPHA(g_theme.select_color));
- UI_GetThemeColor3fv(TH_EDGE_SELECT, NO_ALPHA(g_theme.edge_select_color));
- UI_GetThemeColorShade3fv(TH_EDGE_SELECT, -20, NO_ALPHA(g_theme.bone_select_color));
- UI_GetThemeColor3fv(TH_WIRE, NO_ALPHA(g_theme.wire_color));
- UI_GetThemeColor3fv(TH_WIRE_EDIT, NO_ALPHA(g_theme.wire_edit_color));
- UI_GetThemeColor3fv(TH_BONE_SOLID, NO_ALPHA(g_theme.bone_solid_color));
- UI_GetThemeColorBlendShade3fv(TH_WIRE_EDIT, TH_EDGE_SELECT, 0.15f, 0, NO_ALPHA(g_theme.bone_active_unselect_color));
- UI_GetThemeColor3fv(TH_BONE_POSE, NO_ALPHA(g_theme.bone_pose_color));
- UI_GetThemeColor3fv(TH_BONE_POSE_ACTIVE, NO_ALPHA(g_theme.bone_pose_active_color));
- UI_GetThemeColorBlendShade3fv(TH_WIRE, TH_BONE_POSE, 0.15f, 0, NO_ALPHA(g_theme.bone_pose_active_unselect_color));
- UI_GetThemeColor3fv(TH_TEXT_HI, NO_ALPHA(g_theme.text_hi_color));
- UI_GetThemeColor3fv(TH_TEXT, NO_ALPHA(g_theme.text_color));
- UI_GetThemeColor3fv(TH_VERTEX_SELECT, NO_ALPHA(g_theme.vertex_select_color));
- UI_GetThemeColor3fv(TH_VERTEX, NO_ALPHA(g_theme.vertex_color));
+ UI_GetThemeColor3fv(TH_SELECT, NO_ALPHA(g_theme.select_color));
+ UI_GetThemeColor3fv(TH_EDGE_SELECT, NO_ALPHA(g_theme.edge_select_color));
+ UI_GetThemeColorShade3fv(TH_EDGE_SELECT, -20, NO_ALPHA(g_theme.bone_select_color));
+ UI_GetThemeColor3fv(TH_WIRE, NO_ALPHA(g_theme.wire_color));
+ UI_GetThemeColor3fv(TH_WIRE_EDIT, NO_ALPHA(g_theme.wire_edit_color));
+ UI_GetThemeColor3fv(TH_BONE_SOLID, NO_ALPHA(g_theme.bone_solid_color));
+ UI_GetThemeColorBlendShade3fv(
+ TH_WIRE_EDIT, TH_EDGE_SELECT, 0.15f, 0, NO_ALPHA(g_theme.bone_active_unselect_color));
+ UI_GetThemeColor3fv(TH_BONE_POSE, NO_ALPHA(g_theme.bone_pose_color));
+ UI_GetThemeColor3fv(TH_BONE_POSE_ACTIVE, NO_ALPHA(g_theme.bone_pose_active_color));
+ UI_GetThemeColorBlendShade3fv(
+ TH_WIRE, TH_BONE_POSE, 0.15f, 0, NO_ALPHA(g_theme.bone_pose_active_unselect_color));
+ UI_GetThemeColor3fv(TH_TEXT_HI, NO_ALPHA(g_theme.text_hi_color));
+ UI_GetThemeColor3fv(TH_TEXT, NO_ALPHA(g_theme.text_color));
+ UI_GetThemeColor3fv(TH_VERTEX_SELECT, NO_ALPHA(g_theme.vertex_select_color));
+ UI_GetThemeColor3fv(TH_VERTEX, NO_ALPHA(g_theme.vertex_color));
#undef NO_ALPHA
}
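
The NO_ALPHA macro above is a comma expression: it assigns 1.0 to the fourth component and then evaluates to the array itself, so the same buffer can be passed straight into the 3fv-style theme getters that only write RGB. A minimal standalone demonstration:

#include <stdio.h>

#define NO_ALPHA(c) (((c)[3] = 1.0f), (c))

/* Stand-in for a UI_GetThemeColor3fv-style getter that fills only RGB. */
static void fill_rgb(float col[4]) { col[0] = 0.2f; col[1] = 0.4f; col[2] = 0.6f; }

int main(void)
{
  float color[4] = {0, 0, 0, 0};
  fill_rgb(NO_ALPHA(color)); /* alpha forced to 1.0 before the RGB fill */
  printf("%g %g %g %g\n", color[0], color[1], color[2], color[3]); /* 0.2 0.4 0.6 1 */
  return 0;
}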
-static const float *get_bone_solid_color(
- const EditBone *UNUSED(eBone), const bPoseChannel *pchan, const bArmature *arm,
- const int boneflag, const short constflag)
+static const float *get_bone_solid_color(const EditBone *UNUSED(eBone),
+ const bPoseChannel *pchan,
+ const bArmature *arm,
+ const int boneflag,
+ const short constflag)
{
- if (g_theme.const_color) {
- return g_theme.bone_solid_color;
- }
-
- if (arm->flag & ARM_POSEMODE) {
- static float disp_color[4];
- copy_v4_v4(disp_color, pchan->draw_data->solid_color);
- set_pchan_color(PCHAN_COLOR_SOLID, boneflag, constflag, disp_color);
- return disp_color;
- }
-
- return g_theme.bone_solid_color;
+ if (g_theme.const_color) {
+ return g_theme.bone_solid_color;
+ }
+
+ if (arm->flag & ARM_POSEMODE) {
+ static float disp_color[4];
+ copy_v4_v4(disp_color, pchan->draw_data->solid_color);
+ set_pchan_color(PCHAN_COLOR_SOLID, boneflag, constflag, disp_color);
+ return disp_color;
+ }
+
+ return g_theme.bone_solid_color;
}
-static const float *get_bone_solid_with_consts_color(
- const EditBone *eBone, const bPoseChannel *pchan, const bArmature *arm,
- const int boneflag, const short constflag)
+static const float *get_bone_solid_with_consts_color(const EditBone *eBone,
+ const bPoseChannel *pchan,
+ const bArmature *arm,
+ const int boneflag,
+ const short constflag)
{
- if (g_theme.const_color) {
- return g_theme.bone_solid_color;
- }
-
- const float *col = get_bone_solid_color(eBone, pchan, arm, boneflag, constflag);
-
- static float consts_color[4];
- if (set_pchan_color(PCHAN_COLOR_CONSTS, boneflag, constflag, consts_color)) {
- interp_v3_v3v3(consts_color, col, consts_color, 0.5f);
- }
- else {
- copy_v4_v4(consts_color, col);
- }
- return consts_color;
+ if (g_theme.const_color) {
+ return g_theme.bone_solid_color;
+ }
+
+ const float *col = get_bone_solid_color(eBone, pchan, arm, boneflag, constflag);
+
+ static float consts_color[4];
+ if (set_pchan_color(PCHAN_COLOR_CONSTS, boneflag, constflag, consts_color)) {
+ interp_v3_v3v3(consts_color, col, consts_color, 0.5f);
+ }
+ else {
+ copy_v4_v4(consts_color, col);
+ }
+ return consts_color;
}
static float get_bone_wire_thickness(int boneflag)
{
- if (g_theme.const_color) {
- return g_theme.const_wire;
- }
- else if (boneflag & (BONE_DRAW_ACTIVE | BONE_SELECTED)) {
- return 2.0f;
- }
- else {
- return 1.0f;
- }
+ if (g_theme.const_color) {
+ return g_theme.const_wire;
+ }
+ else if (boneflag & (BONE_DRAW_ACTIVE | BONE_SELECTED)) {
+ return 2.0f;
+ }
+ else {
+ return 1.0f;
+ }
}
-static const float *get_bone_wire_color(
- const EditBone *eBone, const bPoseChannel *pchan, const bArmature *arm,
- const int boneflag, const short constflag)
+static const float *get_bone_wire_color(const EditBone *eBone,
+ const bPoseChannel *pchan,
+ const bArmature *arm,
+ const int boneflag,
+ const short constflag)
{
- static float disp_color[4];
-
- if (g_theme.const_color) {
- copy_v3_v3(disp_color, g_theme.const_color);
- }
- else if (eBone) {
- if (boneflag & BONE_SELECTED) {
- if (boneflag & BONE_DRAW_ACTIVE) {
- copy_v3_v3(disp_color, g_theme.edge_select_color);
- }
- else {
- copy_v3_v3(disp_color, g_theme.bone_select_color);
- }
- }
- else {
- if (boneflag & BONE_DRAW_ACTIVE) {
- copy_v3_v3(disp_color, g_theme.bone_active_unselect_color);
- }
- else {
- copy_v3_v3(disp_color, g_theme.wire_edit_color);
- }
- }
- }
- else if (arm->flag & ARM_POSEMODE) {
- copy_v4_v4(disp_color, pchan->draw_data->wire_color);
- set_pchan_color(PCHAN_COLOR_NORMAL, boneflag, constflag, disp_color);
- }
- else {
- copy_v3_v3(disp_color, g_theme.vertex_color);
- }
-
- disp_color[3] = get_bone_wire_thickness(boneflag);
-
- return disp_color;
+ static float disp_color[4];
+
+ if (g_theme.const_color) {
+ copy_v3_v3(disp_color, g_theme.const_color);
+ }
+ else if (eBone) {
+ if (boneflag & BONE_SELECTED) {
+ if (boneflag & BONE_DRAW_ACTIVE) {
+ copy_v3_v3(disp_color, g_theme.edge_select_color);
+ }
+ else {
+ copy_v3_v3(disp_color, g_theme.bone_select_color);
+ }
+ }
+ else {
+ if (boneflag & BONE_DRAW_ACTIVE) {
+ copy_v3_v3(disp_color, g_theme.bone_active_unselect_color);
+ }
+ else {
+ copy_v3_v3(disp_color, g_theme.wire_edit_color);
+ }
+ }
+ }
+ else if (arm->flag & ARM_POSEMODE) {
+ copy_v4_v4(disp_color, pchan->draw_data->wire_color);
+ set_pchan_color(PCHAN_COLOR_NORMAL, boneflag, constflag, disp_color);
+ }
+ else {
+ copy_v3_v3(disp_color, g_theme.vertex_color);
+ }
+
+ disp_color[3] = get_bone_wire_thickness(boneflag);
+
+ return disp_color;
}
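
Note the packing trick at the end of get_bone_wire_color(): the fourth component of the returned "color" is not opacity but the wire thickness from get_bone_wire_thickness(); presumably the consuming wire shader decodes it as a width factor (an assumption, since the shader is not part of this diff). Sketch:

#include <stdio.h>

int main(void)
{
  float wire[4] = {0.8f, 0.8f, 0.8f, 0.0f};
  int selected = 1;
  wire[3] = selected ? 2.0f : 1.0f; /* thickness, not alpha (see get_bone_wire_thickness) */
  printf("rgb=(%g, %g, %g) thickness=%g\n", wire[0], wire[1], wire[2], wire[3]);
  return 0;
}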
#define HINT_MUL 0.5f
@@ -884,820 +922,869 @@ static const float *get_bone_wire_color(
static void bone_hint_color_shade(float hint_color[4], const float color[4])
{
- mul_v3_v3fl(hint_color, color, HINT_MUL);
- cp_shade_color3f(hint_color, -HINT_SHADE);
- hint_color[3] = 1.0f;
+ mul_v3_v3fl(hint_color, color, HINT_MUL);
+ cp_shade_color3f(hint_color, -HINT_SHADE);
+ hint_color[3] = 1.0f;
}
-static const float *get_bone_hint_color(
- const EditBone *eBone, const bPoseChannel *pchan, const bArmature *arm,
- const int boneflag, const short constflag)
+static const float *get_bone_hint_color(const EditBone *eBone,
+ const bPoseChannel *pchan,
+ const bArmature *arm,
+ const int boneflag,
+ const short constflag)
{
- static float hint_color[4] = {0.0f, 0.0f, 0.0f, 1.0f};
+ static float hint_color[4] = {0.0f, 0.0f, 0.0f, 1.0f};
- if (g_theme.const_color) {
- bone_hint_color_shade(hint_color, g_theme.bone_solid_color);
- }
- else {
- const float *wire_color = get_bone_wire_color(eBone, pchan, arm, boneflag, constflag);
- bone_hint_color_shade(hint_color, wire_color);
- }
+ if (g_theme.const_color) {
+ bone_hint_color_shade(hint_color, g_theme.bone_solid_color);
+ }
+ else {
+ const float *wire_color = get_bone_wire_color(eBone, pchan, arm, boneflag, constflag);
+ bone_hint_color_shade(hint_color, wire_color);
+ }
- return hint_color;
+ return hint_color;
}
/** \} */
-
/* -------------------------------------------------------------------- */
/** \name Helper Utils
* \{ */
static void pchan_draw_data_init(bPoseChannel *pchan)
{
- if (pchan->draw_data != NULL) {
- if (pchan->draw_data->bbone_matrix_len != pchan->bone->segments) {
- MEM_SAFE_FREE(pchan->draw_data);
- }
- }
-
- if (pchan->draw_data == NULL) {
- pchan->draw_data = MEM_mallocN(sizeof(*pchan->draw_data) + sizeof(Mat4) * pchan->bone->segments, __func__);
- pchan->draw_data->bbone_matrix_len = pchan->bone->segments;
- }
+ if (pchan->draw_data != NULL) {
+ if (pchan->draw_data->bbone_matrix_len != pchan->bone->segments) {
+ MEM_SAFE_FREE(pchan->draw_data);
+ }
+ }
+
+ if (pchan->draw_data == NULL) {
+ pchan->draw_data = MEM_mallocN(
+ sizeof(*pchan->draw_data) + sizeof(Mat4) * pchan->bone->segments, __func__);
+ pchan->draw_data->bbone_matrix_len = pchan->bone->segments;
+ }
}
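
The allocation above uses a single block for the header plus a trailing array of per-segment matrices, sized from pchan->bone->segments, and frees/reallocates whenever the segment count changes. A self-contained sketch of that idiom with a stand-in struct:

#include <stdio.h>
#include <stdlib.h>

typedef float Mat4[4][4];

typedef struct DrawData {
  int bbone_matrix_len;
  /* Mat4 bbone_matrix[] follows the header in the same allocation. */
} DrawData;

int main(void)
{
  int segments = 8;
  DrawData *dd = malloc(sizeof(*dd) + sizeof(Mat4) * segments); /* one block */
  if (dd == NULL) {
    return 1;
  }
  dd->bbone_matrix_len = segments;
  Mat4 *mats = (Mat4 *)(dd + 1); /* trailing array starts right after the header */
  mats[0][3][1] = 1.0f;          /* e.g. write a Y translation into segment 0 */
  printf("%d segment matrices in one block\n", dd->bbone_matrix_len);
  free(dd);
  return 0;
}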
static void draw_bone_update_disp_matrix_default(EditBone *eBone, bPoseChannel *pchan)
{
- float s[4][4], ebmat[4][4];
- float length;
- float (*bone_mat)[4];
- float (*disp_mat)[4];
- float (*disp_tail_mat)[4];
-
-  /* TODO: This should be moved to depsgraph or armature refresh
-   * and not be tied to the draw pass creation.
-   * This would refresh the armature without invalidating the draw cache. */
- if (pchan) {
- length = pchan->bone->length;
- bone_mat = pchan->pose_mat;
- disp_mat = pchan->disp_mat;
- disp_tail_mat = pchan->disp_tail_mat;
- }
- else {
- eBone->length = len_v3v3(eBone->tail, eBone->head);
- ED_armature_ebone_to_mat4(eBone, ebmat);
-
- length = eBone->length;
- bone_mat = ebmat;
- disp_mat = eBone->disp_mat;
- disp_tail_mat = eBone->disp_tail_mat;
- }
-
- scale_m4_fl(s, length);
- mul_m4_m4m4(disp_mat, bone_mat, s);
- copy_m4_m4(disp_tail_mat, disp_mat);
- translate_m4(disp_tail_mat, 0.0f, 1.0f, 0.0f);
+ float s[4][4], ebmat[4][4];
+ float length;
+ float(*bone_mat)[4];
+ float(*disp_mat)[4];
+ float(*disp_tail_mat)[4];
+
+  /* TODO: This should be moved to depsgraph or armature refresh
+   * and not be tied to the draw pass creation.
+   * This would refresh the armature without invalidating the draw cache. */
+ if (pchan) {
+ length = pchan->bone->length;
+ bone_mat = pchan->pose_mat;
+ disp_mat = pchan->disp_mat;
+ disp_tail_mat = pchan->disp_tail_mat;
+ }
+ else {
+ eBone->length = len_v3v3(eBone->tail, eBone->head);
+ ED_armature_ebone_to_mat4(eBone, ebmat);
+
+ length = eBone->length;
+ bone_mat = ebmat;
+ disp_mat = eBone->disp_mat;
+ disp_tail_mat = eBone->disp_tail_mat;
+ }
+
+ scale_m4_fl(s, length);
+ mul_m4_m4m4(disp_mat, bone_mat, s);
+ copy_m4_m4(disp_tail_mat, disp_mat);
+ translate_m4(disp_tail_mat, 0.0f, 1.0f, 0.0f);
}
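
The display matrices compose as disp_mat = bone_mat * scale(length), and disp_tail_mat is then translated by unit +Y in that scaled local space, which lands exactly on the bone tip. A tiny numeric check of the tail offset:

#include <stdio.h>

int main(void)
{
  float length = 2.0f;
  float head[3] = {1.0f, 0.0f, 0.0f};   /* bone head in object space */
  float y_axis[3] = {0.0f, 1.0f, 0.0f}; /* bone's rest direction (toy case) */
  float tail[3];
  for (int i = 0; i < 3; i++) {
    /* what translate_m4(disp_tail_mat, 0, 1, 0) achieves after the scale */
    tail[i] = head[i] + y_axis[i] * length;
  }
  printf("tail = (%g, %g, %g)\n", tail[0], tail[1], tail[2]); /* (1, 2, 0) */
  return 0;
}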
/* compute connected child pointer for B-Bone drawing */
static void edbo_compute_bbone_child(bArmature *arm)
{
- EditBone *eBone;
+ EditBone *eBone;
- for (eBone = arm->edbo->first; eBone; eBone = eBone->next) {
- eBone->bbone_child = NULL;
- }
+ for (eBone = arm->edbo->first; eBone; eBone = eBone->next) {
+ eBone->bbone_child = NULL;
+ }
- for (eBone = arm->edbo->first; eBone; eBone = eBone->next) {
- if (eBone->parent && (eBone->flag & BONE_CONNECTED)) {
- eBone->parent->bbone_child = eBone;
- }
- }
+ for (eBone = arm->edbo->first; eBone; eBone = eBone->next) {
+ if (eBone->parent && (eBone->flag & BONE_CONNECTED)) {
+ eBone->parent->bbone_child = eBone;
+ }
+ }
}
/* A version of BKE_pchan_bbone_spline_setup() for previewing editmode curve settings. */
static void ebone_spline_preview(EditBone *ebone, float result_array[MAX_BBONE_SUBDIV][4][4])
{
- BBoneSplineParameters param;
- EditBone *prev, *next;
- float imat[4][4], bonemat[4][4];
- float tmp[3];
-
- memset(&param, 0, sizeof(param));
-
- param.segments = ebone->segments;
- param.length = ebone->length;
-
- /* Get "next" and "prev" bones - these are used for handle calculations. */
- if (ebone->bbone_prev_type == BBONE_HANDLE_AUTO) {
- /* Use connected parent. */
- if (ebone->flag & BONE_CONNECTED) {
- prev = ebone->parent;
- }
- else {
- prev = NULL;
- }
- }
- else {
- prev = ebone->bbone_prev;
- }
-
- if (ebone->bbone_next_type == BBONE_HANDLE_AUTO) {
- /* Use connected child. */
- next = ebone->bbone_child;
- }
- else {
- next = ebone->bbone_next;
- }
-
- /* compute handles from connected bones */
- if (prev || next) {
- ED_armature_ebone_to_mat4(ebone, imat);
- invert_m4(imat);
-
- if (prev) {
- param.use_prev = true;
-
- if (ebone->bbone_prev_type == BBONE_HANDLE_RELATIVE) {
- zero_v3(param.prev_h);
- }
- else if (ebone->bbone_prev_type == BBONE_HANDLE_TANGENT) {
- sub_v3_v3v3(tmp, prev->tail, prev->head);
- sub_v3_v3v3(tmp, ebone->head, tmp);
- mul_v3_m4v3(param.prev_h, imat, tmp);
- }
- else {
- param.prev_bbone = (prev->segments > 1);
-
- mul_v3_m4v3(param.prev_h, imat, prev->head);
- }
-
- if (!param.prev_bbone) {
- ED_armature_ebone_to_mat4(prev, bonemat);
- mul_m4_m4m4(param.prev_mat, imat, bonemat);
- }
- }
-
- if (next) {
- param.use_next = true;
-
- if (ebone->bbone_next_type == BBONE_HANDLE_RELATIVE) {
- copy_v3_fl3(param.next_h, 0.0f, param.length, 0.0);
- }
- else if (ebone->bbone_next_type == BBONE_HANDLE_TANGENT) {
- sub_v3_v3v3(tmp, next->tail, next->head);
- add_v3_v3v3(tmp, ebone->tail, tmp);
- mul_v3_m4v3(param.next_h, imat, tmp);
- }
- else {
- param.next_bbone = (next->segments > 1);
-
- mul_v3_m4v3(param.next_h, imat, next->tail);
- }
-
- ED_armature_ebone_to_mat4(next, bonemat);
- mul_m4_m4m4(param.next_mat, imat, bonemat);
- }
- }
-
- param.ease1 = ebone->ease1;
- param.ease2 = ebone->ease2;
- param.roll1 = ebone->roll1;
- param.roll2 = ebone->roll2;
-
- if (prev && (ebone->flag & BONE_ADD_PARENT_END_ROLL)) {
- param.roll1 += prev->roll2;
- }
-
- param.scaleIn = ebone->scaleIn;
- param.scaleOut = ebone->scaleOut;
-
- param.curveInX = ebone->curveInX;
- param.curveInY = ebone->curveInY;
-
- param.curveOutX = ebone->curveOutX;
- param.curveOutY = ebone->curveOutY;
-
- ebone->segments = BKE_pchan_bbone_spline_compute(&param, false, (Mat4 *)result_array);
+ BBoneSplineParameters param;
+ EditBone *prev, *next;
+ float imat[4][4], bonemat[4][4];
+ float tmp[3];
+
+ memset(&param, 0, sizeof(param));
+
+ param.segments = ebone->segments;
+ param.length = ebone->length;
+
+ /* Get "next" and "prev" bones - these are used for handle calculations. */
+ if (ebone->bbone_prev_type == BBONE_HANDLE_AUTO) {
+ /* Use connected parent. */
+ if (ebone->flag & BONE_CONNECTED) {
+ prev = ebone->parent;
+ }
+ else {
+ prev = NULL;
+ }
+ }
+ else {
+ prev = ebone->bbone_prev;
+ }
+
+ if (ebone->bbone_next_type == BBONE_HANDLE_AUTO) {
+ /* Use connected child. */
+ next = ebone->bbone_child;
+ }
+ else {
+ next = ebone->bbone_next;
+ }
+
+ /* compute handles from connected bones */
+ if (prev || next) {
+ ED_armature_ebone_to_mat4(ebone, imat);
+ invert_m4(imat);
+
+ if (prev) {
+ param.use_prev = true;
+
+ if (ebone->bbone_prev_type == BBONE_HANDLE_RELATIVE) {
+ zero_v3(param.prev_h);
+ }
+ else if (ebone->bbone_prev_type == BBONE_HANDLE_TANGENT) {
+ sub_v3_v3v3(tmp, prev->tail, prev->head);
+ sub_v3_v3v3(tmp, ebone->head, tmp);
+ mul_v3_m4v3(param.prev_h, imat, tmp);
+ }
+ else {
+ param.prev_bbone = (prev->segments > 1);
+
+ mul_v3_m4v3(param.prev_h, imat, prev->head);
+ }
+
+ if (!param.prev_bbone) {
+ ED_armature_ebone_to_mat4(prev, bonemat);
+ mul_m4_m4m4(param.prev_mat, imat, bonemat);
+ }
+ }
+
+ if (next) {
+ param.use_next = true;
+
+ if (ebone->bbone_next_type == BBONE_HANDLE_RELATIVE) {
+        copy_v3_fl3(param.next_h, 0.0f, param.length, 0.0f);
+ }
+ else if (ebone->bbone_next_type == BBONE_HANDLE_TANGENT) {
+ sub_v3_v3v3(tmp, next->tail, next->head);
+ add_v3_v3v3(tmp, ebone->tail, tmp);
+ mul_v3_m4v3(param.next_h, imat, tmp);
+ }
+ else {
+ param.next_bbone = (next->segments > 1);
+
+ mul_v3_m4v3(param.next_h, imat, next->tail);
+ }
+
+ ED_armature_ebone_to_mat4(next, bonemat);
+ mul_m4_m4m4(param.next_mat, imat, bonemat);
+ }
+ }
+
+ param.ease1 = ebone->ease1;
+ param.ease2 = ebone->ease2;
+ param.roll1 = ebone->roll1;
+ param.roll2 = ebone->roll2;
+
+ if (prev && (ebone->flag & BONE_ADD_PARENT_END_ROLL)) {
+ param.roll1 += prev->roll2;
+ }
+
+ param.scaleIn = ebone->scaleIn;
+ param.scaleOut = ebone->scaleOut;
+
+ param.curveInX = ebone->curveInX;
+ param.curveInY = ebone->curveInY;
+
+ param.curveOutX = ebone->curveOutX;
+ param.curveOutY = ebone->curveOutY;
+
+ ebone->segments = BKE_pchan_bbone_spline_compute(&param, false, (Mat4 *)result_array);
}
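/* Editor's note (illustrative sketch, not part of this commit): the handle points
 * computed above are all expressed in the bone's local space. The pattern, using
 * only calls that appear in this function:
 *
 *   float imat[4][4], local_pt[3];
 *   ED_armature_ebone_to_mat4(ebone, imat); // bone -> armature space
 *   invert_m4(imat);                        // armature -> bone space
 *   mul_v3_m4v3(local_pt, imat, world_pt);  // bring one handle point into bone space
 *
 * BBONE_HANDLE_TANGENT offsets the bone's endpoint by the neighbor's direction
 * vector (the sub/add before the transform) instead of using the neighbor's
 * position directly. */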
static void draw_bone_update_disp_matrix_bbone(EditBone *eBone, bPoseChannel *pchan)
{
- float s[4][4], ebmat[4][4];
- float length, xwidth, zwidth;
- float (*bone_mat)[4];
- short bbone_segments;
-
- /* TODO : This should be moved to depsgraph or armature refresh
- * and not be tight to the draw pass creation.
- * This would refresh armature without invalidating the draw cache */
- if (pchan) {
- length = pchan->bone->length;
- xwidth = pchan->bone->xwidth;
- zwidth = pchan->bone->zwidth;
- bone_mat = pchan->pose_mat;
- bbone_segments = pchan->bone->segments;
- }
- else {
- eBone->length = len_v3v3(eBone->tail, eBone->head);
- ED_armature_ebone_to_mat4(eBone, ebmat);
-
- length = eBone->length;
- xwidth = eBone->xwidth;
- zwidth = eBone->zwidth;
- bone_mat = ebmat;
- bbone_segments = eBone->segments;
- }
-
- size_to_mat4(s, (const float[3]){xwidth, length / bbone_segments, zwidth});
-
- /* Compute BBones segment matrices... */
- /* Note that we need this even for one-segment bones, because box drawing need specific weirdo matrix for the box,
- * that we cannot use to draw end points & co. */
- if (pchan) {
- Mat4 *bbones_mat = (Mat4 *)pchan->draw_data->bbone_matrix;
- if (bbone_segments > 1) {
- BKE_pchan_bbone_spline_setup(pchan, false, false, bbones_mat);
-
- for (int i = bbone_segments; i--; bbones_mat++) {
- mul_m4_m4m4(bbones_mat->mat, bbones_mat->mat, s);
- mul_m4_m4m4(bbones_mat->mat, bone_mat, bbones_mat->mat);
- }
- }
- else {
- mul_m4_m4m4(bbones_mat->mat, bone_mat, s);
- }
- }
- else {
- float (*bbones_mat)[4][4] = eBone->disp_bbone_mat;
-
- if (bbone_segments > 1) {
- ebone_spline_preview(eBone, bbones_mat);
-
- for (int i = bbone_segments; i--; bbones_mat++) {
- mul_m4_m4m4(*bbones_mat, *bbones_mat, s);
- mul_m4_m4m4(*bbones_mat, bone_mat, *bbones_mat);
- }
- }
- else {
- mul_m4_m4m4(*bbones_mat, bone_mat, s);
- }
- }
-
- /* Grrr... We need default display matrix to draw end points, axes, etc. :( */
- draw_bone_update_disp_matrix_default(eBone, pchan);
+ float s[4][4], ebmat[4][4];
+ float length, xwidth, zwidth;
+ float(*bone_mat)[4];
+ short bbone_segments;
+
+  /* TODO: This should be moved to depsgraph or armature refresh
+   * and not be tied to the draw pass creation.
+   * This would refresh the armature without invalidating the draw cache. */
+ if (pchan) {
+ length = pchan->bone->length;
+ xwidth = pchan->bone->xwidth;
+ zwidth = pchan->bone->zwidth;
+ bone_mat = pchan->pose_mat;
+ bbone_segments = pchan->bone->segments;
+ }
+ else {
+ eBone->length = len_v3v3(eBone->tail, eBone->head);
+ ED_armature_ebone_to_mat4(eBone, ebmat);
+
+ length = eBone->length;
+ xwidth = eBone->xwidth;
+ zwidth = eBone->zwidth;
+ bone_mat = ebmat;
+ bbone_segments = eBone->segments;
+ }
+
+ size_to_mat4(s, (const float[3]){xwidth, length / bbone_segments, zwidth});
+
+ /* Compute BBones segment matrices... */
+  /* Note that we need this even for one-segment bones, because box drawing needs a
+   * specific matrix for the box that we cannot use to draw end points & co. */
+ if (pchan) {
+ Mat4 *bbones_mat = (Mat4 *)pchan->draw_data->bbone_matrix;
+ if (bbone_segments > 1) {
+ BKE_pchan_bbone_spline_setup(pchan, false, false, bbones_mat);
+
+ for (int i = bbone_segments; i--; bbones_mat++) {
+ mul_m4_m4m4(bbones_mat->mat, bbones_mat->mat, s);
+ mul_m4_m4m4(bbones_mat->mat, bone_mat, bbones_mat->mat);
+ }
+ }
+ else {
+ mul_m4_m4m4(bbones_mat->mat, bone_mat, s);
+ }
+ }
+ else {
+ float(*bbones_mat)[4][4] = eBone->disp_bbone_mat;
+
+ if (bbone_segments > 1) {
+ ebone_spline_preview(eBone, bbones_mat);
+
+ for (int i = bbone_segments; i--; bbones_mat++) {
+ mul_m4_m4m4(*bbones_mat, *bbones_mat, s);
+ mul_m4_m4m4(*bbones_mat, bone_mat, *bbones_mat);
+ }
+ }
+ else {
+ mul_m4_m4m4(*bbones_mat, bone_mat, s);
+ }
+ }
+
+ /* Grrr... We need default display matrix to draw end points, axes, etc. :( */
+ draw_bone_update_disp_matrix_default(eBone, pchan);
}
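/* Editor's note (illustrative sketch, not part of this commit): the two
 * mul_m4_m4m4() calls in each loop above compose every segment matrix as
 *
 *   seg = bone_mat * spline_mat * scale(xwidth, length / segments, zwidth)
 *
 * i.e. the non-uniform scale 's' is applied in segment space first, then the
 * result is moved into pose (or edit) space by bone_mat. */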
static void draw_bone_update_disp_matrix_custom(bPoseChannel *pchan)
{
- float s[4][4];
- float length;
- float (*bone_mat)[4];
- float (*disp_mat)[4];
- float (*disp_tail_mat)[4];
-
- /* See TODO above */
- length = PCHAN_CUSTOM_DRAW_SIZE(pchan);
- bone_mat = pchan->custom_tx ? pchan->custom_tx->pose_mat : pchan->pose_mat;
- disp_mat = pchan->disp_mat;
- disp_tail_mat = pchan->disp_tail_mat;
-
- scale_m4_fl(s, length);
- mul_m4_m4m4(disp_mat, bone_mat, s);
- copy_m4_m4(disp_tail_mat, disp_mat);
- translate_m4(disp_tail_mat, 0.0f, 1.0f, 0.0f);
+ float s[4][4];
+ float length;
+ float(*bone_mat)[4];
+ float(*disp_mat)[4];
+ float(*disp_tail_mat)[4];
+
+ /* See TODO above */
+ length = PCHAN_CUSTOM_DRAW_SIZE(pchan);
+ bone_mat = pchan->custom_tx ? pchan->custom_tx->pose_mat : pchan->pose_mat;
+ disp_mat = pchan->disp_mat;
+ disp_tail_mat = pchan->disp_tail_mat;
+
+ scale_m4_fl(s, length);
+ mul_m4_m4m4(disp_mat, bone_mat, s);
+ copy_m4_m4(disp_tail_mat, disp_mat);
+ translate_m4(disp_tail_mat, 0.0f, 1.0f, 0.0f);
}
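/* Editor's note (illustrative sketch, not part of this commit): bone geometry is
 * modeled along +Y with unit length, so once disp_mat embeds the draw size, the
 * tail matrix is just the head matrix translated one unit along its own Y axis:
 *
 *   copy_m4_m4(disp_tail_mat, disp_mat);
 *   translate_m4(disp_tail_mat, 0.0f, 1.0f, 0.0f); // local-space translation
 */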
-static void draw_axes(
- EditBone *eBone, bPoseChannel *pchan,
- const eGPUShaderConfig sh_cfg)
+static void draw_axes(EditBone *eBone, bPoseChannel *pchan, const eGPUShaderConfig sh_cfg)
{
- float final_col[4];
- const float *col = (g_theme.const_color) ? g_theme.const_color :
- (BONE_FLAG(eBone, pchan) & BONE_SELECTED) ? g_theme.text_hi_color : g_theme.text_color;
- copy_v4_v4(final_col, col);
- /* Mix with axes color. */
- final_col[3] = (g_theme.const_color) ? 1.0 : (BONE_FLAG(eBone, pchan) & BONE_SELECTED) ? 0.3 : 0.8;
- drw_shgroup_bone_axes(BONE_VAR(eBone, pchan, disp_mat), final_col, sh_cfg);
+ float final_col[4];
+ const float *col = (g_theme.const_color) ?
+ g_theme.const_color :
+ (BONE_FLAG(eBone, pchan) & BONE_SELECTED) ? g_theme.text_hi_color :
+ g_theme.text_color;
+ copy_v4_v4(final_col, col);
+ /* Mix with axes color. */
+  final_col[3] = (g_theme.const_color) ? 1.0f :
+                 (BONE_FLAG(eBone, pchan) & BONE_SELECTED) ? 0.3f : 0.8f;
+ drw_shgroup_bone_axes(BONE_VAR(eBone, pchan, disp_mat), final_col, sh_cfg);
}
-static void draw_points(
- const EditBone *eBone, const bPoseChannel *pchan, const bArmature *arm,
- const int boneflag, const short constflag,
- const eGPUShaderConfig sh_cfg, const int select_id)
+static void draw_points(const EditBone *eBone,
+ const bPoseChannel *pchan,
+ const bArmature *arm,
+ const int boneflag,
+ const short constflag,
+ const eGPUShaderConfig sh_cfg,
+ const int select_id)
{
- float col_solid_root[4], col_solid_tail[4], col_wire_root[4], col_wire_tail[4];
- float col_hint_root[4], col_hint_tail[4];
-
- copy_v4_v4(col_solid_root, g_theme.bone_solid_color);
- copy_v4_v4(col_solid_tail, g_theme.bone_solid_color);
- copy_v4_v4(col_wire_root, (g_theme.const_color) ? g_theme.const_color : g_theme.vertex_color);
- copy_v4_v4(col_wire_tail, (g_theme.const_color) ? g_theme.const_color : g_theme.vertex_color);
-
- const bool is_envelope_draw = (arm->drawtype == ARM_ENVELOPE);
- static const float envelope_ignore = -1.0f;
-
- col_wire_tail[3] = col_wire_root[3] = get_bone_wire_thickness(boneflag);
-
- /* Edit bone points can be selected */
- if (eBone) {
- if (eBone->flag & BONE_ROOTSEL) {
- copy_v3_v3(col_wire_root, g_theme.vertex_select_color);
- }
- if (eBone->flag & BONE_TIPSEL) {
- copy_v3_v3(col_wire_tail, g_theme.vertex_select_color);
- }
- }
- else if (arm->flag & ARM_POSEMODE) {
- const float *solid_color = get_bone_solid_color(eBone, pchan, arm, boneflag, constflag);
- const float *wire_color = get_bone_wire_color(eBone, pchan, arm, boneflag, constflag);
- copy_v4_v4(col_wire_tail, wire_color);
- copy_v4_v4(col_wire_root, wire_color);
- copy_v4_v4(col_solid_tail, solid_color);
- copy_v4_v4(col_solid_root, solid_color);
- }
-
- bone_hint_color_shade(col_hint_root, (g_theme.const_color) ? col_solid_root : col_wire_root);
- bone_hint_color_shade(col_hint_tail, (g_theme.const_color) ? col_solid_tail : col_wire_tail);
-
- /* Draw root point if we are not connected to our parent */
- if ((BONE_FLAG(eBone, pchan) & BONE_CONNECTED) == 0) {
- if (select_id != -1) {
- DRW_select_load_id(select_id | BONESEL_ROOT);
- }
-
- if (eBone) {
- if (is_envelope_draw) {
- drw_shgroup_bone_envelope(
- eBone->disp_mat, col_solid_root, col_hint_root, col_wire_root,
- &eBone->rad_head, &envelope_ignore, sh_cfg);
- }
- else {
- drw_shgroup_bone_point(eBone->disp_mat, col_solid_root, col_hint_root, col_wire_root, sh_cfg);
- }
- }
- else {
- Bone *bone = pchan->bone;
- if (is_envelope_draw) {
- drw_shgroup_bone_envelope(
- pchan->disp_mat, col_solid_root, col_hint_root, col_wire_root,
- &bone->rad_head, &envelope_ignore, sh_cfg);
- }
- else {
- drw_shgroup_bone_point(pchan->disp_mat, col_solid_root, col_hint_root, col_wire_root, sh_cfg);
- }
- }
- }
-
- /* Draw tip point */
- if (select_id != -1) {
- DRW_select_load_id(select_id | BONESEL_TIP);
- }
-
- if (is_envelope_draw) {
- const float *rad_tail = eBone ? &eBone->rad_tail : &pchan->bone->rad_tail;
- drw_shgroup_bone_envelope(
- BONE_VAR(eBone, pchan, disp_mat), col_solid_tail, col_hint_tail, col_wire_tail,
- &envelope_ignore, rad_tail, sh_cfg);
- }
- else {
- drw_shgroup_bone_point(BONE_VAR(eBone, pchan, disp_tail_mat), col_solid_tail, col_hint_tail, col_wire_tail, sh_cfg);
- }
-
- if (select_id != -1) {
- DRW_select_load_id(-1);
- }
+ float col_solid_root[4], col_solid_tail[4], col_wire_root[4], col_wire_tail[4];
+ float col_hint_root[4], col_hint_tail[4];
+
+ copy_v4_v4(col_solid_root, g_theme.bone_solid_color);
+ copy_v4_v4(col_solid_tail, g_theme.bone_solid_color);
+ copy_v4_v4(col_wire_root, (g_theme.const_color) ? g_theme.const_color : g_theme.vertex_color);
+ copy_v4_v4(col_wire_tail, (g_theme.const_color) ? g_theme.const_color : g_theme.vertex_color);
+
+ const bool is_envelope_draw = (arm->drawtype == ARM_ENVELOPE);
+ static const float envelope_ignore = -1.0f;
+
+ col_wire_tail[3] = col_wire_root[3] = get_bone_wire_thickness(boneflag);
+
+ /* Edit bone points can be selected */
+ if (eBone) {
+ if (eBone->flag & BONE_ROOTSEL) {
+ copy_v3_v3(col_wire_root, g_theme.vertex_select_color);
+ }
+ if (eBone->flag & BONE_TIPSEL) {
+ copy_v3_v3(col_wire_tail, g_theme.vertex_select_color);
+ }
+ }
+ else if (arm->flag & ARM_POSEMODE) {
+ const float *solid_color = get_bone_solid_color(eBone, pchan, arm, boneflag, constflag);
+ const float *wire_color = get_bone_wire_color(eBone, pchan, arm, boneflag, constflag);
+ copy_v4_v4(col_wire_tail, wire_color);
+ copy_v4_v4(col_wire_root, wire_color);
+ copy_v4_v4(col_solid_tail, solid_color);
+ copy_v4_v4(col_solid_root, solid_color);
+ }
+
+ bone_hint_color_shade(col_hint_root, (g_theme.const_color) ? col_solid_root : col_wire_root);
+ bone_hint_color_shade(col_hint_tail, (g_theme.const_color) ? col_solid_tail : col_wire_tail);
+
+ /* Draw root point if we are not connected to our parent */
+ if ((BONE_FLAG(eBone, pchan) & BONE_CONNECTED) == 0) {
+ if (select_id != -1) {
+ DRW_select_load_id(select_id | BONESEL_ROOT);
+ }
+
+ if (eBone) {
+ if (is_envelope_draw) {
+ drw_shgroup_bone_envelope(eBone->disp_mat,
+ col_solid_root,
+ col_hint_root,
+ col_wire_root,
+ &eBone->rad_head,
+ &envelope_ignore,
+ sh_cfg);
+ }
+ else {
+ drw_shgroup_bone_point(
+ eBone->disp_mat, col_solid_root, col_hint_root, col_wire_root, sh_cfg);
+ }
+ }
+ else {
+ Bone *bone = pchan->bone;
+ if (is_envelope_draw) {
+ drw_shgroup_bone_envelope(pchan->disp_mat,
+ col_solid_root,
+ col_hint_root,
+ col_wire_root,
+ &bone->rad_head,
+ &envelope_ignore,
+ sh_cfg);
+ }
+ else {
+ drw_shgroup_bone_point(
+ pchan->disp_mat, col_solid_root, col_hint_root, col_wire_root, sh_cfg);
+ }
+ }
+ }
+
+ /* Draw tip point */
+ if (select_id != -1) {
+ DRW_select_load_id(select_id | BONESEL_TIP);
+ }
+
+ if (is_envelope_draw) {
+ const float *rad_tail = eBone ? &eBone->rad_tail : &pchan->bone->rad_tail;
+ drw_shgroup_bone_envelope(BONE_VAR(eBone, pchan, disp_mat),
+ col_solid_tail,
+ col_hint_tail,
+ col_wire_tail,
+ &envelope_ignore,
+ rad_tail,
+ sh_cfg);
+ }
+ else {
+ drw_shgroup_bone_point(BONE_VAR(eBone, pchan, disp_tail_mat),
+ col_solid_tail,
+ col_hint_tail,
+ col_wire_tail,
+ sh_cfg);
+ }
+
+ if (select_id != -1) {
+ DRW_select_load_id(-1);
+ }
}
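/* Editor's note (illustrative sketch, not part of this commit): selection tagging
 * follows one pattern throughout this file - OR a sub-element flag into the
 * bone's select id before emitting geometry, then reset when done:
 *
 *   if (select_id != -1) {
 *     DRW_select_load_id(select_id | BONESEL_ROOT); // tag the root point
 *   }
 *   // ... emit draw calls ...
 *   if (select_id != -1) {
 *     DRW_select_load_id(-1); // stop tagging
 *   }
 *
 * A select_id of -1 means "not in a selection pass", so tagging is skipped. */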
/** \} */
-
/* -------------------------------------------------------------------- */
/** \name Draw Bones
* \{ */
-static void draw_bone_custom_shape(
- EditBone *eBone, bPoseChannel *pchan, bArmature *arm,
- const int boneflag, const short constflag,
- const eGPUShaderConfig sh_cfg, const int select_id)
+static void draw_bone_custom_shape(EditBone *eBone,
+ bPoseChannel *pchan,
+ bArmature *arm,
+ const int boneflag,
+ const short constflag,
+ const eGPUShaderConfig sh_cfg,
+ const int select_id)
{
- const float *col_solid = get_bone_solid_color(eBone, pchan, arm, boneflag, constflag);
- const float *col_wire = get_bone_wire_color(eBone, pchan, arm, boneflag, constflag);
- const float *col_hint = get_bone_hint_color(eBone, pchan, arm, boneflag, constflag);
- const float (*disp_mat)[4] = pchan->disp_mat;
-
- if (select_id != -1) {
- DRW_select_load_id(select_id | BONESEL_BONE);
- }
-
- if ((boneflag & BONE_DRAWWIRE) == 0) {
- drw_shgroup_bone_custom_solid(disp_mat, col_solid, col_hint, col_wire, sh_cfg, pchan->custom);
- }
- else {
- drw_shgroup_bone_custom_wire(disp_mat, col_wire, pchan->custom);
- }
-
- if (select_id != -1) {
- DRW_select_load_id(-1);
- }
+ const float *col_solid = get_bone_solid_color(eBone, pchan, arm, boneflag, constflag);
+ const float *col_wire = get_bone_wire_color(eBone, pchan, arm, boneflag, constflag);
+ const float *col_hint = get_bone_hint_color(eBone, pchan, arm, boneflag, constflag);
+ const float(*disp_mat)[4] = pchan->disp_mat;
+
+ if (select_id != -1) {
+ DRW_select_load_id(select_id | BONESEL_BONE);
+ }
+
+ if ((boneflag & BONE_DRAWWIRE) == 0) {
+ drw_shgroup_bone_custom_solid(disp_mat, col_solid, col_hint, col_wire, sh_cfg, pchan->custom);
+ }
+ else {
+ drw_shgroup_bone_custom_wire(disp_mat, col_wire, pchan->custom);
+ }
+
+ if (select_id != -1) {
+ DRW_select_load_id(-1);
+ }
}
-static void draw_bone_envelope(
- EditBone *eBone, bPoseChannel *pchan, bArmature *arm,
- const int boneflag, const short constflag,
- const eGPUShaderConfig sh_cfg, const int select_id)
+static void draw_bone_envelope(EditBone *eBone,
+ bPoseChannel *pchan,
+ bArmature *arm,
+ const int boneflag,
+ const short constflag,
+ const eGPUShaderConfig sh_cfg,
+ const int select_id)
{
- const float *col_solid = get_bone_solid_with_consts_color(eBone, pchan, arm, boneflag, constflag);
- const float *col_wire = get_bone_wire_color(eBone, pchan, arm, boneflag, constflag);
- const float *col_hint = get_bone_hint_color(eBone, pchan, arm, boneflag, constflag);
-
- float *rad_head, *rad_tail, *distance;
- if (eBone) {
- rad_tail = &eBone->rad_tail;
- distance = &eBone->dist;
- rad_head = (eBone->parent && (boneflag & BONE_CONNECTED)) ? &eBone->parent->rad_tail : &eBone->rad_head;
- }
- else {
- rad_tail = &pchan->bone->rad_tail;
- distance = &pchan->bone->dist;
- rad_head = (pchan->parent && (boneflag & BONE_CONNECTED)) ? &pchan->parent->bone->rad_tail : &pchan->bone->rad_head;
- }
-
- if ((select_id == -1) &&
- (boneflag & BONE_NO_DEFORM) == 0 &&
- ((boneflag & BONE_SELECTED) || (eBone && (boneflag & (BONE_ROOTSEL | BONE_TIPSEL)))))
- {
- drw_shgroup_bone_envelope_distance(BONE_VAR(eBone, pchan, disp_mat), rad_head, rad_tail, distance, sh_cfg);
- }
-
- if (select_id != -1) {
- DRW_select_load_id(select_id | BONESEL_BONE);
- }
-
- drw_shgroup_bone_envelope(
- BONE_VAR(eBone, pchan, disp_mat), col_solid, col_hint, col_wire,
- rad_head, rad_tail, sh_cfg);
-
- if (select_id != -1) {
- DRW_select_load_id(-1);
- }
-
- draw_points(eBone, pchan, arm, boneflag, constflag, sh_cfg, select_id);
+ const float *col_solid = get_bone_solid_with_consts_color(
+ eBone, pchan, arm, boneflag, constflag);
+ const float *col_wire = get_bone_wire_color(eBone, pchan, arm, boneflag, constflag);
+ const float *col_hint = get_bone_hint_color(eBone, pchan, arm, boneflag, constflag);
+
+ float *rad_head, *rad_tail, *distance;
+ if (eBone) {
+ rad_tail = &eBone->rad_tail;
+ distance = &eBone->dist;
+ rad_head = (eBone->parent && (boneflag & BONE_CONNECTED)) ? &eBone->parent->rad_tail :
+ &eBone->rad_head;
+ }
+ else {
+ rad_tail = &pchan->bone->rad_tail;
+ distance = &pchan->bone->dist;
+ rad_head = (pchan->parent && (boneflag & BONE_CONNECTED)) ? &pchan->parent->bone->rad_tail :
+ &pchan->bone->rad_head;
+ }
+
+ if ((select_id == -1) && (boneflag & BONE_NO_DEFORM) == 0 &&
+ ((boneflag & BONE_SELECTED) || (eBone && (boneflag & (BONE_ROOTSEL | BONE_TIPSEL))))) {
+ drw_shgroup_bone_envelope_distance(
+ BONE_VAR(eBone, pchan, disp_mat), rad_head, rad_tail, distance, sh_cfg);
+ }
+
+ if (select_id != -1) {
+ DRW_select_load_id(select_id | BONESEL_BONE);
+ }
+
+ drw_shgroup_bone_envelope(
+ BONE_VAR(eBone, pchan, disp_mat), col_solid, col_hint, col_wire, rad_head, rad_tail, sh_cfg);
+
+ if (select_id != -1) {
+ DRW_select_load_id(-1);
+ }
+
+ draw_points(eBone, pchan, arm, boneflag, constflag, sh_cfg, select_id);
}
-static void draw_bone_line(
- EditBone *eBone, bPoseChannel *pchan, bArmature *arm,
- const int boneflag, const short constflag,
- const eGPUShaderConfig sh_cfg, const int select_id)
+static void draw_bone_line(EditBone *eBone,
+ bPoseChannel *pchan,
+ bArmature *arm,
+ const int boneflag,
+ const short constflag,
+ const eGPUShaderConfig sh_cfg,
+ const int select_id)
{
- const float *col_bone = get_bone_solid_with_consts_color(eBone, pchan, arm, boneflag, constflag);
- const float *col_wire = get_bone_wire_color(eBone, pchan, arm, boneflag, constflag);
- const float no_display[4] = {0.0f, 0.0f, 0.0f, 0.0f};
- const float *col_head = no_display;
- const float *col_tail = col_bone;
-
- if (eBone) {
- if (eBone->flag & BONE_TIPSEL) {
- col_tail = g_theme.vertex_select_color;
- }
- if (boneflag & BONE_SELECTED) {
- col_bone = g_theme.edge_select_color;
- }
- col_wire = g_theme.wire_color;
- }
-
- /* Draw root point if we are not connected to our parent */
- if ((BONE_FLAG(eBone, pchan) & BONE_CONNECTED) == 0) {
- if (eBone) {
- col_head = (eBone->flag & BONE_ROOTSEL) ? g_theme.vertex_select_color : col_bone;
- }
- else if (pchan) {
- col_head = col_bone;
- }
- }
-
- if (g_theme.const_color != NULL) {
- col_wire = no_display; /* actually shrink the display. */
- col_bone = col_head = col_tail = g_theme.const_color;
- }
-
- if (select_id == -1) {
- /* Not in selection mode, draw everything at once. */
- drw_shgroup_bone_stick(BONE_VAR(eBone, pchan, disp_mat), col_wire, col_bone, col_head, col_tail, sh_cfg);
- }
- else {
- /* In selection mode, draw bone, root and tip separately. */
- DRW_select_load_id(select_id | BONESEL_BONE);
- drw_shgroup_bone_stick(BONE_VAR(eBone, pchan, disp_mat), col_wire, col_bone, no_display, no_display, sh_cfg);
-
- if (col_head[3] > 0.0f) {
- DRW_select_load_id(select_id | BONESEL_ROOT);
- drw_shgroup_bone_stick(BONE_VAR(eBone, pchan, disp_mat), col_wire, no_display, col_head, no_display, sh_cfg);
- }
-
- DRW_select_load_id(select_id | BONESEL_TIP);
- drw_shgroup_bone_stick(BONE_VAR(eBone, pchan, disp_mat), col_wire, no_display, no_display, col_tail, sh_cfg);
-
- DRW_select_load_id(-1);
- }
+ const float *col_bone = get_bone_solid_with_consts_color(eBone, pchan, arm, boneflag, constflag);
+ const float *col_wire = get_bone_wire_color(eBone, pchan, arm, boneflag, constflag);
+ const float no_display[4] = {0.0f, 0.0f, 0.0f, 0.0f};
+ const float *col_head = no_display;
+ const float *col_tail = col_bone;
+
+ if (eBone) {
+ if (eBone->flag & BONE_TIPSEL) {
+ col_tail = g_theme.vertex_select_color;
+ }
+ if (boneflag & BONE_SELECTED) {
+ col_bone = g_theme.edge_select_color;
+ }
+ col_wire = g_theme.wire_color;
+ }
+
+ /* Draw root point if we are not connected to our parent */
+ if ((BONE_FLAG(eBone, pchan) & BONE_CONNECTED) == 0) {
+ if (eBone) {
+ col_head = (eBone->flag & BONE_ROOTSEL) ? g_theme.vertex_select_color : col_bone;
+ }
+ else if (pchan) {
+ col_head = col_bone;
+ }
+ }
+
+ if (g_theme.const_color != NULL) {
+ col_wire = no_display; /* actually shrink the display. */
+ col_bone = col_head = col_tail = g_theme.const_color;
+ }
+
+ if (select_id == -1) {
+ /* Not in selection mode, draw everything at once. */
+ drw_shgroup_bone_stick(
+ BONE_VAR(eBone, pchan, disp_mat), col_wire, col_bone, col_head, col_tail, sh_cfg);
+ }
+ else {
+ /* In selection mode, draw bone, root and tip separately. */
+ DRW_select_load_id(select_id | BONESEL_BONE);
+ drw_shgroup_bone_stick(
+ BONE_VAR(eBone, pchan, disp_mat), col_wire, col_bone, no_display, no_display, sh_cfg);
+
+ if (col_head[3] > 0.0f) {
+ DRW_select_load_id(select_id | BONESEL_ROOT);
+ drw_shgroup_bone_stick(
+ BONE_VAR(eBone, pchan, disp_mat), col_wire, no_display, col_head, no_display, sh_cfg);
+ }
+
+ DRW_select_load_id(select_id | BONESEL_TIP);
+ drw_shgroup_bone_stick(
+ BONE_VAR(eBone, pchan, disp_mat), col_wire, no_display, no_display, col_tail, sh_cfg);
+
+ DRW_select_load_id(-1);
+ }
}
-static void draw_bone_wire(
- EditBone *eBone, bPoseChannel *pchan, bArmature *arm,
- const int boneflag, const short constflag,
- const eGPUShaderConfig sh_cfg, const int select_id)
+static void draw_bone_wire(EditBone *eBone,
+ bPoseChannel *pchan,
+ bArmature *arm,
+ const int boneflag,
+ const short constflag,
+ const eGPUShaderConfig sh_cfg,
+ const int select_id)
{
- const float *col_wire = get_bone_wire_color(eBone, pchan, arm, boneflag, constflag);
-
- if (select_id != -1) {
- DRW_select_load_id(select_id | BONESEL_BONE);
- }
-
- if (pchan) {
- Mat4 *bbones_mat = (Mat4 *)pchan->draw_data->bbone_matrix;
- BLI_assert(bbones_mat != NULL);
-
- for (int i = pchan->bone->segments; i--; bbones_mat++) {
- drw_shgroup_bone_wire(bbones_mat->mat, col_wire, sh_cfg);
- }
- }
- else if (eBone) {
- for (int i = 0; i < eBone->segments; i++) {
- drw_shgroup_bone_wire(eBone->disp_bbone_mat[i], col_wire, sh_cfg);
- }
- }
-
- if (select_id != -1) {
- DRW_select_load_id(-1);
- }
-
- if (eBone) {
- draw_points(eBone, pchan, arm, boneflag, constflag, sh_cfg, select_id);
- }
+ const float *col_wire = get_bone_wire_color(eBone, pchan, arm, boneflag, constflag);
+
+ if (select_id != -1) {
+ DRW_select_load_id(select_id | BONESEL_BONE);
+ }
+
+ if (pchan) {
+ Mat4 *bbones_mat = (Mat4 *)pchan->draw_data->bbone_matrix;
+ BLI_assert(bbones_mat != NULL);
+
+ for (int i = pchan->bone->segments; i--; bbones_mat++) {
+ drw_shgroup_bone_wire(bbones_mat->mat, col_wire, sh_cfg);
+ }
+ }
+ else if (eBone) {
+ for (int i = 0; i < eBone->segments; i++) {
+ drw_shgroup_bone_wire(eBone->disp_bbone_mat[i], col_wire, sh_cfg);
+ }
+ }
+
+ if (select_id != -1) {
+ DRW_select_load_id(-1);
+ }
+
+ if (eBone) {
+ draw_points(eBone, pchan, arm, boneflag, constflag, sh_cfg, select_id);
+ }
}
-static void draw_bone_box(
- EditBone *eBone, bPoseChannel *pchan, bArmature *arm,
- const int boneflag, const short constflag,
- const eGPUShaderConfig sh_cfg, const int select_id)
+static void draw_bone_box(EditBone *eBone,
+ bPoseChannel *pchan,
+ bArmature *arm,
+ const int boneflag,
+ const short constflag,
+ const eGPUShaderConfig sh_cfg,
+ const int select_id)
{
- const float *col_solid = get_bone_solid_with_consts_color(eBone, pchan, arm, boneflag, constflag);
- const float *col_wire = get_bone_wire_color(eBone, pchan, arm, boneflag, constflag);
- const float *col_hint = get_bone_hint_color(eBone, pchan, arm, boneflag, constflag);
-
- if (select_id != -1) {
- DRW_select_load_id(select_id | BONESEL_BONE);
- }
-
- if (pchan) {
- Mat4 *bbones_mat = (Mat4 *)pchan->draw_data->bbone_matrix;
- BLI_assert(bbones_mat != NULL);
-
- for (int i = pchan->bone->segments; i--; bbones_mat++) {
- drw_shgroup_bone_box(bbones_mat->mat, col_solid, col_hint, col_wire, sh_cfg);
- }
- }
- else if (eBone) {
- for (int i = 0; i < eBone->segments; i++) {
- drw_shgroup_bone_box(eBone->disp_bbone_mat[i], col_solid, col_hint, col_wire, sh_cfg);
- }
- }
-
- if (select_id != -1) {
- DRW_select_load_id(-1);
- }
-
- if (eBone) {
- draw_points(eBone, pchan, arm, boneflag, constflag, sh_cfg, select_id);
- }
+ const float *col_solid = get_bone_solid_with_consts_color(
+ eBone, pchan, arm, boneflag, constflag);
+ const float *col_wire = get_bone_wire_color(eBone, pchan, arm, boneflag, constflag);
+ const float *col_hint = get_bone_hint_color(eBone, pchan, arm, boneflag, constflag);
+
+ if (select_id != -1) {
+ DRW_select_load_id(select_id | BONESEL_BONE);
+ }
+
+ if (pchan) {
+ Mat4 *bbones_mat = (Mat4 *)pchan->draw_data->bbone_matrix;
+ BLI_assert(bbones_mat != NULL);
+
+ for (int i = pchan->bone->segments; i--; bbones_mat++) {
+ drw_shgroup_bone_box(bbones_mat->mat, col_solid, col_hint, col_wire, sh_cfg);
+ }
+ }
+ else if (eBone) {
+ for (int i = 0; i < eBone->segments; i++) {
+ drw_shgroup_bone_box(eBone->disp_bbone_mat[i], col_solid, col_hint, col_wire, sh_cfg);
+ }
+ }
+
+ if (select_id != -1) {
+ DRW_select_load_id(-1);
+ }
+
+ if (eBone) {
+ draw_points(eBone, pchan, arm, boneflag, constflag, sh_cfg, select_id);
+ }
}
-static void draw_bone_octahedral(
- EditBone *eBone, bPoseChannel *pchan, bArmature *arm,
- const int boneflag, const short constflag,
- const eGPUShaderConfig sh_cfg, const int select_id)
+static void draw_bone_octahedral(EditBone *eBone,
+ bPoseChannel *pchan,
+ bArmature *arm,
+ const int boneflag,
+ const short constflag,
+ const eGPUShaderConfig sh_cfg,
+ const int select_id)
{
- const float *col_solid = get_bone_solid_with_consts_color(eBone, pchan, arm, boneflag, constflag);
- const float *col_wire = get_bone_wire_color(eBone, pchan, arm, boneflag, constflag);
- const float *col_hint = get_bone_hint_color(eBone, pchan, arm, boneflag, constflag);
+ const float *col_solid = get_bone_solid_with_consts_color(
+ eBone, pchan, arm, boneflag, constflag);
+ const float *col_wire = get_bone_wire_color(eBone, pchan, arm, boneflag, constflag);
+ const float *col_hint = get_bone_hint_color(eBone, pchan, arm, boneflag, constflag);
- if (select_id != -1) {
- DRW_select_load_id(select_id | BONESEL_BONE);
- }
+ if (select_id != -1) {
+ DRW_select_load_id(select_id | BONESEL_BONE);
+ }
- drw_shgroup_bone_octahedral(BONE_VAR(eBone, pchan, disp_mat), col_solid, col_hint, col_wire, sh_cfg);
+ drw_shgroup_bone_octahedral(
+ BONE_VAR(eBone, pchan, disp_mat), col_solid, col_hint, col_wire, sh_cfg);
- if (select_id != -1) {
- DRW_select_load_id(-1);
- }
+ if (select_id != -1) {
+ DRW_select_load_id(-1);
+ }
- draw_points(eBone, pchan, arm, boneflag, constflag, sh_cfg, select_id);
+ draw_points(eBone, pchan, arm, boneflag, constflag, sh_cfg, select_id);
}
/** \} */
-
/* -------------------------------------------------------------------- */
/** \name Draw Degrees of Freedom
* \{ */
static void draw_bone_dofs(bPoseChannel *pchan)
{
- float final_bonemat[4][4], posetrans[4][4], mat[4][4];
- float amin[2], amax[2], xminmax[2], zminmax[2];
- float col_sphere[4] = {0.25f, 0.25f, 0.25f, 0.25f};
- float col_lines[4] = {0.0f, 0.0f, 0.0f, 1.0f};
- float col_xaxis[4] = {1.0f, 0.0f, 0.0f, 1.0f};
- float col_zaxis[4] = {0.0f, 0.0f, 1.0f, 1.0f};
-
- if (g_data.passes.bone_envelope == NULL) {
- return;
- }
-
- if (g_data.bone_dof_sphere == NULL) {
- g_data.bone_dof_lines = shgroup_instance_bone_dof(g_data.passes.bone_wire, DRW_cache_bone_dof_lines_get());
- g_data.bone_dof_sphere = shgroup_instance_bone_dof(g_data.passes.bone_envelope, DRW_cache_bone_dof_sphere_get());
- DRW_shgroup_state_enable(g_data.bone_dof_sphere, DRW_STATE_BLEND);
- DRW_shgroup_state_disable(g_data.bone_dof_sphere, DRW_STATE_CULL_FRONT);
- }
-
- /* *0.5f here comes from M_PI/360.0f when rotations were still in degrees */
- xminmax[0] = sinf(pchan->limitmin[0] * 0.5f);
- xminmax[1] = sinf(pchan->limitmax[0] * 0.5f);
- zminmax[0] = sinf(pchan->limitmin[2] * 0.5f);
- zminmax[1] = sinf(pchan->limitmax[2] * 0.5f);
-
- unit_m4(posetrans);
- translate_m4(posetrans, pchan->pose_mat[3][0], pchan->pose_mat[3][1], pchan->pose_mat[3][2]);
- /* in parent-bone pose space... */
- if (pchan->parent) {
- copy_m4_m4(mat, pchan->parent->pose_mat);
- mat[3][0] = mat[3][1] = mat[3][2] = 0.0f;
- mul_m4_m4m4(posetrans, posetrans, mat);
- }
- /* ... but own restspace */
- mul_m4_m4m3(posetrans, posetrans, pchan->bone->bone_mat);
-
- float scale = pchan->bone->length * pchan->size[1];
- scale_m4_fl(mat, scale);
- mat[1][1] = -mat[1][1];
- mul_m4_m4m4(posetrans, posetrans, mat);
-
- /* into world space. */
- mul_m4_m4m4(final_bonemat, g_data.ob->obmat, posetrans);
-
- if ((pchan->ikflag & BONE_IK_XLIMIT) &&
- (pchan->ikflag & BONE_IK_ZLIMIT))
- {
- amin[0] = xminmax[0];
- amax[0] = xminmax[1];
- amin[1] = zminmax[0];
- amax[1] = zminmax[1];
- DRW_shgroup_call_dynamic_add(g_data.bone_dof_sphere, final_bonemat, col_sphere, amin, amax);
- DRW_shgroup_call_dynamic_add(g_data.bone_dof_lines, final_bonemat, col_lines, amin, amax);
- }
- if (pchan->ikflag & BONE_IK_XLIMIT) {
- amin[0] = xminmax[0];
- amax[0] = xminmax[1];
- amin[1] = amax[1] = 0.0f;
- DRW_shgroup_call_dynamic_add(g_data.bone_dof_lines, final_bonemat, col_xaxis, amin, amax);
- }
- if (pchan->ikflag & BONE_IK_ZLIMIT) {
- amin[1] = zminmax[0];
- amax[1] = zminmax[1];
- amin[0] = amax[0] = 0.0f;
- DRW_shgroup_call_dynamic_add(g_data.bone_dof_lines, final_bonemat, col_zaxis, amin, amax);
- }
+ float final_bonemat[4][4], posetrans[4][4], mat[4][4];
+ float amin[2], amax[2], xminmax[2], zminmax[2];
+ float col_sphere[4] = {0.25f, 0.25f, 0.25f, 0.25f};
+ float col_lines[4] = {0.0f, 0.0f, 0.0f, 1.0f};
+ float col_xaxis[4] = {1.0f, 0.0f, 0.0f, 1.0f};
+ float col_zaxis[4] = {0.0f, 0.0f, 1.0f, 1.0f};
+
+ if (g_data.passes.bone_envelope == NULL) {
+ return;
+ }
+
+ if (g_data.bone_dof_sphere == NULL) {
+ g_data.bone_dof_lines = shgroup_instance_bone_dof(g_data.passes.bone_wire,
+ DRW_cache_bone_dof_lines_get());
+ g_data.bone_dof_sphere = shgroup_instance_bone_dof(g_data.passes.bone_envelope,
+ DRW_cache_bone_dof_sphere_get());
+ DRW_shgroup_state_enable(g_data.bone_dof_sphere, DRW_STATE_BLEND);
+ DRW_shgroup_state_disable(g_data.bone_dof_sphere, DRW_STATE_CULL_FRONT);
+ }
+
+ /* *0.5f here comes from M_PI/360.0f when rotations were still in degrees */
+ xminmax[0] = sinf(pchan->limitmin[0] * 0.5f);
+ xminmax[1] = sinf(pchan->limitmax[0] * 0.5f);
+ zminmax[0] = sinf(pchan->limitmin[2] * 0.5f);
+ zminmax[1] = sinf(pchan->limitmax[2] * 0.5f);
+
+ unit_m4(posetrans);
+ translate_m4(posetrans, pchan->pose_mat[3][0], pchan->pose_mat[3][1], pchan->pose_mat[3][2]);
+ /* in parent-bone pose space... */
+ if (pchan->parent) {
+ copy_m4_m4(mat, pchan->parent->pose_mat);
+ mat[3][0] = mat[3][1] = mat[3][2] = 0.0f;
+ mul_m4_m4m4(posetrans, posetrans, mat);
+ }
+ /* ... but own restspace */
+ mul_m4_m4m3(posetrans, posetrans, pchan->bone->bone_mat);
+
+ float scale = pchan->bone->length * pchan->size[1];
+ scale_m4_fl(mat, scale);
+ mat[1][1] = -mat[1][1];
+ mul_m4_m4m4(posetrans, posetrans, mat);
+
+ /* into world space. */
+ mul_m4_m4m4(final_bonemat, g_data.ob->obmat, posetrans);
+
+ if ((pchan->ikflag & BONE_IK_XLIMIT) && (pchan->ikflag & BONE_IK_ZLIMIT)) {
+ amin[0] = xminmax[0];
+ amax[0] = xminmax[1];
+ amin[1] = zminmax[0];
+ amax[1] = zminmax[1];
+ DRW_shgroup_call_dynamic_add(g_data.bone_dof_sphere, final_bonemat, col_sphere, amin, amax);
+ DRW_shgroup_call_dynamic_add(g_data.bone_dof_lines, final_bonemat, col_lines, amin, amax);
+ }
+ if (pchan->ikflag & BONE_IK_XLIMIT) {
+ amin[0] = xminmax[0];
+ amax[0] = xminmax[1];
+ amin[1] = amax[1] = 0.0f;
+ DRW_shgroup_call_dynamic_add(g_data.bone_dof_lines, final_bonemat, col_xaxis, amin, amax);
+ }
+ if (pchan->ikflag & BONE_IK_ZLIMIT) {
+ amin[1] = zminmax[0];
+ amax[1] = zminmax[1];
+ amin[0] = amax[0] = 0.0f;
+ DRW_shgroup_call_dynamic_add(g_data.bone_dof_lines, final_bonemat, col_zaxis, amin, amax);
+ }
}
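/* Editor's note (illustrative, not part of this commit): on the bare 0.5f above -
 * IK limits are stored in radians, so the half-angle sine is sinf(limit * 0.5f).
 * The old degree-based code computed the same value as
 *
 *   sinf(limit_deg * (float)(M_PI / 360.0))
 *
 * and M_PI / 360.0 == (M_PI / 180.0) * 0.5, i.e. degrees-to-radians times one
 * half, which is where the factor comes from. */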
/** \} */
-
/* -------------------------------------------------------------------- */
/** \name Draw Relationships
* \{ */
-static void pchan_draw_ik_lines(
- bPoseChannel *pchan, const bool only_temp, const int constflag,
- const eGPUShaderConfig sh_cfg)
+static void pchan_draw_ik_lines(bPoseChannel *pchan,
+ const bool only_temp,
+ const int constflag,
+ const eGPUShaderConfig sh_cfg)
{
- bConstraint *con;
- bPoseChannel *parchan;
- float *line_start = NULL, *line_end = NULL;
-
- for (con = pchan->constraints.first; con; con = con->next) {
- if (con->enforce == 0.0f) {
- continue;
- }
-
- switch (con->type) {
- case CONSTRAINT_TYPE_KINEMATIC:
- {
- bKinematicConstraint *data = (bKinematicConstraint *)con->data;
- int segcount = 0;
-
- /* if only_temp, only draw if it is a temporary ik-chain */
- if (only_temp && !(data->flag & CONSTRAINT_IK_TEMP)) {
- continue;
- }
-
- /* exclude tip from chain? */
- parchan = ((data->flag & CONSTRAINT_IK_TIP) == 0) ? pchan->parent : pchan;
- line_start = parchan->pose_tail;
-
- /* Find the chain's root */
- while (parchan->parent) {
- segcount++;
- if (segcount == data->rootbone || segcount > 255) {
- break; /* 255 is weak */
- }
- parchan = parchan->parent;
- }
-
- if (parchan) {
- line_end = parchan->pose_head;
-
- if (constflag & PCHAN_HAS_TARGET) {
- drw_shgroup_bone_ik_lines(line_start, line_end, sh_cfg);
- }
- else {
- drw_shgroup_bone_ik_no_target_lines(line_start, line_end, sh_cfg);
- }
- }
- break;
- }
- case CONSTRAINT_TYPE_SPLINEIK:
- {
- bSplineIKConstraint *data = (bSplineIKConstraint *)con->data;
- int segcount = 0;
-
- /* don't draw if only_temp, as Spline IK chains cannot be temporary */
- if (only_temp) {
- continue;
- }
-
- parchan = pchan;
- line_start = parchan->pose_tail;
-
- /* Find the chain's root */
- while (parchan->parent) {
- segcount++;
- /* FIXME: revise the breaking conditions */
- if (segcount == data->chainlen || segcount > 255) {
- break; /* 255 is weak */
- }
- parchan = parchan->parent;
- }
- /* Only draw line in case our chain is more than one bone long! */
- if (parchan != pchan) { /* XXX revise the breaking conditions to only stop at the tail? */
- line_end = parchan->pose_head;
- drw_shgroup_bone_ik_spline_lines(line_start, line_end, sh_cfg);
- }
- break;
- }
- }
- }
+ bConstraint *con;
+ bPoseChannel *parchan;
+ float *line_start = NULL, *line_end = NULL;
+
+ for (con = pchan->constraints.first; con; con = con->next) {
+ if (con->enforce == 0.0f) {
+ continue;
+ }
+
+ switch (con->type) {
+ case CONSTRAINT_TYPE_KINEMATIC: {
+ bKinematicConstraint *data = (bKinematicConstraint *)con->data;
+ int segcount = 0;
+
+ /* if only_temp, only draw if it is a temporary ik-chain */
+ if (only_temp && !(data->flag & CONSTRAINT_IK_TEMP)) {
+ continue;
+ }
+
+ /* exclude tip from chain? */
+ parchan = ((data->flag & CONSTRAINT_IK_TIP) == 0) ? pchan->parent : pchan;
+ line_start = parchan->pose_tail;
+
+ /* Find the chain's root */
+ while (parchan->parent) {
+ segcount++;
+ if (segcount == data->rootbone || segcount > 255) {
+ break; /* 255 is weak */
+ }
+ parchan = parchan->parent;
+ }
+
+ if (parchan) {
+ line_end = parchan->pose_head;
+
+ if (constflag & PCHAN_HAS_TARGET) {
+ drw_shgroup_bone_ik_lines(line_start, line_end, sh_cfg);
+ }
+ else {
+ drw_shgroup_bone_ik_no_target_lines(line_start, line_end, sh_cfg);
+ }
+ }
+ break;
+ }
+ case CONSTRAINT_TYPE_SPLINEIK: {
+ bSplineIKConstraint *data = (bSplineIKConstraint *)con->data;
+ int segcount = 0;
+
+ /* don't draw if only_temp, as Spline IK chains cannot be temporary */
+ if (only_temp) {
+ continue;
+ }
+
+ parchan = pchan;
+ line_start = parchan->pose_tail;
+
+ /* Find the chain's root */
+ while (parchan->parent) {
+ segcount++;
+ /* FIXME: revise the breaking conditions */
+ if (segcount == data->chainlen || segcount > 255) {
+ break; /* 255 is weak */
+ }
+ parchan = parchan->parent;
+ }
+        /* Only draw the line if the chain is more than one bone long! */
+ if (parchan != pchan) { /* XXX revise the breaking conditions to only stop at the tail? */
+ line_end = parchan->pose_head;
+ drw_shgroup_bone_ik_spline_lines(line_start, line_end, sh_cfg);
+ }
+ break;
+ }
+ }
+ }
}
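/* Editor's note (illustrative sketch, not part of this commit): both constraint
 * cases above use the same walk to find the IK chain's root:
 *
 *   bPoseChannel *parchan = pchan;
 *   int segcount = 0;
 *   while (parchan->parent) {
 *     segcount++;
 *     if (segcount == chain_length || segcount > 255) {
 *       break; // the 255 cap only guards against malformed chains
 *     }
 *     parchan = parchan->parent;
 *   }
 *
 * A chain_length of 0 walks all the way to the armature root; 'chain_length' is
 * a hypothetical name standing in for data->rootbone or data->chainlen. */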
-static void draw_bone_relations(
- EditBone *ebone, bPoseChannel *pchan, bArmature *arm,
- const int boneflag, const short constflag, const bool do_relations,
- const eGPUShaderConfig sh_cfg)
+static void draw_bone_relations(EditBone *ebone,
+ bPoseChannel *pchan,
+ bArmature *arm,
+ const int boneflag,
+ const short constflag,
+ const bool do_relations,
+ const eGPUShaderConfig sh_cfg)
{
- if (g_data.passes.relationship_lines) {
- if (ebone && ebone->parent) {
- if (do_relations) {
- /* Always draw for unconnected bones, regardless of selection,
- * since riggers will want to know about the links between bones
- */
- if ((boneflag & BONE_CONNECTED) == 0) {
- drw_shgroup_bone_relationship_lines(ebone->head, ebone->parent->tail, sh_cfg);
- }
- }
- }
- else if (pchan && pchan->parent) {
- if (do_relations) {
- /* Only draw if bone or its parent is selected - reduces viewport complexity with complex rigs */
- if ((boneflag & BONE_SELECTED) ||
- (pchan->parent->bone && (pchan->parent->bone->flag & BONE_SELECTED)))
- {
- if ((boneflag & BONE_CONNECTED) == 0) {
- drw_shgroup_bone_relationship_lines(pchan->pose_head, pchan->parent->pose_tail, sh_cfg);
- }
- }
- }
-
- /* Draw a line to IK root bone if bone is selected. */
- if (arm->flag & ARM_POSEMODE) {
- if (constflag & (PCHAN_HAS_IK | PCHAN_HAS_SPLINEIK)) {
- if (boneflag & BONE_SELECTED) {
- pchan_draw_ik_lines(pchan, !do_relations, constflag, sh_cfg);
- }
- }
- }
- }
- }
+ if (g_data.passes.relationship_lines) {
+ if (ebone && ebone->parent) {
+ if (do_relations) {
+ /* Always draw for unconnected bones, regardless of selection,
+ * since riggers will want to know about the links between bones
+ */
+ if ((boneflag & BONE_CONNECTED) == 0) {
+ drw_shgroup_bone_relationship_lines(ebone->head, ebone->parent->tail, sh_cfg);
+ }
+ }
+ }
+ else if (pchan && pchan->parent) {
+ if (do_relations) {
+        /* Only draw if the bone or its parent is selected - reduces viewport complexity with complex rigs */
+ if ((boneflag & BONE_SELECTED) ||
+ (pchan->parent->bone && (pchan->parent->bone->flag & BONE_SELECTED))) {
+ if ((boneflag & BONE_CONNECTED) == 0) {
+ drw_shgroup_bone_relationship_lines(
+ pchan->pose_head, pchan->parent->pose_tail, sh_cfg);
+ }
+ }
+ }
+
+ /* Draw a line to IK root bone if bone is selected. */
+ if (arm->flag & ARM_POSEMODE) {
+ if (constflag & (PCHAN_HAS_IK | PCHAN_HAS_SPLINEIK)) {
+ if (boneflag & BONE_SELECTED) {
+ pchan_draw_ik_lines(pchan, !do_relations, constflag, sh_cfg);
+ }
+ }
+ }
+ }
+ }
}
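/* Editor's note (illustrative, not part of this commit): relationship lines run
 * from a child's head to its parent's tail, e.g.
 *
 *   drw_shgroup_bone_relationship_lines(ebone->head, ebone->parent->tail, sh_cfg);
 *
 * and are skipped for BONE_CONNECTED bones, since for those the two points
 * coincide and the line would be invisible. */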
/** \} */
@@ -1707,210 +1794,222 @@ static void draw_bone_relations(
static void draw_armature_edit(Object *ob)
{
- const DRWContextState *draw_ctx = DRW_context_state_get();
- EditBone *eBone;
- bArmature *arm = ob->data;
- int index;
- const bool is_select = DRW_state_is_select();
-
- update_color(ob, NULL);
- edbo_compute_bbone_child(arm);
-
- const bool show_text = DRW_state_show_text();
- const bool show_relations = ((draw_ctx->v3d->flag & V3D_HIDE_HELPLINES) == 0);
-
- for (eBone = arm->edbo->first, index = ob->select_id; eBone; eBone = eBone->next, index += 0x10000) {
- if (eBone->layer & arm->layer) {
- if ((eBone->flag & BONE_HIDDEN_A) == 0) {
- const int select_id = is_select ? index : (uint)-1;
-
- const short constflag = 0;
-
- /* catch exception for bone with hidden parent */
- int boneflag = eBone->flag;
- if ((eBone->parent) && !EBONE_VISIBLE(arm, eBone->parent)) {
- boneflag &= ~BONE_CONNECTED;
- }
-
- /* set temporary flag for drawing bone as active, but only if selected */
- if (eBone == arm->act_edbone) {
- boneflag |= BONE_DRAW_ACTIVE;
- }
-
- draw_bone_relations(eBone, NULL, arm, boneflag, constflag, show_relations, draw_ctx->sh_cfg);
-
- if (arm->drawtype == ARM_ENVELOPE) {
- draw_bone_update_disp_matrix_default(eBone, NULL);
- draw_bone_envelope(eBone, NULL, arm, boneflag, constflag, draw_ctx->sh_cfg, select_id);
- }
- else if (arm->drawtype == ARM_LINE) {
- draw_bone_update_disp_matrix_default(eBone, NULL);
- draw_bone_line(eBone, NULL, arm, boneflag, constflag, draw_ctx->sh_cfg, select_id);
- }
- else if (arm->drawtype == ARM_WIRE) {
- draw_bone_update_disp_matrix_bbone(eBone, NULL);
- draw_bone_wire(eBone, NULL, arm, boneflag, constflag, draw_ctx->sh_cfg, select_id);
- }
- else if (arm->drawtype == ARM_B_BONE) {
- draw_bone_update_disp_matrix_bbone(eBone, NULL);
- draw_bone_box(eBone, NULL, arm, boneflag, constflag, draw_ctx->sh_cfg, select_id);
- }
- else {
- draw_bone_update_disp_matrix_default(eBone, NULL);
- draw_bone_octahedral(eBone, NULL, arm, boneflag, constflag, draw_ctx->sh_cfg, select_id);
- }
-
- /* Draw names of bone */
- if (show_text && (arm->flag & ARM_DRAWNAMES)) {
- uchar color[4];
- UI_GetThemeColor4ubv((eBone->flag & BONE_SELECTED) ? TH_TEXT_HI : TH_TEXT, color);
-
- float vec[3];
- mid_v3_v3v3(vec, eBone->head, eBone->tail);
- mul_m4_v3(ob->obmat, vec);
-
- struct DRWTextStore *dt = DRW_text_cache_ensure();
- DRW_text_cache_add(
- dt, vec, eBone->name, strlen(eBone->name),
- 10, 0, DRW_TEXT_CACHE_GLOBALSPACE | DRW_TEXT_CACHE_STRING_PTR, color);
- }
-
- /* Draw additional axes */
- if (arm->flag & ARM_DRAWAXES) {
- draw_axes(eBone, NULL, draw_ctx->sh_cfg);
- }
- }
- }
- }
+ const DRWContextState *draw_ctx = DRW_context_state_get();
+ EditBone *eBone;
+ bArmature *arm = ob->data;
+ int index;
+ const bool is_select = DRW_state_is_select();
+
+ update_color(ob, NULL);
+ edbo_compute_bbone_child(arm);
+
+ const bool show_text = DRW_state_show_text();
+ const bool show_relations = ((draw_ctx->v3d->flag & V3D_HIDE_HELPLINES) == 0);
+
+ for (eBone = arm->edbo->first, index = ob->select_id; eBone;
+ eBone = eBone->next, index += 0x10000) {
+ if (eBone->layer & arm->layer) {
+ if ((eBone->flag & BONE_HIDDEN_A) == 0) {
+ const int select_id = is_select ? index : (uint)-1;
+
+ const short constflag = 0;
+
+ /* catch exception for bone with hidden parent */
+ int boneflag = eBone->flag;
+ if ((eBone->parent) && !EBONE_VISIBLE(arm, eBone->parent)) {
+ boneflag &= ~BONE_CONNECTED;
+ }
+
+ /* set temporary flag for drawing bone as active, but only if selected */
+ if (eBone == arm->act_edbone) {
+ boneflag |= BONE_DRAW_ACTIVE;
+ }
+
+ draw_bone_relations(
+ eBone, NULL, arm, boneflag, constflag, show_relations, draw_ctx->sh_cfg);
+
+ if (arm->drawtype == ARM_ENVELOPE) {
+ draw_bone_update_disp_matrix_default(eBone, NULL);
+ draw_bone_envelope(eBone, NULL, arm, boneflag, constflag, draw_ctx->sh_cfg, select_id);
+ }
+ else if (arm->drawtype == ARM_LINE) {
+ draw_bone_update_disp_matrix_default(eBone, NULL);
+ draw_bone_line(eBone, NULL, arm, boneflag, constflag, draw_ctx->sh_cfg, select_id);
+ }
+ else if (arm->drawtype == ARM_WIRE) {
+ draw_bone_update_disp_matrix_bbone(eBone, NULL);
+ draw_bone_wire(eBone, NULL, arm, boneflag, constflag, draw_ctx->sh_cfg, select_id);
+ }
+ else if (arm->drawtype == ARM_B_BONE) {
+ draw_bone_update_disp_matrix_bbone(eBone, NULL);
+ draw_bone_box(eBone, NULL, arm, boneflag, constflag, draw_ctx->sh_cfg, select_id);
+ }
+ else {
+ draw_bone_update_disp_matrix_default(eBone, NULL);
+ draw_bone_octahedral(eBone, NULL, arm, boneflag, constflag, draw_ctx->sh_cfg, select_id);
+ }
+
+        /* Draw the bone name. */
+ if (show_text && (arm->flag & ARM_DRAWNAMES)) {
+ uchar color[4];
+ UI_GetThemeColor4ubv((eBone->flag & BONE_SELECTED) ? TH_TEXT_HI : TH_TEXT, color);
+
+ float vec[3];
+ mid_v3_v3v3(vec, eBone->head, eBone->tail);
+ mul_m4_v3(ob->obmat, vec);
+
+ struct DRWTextStore *dt = DRW_text_cache_ensure();
+ DRW_text_cache_add(dt,
+ vec,
+ eBone->name,
+ strlen(eBone->name),
+ 10,
+ 0,
+ DRW_TEXT_CACHE_GLOBALSPACE | DRW_TEXT_CACHE_STRING_PTR,
+ color);
+ }
+
+ /* Draw additional axes */
+ if (arm->flag & ARM_DRAWAXES) {
+ draw_axes(eBone, NULL, draw_ctx->sh_cfg);
+ }
+ }
+ }
+ }
}
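/* Editor's note (illustrative sketch, not part of this commit): select ids
 * advance by 0x10000 per bone, so the low 16 bits keep the object's base select
 * id while bits 16 and up count bones; the BONESEL_* sub-element flags OR-ed in
 * by the draw_bone_*() helpers occupy their own distinct bits. Roughly:
 *
 *   int select_id = ob->select_id + bone_index * 0x10000; // 'bone_index' is hypothetical
 *   DRW_select_load_id(select_id | BONESEL_TIP);          // tag one tip
 */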
/* if const_color is NULL do pose mode coloring */
static void draw_armature_pose(Object *ob, const float const_color[4])
{
- const DRWContextState *draw_ctx = DRW_context_state_get();
- bArmature *arm = ob->data;
- bPoseChannel *pchan;
- int index = -1;
- Bone *bone;
-
- update_color(ob, const_color);
-
- /* We can't safely draw non-updated pose, might contain NULL bone pointers... */
- if (ob->pose->flag & POSE_RECALC) {
- return;
- }
-
- // if (!(base->flag & OB_FROMDUPLI)) // TODO
- {
- if ((draw_ctx->object_mode & OB_MODE_POSE) || (ob == draw_ctx->object_pose)) {
- arm->flag |= ARM_POSEMODE;
- }
-
- if (arm->flag & ARM_POSEMODE) {
- index = ob->select_id;
- }
- }
-
- const bool is_pose_select = (arm->flag & ARM_POSEMODE) && DRW_state_is_select();
- const bool show_text = DRW_state_show_text();
- const bool show_relations = ((draw_ctx->v3d->flag & V3D_HIDE_HELPLINES) == 0);
-
- /* being set below */
- for (pchan = ob->pose->chanbase.first; pchan; pchan = pchan->next) {
- bone = pchan->bone;
-
- /* bone must be visible */
- if ((bone->flag & (BONE_HIDDEN_P | BONE_HIDDEN_PG)) == 0) {
- if (bone->layer & arm->layer) {
- const int select_id = is_pose_select ? index : (uint)-1;
-
- const short constflag = pchan->constflag;
-
- pchan_draw_data_init(pchan);
-
- if (const_color) {
- /* keep color */
- }
- else {
- /* set color-set to use */
- set_pchan_colorset(ob, pchan);
- }
-
- int boneflag = bone->flag;
- /* catch exception for bone with hidden parent */
- boneflag = bone->flag;
- if ((bone->parent) && (bone->parent->flag & (BONE_HIDDEN_P | BONE_HIDDEN_PG))) {
- boneflag &= ~BONE_CONNECTED;
- }
-
- /* set temporary flag for drawing bone as active, but only if selected */
- if (bone == arm->act_bone) {
- boneflag |= BONE_DRAW_ACTIVE;
- }
-
- draw_bone_relations(NULL, pchan, arm, boneflag, constflag, show_relations, draw_ctx->sh_cfg);
-
- if ((pchan->custom) && !(arm->flag & ARM_NO_CUSTOM)) {
- draw_bone_update_disp_matrix_custom(pchan);
- draw_bone_custom_shape(NULL, pchan, arm, boneflag, constflag, draw_ctx->sh_cfg, select_id);
- }
- else if (arm->drawtype == ARM_ENVELOPE) {
- draw_bone_update_disp_matrix_default(NULL, pchan);
- draw_bone_envelope(NULL, pchan, arm, boneflag, constflag, draw_ctx->sh_cfg, select_id);
- }
- else if (arm->drawtype == ARM_LINE) {
- draw_bone_update_disp_matrix_default(NULL, pchan);
- draw_bone_line(NULL, pchan, arm, boneflag, constflag, draw_ctx->sh_cfg, select_id);
- }
- else if (arm->drawtype == ARM_WIRE) {
- draw_bone_update_disp_matrix_bbone(NULL, pchan);
- draw_bone_wire(NULL, pchan, arm, boneflag, constflag, draw_ctx->sh_cfg, select_id);
- }
- else if (arm->drawtype == ARM_B_BONE) {
- draw_bone_update_disp_matrix_bbone(NULL, pchan);
- draw_bone_box(NULL, pchan, arm, boneflag, constflag, draw_ctx->sh_cfg, select_id);
- }
- else {
- draw_bone_update_disp_matrix_default(NULL, pchan);
- draw_bone_octahedral(NULL, pchan, arm, boneflag, constflag, draw_ctx->sh_cfg, select_id);
- }
-
- if (!is_pose_select && show_relations &&
- (arm->flag & ARM_POSEMODE) &&
- (bone->flag & BONE_SELECTED) &&
- ((ob->base_flag & BASE_FROM_DUPLI) == 0) &&
- (pchan->ikflag & (BONE_IK_XLIMIT | BONE_IK_ZLIMIT)))
- {
- draw_bone_dofs(pchan);
- }
-
- /* Draw names of bone */
- if (show_text && (arm->flag & ARM_DRAWNAMES)) {
- uchar color[4];
- UI_GetThemeColor4ubv((arm->flag & ARM_POSEMODE) &&
- (bone->flag & BONE_SELECTED) ? TH_TEXT_HI : TH_TEXT, color);
- float vec[3];
- mid_v3_v3v3(vec, pchan->pose_head, pchan->pose_tail);
- mul_m4_v3(ob->obmat, vec);
-
- struct DRWTextStore *dt = DRW_text_cache_ensure();
- DRW_text_cache_add(
- dt, vec, pchan->name, strlen(pchan->name),
- 10, 0, DRW_TEXT_CACHE_GLOBALSPACE | DRW_TEXT_CACHE_STRING_PTR, color);
- }
-
- /* Draw additional axes */
- if (arm->flag & ARM_DRAWAXES) {
- draw_axes(NULL, pchan, draw_ctx->sh_cfg);
- }
- }
- }
- if (is_pose_select) {
- index += 0x10000;
- }
- }
-
- arm->flag &= ~ARM_POSEMODE;
+ const DRWContextState *draw_ctx = DRW_context_state_get();
+ bArmature *arm = ob->data;
+ bPoseChannel *pchan;
+ int index = -1;
+ Bone *bone;
+
+ update_color(ob, const_color);
+
+  /* We can't safely draw a non-updated pose; it might contain NULL bone pointers... */
+ if (ob->pose->flag & POSE_RECALC) {
+ return;
+ }
+
+ // if (!(base->flag & OB_FROMDUPLI)) // TODO
+ {
+ if ((draw_ctx->object_mode & OB_MODE_POSE) || (ob == draw_ctx->object_pose)) {
+ arm->flag |= ARM_POSEMODE;
+ }
+
+ if (arm->flag & ARM_POSEMODE) {
+ index = ob->select_id;
+ }
+ }
+
+ const bool is_pose_select = (arm->flag & ARM_POSEMODE) && DRW_state_is_select();
+ const bool show_text = DRW_state_show_text();
+ const bool show_relations = ((draw_ctx->v3d->flag & V3D_HIDE_HELPLINES) == 0);
+
+ /* being set below */
+ for (pchan = ob->pose->chanbase.first; pchan; pchan = pchan->next) {
+ bone = pchan->bone;
+
+ /* bone must be visible */
+ if ((bone->flag & (BONE_HIDDEN_P | BONE_HIDDEN_PG)) == 0) {
+ if (bone->layer & arm->layer) {
+ const int select_id = is_pose_select ? index : (uint)-1;
+
+ const short constflag = pchan->constflag;
+
+ pchan_draw_data_init(pchan);
+
+ if (const_color) {
+ /* keep color */
+ }
+ else {
+ /* set color-set to use */
+ set_pchan_colorset(ob, pchan);
+ }
+
+        /* catch exception for bone with hidden parent */
+        int boneflag = bone->flag;
+ if ((bone->parent) && (bone->parent->flag & (BONE_HIDDEN_P | BONE_HIDDEN_PG))) {
+ boneflag &= ~BONE_CONNECTED;
+ }
+
+ /* set temporary flag for drawing bone as active, but only if selected */
+ if (bone == arm->act_bone) {
+ boneflag |= BONE_DRAW_ACTIVE;
+ }
+
+ draw_bone_relations(
+ NULL, pchan, arm, boneflag, constflag, show_relations, draw_ctx->sh_cfg);
+
+ if ((pchan->custom) && !(arm->flag & ARM_NO_CUSTOM)) {
+ draw_bone_update_disp_matrix_custom(pchan);
+ draw_bone_custom_shape(
+ NULL, pchan, arm, boneflag, constflag, draw_ctx->sh_cfg, select_id);
+ }
+ else if (arm->drawtype == ARM_ENVELOPE) {
+ draw_bone_update_disp_matrix_default(NULL, pchan);
+ draw_bone_envelope(NULL, pchan, arm, boneflag, constflag, draw_ctx->sh_cfg, select_id);
+ }
+ else if (arm->drawtype == ARM_LINE) {
+ draw_bone_update_disp_matrix_default(NULL, pchan);
+ draw_bone_line(NULL, pchan, arm, boneflag, constflag, draw_ctx->sh_cfg, select_id);
+ }
+ else if (arm->drawtype == ARM_WIRE) {
+ draw_bone_update_disp_matrix_bbone(NULL, pchan);
+ draw_bone_wire(NULL, pchan, arm, boneflag, constflag, draw_ctx->sh_cfg, select_id);
+ }
+ else if (arm->drawtype == ARM_B_BONE) {
+ draw_bone_update_disp_matrix_bbone(NULL, pchan);
+ draw_bone_box(NULL, pchan, arm, boneflag, constflag, draw_ctx->sh_cfg, select_id);
+ }
+ else {
+ draw_bone_update_disp_matrix_default(NULL, pchan);
+ draw_bone_octahedral(NULL, pchan, arm, boneflag, constflag, draw_ctx->sh_cfg, select_id);
+ }
+
+ if (!is_pose_select && show_relations && (arm->flag & ARM_POSEMODE) &&
+ (bone->flag & BONE_SELECTED) && ((ob->base_flag & BASE_FROM_DUPLI) == 0) &&
+ (pchan->ikflag & (BONE_IK_XLIMIT | BONE_IK_ZLIMIT))) {
+ draw_bone_dofs(pchan);
+ }
+
+        /* Draw the bone name. */
+ if (show_text && (arm->flag & ARM_DRAWNAMES)) {
+ uchar color[4];
+ UI_GetThemeColor4ubv(
+ (arm->flag & ARM_POSEMODE) && (bone->flag & BONE_SELECTED) ? TH_TEXT_HI : TH_TEXT,
+ color);
+ float vec[3];
+ mid_v3_v3v3(vec, pchan->pose_head, pchan->pose_tail);
+ mul_m4_v3(ob->obmat, vec);
+
+ struct DRWTextStore *dt = DRW_text_cache_ensure();
+ DRW_text_cache_add(dt,
+ vec,
+ pchan->name,
+ strlen(pchan->name),
+ 10,
+ 0,
+ DRW_TEXT_CACHE_GLOBALSPACE | DRW_TEXT_CACHE_STRING_PTR,
+ color);
+ }
+
+ /* Draw additional axes */
+ if (arm->flag & ARM_DRAWAXES) {
+ draw_axes(NULL, pchan, draw_ctx->sh_cfg);
+ }
+ }
+ }
+ if (is_pose_select) {
+ index += 0x10000;
+ }
+ }
+
+ arm->flag &= ~ARM_POSEMODE;
}
/**
@@ -1918,32 +2017,35 @@ static void draw_armature_pose(Object *ob, const float const_color[4])
*/
static void drw_shgroup_armature(Object *ob, DRWArmaturePasses passes, bool transp)
{
- memset(&g_data, 0x0, sizeof(g_data));
- g_data.ob = ob;
- g_data.passes = passes;
- g_data.transparent = transp;
- memset(&g_color, 0x0, sizeof(g_color));
+ memset(&g_data, 0x0, sizeof(g_data));
+ g_data.ob = ob;
+ g_data.passes = passes;
+ g_data.transparent = transp;
+ memset(&g_color, 0x0, sizeof(g_color));
}
-void DRW_shgroup_armature_object(Object *ob, ViewLayer *view_layer, DRWArmaturePasses passes, bool transp)
+void DRW_shgroup_armature_object(Object *ob,
+ ViewLayer *view_layer,
+ DRWArmaturePasses passes,
+ bool transp)
{
- float *color;
- DRW_object_wire_theme_get(ob, view_layer, &color);
- passes.bone_envelope = NULL; /* Don't do envelope distance in object mode. */
- drw_shgroup_armature(ob, passes, transp);
- draw_armature_pose(ob, color);
+ float *color;
+ DRW_object_wire_theme_get(ob, view_layer, &color);
+ passes.bone_envelope = NULL; /* Don't do envelope distance in object mode. */
+ drw_shgroup_armature(ob, passes, transp);
+ draw_armature_pose(ob, color);
}
void DRW_shgroup_armature_pose(Object *ob, DRWArmaturePasses passes, bool transp)
{
- drw_shgroup_armature(ob, passes, transp);
- draw_armature_pose(ob, NULL);
+ drw_shgroup_armature(ob, passes, transp);
+ draw_armature_pose(ob, NULL);
}
void DRW_shgroup_armature_edit(Object *ob, DRWArmaturePasses passes, bool transp)
{
- drw_shgroup_armature(ob, passes, transp);
- draw_armature_edit(ob);
+ drw_shgroup_armature(ob, passes, transp);
+ draw_armature_edit(ob);
}
/** \} */
diff --git a/source/blender/draw/intern/draw_cache.c b/source/blender/draw/intern/draw_cache.c
index 717fd69149c..74c87162afd 100644
--- a/source/blender/draw/intern/draw_cache.c
+++ b/source/blender/draw/intern/draw_cache.c
@@ -18,7 +18,6 @@
* \ingroup draw
*/
-
#include "DNA_scene_types.h"
#include "DNA_mesh_types.h"
#include "DNA_meta_types.h"
@@ -47,221 +46,228 @@
/* Batch's only (free'd as an array) */
static struct DRWShapeCache {
- GPUBatch *drw_single_vertice;
- GPUBatch *drw_cursor;
- GPUBatch *drw_cursor_only_circle;
- GPUBatch *drw_fullscreen_quad;
- GPUBatch *drw_fullscreen_quad_texcoord;
- GPUBatch *drw_quad;
- GPUBatch *drw_quad_wires;
- GPUBatch *drw_grid;
- GPUBatch *drw_sphere;
- GPUBatch *drw_screenspace_circle;
- GPUBatch *drw_plain_axes;
- GPUBatch *drw_single_arrow;
- GPUBatch *drw_cube;
- GPUBatch *drw_circle;
- GPUBatch *drw_square;
- GPUBatch *drw_line;
- GPUBatch *drw_line_endpoints;
- GPUBatch *drw_empty_cube;
- GPUBatch *drw_empty_sphere;
- GPUBatch *drw_empty_cylinder;
- GPUBatch *drw_empty_capsule_body;
- GPUBatch *drw_empty_capsule_cap;
- GPUBatch *drw_empty_cone;
- GPUBatch *drw_arrows;
- GPUBatch *drw_axis_names;
- GPUBatch *drw_image_plane;
- GPUBatch *drw_image_plane_wire;
- GPUBatch *drw_field_wind;
- GPUBatch *drw_field_force;
- GPUBatch *drw_field_vortex;
- GPUBatch *drw_field_tube_limit;
- GPUBatch *drw_field_cone_limit;
- GPUBatch *drw_light;
- GPUBatch *drw_light_shadows;
- GPUBatch *drw_light_sunrays;
- GPUBatch *drw_light_area_square;
- GPUBatch *drw_light_area_disk;
- GPUBatch *drw_light_hemi;
- GPUBatch *drw_light_spot;
- GPUBatch *drw_light_spot_volume;
- GPUBatch *drw_light_spot_square;
- GPUBatch *drw_light_spot_square_volume;
- GPUBatch *drw_speaker;
- GPUBatch *drw_lightprobe_cube;
- GPUBatch *drw_lightprobe_planar;
- GPUBatch *drw_lightprobe_grid;
- GPUBatch *drw_bone_octahedral;
- GPUBatch *drw_bone_octahedral_wire;
- GPUBatch *drw_bone_box;
- GPUBatch *drw_bone_box_wire;
- GPUBatch *drw_bone_wire_wire;
- GPUBatch *drw_bone_envelope;
- GPUBatch *drw_bone_envelope_outline;
- GPUBatch *drw_bone_point;
- GPUBatch *drw_bone_point_wire;
- GPUBatch *drw_bone_stick;
- GPUBatch *drw_bone_arrows;
- GPUBatch *drw_bone_dof_sphere;
- GPUBatch *drw_bone_dof_lines;
- GPUBatch *drw_camera;
- GPUBatch *drw_camera_frame;
- GPUBatch *drw_camera_tria;
- GPUBatch *drw_camera_focus;
- GPUBatch *drw_particle_cross;
- GPUBatch *drw_particle_circle;
- GPUBatch *drw_particle_axis;
- GPUBatch *drw_gpencil_axes;
+ GPUBatch *drw_single_vertice;
+ GPUBatch *drw_cursor;
+ GPUBatch *drw_cursor_only_circle;
+ GPUBatch *drw_fullscreen_quad;
+ GPUBatch *drw_fullscreen_quad_texcoord;
+ GPUBatch *drw_quad;
+ GPUBatch *drw_quad_wires;
+ GPUBatch *drw_grid;
+ GPUBatch *drw_sphere;
+ GPUBatch *drw_screenspace_circle;
+ GPUBatch *drw_plain_axes;
+ GPUBatch *drw_single_arrow;
+ GPUBatch *drw_cube;
+ GPUBatch *drw_circle;
+ GPUBatch *drw_square;
+ GPUBatch *drw_line;
+ GPUBatch *drw_line_endpoints;
+ GPUBatch *drw_empty_cube;
+ GPUBatch *drw_empty_sphere;
+ GPUBatch *drw_empty_cylinder;
+ GPUBatch *drw_empty_capsule_body;
+ GPUBatch *drw_empty_capsule_cap;
+ GPUBatch *drw_empty_cone;
+ GPUBatch *drw_arrows;
+ GPUBatch *drw_axis_names;
+ GPUBatch *drw_image_plane;
+ GPUBatch *drw_image_plane_wire;
+ GPUBatch *drw_field_wind;
+ GPUBatch *drw_field_force;
+ GPUBatch *drw_field_vortex;
+ GPUBatch *drw_field_tube_limit;
+ GPUBatch *drw_field_cone_limit;
+ GPUBatch *drw_light;
+ GPUBatch *drw_light_shadows;
+ GPUBatch *drw_light_sunrays;
+ GPUBatch *drw_light_area_square;
+ GPUBatch *drw_light_area_disk;
+ GPUBatch *drw_light_hemi;
+ GPUBatch *drw_light_spot;
+ GPUBatch *drw_light_spot_volume;
+ GPUBatch *drw_light_spot_square;
+ GPUBatch *drw_light_spot_square_volume;
+ GPUBatch *drw_speaker;
+ GPUBatch *drw_lightprobe_cube;
+ GPUBatch *drw_lightprobe_planar;
+ GPUBatch *drw_lightprobe_grid;
+ GPUBatch *drw_bone_octahedral;
+ GPUBatch *drw_bone_octahedral_wire;
+ GPUBatch *drw_bone_box;
+ GPUBatch *drw_bone_box_wire;
+ GPUBatch *drw_bone_wire_wire;
+ GPUBatch *drw_bone_envelope;
+ GPUBatch *drw_bone_envelope_outline;
+ GPUBatch *drw_bone_point;
+ GPUBatch *drw_bone_point_wire;
+ GPUBatch *drw_bone_stick;
+ GPUBatch *drw_bone_arrows;
+ GPUBatch *drw_bone_dof_sphere;
+ GPUBatch *drw_bone_dof_lines;
+ GPUBatch *drw_camera;
+ GPUBatch *drw_camera_frame;
+ GPUBatch *drw_camera_tria;
+ GPUBatch *drw_camera_focus;
+ GPUBatch *drw_particle_cross;
+ GPUBatch *drw_particle_circle;
+ GPUBatch *drw_particle_axis;
+ GPUBatch *drw_gpencil_axes;
} SHC = {NULL};
void DRW_shape_cache_free(void)
{
- uint i = sizeof(SHC) / sizeof(GPUBatch *);
- GPUBatch **batch = (GPUBatch **)&SHC;
- while (i--) {
- GPU_BATCH_DISCARD_SAFE(*batch);
- batch++;
- }
+ uint i = sizeof(SHC) / sizeof(GPUBatch *);
+ GPUBatch **batch = (GPUBatch **)&SHC;
+ while (i--) {
+ GPU_BATCH_DISCARD_SAFE(*batch);
+ batch++;
+ }
}
void DRW_shape_cache_reset(void)
{
- uint i = sizeof(SHC) / sizeof(GPUBatch *);
- GPUBatch **batch = (GPUBatch **)&SHC;
- while (i--) {
- if (*batch) {
- GPU_batch_vao_cache_clear(*batch);
- }
- batch++;
- }
+ uint i = sizeof(SHC) / sizeof(GPUBatch *);
+ GPUBatch **batch = (GPUBatch **)&SHC;
+ while (i--) {
+ if (*batch) {
+ GPU_batch_vao_cache_clear(*batch);
+ }
+ batch++;
+ }
}
/* -------------------------------------------------------------------- */
/** \name Helper functions
* \{ */
-static void UNUSED_FUNCTION(add_fancy_edge)(
- GPUVertBuf *vbo, uint pos_id, uint n1_id, uint n2_id,
- uint *v_idx, const float co1[3], const float co2[3],
- const float n1[3], const float n2[3])
+static void UNUSED_FUNCTION(add_fancy_edge)(GPUVertBuf *vbo,
+ uint pos_id,
+ uint n1_id,
+ uint n2_id,
+ uint *v_idx,
+ const float co1[3],
+ const float co2[3],
+ const float n1[3],
+ const float n2[3])
{
- GPU_vertbuf_attr_set(vbo, n1_id, *v_idx, n1);
- GPU_vertbuf_attr_set(vbo, n2_id, *v_idx, n2);
- GPU_vertbuf_attr_set(vbo, pos_id, (*v_idx)++, co1);
+ GPU_vertbuf_attr_set(vbo, n1_id, *v_idx, n1);
+ GPU_vertbuf_attr_set(vbo, n2_id, *v_idx, n2);
+ GPU_vertbuf_attr_set(vbo, pos_id, (*v_idx)++, co1);
- GPU_vertbuf_attr_set(vbo, n1_id, *v_idx, n1);
- GPU_vertbuf_attr_set(vbo, n2_id, *v_idx, n2);
- GPU_vertbuf_attr_set(vbo, pos_id, (*v_idx)++, co2);
+ GPU_vertbuf_attr_set(vbo, n1_id, *v_idx, n1);
+ GPU_vertbuf_attr_set(vbo, n2_id, *v_idx, n2);
+ GPU_vertbuf_attr_set(vbo, pos_id, (*v_idx)++, co2);
}
-#if 0 /* UNUSED */
+#if 0 /* UNUSED */
static void add_lat_lon_vert(
GPUVertBuf *vbo, uint pos_id, uint nor_id,
uint *v_idx, const float rad, const float lat, const float lon)
{
- float pos[3], nor[3];
- nor[0] = sinf(lat) * cosf(lon);
- nor[1] = cosf(lat);
- nor[2] = sinf(lat) * sinf(lon);
- mul_v3_v3fl(pos, nor, rad);
+ float pos[3], nor[3];
+ nor[0] = sinf(lat) * cosf(lon);
+ nor[1] = cosf(lat);
+ nor[2] = sinf(lat) * sinf(lon);
+ mul_v3_v3fl(pos, nor, rad);
- GPU_vertbuf_attr_set(vbo, nor_id, *v_idx, nor);
- GPU_vertbuf_attr_set(vbo, pos_id, (*v_idx)++, pos);
+ GPU_vertbuf_attr_set(vbo, nor_id, *v_idx, nor);
+ GPU_vertbuf_attr_set(vbo, pos_id, (*v_idx)++, pos);
}
static GPUVertBuf *fill_arrows_vbo(const float scale)
{
- /* Position Only 3D format */
- static GPUVertFormat format = { 0 };
- static struct { uint pos; } attr_id;
- if (format.attr_len == 0) {
- attr_id.pos = GPU_vertformat_attr_add(&format, "pos", GPU_COMP_F32, 3, GPU_FETCH_FLOAT);
- }
+ /* Position Only 3D format */
+ static GPUVertFormat format = { 0 };
+ static struct { uint pos; } attr_id;
+ if (format.attr_len == 0) {
+ attr_id.pos = GPU_vertformat_attr_add(&format, "pos", GPU_COMP_F32, 3, GPU_FETCH_FLOAT);
+ }
- /* Line */
- GPUVertBuf *vbo = GPU_vertbuf_create_with_format(&format);
- GPU_vertbuf_data_alloc(vbo, 6 * 3);
+ /* Line */
+ GPUVertBuf *vbo = GPU_vertbuf_create_with_format(&format);
+ GPU_vertbuf_data_alloc(vbo, 6 * 3);
- float v1[3] = {0.0, 0.0, 0.0};
- float v2[3] = {0.0, 0.0, 0.0};
- float vtmp1[3], vtmp2[3];
+ float v1[3] = {0.0, 0.0, 0.0};
+ float v2[3] = {0.0, 0.0, 0.0};
+ float vtmp1[3], vtmp2[3];
- for (int axis = 0; axis < 3; axis++) {
- const int arrow_axis = (axis == 0) ? 1 : 0;
+ for (int axis = 0; axis < 3; axis++) {
+ const int arrow_axis = (axis == 0) ? 1 : 0;
- v2[axis] = 1.0f;
- mul_v3_v3fl(vtmp1, v1, scale);
- mul_v3_v3fl(vtmp2, v2, scale);
- GPU_vertbuf_attr_set(vbo, attr_id.pos, axis * 6 + 0, vtmp1);
- GPU_vertbuf_attr_set(vbo, attr_id.pos, axis * 6 + 1, vtmp2);
+ v2[axis] = 1.0f;
+ mul_v3_v3fl(vtmp1, v1, scale);
+ mul_v3_v3fl(vtmp2, v2, scale);
+ GPU_vertbuf_attr_set(vbo, attr_id.pos, axis * 6 + 0, vtmp1);
+ GPU_vertbuf_attr_set(vbo, attr_id.pos, axis * 6 + 1, vtmp2);
- v1[axis] = 0.85f;
- v1[arrow_axis] = -0.08f;
- mul_v3_v3fl(vtmp1, v1, scale);
- mul_v3_v3fl(vtmp2, v2, scale);
- GPU_vertbuf_attr_set(vbo, attr_id.pos, axis * 6 + 2, vtmp1);
- GPU_vertbuf_attr_set(vbo, attr_id.pos, axis * 6 + 3, vtmp2);
+ v1[axis] = 0.85f;
+ v1[arrow_axis] = -0.08f;
+ mul_v3_v3fl(vtmp1, v1, scale);
+ mul_v3_v3fl(vtmp2, v2, scale);
+ GPU_vertbuf_attr_set(vbo, attr_id.pos, axis * 6 + 2, vtmp1);
+ GPU_vertbuf_attr_set(vbo, attr_id.pos, axis * 6 + 3, vtmp2);
- v1[arrow_axis] = 0.08f;
- mul_v3_v3fl(vtmp1, v1, scale);
- mul_v3_v3fl(vtmp2, v2, scale);
- GPU_vertbuf_attr_set(vbo, attr_id.pos, axis * 6 + 4, vtmp1);
- GPU_vertbuf_attr_set(vbo, attr_id.pos, axis * 6 + 5, vtmp2);
+ v1[arrow_axis] = 0.08f;
+ mul_v3_v3fl(vtmp1, v1, scale);
+ mul_v3_v3fl(vtmp2, v2, scale);
+ GPU_vertbuf_attr_set(vbo, attr_id.pos, axis * 6 + 4, vtmp1);
+ GPU_vertbuf_attr_set(vbo, attr_id.pos, axis * 6 + 5, vtmp2);
- /* reset v1 & v2 to zero */
- v1[arrow_axis] = v1[axis] = v2[axis] = 0.0f;
- }
+ /* reset v1 & v2 to zero */
+ v1[arrow_axis] = v1[axis] = v2[axis] = 0.0f;
+ }
- return vbo;
+ return vbo;
}
-#endif /* UNUSED */
+#endif /* UNUSED */
static GPUVertBuf *sphere_wire_vbo(const float rad)
{
#define NSEGMENTS 32
- /* Position Only 3D format */
- static GPUVertFormat format = { 0 };
- static struct { uint pos; } attr_id;
- if (format.attr_len == 0) {
- attr_id.pos = GPU_vertformat_attr_add(&format, "pos", GPU_COMP_F32, 3, GPU_FETCH_FLOAT);
- }
-
- GPUVertBuf *vbo = GPU_vertbuf_create_with_format(&format);
- GPU_vertbuf_data_alloc(vbo, NSEGMENTS * 2 * 3);
-
- /* a single ring of vertices */
- float p[NSEGMENTS][2];
- for (int i = 0; i < NSEGMENTS; ++i) {
- float angle = 2 * M_PI * ((float)i / (float)NSEGMENTS);
- p[i][0] = rad * cosf(angle);
- p[i][1] = rad * sinf(angle);
- }
-
- for (int axis = 0; axis < 3; ++axis) {
- for (int i = 0; i < NSEGMENTS; ++i) {
- for (int j = 0; j < 2; ++j) {
- float cv[2], v[3];
-
- cv[0] = p[(i + j) % NSEGMENTS][0];
- cv[1] = p[(i + j) % NSEGMENTS][1];
-
- if (axis == 0) {
- ARRAY_SET_ITEMS(v, cv[0], cv[1], 0.0f);
- }
- else if (axis == 1) {
- ARRAY_SET_ITEMS(v, cv[0], 0.0f, cv[1]);
- }
- else {
- ARRAY_SET_ITEMS(v, 0.0f, cv[0], cv[1]);
- }
- GPU_vertbuf_attr_set(vbo, attr_id.pos, i * 2 + j + (NSEGMENTS * 2 * axis), v);
- }
- }
- }
-
- return vbo;
+ /* Position Only 3D format */
+ static GPUVertFormat format = {0};
+ static struct {
+ uint pos;
+ } attr_id;
+ if (format.attr_len == 0) {
+ attr_id.pos = GPU_vertformat_attr_add(&format, "pos", GPU_COMP_F32, 3, GPU_FETCH_FLOAT);
+ }
+
+ GPUVertBuf *vbo = GPU_vertbuf_create_with_format(&format);
+ GPU_vertbuf_data_alloc(vbo, NSEGMENTS * 2 * 3);
+
+ /* a single ring of vertices */
+ float p[NSEGMENTS][2];
+ for (int i = 0; i < NSEGMENTS; ++i) {
+ float angle = 2 * M_PI * ((float)i / (float)NSEGMENTS);
+ p[i][0] = rad * cosf(angle);
+ p[i][1] = rad * sinf(angle);
+ }
+
+ for (int axis = 0; axis < 3; ++axis) {
+ for (int i = 0; i < NSEGMENTS; ++i) {
+ for (int j = 0; j < 2; ++j) {
+ float cv[2], v[3];
+
+ cv[0] = p[(i + j) % NSEGMENTS][0];
+ cv[1] = p[(i + j) % NSEGMENTS][1];
+
+ if (axis == 0) {
+ ARRAY_SET_ITEMS(v, cv[0], cv[1], 0.0f);
+ }
+ else if (axis == 1) {
+ ARRAY_SET_ITEMS(v, cv[0], 0.0f, cv[1]);
+ }
+ else {
+ ARRAY_SET_ITEMS(v, 0.0f, cv[0], cv[1]);
+ }
+ GPU_vertbuf_attr_set(vbo, attr_id.pos, i * 2 + j + (NSEGMENTS * 2 * axis), v);
+ }
+ }
+ }
+
+ return vbo;
#undef NSEGMENTS
}
@@ -269,137 +275,145 @@ static GPUVertBuf *sphere_wire_vbo(const float rad)
/* Use this one for rendering fullscreen passes. For 3D objects use DRW_cache_quad_get(). */
GPUBatch *DRW_cache_fullscreen_quad_get(void)
{
- if (!SHC.drw_fullscreen_quad) {
- /* Use a triangle instead of a real quad */
- /* https://www.slideshare.net/DevCentralAMD/vertex-shader-tricks-bill-bilodeau - slide 14 */
- float pos[3][2] = {{-1.0f, -1.0f}, { 3.0f, -1.0f}, {-1.0f, 3.0f}};
- float uvs[3][2] = {{ 0.0f, 0.0f}, { 2.0f, 0.0f}, { 0.0f, 2.0f}};
+ if (!SHC.drw_fullscreen_quad) {
+ /* Use a triangle instead of a real quad */
+ /* https://www.slideshare.net/DevCentralAMD/vertex-shader-tricks-bill-bilodeau - slide 14 */
+ float pos[3][2] = {{-1.0f, -1.0f}, {3.0f, -1.0f}, {-1.0f, 3.0f}};
+ float uvs[3][2] = {{0.0f, 0.0f}, {2.0f, 0.0f}, {0.0f, 2.0f}};
- /* Position Only 2D format */
- static GPUVertFormat format = { 0 };
- static struct { uint pos, uvs; } attr_id;
- if (format.attr_len == 0) {
- attr_id.pos = GPU_vertformat_attr_add(&format, "pos", GPU_COMP_F32, 2, GPU_FETCH_FLOAT);
- attr_id.uvs = GPU_vertformat_attr_add(&format, "uvs", GPU_COMP_F32, 2, GPU_FETCH_FLOAT);
- GPU_vertformat_alias_add(&format, "texCoord");
- }
+ /* Position Only 2D format */
+ static GPUVertFormat format = {0};
+ static struct {
+ uint pos, uvs;
+ } attr_id;
+ if (format.attr_len == 0) {
+ attr_id.pos = GPU_vertformat_attr_add(&format, "pos", GPU_COMP_F32, 2, GPU_FETCH_FLOAT);
+ attr_id.uvs = GPU_vertformat_attr_add(&format, "uvs", GPU_COMP_F32, 2, GPU_FETCH_FLOAT);
+ GPU_vertformat_alias_add(&format, "texCoord");
+ }
- GPUVertBuf *vbo = GPU_vertbuf_create_with_format(&format);
- GPU_vertbuf_data_alloc(vbo, 3);
+ GPUVertBuf *vbo = GPU_vertbuf_create_with_format(&format);
+ GPU_vertbuf_data_alloc(vbo, 3);
- for (int i = 0; i < 3; ++i) {
- GPU_vertbuf_attr_set(vbo, attr_id.pos, i, pos[i]);
- GPU_vertbuf_attr_set(vbo, attr_id.uvs, i, uvs[i]);
- }
+ for (int i = 0; i < 3; ++i) {
+ GPU_vertbuf_attr_set(vbo, attr_id.pos, i, pos[i]);
+ GPU_vertbuf_attr_set(vbo, attr_id.uvs, i, uvs[i]);
+ }
- SHC.drw_fullscreen_quad = GPU_batch_create_ex(GPU_PRIM_TRIS, vbo, NULL, GPU_BATCH_OWNS_VBO);
- }
- return SHC.drw_fullscreen_quad;
+ SHC.drw_fullscreen_quad = GPU_batch_create_ex(GPU_PRIM_TRIS, vbo, NULL, GPU_BATCH_OWNS_VBO);
+ }
+ return SHC.drw_fullscreen_quad;
}
/* Just a regular quad with 4 vertices. */
GPUBatch *DRW_cache_quad_get(void)
{
- if (!SHC.drw_quad) {
- float pos[4][2] = {{-1.0f, -1.0f}, { 1.0f, -1.0f}, {1.0f, 1.0f}, {-1.0f, 1.0f}};
- float uvs[4][2] = {{ 0.0f, 0.0f}, { 1.0f, 0.0f}, {1.0f, 1.0f}, { 0.0f, 1.0f}};
+ if (!SHC.drw_quad) {
+ float pos[4][2] = {{-1.0f, -1.0f}, {1.0f, -1.0f}, {1.0f, 1.0f}, {-1.0f, 1.0f}};
+ float uvs[4][2] = {{0.0f, 0.0f}, {1.0f, 0.0f}, {1.0f, 1.0f}, {0.0f, 1.0f}};
- /* Position Only 2D format */
- static GPUVertFormat format = { 0 };
- static struct { uint pos, uvs; } attr_id;
- if (format.attr_len == 0) {
- attr_id.pos = GPU_vertformat_attr_add(&format, "pos", GPU_COMP_F32, 2, GPU_FETCH_FLOAT);
- attr_id.uvs = GPU_vertformat_attr_add(&format, "uvs", GPU_COMP_F32, 2, GPU_FETCH_FLOAT);
- }
+ /* Position Only 2D format */
+ static GPUVertFormat format = {0};
+ static struct {
+ uint pos, uvs;
+ } attr_id;
+ if (format.attr_len == 0) {
+ attr_id.pos = GPU_vertformat_attr_add(&format, "pos", GPU_COMP_F32, 2, GPU_FETCH_FLOAT);
+ attr_id.uvs = GPU_vertformat_attr_add(&format, "uvs", GPU_COMP_F32, 2, GPU_FETCH_FLOAT);
+ }
- GPUVertBuf *vbo = GPU_vertbuf_create_with_format(&format);
- GPU_vertbuf_data_alloc(vbo, 4);
+ GPUVertBuf *vbo = GPU_vertbuf_create_with_format(&format);
+ GPU_vertbuf_data_alloc(vbo, 4);
- for (int i = 0; i < 4; ++i) {
- GPU_vertbuf_attr_set(vbo, attr_id.pos, i, pos[i]);
- GPU_vertbuf_attr_set(vbo, attr_id.uvs, i, uvs[i]);
- }
+ for (int i = 0; i < 4; ++i) {
+ GPU_vertbuf_attr_set(vbo, attr_id.pos, i, pos[i]);
+ GPU_vertbuf_attr_set(vbo, attr_id.uvs, i, uvs[i]);
+ }
- SHC.drw_quad = GPU_batch_create_ex(GPU_PRIM_TRI_FAN, vbo, NULL, GPU_BATCH_OWNS_VBO);
- }
- return SHC.drw_quad;
+ SHC.drw_quad = GPU_batch_create_ex(GPU_PRIM_TRI_FAN, vbo, NULL, GPU_BATCH_OWNS_VBO);
+ }
+ return SHC.drw_quad;
}
/* Just a regular quad with 4 vertices - wires. */
GPUBatch *DRW_cache_quad_wires_get(void)
{
- if (!SHC.drw_quad_wires) {
- float pos[4][2] = {{-1.0f, -1.0f}, { 1.0f, -1.0f}, {1.0f, 1.0f}, {-1.0f, 1.0f}};
+ if (!SHC.drw_quad_wires) {
+ float pos[4][2] = {{-1.0f, -1.0f}, {1.0f, -1.0f}, {1.0f, 1.0f}, {-1.0f, 1.0f}};
- /* Position Only 2D format */
- static GPUVertFormat format = { 0 };
- static struct { uint pos; } attr_id;
- if (format.attr_len == 0) {
- attr_id.pos = GPU_vertformat_attr_add(&format, "pos", GPU_COMP_F32, 2, GPU_FETCH_FLOAT);
- }
+ /* Position Only 2D format */
+ static GPUVertFormat format = {0};
+ static struct {
+ uint pos;
+ } attr_id;
+ if (format.attr_len == 0) {
+ attr_id.pos = GPU_vertformat_attr_add(&format, "pos", GPU_COMP_F32, 2, GPU_FETCH_FLOAT);
+ }
- GPUVertBuf *vbo = GPU_vertbuf_create_with_format(&format);
- GPU_vertbuf_data_alloc(vbo, 8);
+ GPUVertBuf *vbo = GPU_vertbuf_create_with_format(&format);
+ GPU_vertbuf_data_alloc(vbo, 8);
- for (int i = 0; i < 4; i++) {
- GPU_vertbuf_attr_set(vbo, attr_id.pos, i * 2, pos[i % 4]);
- GPU_vertbuf_attr_set(vbo, attr_id.pos, i * 2 + 1, pos[(i + 1) % 4]);
- }
+ for (int i = 0; i < 4; i++) {
+ GPU_vertbuf_attr_set(vbo, attr_id.pos, i * 2, pos[i % 4]);
+ GPU_vertbuf_attr_set(vbo, attr_id.pos, i * 2 + 1, pos[(i + 1) % 4]);
+ }
- SHC.drw_quad_wires = GPU_batch_create_ex(GPU_PRIM_LINES, vbo, NULL, GPU_BATCH_OWNS_VBO);
- }
- return SHC.drw_quad_wires;
+ SHC.drw_quad_wires = GPU_batch_create_ex(GPU_PRIM_LINES, vbo, NULL, GPU_BATCH_OWNS_VBO);
+ }
+ return SHC.drw_quad_wires;
}
/* Grid */
GPUBatch *DRW_cache_grid_get(void)
{
- if (!SHC.drw_grid) {
- /* Position Only 2D format */
- static GPUVertFormat format = { 0 };
- static struct { uint pos; } attr_id;
- if (format.attr_len == 0) {
- attr_id.pos = GPU_vertformat_attr_add(&format, "pos", GPU_COMP_F32, 2, GPU_FETCH_FLOAT);
- }
-
- GPUVertBuf *vbo = GPU_vertbuf_create_with_format(&format);
- GPU_vertbuf_data_alloc(vbo, 8 * 8 * 2 * 3);
-
- uint v_idx = 0;
- for (int i = 0; i < 8; ++i) {
- for (int j = 0; j < 8; ++j) {
- float pos0[2] = {(float)i / 8.0f, (float)j / 8.0f};
- float pos1[2] = {(float)(i + 1) / 8.0f, (float)j / 8.0f};
- float pos2[2] = {(float)i / 8.0f, (float)(j + 1) / 8.0f};
- float pos3[2] = {(float)(i + 1) / 8.0f, (float)(j + 1) / 8.0f};
-
- madd_v2_v2v2fl(pos0, (float[2]){-1.0f, -1.0f}, pos0, 2.0f);
- madd_v2_v2v2fl(pos1, (float[2]){-1.0f, -1.0f}, pos1, 2.0f);
- madd_v2_v2v2fl(pos2, (float[2]){-1.0f, -1.0f}, pos2, 2.0f);
- madd_v2_v2v2fl(pos3, (float[2]){-1.0f, -1.0f}, pos3, 2.0f);
-
- GPU_vertbuf_attr_set(vbo, attr_id.pos, v_idx++, pos0);
- GPU_vertbuf_attr_set(vbo, attr_id.pos, v_idx++, pos1);
- GPU_vertbuf_attr_set(vbo, attr_id.pos, v_idx++, pos2);
-
- GPU_vertbuf_attr_set(vbo, attr_id.pos, v_idx++, pos2);
- GPU_vertbuf_attr_set(vbo, attr_id.pos, v_idx++, pos1);
- GPU_vertbuf_attr_set(vbo, attr_id.pos, v_idx++, pos3);
- }
- }
-
- SHC.drw_grid = GPU_batch_create_ex(GPU_PRIM_TRIS, vbo, NULL, GPU_BATCH_OWNS_VBO);
- }
- return SHC.drw_grid;
+ if (!SHC.drw_grid) {
+ /* Position Only 2D format */
+ static GPUVertFormat format = {0};
+ static struct {
+ uint pos;
+ } attr_id;
+ if (format.attr_len == 0) {
+ attr_id.pos = GPU_vertformat_attr_add(&format, "pos", GPU_COMP_F32, 2, GPU_FETCH_FLOAT);
+ }
+
+ GPUVertBuf *vbo = GPU_vertbuf_create_with_format(&format);
+ GPU_vertbuf_data_alloc(vbo, 8 * 8 * 2 * 3);
+
+ uint v_idx = 0;
+ for (int i = 0; i < 8; ++i) {
+ for (int j = 0; j < 8; ++j) {
+ float pos0[2] = {(float)i / 8.0f, (float)j / 8.0f};
+ float pos1[2] = {(float)(i + 1) / 8.0f, (float)j / 8.0f};
+ float pos2[2] = {(float)i / 8.0f, (float)(j + 1) / 8.0f};
+ float pos3[2] = {(float)(i + 1) / 8.0f, (float)(j + 1) / 8.0f};
+
+ madd_v2_v2v2fl(pos0, (float[2]){-1.0f, -1.0f}, pos0, 2.0f);
+ madd_v2_v2v2fl(pos1, (float[2]){-1.0f, -1.0f}, pos1, 2.0f);
+ madd_v2_v2v2fl(pos2, (float[2]){-1.0f, -1.0f}, pos2, 2.0f);
+ madd_v2_v2v2fl(pos3, (float[2]){-1.0f, -1.0f}, pos3, 2.0f);
+
+ GPU_vertbuf_attr_set(vbo, attr_id.pos, v_idx++, pos0);
+ GPU_vertbuf_attr_set(vbo, attr_id.pos, v_idx++, pos1);
+ GPU_vertbuf_attr_set(vbo, attr_id.pos, v_idx++, pos2);
+
+ GPU_vertbuf_attr_set(vbo, attr_id.pos, v_idx++, pos2);
+ GPU_vertbuf_attr_set(vbo, attr_id.pos, v_idx++, pos1);
+ GPU_vertbuf_attr_set(vbo, attr_id.pos, v_idx++, pos3);
+ }
+ }
+
+ SHC.drw_grid = GPU_batch_create_ex(GPU_PRIM_TRIS, vbo, NULL, GPU_BATCH_OWNS_VBO);
+ }
+ return SHC.drw_grid;
}
/* Sphere */
GPUBatch *DRW_cache_sphere_get(void)
{
- if (!SHC.drw_sphere) {
- SHC.drw_sphere = gpu_batch_sphere(32, 24);
- }
- return SHC.drw_sphere;
+ if (!SHC.drw_sphere) {
+ SHC.drw_sphere = gpu_batch_sphere(32, 24);
+ }
+ return SHC.drw_sphere;
}
/** \} */
@@ -410,918 +424,955 @@ GPUBatch *DRW_cache_sphere_get(void)
GPUBatch *DRW_cache_cube_get(void)
{
- if (!SHC.drw_cube) {
- const GLfloat verts[8][3] = {
- {-1.0f, -1.0f, -1.0f},
- {-1.0f, -1.0f, 1.0f},
- {-1.0f, 1.0f, -1.0f},
- {-1.0f, 1.0f, 1.0f},
- { 1.0f, -1.0f, -1.0f},
- { 1.0f, -1.0f, 1.0f},
- { 1.0f, 1.0f, -1.0f},
- { 1.0f, 1.0f, 1.0f},
- };
-
- const uint indices[36] = {
- 0, 1, 2,
- 1, 3, 2,
- 0, 4, 1,
- 4, 5, 1,
- 6, 5, 4,
- 6, 7, 5,
- 2, 7, 6,
- 2, 3, 7,
- 3, 1, 7,
- 1, 5, 7,
- 0, 2, 4,
- 2, 6, 4,
- };
-
- /* Position Only 3D format */
- static GPUVertFormat format = { 0 };
- static struct { uint pos; } attr_id;
- if (format.attr_len == 0) {
- attr_id.pos = GPU_vertformat_attr_add(&format, "pos", GPU_COMP_F32, 3, GPU_FETCH_FLOAT);
- }
-
- GPUVertBuf *vbo = GPU_vertbuf_create_with_format(&format);
- GPU_vertbuf_data_alloc(vbo, 36);
-
- for (int i = 0; i < 36; ++i) {
- GPU_vertbuf_attr_set(vbo, attr_id.pos, i, verts[indices[i]]);
- }
-
- SHC.drw_cube = GPU_batch_create_ex(GPU_PRIM_TRIS, vbo, NULL, GPU_BATCH_OWNS_VBO);
- }
- return SHC.drw_cube;
+ if (!SHC.drw_cube) {
+ const GLfloat verts[8][3] = {
+ {-1.0f, -1.0f, -1.0f},
+ {-1.0f, -1.0f, 1.0f},
+ {-1.0f, 1.0f, -1.0f},
+ {-1.0f, 1.0f, 1.0f},
+ {1.0f, -1.0f, -1.0f},
+ {1.0f, -1.0f, 1.0f},
+ {1.0f, 1.0f, -1.0f},
+ {1.0f, 1.0f, 1.0f},
+ };
+
+ const uint indices[36] = {
+ 0, 1, 2, 1, 3, 2, 0, 4, 1, 4, 5, 1, 6, 5, 4, 6, 7, 5,
+ 2, 7, 6, 2, 3, 7, 3, 1, 7, 1, 5, 7, 0, 2, 4, 2, 6, 4,
+ };
+
+ /* Position Only 3D format */
+ static GPUVertFormat format = {0};
+ static struct {
+ uint pos;
+ } attr_id;
+ if (format.attr_len == 0) {
+ attr_id.pos = GPU_vertformat_attr_add(&format, "pos", GPU_COMP_F32, 3, GPU_FETCH_FLOAT);
+ }
+
+ GPUVertBuf *vbo = GPU_vertbuf_create_with_format(&format);
+ GPU_vertbuf_data_alloc(vbo, 36);
+
+ for (int i = 0; i < 36; ++i) {
+ GPU_vertbuf_attr_set(vbo, attr_id.pos, i, verts[indices[i]]);
+ }
+
+ SHC.drw_cube = GPU_batch_create_ex(GPU_PRIM_TRIS, vbo, NULL, GPU_BATCH_OWNS_VBO);
+ }
+ return SHC.drw_cube;
}
GPUBatch *DRW_cache_empty_cube_get(void)
{
- if (!SHC.drw_empty_cube) {
- const GLfloat verts[8][3] = {
- {-1.0f, -1.0f, -1.0f},
- {-1.0f, -1.0f, 1.0f},
- {-1.0f, 1.0f, -1.0f},
- {-1.0f, 1.0f, 1.0f},
- { 1.0f, -1.0f, -1.0f},
- { 1.0f, -1.0f, 1.0f},
- { 1.0f, 1.0f, -1.0f},
- { 1.0f, 1.0f, 1.0f},
- };
-
- const GLubyte indices[24] = {0, 1, 1, 3, 3, 2, 2, 0, 0, 4, 4, 5, 5, 7, 7, 6, 6, 4, 1, 5, 3, 7, 2, 6};
-
- /* Position Only 3D format */
- static GPUVertFormat format = { 0 };
- static struct { uint pos; } attr_id;
- if (format.attr_len == 0) {
- attr_id.pos = GPU_vertformat_attr_add(&format, "pos", GPU_COMP_F32, 3, GPU_FETCH_FLOAT);
- }
-
- GPUVertBuf *vbo = GPU_vertbuf_create_with_format(&format);
- GPU_vertbuf_data_alloc(vbo, 24);
-
- for (int i = 0; i < 24; ++i) {
- GPU_vertbuf_attr_set(vbo, attr_id.pos, i, verts[indices[i]]);
- }
-
- SHC.drw_empty_cube = GPU_batch_create_ex(GPU_PRIM_LINES, vbo, NULL, GPU_BATCH_OWNS_VBO);
- }
- return SHC.drw_empty_cube;
+ if (!SHC.drw_empty_cube) {
+ const GLfloat verts[8][3] = {
+ {-1.0f, -1.0f, -1.0f},
+ {-1.0f, -1.0f, 1.0f},
+ {-1.0f, 1.0f, -1.0f},
+ {-1.0f, 1.0f, 1.0f},
+ {1.0f, -1.0f, -1.0f},
+ {1.0f, -1.0f, 1.0f},
+ {1.0f, 1.0f, -1.0f},
+ {1.0f, 1.0f, 1.0f},
+ };
+
+ const GLubyte indices[24] = {0, 1, 1, 3, 3, 2, 2, 0, 0, 4, 4, 5,
+ 5, 7, 7, 6, 6, 4, 1, 5, 3, 7, 2, 6};
+
+ /* Position Only 3D format */
+ static GPUVertFormat format = {0};
+ static struct {
+ uint pos;
+ } attr_id;
+ if (format.attr_len == 0) {
+ attr_id.pos = GPU_vertformat_attr_add(&format, "pos", GPU_COMP_F32, 3, GPU_FETCH_FLOAT);
+ }
+
+ GPUVertBuf *vbo = GPU_vertbuf_create_with_format(&format);
+ GPU_vertbuf_data_alloc(vbo, 24);
+
+ for (int i = 0; i < 24; ++i) {
+ GPU_vertbuf_attr_set(vbo, attr_id.pos, i, verts[indices[i]]);
+ }
+
+ SHC.drw_empty_cube = GPU_batch_create_ex(GPU_PRIM_LINES, vbo, NULL, GPU_BATCH_OWNS_VBO);
+ }
+ return SHC.drw_empty_cube;
}
GPUBatch *DRW_cache_circle_get(void)
{
#define CIRCLE_RESOL 64
- if (!SHC.drw_circle) {
- float v[3] = {0.0f, 0.0f, 0.0f};
-
- /* Position Only 3D format */
- static GPUVertFormat format = { 0 };
- static struct { uint pos; } attr_id;
- if (format.attr_len == 0) {
- attr_id.pos = GPU_vertformat_attr_add(&format, "pos", GPU_COMP_F32, 3, GPU_FETCH_FLOAT);
- }
-
- GPUVertBuf *vbo = GPU_vertbuf_create_with_format(&format);
- GPU_vertbuf_data_alloc(vbo, CIRCLE_RESOL);
-
- for (int a = 0; a < CIRCLE_RESOL; a++) {
- v[0] = sinf((2.0f * M_PI * a) / ((float)CIRCLE_RESOL));
- v[2] = cosf((2.0f * M_PI * a) / ((float)CIRCLE_RESOL));
- v[1] = 0.0f;
- GPU_vertbuf_attr_set(vbo, attr_id.pos, a, v);
- }
-
- SHC.drw_circle = GPU_batch_create_ex(GPU_PRIM_LINE_LOOP, vbo, NULL, GPU_BATCH_OWNS_VBO);
- }
- return SHC.drw_circle;
+ if (!SHC.drw_circle) {
+ float v[3] = {0.0f, 0.0f, 0.0f};
+
+ /* Position Only 3D format */
+ static GPUVertFormat format = {0};
+ static struct {
+ uint pos;
+ } attr_id;
+ if (format.attr_len == 0) {
+ attr_id.pos = GPU_vertformat_attr_add(&format, "pos", GPU_COMP_F32, 3, GPU_FETCH_FLOAT);
+ }
+
+ GPUVertBuf *vbo = GPU_vertbuf_create_with_format(&format);
+ GPU_vertbuf_data_alloc(vbo, CIRCLE_RESOL);
+
+ for (int a = 0; a < CIRCLE_RESOL; a++) {
+ v[0] = sinf((2.0f * M_PI * a) / ((float)CIRCLE_RESOL));
+ v[2] = cosf((2.0f * M_PI * a) / ((float)CIRCLE_RESOL));
+ v[1] = 0.0f;
+ GPU_vertbuf_attr_set(vbo, attr_id.pos, a, v);
+ }
+
+ SHC.drw_circle = GPU_batch_create_ex(GPU_PRIM_LINE_LOOP, vbo, NULL, GPU_BATCH_OWNS_VBO);
+ }
+ return SHC.drw_circle;
#undef CIRCLE_RESOL
}
GPUBatch *DRW_cache_square_get(void)
{
- if (!SHC.drw_square) {
- float p[4][3] = {
- { 1.0f, 0.0f, 1.0f},
- { 1.0f, 0.0f, -1.0f},
- {-1.0f, 0.0f, -1.0f},
- {-1.0f, 0.0f, 1.0f}};
+ if (!SHC.drw_square) {
+ float p[4][3] = {
+ {1.0f, 0.0f, 1.0f}, {1.0f, 0.0f, -1.0f}, {-1.0f, 0.0f, -1.0f}, {-1.0f, 0.0f, 1.0f}};
- /* Position Only 3D format */
- static GPUVertFormat format = { 0 };
- static struct { uint pos; } attr_id;
- if (format.attr_len == 0) {
- attr_id.pos = GPU_vertformat_attr_add(&format, "pos", GPU_COMP_F32, 3, GPU_FETCH_FLOAT);
- }
+ /* Position Only 3D format */
+ static GPUVertFormat format = {0};
+ static struct {
+ uint pos;
+ } attr_id;
+ if (format.attr_len == 0) {
+ attr_id.pos = GPU_vertformat_attr_add(&format, "pos", GPU_COMP_F32, 3, GPU_FETCH_FLOAT);
+ }
- GPUVertBuf *vbo = GPU_vertbuf_create_with_format(&format);
- GPU_vertbuf_data_alloc(vbo, 8);
+ GPUVertBuf *vbo = GPU_vertbuf_create_with_format(&format);
+ GPU_vertbuf_data_alloc(vbo, 8);
- for (int i = 0; i < 4; i++) {
- GPU_vertbuf_attr_set(vbo, attr_id.pos, i * 2, p[i % 4]);
- GPU_vertbuf_attr_set(vbo, attr_id.pos, i * 2 + 1, p[(i + 1) % 4]);
- }
+ for (int i = 0; i < 4; i++) {
+ GPU_vertbuf_attr_set(vbo, attr_id.pos, i * 2, p[i % 4]);
+ GPU_vertbuf_attr_set(vbo, attr_id.pos, i * 2 + 1, p[(i + 1) % 4]);
+ }
- SHC.drw_square = GPU_batch_create_ex(GPU_PRIM_LINES, vbo, NULL, GPU_BATCH_OWNS_VBO);
- }
- return SHC.drw_square;
+ SHC.drw_square = GPU_batch_create_ex(GPU_PRIM_LINES, vbo, NULL, GPU_BATCH_OWNS_VBO);
+ }
+ return SHC.drw_square;
}
GPUBatch *DRW_cache_single_line_get(void)
{
- /* Z axis line */
- if (!SHC.drw_line) {
- float v1[3] = {0.0f, 0.0f, 0.0f};
- float v2[3] = {0.0f, 0.0f, 1.0f};
+ /* Z axis line */
+ if (!SHC.drw_line) {
+ float v1[3] = {0.0f, 0.0f, 0.0f};
+ float v2[3] = {0.0f, 0.0f, 1.0f};
- /* Position Only 3D format */
- static GPUVertFormat format = { 0 };
- static struct { uint pos; } attr_id;
- if (format.attr_len == 0) {
- attr_id.pos = GPU_vertformat_attr_add(&format, "pos", GPU_COMP_F32, 3, GPU_FETCH_FLOAT);
- }
+ /* Position Only 3D format */
+ static GPUVertFormat format = {0};
+ static struct {
+ uint pos;
+ } attr_id;
+ if (format.attr_len == 0) {
+ attr_id.pos = GPU_vertformat_attr_add(&format, "pos", GPU_COMP_F32, 3, GPU_FETCH_FLOAT);
+ }
- GPUVertBuf *vbo = GPU_vertbuf_create_with_format(&format);
- GPU_vertbuf_data_alloc(vbo, 2);
+ GPUVertBuf *vbo = GPU_vertbuf_create_with_format(&format);
+ GPU_vertbuf_data_alloc(vbo, 2);
- GPU_vertbuf_attr_set(vbo, attr_id.pos, 0, v1);
- GPU_vertbuf_attr_set(vbo, attr_id.pos, 1, v2);
+ GPU_vertbuf_attr_set(vbo, attr_id.pos, 0, v1);
+ GPU_vertbuf_attr_set(vbo, attr_id.pos, 1, v2);
- SHC.drw_line = GPU_batch_create_ex(GPU_PRIM_LINES, vbo, NULL, GPU_BATCH_OWNS_VBO);
- }
- return SHC.drw_line;
+ SHC.drw_line = GPU_batch_create_ex(GPU_PRIM_LINES, vbo, NULL, GPU_BATCH_OWNS_VBO);
+ }
+ return SHC.drw_line;
}
GPUBatch *DRW_cache_single_line_endpoints_get(void)
{
- /* Z axis line */
- if (!SHC.drw_line_endpoints) {
- float v1[3] = {0.0f, 0.0f, 0.0f};
- float v2[3] = {0.0f, 0.0f, 1.0f};
+ /* Z axis line */
+ if (!SHC.drw_line_endpoints) {
+ float v1[3] = {0.0f, 0.0f, 0.0f};
+ float v2[3] = {0.0f, 0.0f, 1.0f};
- /* Position Only 3D format */
- static GPUVertFormat format = { 0 };
- static struct { uint pos; } attr_id;
- if (format.attr_len == 0) {
- attr_id.pos = GPU_vertformat_attr_add(&format, "pos", GPU_COMP_F32, 3, GPU_FETCH_FLOAT);
- }
+ /* Position Only 3D format */
+ static GPUVertFormat format = {0};
+ static struct {
+ uint pos;
+ } attr_id;
+ if (format.attr_len == 0) {
+ attr_id.pos = GPU_vertformat_attr_add(&format, "pos", GPU_COMP_F32, 3, GPU_FETCH_FLOAT);
+ }
- GPUVertBuf *vbo = GPU_vertbuf_create_with_format(&format);
- GPU_vertbuf_data_alloc(vbo, 2);
+ GPUVertBuf *vbo = GPU_vertbuf_create_with_format(&format);
+ GPU_vertbuf_data_alloc(vbo, 2);
- GPU_vertbuf_attr_set(vbo, attr_id.pos, 0, v1);
- GPU_vertbuf_attr_set(vbo, attr_id.pos, 1, v2);
+ GPU_vertbuf_attr_set(vbo, attr_id.pos, 0, v1);
+ GPU_vertbuf_attr_set(vbo, attr_id.pos, 1, v2);
- SHC.drw_line_endpoints = GPU_batch_create_ex(GPU_PRIM_POINTS, vbo, NULL, GPU_BATCH_OWNS_VBO);
- }
- return SHC.drw_line_endpoints;
+ SHC.drw_line_endpoints = GPU_batch_create_ex(GPU_PRIM_POINTS, vbo, NULL, GPU_BATCH_OWNS_VBO);
+ }
+ return SHC.drw_line_endpoints;
}
GPUBatch *DRW_cache_screenspace_circle_get(void)
{
#define CIRCLE_RESOL 32
- if (!SHC.drw_screenspace_circle) {
- float v[3] = {0.0f, 0.0f, 0.0f};
-
- /* Position Only 3D format */
- static GPUVertFormat format = { 0 };
- static struct { uint pos; } attr_id;
- if (format.attr_len == 0) {
- attr_id.pos = GPU_vertformat_attr_add(&format, "pos", GPU_COMP_F32, 3, GPU_FETCH_FLOAT);
- }
-
- GPUVertBuf *vbo = GPU_vertbuf_create_with_format(&format);
- GPU_vertbuf_data_alloc(vbo, CIRCLE_RESOL + 1);
-
- for (int a = 0; a <= CIRCLE_RESOL; a++) {
- v[0] = sinf((2.0f * M_PI * a) / ((float)CIRCLE_RESOL));
- v[1] = cosf((2.0f * M_PI * a) / ((float)CIRCLE_RESOL));
- GPU_vertbuf_attr_set(vbo, attr_id.pos, a, v);
- }
-
- SHC.drw_screenspace_circle = GPU_batch_create_ex(GPU_PRIM_LINE_STRIP, vbo, NULL, GPU_BATCH_OWNS_VBO);
- }
- return SHC.drw_screenspace_circle;
+ if (!SHC.drw_screenspace_circle) {
+ float v[3] = {0.0f, 0.0f, 0.0f};
+
+ /* Position Only 3D format */
+ static GPUVertFormat format = {0};
+ static struct {
+ uint pos;
+ } attr_id;
+ if (format.attr_len == 0) {
+ attr_id.pos = GPU_vertformat_attr_add(&format, "pos", GPU_COMP_F32, 3, GPU_FETCH_FLOAT);
+ }
+
+ GPUVertBuf *vbo = GPU_vertbuf_create_with_format(&format);
+ GPU_vertbuf_data_alloc(vbo, CIRCLE_RESOL + 1);
+
+ for (int a = 0; a <= CIRCLE_RESOL; a++) {
+ v[0] = sinf((2.0f * M_PI * a) / ((float)CIRCLE_RESOL));
+ v[1] = cosf((2.0f * M_PI * a) / ((float)CIRCLE_RESOL));
+ GPU_vertbuf_attr_set(vbo, attr_id.pos, a, v);
+ }
+
+ SHC.drw_screenspace_circle = GPU_batch_create_ex(
+ GPU_PRIM_LINE_STRIP, vbo, NULL, GPU_BATCH_OWNS_VBO);
+ }
+ return SHC.drw_screenspace_circle;
#undef CIRCLE_RESOL
}
/* Grease Pencil object */
GPUBatch *DRW_cache_gpencil_axes_get(void)
{
- if (!SHC.drw_gpencil_axes) {
- int axis;
- float v1[3] = { 0.0f, 0.0f, 0.0f };
- float v2[3] = { 0.0f, 0.0f, 0.0f };
+ if (!SHC.drw_gpencil_axes) {
+ int axis;
+ float v1[3] = {0.0f, 0.0f, 0.0f};
+ float v2[3] = {0.0f, 0.0f, 0.0f};
- /* cube data */
- const GLfloat verts[8][3] = {
- { -0.25f, -0.25f, -0.25f },
- { -0.25f, -0.25f, 0.25f },
- { -0.25f, 0.25f, -0.25f },
- { -0.25f, 0.25f, 0.25f },
- { 0.25f, -0.25f, -0.25f },
- { 0.25f, -0.25f, 0.25f },
- { 0.25f, 0.25f, -0.25f },
- { 0.25f, 0.25f, 0.25f }
- };
+ /* cube data */
+ const GLfloat verts[8][3] = {{-0.25f, -0.25f, -0.25f},
+ {-0.25f, -0.25f, 0.25f},
+ {-0.25f, 0.25f, -0.25f},
+ {-0.25f, 0.25f, 0.25f},
+ {0.25f, -0.25f, -0.25f},
+ {0.25f, -0.25f, 0.25f},
+ {0.25f, 0.25f, -0.25f},
+ {0.25f, 0.25f, 0.25f}};
- const GLubyte indices[24] = { 0, 1, 1, 3, 3, 2, 2, 0, 0, 4, 4, 5, 5, 7, 7, 6, 6, 4, 1, 5, 3, 7, 2, 6 };
+ const GLubyte indices[24] = {0, 1, 1, 3, 3, 2, 2, 0, 0, 4, 4, 5,
+ 5, 7, 7, 6, 6, 4, 1, 5, 3, 7, 2, 6};
- /* Position Only 3D format */
- static GPUVertFormat format = { 0 };
- static uint pos_id;
- if (format.attr_len == 0) {
- pos_id = GPU_vertformat_attr_add(&format, "pos", GPU_COMP_F32, 3, GPU_FETCH_FLOAT);
- }
+ /* Position Only 3D format */
+ static GPUVertFormat format = {0};
+ static uint pos_id;
+ if (format.attr_len == 0) {
+ pos_id = GPU_vertformat_attr_add(&format, "pos", GPU_COMP_F32, 3, GPU_FETCH_FLOAT);
+ }
- GPUVertBuf *vbo = GPU_vertbuf_create_with_format(&format);
+ GPUVertBuf *vbo = GPU_vertbuf_create_with_format(&format);
- /* alloc 30 elements for cube and 3 axis */
- GPU_vertbuf_data_alloc(vbo, ARRAY_SIZE(indices) + 6);
+ /* alloc 30 elements for cube and 3 axis */
+ GPU_vertbuf_data_alloc(vbo, ARRAY_SIZE(indices) + 6);
- /* draw axis */
- for (axis = 0; axis < 3; axis++) {
- v1[axis] = 1.0f;
- v2[axis] = -1.0f;
+ /* draw axis */
+ for (axis = 0; axis < 3; axis++) {
+ v1[axis] = 1.0f;
+ v2[axis] = -1.0f;
- GPU_vertbuf_attr_set(vbo, pos_id, axis * 2, v1);
- GPU_vertbuf_attr_set(vbo, pos_id, axis * 2 + 1, v2);
+ GPU_vertbuf_attr_set(vbo, pos_id, axis * 2, v1);
+ GPU_vertbuf_attr_set(vbo, pos_id, axis * 2 + 1, v2);
- /* reset v1 & v2 to zero for next axis */
- v1[axis] = v2[axis] = 0.0f;
- }
+ /* reset v1 & v2 to zero for next axis */
+ v1[axis] = v2[axis] = 0.0f;
+ }
- /* draw cube */
- for (int i = 0; i < 24; ++i) {
- GPU_vertbuf_attr_set(vbo, pos_id, i + 6, verts[indices[i]]);
- }
+ /* draw cube */
+ for (int i = 0; i < 24; ++i) {
+ GPU_vertbuf_attr_set(vbo, pos_id, i + 6, verts[indices[i]]);
+ }
- SHC.drw_gpencil_axes = GPU_batch_create_ex(GPU_PRIM_LINES, vbo, NULL, GPU_BATCH_OWNS_VBO);
- }
- return SHC.drw_gpencil_axes;
+ SHC.drw_gpencil_axes = GPU_batch_create_ex(GPU_PRIM_LINES, vbo, NULL, GPU_BATCH_OWNS_VBO);
+ }
+ return SHC.drw_gpencil_axes;
}
-
/* -------------------------------------------------------------------- */
/** \name Common Object API
* \{ */
GPUBatch *DRW_cache_object_all_edges_get(Object *ob)
{
- switch (ob->type) {
- case OB_MESH:
- return DRW_cache_mesh_all_edges_get(ob);
+ switch (ob->type) {
+ case OB_MESH:
+ return DRW_cache_mesh_all_edges_get(ob);
- /* TODO, should match 'DRW_cache_object_surface_get' */
- default:
- return NULL;
- }
+ /* TODO, should match 'DRW_cache_object_surface_get' */
+ default:
+ return NULL;
+ }
}
GPUBatch *DRW_cache_object_edge_detection_get(Object *ob, bool *r_is_manifold)
{
- switch (ob->type) {
- case OB_MESH:
- return DRW_cache_mesh_edge_detection_get(ob, r_is_manifold);
- case OB_CURVE:
- return DRW_cache_curve_edge_detection_get(ob, r_is_manifold);
- case OB_SURF:
- return DRW_cache_surf_edge_detection_get(ob, r_is_manifold);
- case OB_FONT:
- return DRW_cache_text_edge_detection_get(ob, r_is_manifold);
- case OB_MBALL:
- return DRW_cache_mball_edge_detection_get(ob, r_is_manifold);
- default:
- return NULL;
- }
+ switch (ob->type) {
+ case OB_MESH:
+ return DRW_cache_mesh_edge_detection_get(ob, r_is_manifold);
+ case OB_CURVE:
+ return DRW_cache_curve_edge_detection_get(ob, r_is_manifold);
+ case OB_SURF:
+ return DRW_cache_surf_edge_detection_get(ob, r_is_manifold);
+ case OB_FONT:
+ return DRW_cache_text_edge_detection_get(ob, r_is_manifold);
+ case OB_MBALL:
+ return DRW_cache_mball_edge_detection_get(ob, r_is_manifold);
+ default:
+ return NULL;
+ }
}
GPUBatch *DRW_cache_object_face_wireframe_get(Object *ob)
{
- switch (ob->type) {
- case OB_MESH:
- return DRW_cache_mesh_face_wireframe_get(ob);
- case OB_CURVE:
- return DRW_cache_curve_face_wireframe_get(ob);
- case OB_SURF:
- return DRW_cache_surf_face_wireframe_get(ob);
- case OB_FONT:
- return DRW_cache_text_face_wireframe_get(ob);
- case OB_MBALL:
- return DRW_cache_mball_face_wireframe_get(ob);
- default:
- return NULL;
- }
+ switch (ob->type) {
+ case OB_MESH:
+ return DRW_cache_mesh_face_wireframe_get(ob);
+ case OB_CURVE:
+ return DRW_cache_curve_face_wireframe_get(ob);
+ case OB_SURF:
+ return DRW_cache_surf_face_wireframe_get(ob);
+ case OB_FONT:
+ return DRW_cache_text_face_wireframe_get(ob);
+ case OB_MBALL:
+ return DRW_cache_mball_face_wireframe_get(ob);
+ default:
+ return NULL;
+ }
}
GPUBatch *DRW_cache_object_loose_edges_get(struct Object *ob)
{
- switch (ob->type) {
- case OB_MESH:
- return DRW_cache_mesh_loose_edges_get(ob);
- case OB_CURVE:
- return DRW_cache_curve_loose_edges_get(ob);
- case OB_SURF:
- return DRW_cache_surf_loose_edges_get(ob);
- case OB_FONT:
- return DRW_cache_text_loose_edges_get(ob);
- case OB_MBALL:
- /* Cannot have any loose edge */
- default:
- return NULL;
- }
+ switch (ob->type) {
+ case OB_MESH:
+ return DRW_cache_mesh_loose_edges_get(ob);
+ case OB_CURVE:
+ return DRW_cache_curve_loose_edges_get(ob);
+ case OB_SURF:
+ return DRW_cache_surf_loose_edges_get(ob);
+ case OB_FONT:
+ return DRW_cache_text_loose_edges_get(ob);
+ case OB_MBALL:
+ /* Cannot have any loose edge */
+ default:
+ return NULL;
+ }
}
GPUBatch *DRW_cache_object_surface_get(Object *ob)
{
- switch (ob->type) {
- case OB_MESH:
- return DRW_cache_mesh_surface_get(ob);
- case OB_CURVE:
- return DRW_cache_curve_surface_get(ob);
- case OB_SURF:
- return DRW_cache_surf_surface_get(ob);
- case OB_FONT:
- return DRW_cache_text_surface_get(ob);
- case OB_MBALL:
- return DRW_cache_mball_surface_get(ob);
- default:
- return NULL;
- }
-}
-
-GPUBatch **DRW_cache_object_surface_material_get(
- struct Object *ob, struct GPUMaterial **gpumat_array, uint gpumat_array_len,
- char **auto_layer_names, int **auto_layer_is_srgb, int *auto_layer_count)
-{
- if (auto_layer_names != NULL) {
- *auto_layer_names = NULL;
- *auto_layer_is_srgb = NULL;
- *auto_layer_count = 0;
- }
-
- switch (ob->type) {
- case OB_MESH:
- return DRW_cache_mesh_surface_shaded_get(
- ob, gpumat_array, gpumat_array_len,
- auto_layer_names, auto_layer_is_srgb, auto_layer_count);
- case OB_CURVE:
- return DRW_cache_curve_surface_shaded_get(ob, gpumat_array, gpumat_array_len);
- case OB_SURF:
- return DRW_cache_surf_surface_shaded_get(ob, gpumat_array, gpumat_array_len);
- case OB_FONT:
- return DRW_cache_text_surface_shaded_get(ob, gpumat_array, gpumat_array_len);
- case OB_MBALL:
- return DRW_cache_mball_surface_shaded_get(ob, gpumat_array, gpumat_array_len);
- default:
- return NULL;
- }
+ switch (ob->type) {
+ case OB_MESH:
+ return DRW_cache_mesh_surface_get(ob);
+ case OB_CURVE:
+ return DRW_cache_curve_surface_get(ob);
+ case OB_SURF:
+ return DRW_cache_surf_surface_get(ob);
+ case OB_FONT:
+ return DRW_cache_text_surface_get(ob);
+ case OB_MBALL:
+ return DRW_cache_mball_surface_get(ob);
+ default:
+ return NULL;
+ }
+}
+
+GPUBatch **DRW_cache_object_surface_material_get(struct Object *ob,
+ struct GPUMaterial **gpumat_array,
+ uint gpumat_array_len,
+ char **auto_layer_names,
+ int **auto_layer_is_srgb,
+ int *auto_layer_count)
+{
+ if (auto_layer_names != NULL) {
+ *auto_layer_names = NULL;
+ *auto_layer_is_srgb = NULL;
+ *auto_layer_count = 0;
+ }
+
+ switch (ob->type) {
+ case OB_MESH:
+ return DRW_cache_mesh_surface_shaded_get(ob,
+ gpumat_array,
+ gpumat_array_len,
+ auto_layer_names,
+ auto_layer_is_srgb,
+ auto_layer_count);
+ case OB_CURVE:
+ return DRW_cache_curve_surface_shaded_get(ob, gpumat_array, gpumat_array_len);
+ case OB_SURF:
+ return DRW_cache_surf_surface_shaded_get(ob, gpumat_array, gpumat_array_len);
+ case OB_FONT:
+ return DRW_cache_text_surface_shaded_get(ob, gpumat_array, gpumat_array_len);
+ case OB_MBALL:
+ return DRW_cache_mball_surface_shaded_get(ob, gpumat_array, gpumat_array_len);
+ default:
+ return NULL;
+ }
}
/** \} */
-
/* -------------------------------------------------------------------- */
/** \name Empties
* \{ */
GPUBatch *DRW_cache_plain_axes_get(void)
{
- if (!SHC.drw_plain_axes) {
- int axis;
- float v1[3] = {0.0f, 0.0f, 0.0f};
- float v2[3] = {0.0f, 0.0f, 0.0f};
+ if (!SHC.drw_plain_axes) {
+ int axis;
+ float v1[3] = {0.0f, 0.0f, 0.0f};
+ float v2[3] = {0.0f, 0.0f, 0.0f};
- /* Position Only 3D format */
- static GPUVertFormat format = { 0 };
- static struct { uint pos; } attr_id;
- if (format.attr_len == 0) {
- attr_id.pos = GPU_vertformat_attr_add(&format, "pos", GPU_COMP_F32, 3, GPU_FETCH_FLOAT);
- }
+ /* Position Only 3D format */
+ static GPUVertFormat format = {0};
+ static struct {
+ uint pos;
+ } attr_id;
+ if (format.attr_len == 0) {
+ attr_id.pos = GPU_vertformat_attr_add(&format, "pos", GPU_COMP_F32, 3, GPU_FETCH_FLOAT);
+ }
- GPUVertBuf *vbo = GPU_vertbuf_create_with_format(&format);
- GPU_vertbuf_data_alloc(vbo, 6);
+ GPUVertBuf *vbo = GPU_vertbuf_create_with_format(&format);
+ GPU_vertbuf_data_alloc(vbo, 6);
- for (axis = 0; axis < 3; axis++) {
- v1[axis] = 1.0f;
- v2[axis] = -1.0f;
+ for (axis = 0; axis < 3; axis++) {
+ v1[axis] = 1.0f;
+ v2[axis] = -1.0f;
- GPU_vertbuf_attr_set(vbo, attr_id.pos, axis * 2, v1);
- GPU_vertbuf_attr_set(vbo, attr_id.pos, axis * 2 + 1, v2);
+ GPU_vertbuf_attr_set(vbo, attr_id.pos, axis * 2, v1);
+ GPU_vertbuf_attr_set(vbo, attr_id.pos, axis * 2 + 1, v2);
- /* reset v1 & v2 to zero for next axis */
- v1[axis] = v2[axis] = 0.0f;
- }
+ /* reset v1 & v2 to zero for next axis */
+ v1[axis] = v2[axis] = 0.0f;
+ }
- SHC.drw_plain_axes = GPU_batch_create_ex(GPU_PRIM_LINES, vbo, NULL, GPU_BATCH_OWNS_VBO);
- }
- return SHC.drw_plain_axes;
+ SHC.drw_plain_axes = GPU_batch_create_ex(GPU_PRIM_LINES, vbo, NULL, GPU_BATCH_OWNS_VBO);
+ }
+ return SHC.drw_plain_axes;
}
GPUBatch *DRW_cache_single_arrow_get(void)
{
- if (!SHC.drw_single_arrow) {
- float v1[3] = {0.0f, 0.0f, 1.0f}, v2[3], v3[3];
-
- /* Position Only 3D format */
- static GPUVertFormat format = { 0 };
- static struct { uint pos; } attr_id;
- if (format.attr_len == 0) {
- attr_id.pos = GPU_vertformat_attr_add(&format, "pos", GPU_COMP_F32, 3, GPU_FETCH_FLOAT);
- }
-
- /* Square Pyramid */
- GPUVertBuf *vbo = GPU_vertbuf_create_with_format(&format);
- GPU_vertbuf_data_alloc(vbo, 12);
-
- v2[0] = 0.035f; v2[1] = 0.035f;
- v3[0] = -0.035f; v3[1] = 0.035f;
- v2[2] = v3[2] = 0.75f;
-
- for (int sides = 0; sides < 4; sides++) {
- if (sides % 2 == 1) {
- v2[0] = -v2[0];
- v3[1] = -v3[1];
- }
- else {
- v2[1] = -v2[1];
- v3[0] = -v3[0];
- }
-
- GPU_vertbuf_attr_set(vbo, attr_id.pos, sides * 3 + 0, v1);
- GPU_vertbuf_attr_set(vbo, attr_id.pos, sides * 3 + 1, v2);
- GPU_vertbuf_attr_set(vbo, attr_id.pos, sides * 3 + 2, v3);
- }
-
- SHC.drw_single_arrow = GPU_batch_create_ex(GPU_PRIM_TRIS, vbo, NULL, GPU_BATCH_OWNS_VBO);
- }
- return SHC.drw_single_arrow;
+ if (!SHC.drw_single_arrow) {
+ float v1[3] = {0.0f, 0.0f, 1.0f}, v2[3], v3[3];
+
+ /* Position Only 3D format */
+ static GPUVertFormat format = {0};
+ static struct {
+ uint pos;
+ } attr_id;
+ if (format.attr_len == 0) {
+ attr_id.pos = GPU_vertformat_attr_add(&format, "pos", GPU_COMP_F32, 3, GPU_FETCH_FLOAT);
+ }
+
+ /* Square Pyramid */
+ GPUVertBuf *vbo = GPU_vertbuf_create_with_format(&format);
+ GPU_vertbuf_data_alloc(vbo, 12);
+
+ v2[0] = 0.035f;
+ v2[1] = 0.035f;
+ v3[0] = -0.035f;
+ v3[1] = 0.035f;
+ v2[2] = v3[2] = 0.75f;
+
+ for (int sides = 0; sides < 4; sides++) {
+ if (sides % 2 == 1) {
+ v2[0] = -v2[0];
+ v3[1] = -v3[1];
+ }
+ else {
+ v2[1] = -v2[1];
+ v3[0] = -v3[0];
+ }
+
+ GPU_vertbuf_attr_set(vbo, attr_id.pos, sides * 3 + 0, v1);
+ GPU_vertbuf_attr_set(vbo, attr_id.pos, sides * 3 + 1, v2);
+ GPU_vertbuf_attr_set(vbo, attr_id.pos, sides * 3 + 2, v3);
+ }
+
+ SHC.drw_single_arrow = GPU_batch_create_ex(GPU_PRIM_TRIS, vbo, NULL, GPU_BATCH_OWNS_VBO);
+ }
+ return SHC.drw_single_arrow;
}
GPUBatch *DRW_cache_empty_sphere_get(void)
{
- if (!SHC.drw_empty_sphere) {
- GPUVertBuf *vbo = sphere_wire_vbo(1.0f);
- SHC.drw_empty_sphere = GPU_batch_create_ex(GPU_PRIM_LINES, vbo, NULL, GPU_BATCH_OWNS_VBO);
- }
- return SHC.drw_empty_sphere;
+ if (!SHC.drw_empty_sphere) {
+ GPUVertBuf *vbo = sphere_wire_vbo(1.0f);
+ SHC.drw_empty_sphere = GPU_batch_create_ex(GPU_PRIM_LINES, vbo, NULL, GPU_BATCH_OWNS_VBO);
+ }
+ return SHC.drw_empty_sphere;
}
GPUBatch *DRW_cache_empty_cone_get(void)
{
#define NSEGMENTS 8
- if (!SHC.drw_empty_cone) {
- /* a single ring of vertices */
- float p[NSEGMENTS][2];
- for (int i = 0; i < NSEGMENTS; ++i) {
- float angle = 2 * M_PI * ((float)i / (float)NSEGMENTS);
- p[i][0] = cosf(angle);
- p[i][1] = sinf(angle);
- }
-
- /* Position Only 3D format */
- static GPUVertFormat format = { 0 };
- static struct { uint pos; } attr_id;
- if (format.attr_len == 0) {
- attr_id.pos = GPU_vertformat_attr_add(&format, "pos", GPU_COMP_F32, 3, GPU_FETCH_FLOAT);
- }
-
- GPUVertBuf *vbo = GPU_vertbuf_create_with_format(&format);
- GPU_vertbuf_data_alloc(vbo, NSEGMENTS * 4);
-
- for (int i = 0; i < NSEGMENTS; ++i) {
- float cv[2], v[3];
- cv[0] = p[(i) % NSEGMENTS][0];
- cv[1] = p[(i) % NSEGMENTS][1];
-
- /* cone sides */
- ARRAY_SET_ITEMS(v, cv[0], 0.0f, cv[1]);
- GPU_vertbuf_attr_set(vbo, attr_id.pos, i * 4, v);
- ARRAY_SET_ITEMS(v, 0.0f, 2.0f, 0.0f);
- GPU_vertbuf_attr_set(vbo, attr_id.pos, i * 4 + 1, v);
-
- /* end ring */
- ARRAY_SET_ITEMS(v, cv[0], 0.0f, cv[1]);
- GPU_vertbuf_attr_set(vbo, attr_id.pos, i * 4 + 2, v);
- cv[0] = p[(i + 1) % NSEGMENTS][0];
- cv[1] = p[(i + 1) % NSEGMENTS][1];
- ARRAY_SET_ITEMS(v, cv[0], 0.0f, cv[1]);
- GPU_vertbuf_attr_set(vbo, attr_id.pos, i * 4 + 3, v);
- }
-
- SHC.drw_empty_cone = GPU_batch_create_ex(GPU_PRIM_LINES, vbo, NULL, GPU_BATCH_OWNS_VBO);
- }
- return SHC.drw_empty_cone;
+ if (!SHC.drw_empty_cone) {
+ /* a single ring of vertices */
+ float p[NSEGMENTS][2];
+ for (int i = 0; i < NSEGMENTS; ++i) {
+ float angle = 2 * M_PI * ((float)i / (float)NSEGMENTS);
+ p[i][0] = cosf(angle);
+ p[i][1] = sinf(angle);
+ }
+
+ /* Position Only 3D format */
+ static GPUVertFormat format = {0};
+ static struct {
+ uint pos;
+ } attr_id;
+ if (format.attr_len == 0) {
+ attr_id.pos = GPU_vertformat_attr_add(&format, "pos", GPU_COMP_F32, 3, GPU_FETCH_FLOAT);
+ }
+
+ GPUVertBuf *vbo = GPU_vertbuf_create_with_format(&format);
+ GPU_vertbuf_data_alloc(vbo, NSEGMENTS * 4);
+
+ for (int i = 0; i < NSEGMENTS; ++i) {
+ float cv[2], v[3];
+ cv[0] = p[(i) % NSEGMENTS][0];
+ cv[1] = p[(i) % NSEGMENTS][1];
+
+ /* cone sides */
+ ARRAY_SET_ITEMS(v, cv[0], 0.0f, cv[1]);
+ GPU_vertbuf_attr_set(vbo, attr_id.pos, i * 4, v);
+ ARRAY_SET_ITEMS(v, 0.0f, 2.0f, 0.0f);
+ GPU_vertbuf_attr_set(vbo, attr_id.pos, i * 4 + 1, v);
+
+ /* end ring */
+ ARRAY_SET_ITEMS(v, cv[0], 0.0f, cv[1]);
+ GPU_vertbuf_attr_set(vbo, attr_id.pos, i * 4 + 2, v);
+ cv[0] = p[(i + 1) % NSEGMENTS][0];
+ cv[1] = p[(i + 1) % NSEGMENTS][1];
+ ARRAY_SET_ITEMS(v, cv[0], 0.0f, cv[1]);
+ GPU_vertbuf_attr_set(vbo, attr_id.pos, i * 4 + 3, v);
+ }
+
+ SHC.drw_empty_cone = GPU_batch_create_ex(GPU_PRIM_LINES, vbo, NULL, GPU_BATCH_OWNS_VBO);
+ }
+ return SHC.drw_empty_cone;
#undef NSEGMENTS
}
GPUBatch *DRW_cache_empty_cylinder_get(void)
{
#define NSEGMENTS 12
- if (!SHC.drw_empty_cylinder) {
- /* a single ring of vertices */
- float p[NSEGMENTS][2];
- for (int i = 0; i < NSEGMENTS; ++i) {
- float angle = 2 * M_PI * ((float)i / (float)NSEGMENTS);
- p[i][0] = cosf(angle);
- p[i][1] = sinf(angle);
- }
-
- /* Position Only 3D format */
- static GPUVertFormat format = { 0 };
- static struct { uint pos; } attr_id;
- if (format.attr_len == 0) {
- attr_id.pos = GPU_vertformat_attr_add(&format, "pos", GPU_COMP_F32, 3, GPU_FETCH_FLOAT);
- }
-
- GPUVertBuf *vbo = GPU_vertbuf_create_with_format(&format);
- GPU_vertbuf_data_alloc(vbo, NSEGMENTS * 6);
-
- for (int i = 0; i < NSEGMENTS; ++i) {
- float cv[2], pv[2], v[3];
- cv[0] = p[(i) % NSEGMENTS][0];
- cv[1] = p[(i) % NSEGMENTS][1];
- pv[0] = p[(i + 1) % NSEGMENTS][0];
- pv[1] = p[(i + 1) % NSEGMENTS][1];
-
- /* cylinder sides */
- copy_v3_fl3(v, cv[0], cv[1], -1.0f);
- GPU_vertbuf_attr_set(vbo, attr_id.pos, i * 6, v);
- copy_v3_fl3(v, cv[0], cv[1], 1.0f);
- GPU_vertbuf_attr_set(vbo, attr_id.pos, i * 6 + 1, v);
-
- /* top ring */
- copy_v3_fl3(v, cv[0], cv[1], 1.0f);
- GPU_vertbuf_attr_set(vbo, attr_id.pos, i * 6 + 2, v);
- copy_v3_fl3(v, pv[0], pv[1], 1.0f);
- GPU_vertbuf_attr_set(vbo, attr_id.pos, i * 6 + 3, v);
-
- /* bottom ring */
- copy_v3_fl3(v, cv[0], cv[1], -1.0f);
- GPU_vertbuf_attr_set(vbo, attr_id.pos, i * 6 + 4, v);
- copy_v3_fl3(v, pv[0], pv[1], -1.0f);
- GPU_vertbuf_attr_set(vbo, attr_id.pos, i * 6 + 5, v);
- }
-
- SHC.drw_empty_cylinder = GPU_batch_create_ex(GPU_PRIM_LINES, vbo, NULL, GPU_BATCH_OWNS_VBO);
- }
- return SHC.drw_empty_cylinder;
+ if (!SHC.drw_empty_cylinder) {
+ /* a single ring of vertices */
+ float p[NSEGMENTS][2];
+ for (int i = 0; i < NSEGMENTS; ++i) {
+ float angle = 2 * M_PI * ((float)i / (float)NSEGMENTS);
+ p[i][0] = cosf(angle);
+ p[i][1] = sinf(angle);
+ }
+
+ /* Position Only 3D format */
+ static GPUVertFormat format = {0};
+ static struct {
+ uint pos;
+ } attr_id;
+ if (format.attr_len == 0) {
+ attr_id.pos = GPU_vertformat_attr_add(&format, "pos", GPU_COMP_F32, 3, GPU_FETCH_FLOAT);
+ }
+
+ GPUVertBuf *vbo = GPU_vertbuf_create_with_format(&format);
+ GPU_vertbuf_data_alloc(vbo, NSEGMENTS * 6);
+
+ for (int i = 0; i < NSEGMENTS; ++i) {
+ float cv[2], pv[2], v[3];
+ cv[0] = p[(i) % NSEGMENTS][0];
+ cv[1] = p[(i) % NSEGMENTS][1];
+ pv[0] = p[(i + 1) % NSEGMENTS][0];
+ pv[1] = p[(i + 1) % NSEGMENTS][1];
+
+ /* cylinder sides */
+ copy_v3_fl3(v, cv[0], cv[1], -1.0f);
+ GPU_vertbuf_attr_set(vbo, attr_id.pos, i * 6, v);
+ copy_v3_fl3(v, cv[0], cv[1], 1.0f);
+ GPU_vertbuf_attr_set(vbo, attr_id.pos, i * 6 + 1, v);
+
+ /* top ring */
+ copy_v3_fl3(v, cv[0], cv[1], 1.0f);
+ GPU_vertbuf_attr_set(vbo, attr_id.pos, i * 6 + 2, v);
+ copy_v3_fl3(v, pv[0], pv[1], 1.0f);
+ GPU_vertbuf_attr_set(vbo, attr_id.pos, i * 6 + 3, v);
+
+ /* bottom ring */
+ copy_v3_fl3(v, cv[0], cv[1], -1.0f);
+ GPU_vertbuf_attr_set(vbo, attr_id.pos, i * 6 + 4, v);
+ copy_v3_fl3(v, pv[0], pv[1], -1.0f);
+ GPU_vertbuf_attr_set(vbo, attr_id.pos, i * 6 + 5, v);
+ }
+
+ SHC.drw_empty_cylinder = GPU_batch_create_ex(GPU_PRIM_LINES, vbo, NULL, GPU_BATCH_OWNS_VBO);
+ }
+ return SHC.drw_empty_cylinder;
#undef NSEGMENTS
}
GPUBatch *DRW_cache_empty_capsule_body_get(void)
{
- if (!SHC.drw_empty_capsule_body) {
- const float pos[8][3] = {
- { 1.0f, 0.0f, 1.0f},
- { 1.0f, 0.0f, 0.0f},
- { 0.0f, 1.0f, 1.0f},
- { 0.0f, 1.0f, 0.0f},
- {-1.0f, 0.0f, 1.0f},
- {-1.0f, 0.0f, 0.0f},
- { 0.0f, -1.0f, 1.0f},
- { 0.0f, -1.0f, 0.0f},
- };
-
- /* Position Only 3D format */
- static GPUVertFormat format = { 0 };
- static struct { uint pos; } attr_id;
- if (format.attr_len == 0) {
- attr_id.pos = GPU_vertformat_attr_add(&format, "pos", GPU_COMP_F32, 3, GPU_FETCH_FLOAT);
- }
-
- GPUVertBuf *vbo = GPU_vertbuf_create_with_format(&format);
- GPU_vertbuf_data_alloc(vbo, 8);
- GPU_vertbuf_attr_fill(vbo, attr_id.pos, pos);
-
- SHC.drw_empty_capsule_body = GPU_batch_create_ex(GPU_PRIM_LINES, vbo, NULL, GPU_BATCH_OWNS_VBO);
- }
- return SHC.drw_empty_capsule_body;
+ if (!SHC.drw_empty_capsule_body) {
+ const float pos[8][3] = {
+ {1.0f, 0.0f, 1.0f},
+ {1.0f, 0.0f, 0.0f},
+ {0.0f, 1.0f, 1.0f},
+ {0.0f, 1.0f, 0.0f},
+ {-1.0f, 0.0f, 1.0f},
+ {-1.0f, 0.0f, 0.0f},
+ {0.0f, -1.0f, 1.0f},
+ {0.0f, -1.0f, 0.0f},
+ };
+
+ /* Position Only 3D format */
+ static GPUVertFormat format = {0};
+ static struct {
+ uint pos;
+ } attr_id;
+ if (format.attr_len == 0) {
+ attr_id.pos = GPU_vertformat_attr_add(&format, "pos", GPU_COMP_F32, 3, GPU_FETCH_FLOAT);
+ }
+
+ GPUVertBuf *vbo = GPU_vertbuf_create_with_format(&format);
+ GPU_vertbuf_data_alloc(vbo, 8);
+ GPU_vertbuf_attr_fill(vbo, attr_id.pos, pos);
+
+ SHC.drw_empty_capsule_body = GPU_batch_create_ex(
+ GPU_PRIM_LINES, vbo, NULL, GPU_BATCH_OWNS_VBO);
+ }
+ return SHC.drw_empty_capsule_body;
}
GPUBatch *DRW_cache_empty_capsule_cap_get(void)
{
#define NSEGMENTS 24 /* Must be multiple of 2. */
- if (!SHC.drw_empty_capsule_cap) {
- /* a single ring of vertices */
- float p[NSEGMENTS][2];
- for (int i = 0; i < NSEGMENTS; ++i) {
- float angle = 2 * M_PI * ((float)i / (float)NSEGMENTS);
- p[i][0] = cosf(angle);
- p[i][1] = sinf(angle);
- }
-
- /* Position Only 3D format */
- static GPUVertFormat format = { 0 };
- static struct { uint pos; } attr_id;
- if (format.attr_len == 0) {
- attr_id.pos = GPU_vertformat_attr_add(&format, "pos", GPU_COMP_F32, 3, GPU_FETCH_FLOAT);
- }
-
- GPUVertBuf *vbo = GPU_vertbuf_create_with_format(&format);
- GPU_vertbuf_data_alloc(vbo, (NSEGMENTS * 2) * 2);
-
- /* Base circle */
- int vidx = 0;
- for (int i = 0; i < NSEGMENTS; ++i) {
- float v[3] = {0.0f, 0.0f, 0.0f};
- copy_v2_v2(v, p[(i) % NSEGMENTS]);
- GPU_vertbuf_attr_set(vbo, attr_id.pos, vidx++, v);
- copy_v2_v2(v, p[(i + 1) % NSEGMENTS]);
- GPU_vertbuf_attr_set(vbo, attr_id.pos, vidx++, v);
- }
-
- for (int i = 0; i < NSEGMENTS / 2; ++i) {
- float v[3] = {0.0f, 0.0f, 0.0f};
- int ci = i % NSEGMENTS;
- int pi = (i + 1) % NSEGMENTS;
- /* Y half circle */
- copy_v3_fl3(v, p[ci][0], 0.0f, p[ci][1]);
- GPU_vertbuf_attr_set(vbo, attr_id.pos, vidx++, v);
- copy_v3_fl3(v, p[pi][0], 0.0f, p[pi][1]);
- GPU_vertbuf_attr_set(vbo, attr_id.pos, vidx++, v);
- /* X half circle */
- copy_v3_fl3(v, 0.0f, p[ci][0], p[ci][1]);
- GPU_vertbuf_attr_set(vbo, attr_id.pos, vidx++, v);
- copy_v3_fl3(v, 0.0f, p[pi][0], p[pi][1]);
- GPU_vertbuf_attr_set(vbo, attr_id.pos, vidx++, v);
- }
-
- SHC.drw_empty_capsule_cap = GPU_batch_create_ex(GPU_PRIM_LINES, vbo, NULL, GPU_BATCH_OWNS_VBO);
- }
- return SHC.drw_empty_capsule_cap;
+ if (!SHC.drw_empty_capsule_cap) {
+ /* a single ring of vertices */
+ float p[NSEGMENTS][2];
+ for (int i = 0; i < NSEGMENTS; ++i) {
+ float angle = 2 * M_PI * ((float)i / (float)NSEGMENTS);
+ p[i][0] = cosf(angle);
+ p[i][1] = sinf(angle);
+ }
+
+ /* Position Only 3D format */
+ static GPUVertFormat format = {0};
+ static struct {
+ uint pos;
+ } attr_id;
+ if (format.attr_len == 0) {
+ attr_id.pos = GPU_vertformat_attr_add(&format, "pos", GPU_COMP_F32, 3, GPU_FETCH_FLOAT);
+ }
+
+ GPUVertBuf *vbo = GPU_vertbuf_create_with_format(&format);
+ GPU_vertbuf_data_alloc(vbo, (NSEGMENTS * 2) * 2);
+
+ /* Base circle */
+ int vidx = 0;
+ for (int i = 0; i < NSEGMENTS; ++i) {
+ float v[3] = {0.0f, 0.0f, 0.0f};
+ copy_v2_v2(v, p[(i) % NSEGMENTS]);
+ GPU_vertbuf_attr_set(vbo, attr_id.pos, vidx++, v);
+ copy_v2_v2(v, p[(i + 1) % NSEGMENTS]);
+ GPU_vertbuf_attr_set(vbo, attr_id.pos, vidx++, v);
+ }
+
+ for (int i = 0; i < NSEGMENTS / 2; ++i) {
+ float v[3] = {0.0f, 0.0f, 0.0f};
+ int ci = i % NSEGMENTS;
+ int pi = (i + 1) % NSEGMENTS;
+ /* Y half circle */
+ copy_v3_fl3(v, p[ci][0], 0.0f, p[ci][1]);
+ GPU_vertbuf_attr_set(vbo, attr_id.pos, vidx++, v);
+ copy_v3_fl3(v, p[pi][0], 0.0f, p[pi][1]);
+ GPU_vertbuf_attr_set(vbo, attr_id.pos, vidx++, v);
+ /* X half circle */
+ copy_v3_fl3(v, 0.0f, p[ci][0], p[ci][1]);
+ GPU_vertbuf_attr_set(vbo, attr_id.pos, vidx++, v);
+ copy_v3_fl3(v, 0.0f, p[pi][0], p[pi][1]);
+ GPU_vertbuf_attr_set(vbo, attr_id.pos, vidx++, v);
+ }
+
+ SHC.drw_empty_capsule_cap = GPU_batch_create_ex(GPU_PRIM_LINES, vbo, NULL, GPU_BATCH_OWNS_VBO);
+ }
+ return SHC.drw_empty_capsule_cap;
#undef NSEGMENTS
}
GPUBatch *DRW_cache_image_plane_get(void)
{
- if (!SHC.drw_image_plane) {
- const float quad[4][2] = {{0, 0}, {1, 0}, {1, 1}, {0, 1}};
- static GPUVertFormat format = { 0 };
- static struct { uint pos, texCoords; } attr_id;
- if (format.attr_len == 0) {
- attr_id.pos = GPU_vertformat_attr_add(&format, "pos", GPU_COMP_F32, 2, GPU_FETCH_FLOAT);
- attr_id.texCoords = GPU_vertformat_attr_add(&format, "texCoord", GPU_COMP_F32, 2, GPU_FETCH_FLOAT);
- }
- GPUVertBuf *vbo = GPU_vertbuf_create_with_format(&format);
- GPU_vertbuf_data_alloc(vbo, 4);
- for (uint j = 0; j < 4; j++) {
- GPU_vertbuf_attr_set(vbo, attr_id.pos, j, quad[j]);
- GPU_vertbuf_attr_set(vbo, attr_id.texCoords, j, quad[j]);
- }
- SHC.drw_image_plane = GPU_batch_create_ex(GPU_PRIM_TRI_FAN, vbo, NULL, GPU_BATCH_OWNS_VBO);
- }
- return SHC.drw_image_plane;
+ if (!SHC.drw_image_plane) {
+ const float quad[4][2] = {{0, 0}, {1, 0}, {1, 1}, {0, 1}};
+ static GPUVertFormat format = {0};
+ static struct {
+ uint pos, texCoords;
+ } attr_id;
+ if (format.attr_len == 0) {
+ attr_id.pos = GPU_vertformat_attr_add(&format, "pos", GPU_COMP_F32, 2, GPU_FETCH_FLOAT);
+ attr_id.texCoords = GPU_vertformat_attr_add(
+ &format, "texCoord", GPU_COMP_F32, 2, GPU_FETCH_FLOAT);
+ }
+ GPUVertBuf *vbo = GPU_vertbuf_create_with_format(&format);
+ GPU_vertbuf_data_alloc(vbo, 4);
+ for (uint j = 0; j < 4; j++) {
+ GPU_vertbuf_attr_set(vbo, attr_id.pos, j, quad[j]);
+ GPU_vertbuf_attr_set(vbo, attr_id.texCoords, j, quad[j]);
+ }
+ SHC.drw_image_plane = GPU_batch_create_ex(GPU_PRIM_TRI_FAN, vbo, NULL, GPU_BATCH_OWNS_VBO);
+ }
+ return SHC.drw_image_plane;
}
GPUBatch *DRW_cache_image_plane_wire_get(void)
{
- if (!SHC.drw_image_plane_wire) {
- const float quad[4][2] = {{0, 0}, {1, 0}, {1, 1}, {0, 1}};
- static GPUVertFormat format = { 0 };
- static struct { uint pos; } attr_id;
- if (format.attr_len == 0) {
- attr_id.pos = GPU_vertformat_attr_add(&format, "pos", GPU_COMP_F32, 2, GPU_FETCH_FLOAT);
- }
- GPUVertBuf *vbo = GPU_vertbuf_create_with_format(&format);
- GPU_vertbuf_data_alloc(vbo, 4);
- for (uint j = 0; j < 4; j++) {
- GPU_vertbuf_attr_set(vbo, attr_id.pos, j, quad[j]);
- }
- SHC.drw_image_plane_wire = GPU_batch_create_ex(GPU_PRIM_LINE_LOOP, vbo, NULL, GPU_BATCH_OWNS_VBO);
- }
- return SHC.drw_image_plane_wire;
+ if (!SHC.drw_image_plane_wire) {
+ const float quad[4][2] = {{0, 0}, {1, 0}, {1, 1}, {0, 1}};
+ static GPUVertFormat format = {0};
+ static struct {
+ uint pos;
+ } attr_id;
+ if (format.attr_len == 0) {
+ attr_id.pos = GPU_vertformat_attr_add(&format, "pos", GPU_COMP_F32, 2, GPU_FETCH_FLOAT);
+ }
+ GPUVertBuf *vbo = GPU_vertbuf_create_with_format(&format);
+ GPU_vertbuf_data_alloc(vbo, 4);
+ for (uint j = 0; j < 4; j++) {
+ GPU_vertbuf_attr_set(vbo, attr_id.pos, j, quad[j]);
+ }
+ SHC.drw_image_plane_wire = GPU_batch_create_ex(
+ GPU_PRIM_LINE_LOOP, vbo, NULL, GPU_BATCH_OWNS_VBO);
+ }
+ return SHC.drw_image_plane_wire;
}
/* Force Field */
GPUBatch *DRW_cache_field_wind_get(void)
{
#define CIRCLE_RESOL 32
- if (!SHC.drw_field_wind) {
- float v[3] = {0.0f, 0.0f, 0.0f};
-
- /* Position Only 3D format */
- static GPUVertFormat format = { 0 };
- static struct { uint pos; } attr_id;
- if (format.attr_len == 0) {
- attr_id.pos = GPU_vertformat_attr_add(&format, "pos", GPU_COMP_F32, 3, GPU_FETCH_FLOAT);
- }
-
- GPUVertBuf *vbo = GPU_vertbuf_create_with_format(&format);
- GPU_vertbuf_data_alloc(vbo, CIRCLE_RESOL * 2 * 4);
-
- for (int i = 0; i < 4; i++) {
- float z = 0.05f * (float)i;
- for (int a = 0; a < CIRCLE_RESOL; a++) {
- v[0] = sinf((2.0f * M_PI * a) / ((float)CIRCLE_RESOL));
- v[1] = cosf((2.0f * M_PI * a) / ((float)CIRCLE_RESOL));
- v[2] = z;
- GPU_vertbuf_attr_set(vbo, attr_id.pos, i * CIRCLE_RESOL * 2 + a * 2, v);
-
- v[0] = sinf((2.0f * M_PI * (a + 1)) / ((float)CIRCLE_RESOL));
- v[1] = cosf((2.0f * M_PI * (a + 1)) / ((float)CIRCLE_RESOL));
- v[2] = z;
- GPU_vertbuf_attr_set(vbo, attr_id.pos, i * CIRCLE_RESOL * 2 + a * 2 + 1, v);
- }
- }
-
- SHC.drw_field_wind = GPU_batch_create_ex(GPU_PRIM_LINES, vbo, NULL, GPU_BATCH_OWNS_VBO);
- }
- return SHC.drw_field_wind;
+ if (!SHC.drw_field_wind) {
+ float v[3] = {0.0f, 0.0f, 0.0f};
+
+ /* Position Only 3D format */
+ static GPUVertFormat format = {0};
+ static struct {
+ uint pos;
+ } attr_id;
+ if (format.attr_len == 0) {
+ attr_id.pos = GPU_vertformat_attr_add(&format, "pos", GPU_COMP_F32, 3, GPU_FETCH_FLOAT);
+ }
+
+ GPUVertBuf *vbo = GPU_vertbuf_create_with_format(&format);
+ GPU_vertbuf_data_alloc(vbo, CIRCLE_RESOL * 2 * 4);
+
+ for (int i = 0; i < 4; i++) {
+ float z = 0.05f * (float)i;
+ for (int a = 0; a < CIRCLE_RESOL; a++) {
+ v[0] = sinf((2.0f * M_PI * a) / ((float)CIRCLE_RESOL));
+ v[1] = cosf((2.0f * M_PI * a) / ((float)CIRCLE_RESOL));
+ v[2] = z;
+ GPU_vertbuf_attr_set(vbo, attr_id.pos, i * CIRCLE_RESOL * 2 + a * 2, v);
+
+ v[0] = sinf((2.0f * M_PI * (a + 1)) / ((float)CIRCLE_RESOL));
+ v[1] = cosf((2.0f * M_PI * (a + 1)) / ((float)CIRCLE_RESOL));
+ v[2] = z;
+ GPU_vertbuf_attr_set(vbo, attr_id.pos, i * CIRCLE_RESOL * 2 + a * 2 + 1, v);
+ }
+ }
+
+ SHC.drw_field_wind = GPU_batch_create_ex(GPU_PRIM_LINES, vbo, NULL, GPU_BATCH_OWNS_VBO);
+ }
+ return SHC.drw_field_wind;
#undef CIRCLE_RESOL
}
GPUBatch *DRW_cache_field_force_get(void)
{
#define CIRCLE_RESOL 32
- if (!SHC.drw_field_force) {
- float v[3] = {0.0f, 0.0f, 0.0f};
-
- /* Position Only 3D format */
- static GPUVertFormat format = { 0 };
- static struct { uint pos; } attr_id;
- if (format.attr_len == 0) {
- attr_id.pos = GPU_vertformat_attr_add(&format, "pos", GPU_COMP_F32, 3, GPU_FETCH_FLOAT);
- }
-
- GPUVertBuf *vbo = GPU_vertbuf_create_with_format(&format);
- GPU_vertbuf_data_alloc(vbo, CIRCLE_RESOL * 2 * 3);
-
- for (int i = 0; i < 3; i++) {
- float radius = 1.0f + 0.5f * (float)i;
- for (int a = 0; a < CIRCLE_RESOL; a++) {
- v[0] = radius * sinf((2.0f * M_PI * a) / ((float)CIRCLE_RESOL));
- v[1] = radius * cosf((2.0f * M_PI * a) / ((float)CIRCLE_RESOL));
- v[2] = 0.0f;
- GPU_vertbuf_attr_set(vbo, attr_id.pos, i * CIRCLE_RESOL * 2 + a * 2, v);
-
- v[0] = radius * sinf((2.0f * M_PI * (a + 1)) / ((float)CIRCLE_RESOL));
- v[1] = radius * cosf((2.0f * M_PI * (a + 1)) / ((float)CIRCLE_RESOL));
- v[2] = 0.0f;
- GPU_vertbuf_attr_set(vbo, attr_id.pos, i * CIRCLE_RESOL * 2 + a * 2 + 1, v);
- }
- }
-
- SHC.drw_field_force = GPU_batch_create_ex(GPU_PRIM_LINES, vbo, NULL, GPU_BATCH_OWNS_VBO);
- }
- return SHC.drw_field_force;
+ if (!SHC.drw_field_force) {
+ float v[3] = {0.0f, 0.0f, 0.0f};
+
+ /* Position Only 3D format */
+ static GPUVertFormat format = {0};
+ static struct {
+ uint pos;
+ } attr_id;
+ if (format.attr_len == 0) {
+ attr_id.pos = GPU_vertformat_attr_add(&format, "pos", GPU_COMP_F32, 3, GPU_FETCH_FLOAT);
+ }
+
+ GPUVertBuf *vbo = GPU_vertbuf_create_with_format(&format);
+ GPU_vertbuf_data_alloc(vbo, CIRCLE_RESOL * 2 * 3);
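+ /* Three concentric circles in the XY plane at radius 1.0, 1.5 and 2.0. */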
+
+ for (int i = 0; i < 3; i++) {
+ float radius = 1.0f + 0.5f * (float)i;
+ for (int a = 0; a < CIRCLE_RESOL; a++) {
+ v[0] = radius * sinf((2.0f * M_PI * a) / ((float)CIRCLE_RESOL));
+ v[1] = radius * cosf((2.0f * M_PI * a) / ((float)CIRCLE_RESOL));
+ v[2] = 0.0f;
+ GPU_vertbuf_attr_set(vbo, attr_id.pos, i * CIRCLE_RESOL * 2 + a * 2, v);
+
+ v[0] = radius * sinf((2.0f * M_PI * (a + 1)) / ((float)CIRCLE_RESOL));
+ v[1] = radius * cosf((2.0f * M_PI * (a + 1)) / ((float)CIRCLE_RESOL));
+ v[2] = 0.0f;
+ GPU_vertbuf_attr_set(vbo, attr_id.pos, i * CIRCLE_RESOL * 2 + a * 2 + 1, v);
+ }
+ }
+
+ SHC.drw_field_force = GPU_batch_create_ex(GPU_PRIM_LINES, vbo, NULL, GPU_BATCH_OWNS_VBO);
+ }
+ return SHC.drw_field_force;
#undef CIRCLE_RESOL
}
GPUBatch *DRW_cache_field_vortex_get(void)
{
#define SPIRAL_RESOL 32
- if (!SHC.drw_field_vortex) {
- float v[3] = {0.0f, 0.0f, 0.0f};
- uint v_idx = 0;
-
- /* Position Only 3D format */
- static GPUVertFormat format = { 0 };
- static struct { uint pos; } attr_id;
- if (format.attr_len == 0) {
- attr_id.pos = GPU_vertformat_attr_add(&format, "pos", GPU_COMP_F32, 3, GPU_FETCH_FLOAT);
- }
-
- GPUVertBuf *vbo = GPU_vertbuf_create_with_format(&format);
- GPU_vertbuf_data_alloc(vbo, SPIRAL_RESOL * 2 + 1);
-
- for (int a = SPIRAL_RESOL; a > -1; a--) {
- v[0] = sinf((2.0f * M_PI * a) / ((float)SPIRAL_RESOL)) * (a / (float)SPIRAL_RESOL);
- v[1] = cosf((2.0f * M_PI * a) / ((float)SPIRAL_RESOL)) * (a / (float)SPIRAL_RESOL);
-
- GPU_vertbuf_attr_set(vbo, attr_id.pos, v_idx++, v);
- }
-
- for (int a = 1; a <= SPIRAL_RESOL; a++) {
- v[0] = -sinf((2.0f * M_PI * a) / ((float)SPIRAL_RESOL)) * (a / (float)SPIRAL_RESOL);
- v[1] = -cosf((2.0f * M_PI * a) / ((float)SPIRAL_RESOL)) * (a / (float)SPIRAL_RESOL);
-
- GPU_vertbuf_attr_set(vbo, attr_id.pos, v_idx++, v);
- }
-
- SHC.drw_field_vortex = GPU_batch_create_ex(GPU_PRIM_LINE_STRIP, vbo, NULL, GPU_BATCH_OWNS_VBO);
- }
- return SHC.drw_field_vortex;
+ if (!SHC.drw_field_vortex) {
+ float v[3] = {0.0f, 0.0f, 0.0f};
+ uint v_idx = 0;
+
+ /* Position Only 3D format */
+ static GPUVertFormat format = {0};
+ static struct {
+ uint pos;
+ } attr_id;
+ if (format.attr_len == 0) {
+ attr_id.pos = GPU_vertformat_attr_add(&format, "pos", GPU_COMP_F32, 3, GPU_FETCH_FLOAT);
+ }
+
+ GPUVertBuf *vbo = GPU_vertbuf_create_with_format(&format);
+ GPU_vertbuf_data_alloc(vbo, SPIRAL_RESOL * 2 + 1);
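+ /* Two mirrored spiral arms drawn as one line strip: SPIRAL_RESOL + 1 points winding
+ * in to the center, then SPIRAL_RESOL negated points winding back out. */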
+
+ for (int a = SPIRAL_RESOL; a > -1; a--) {
+ v[0] = sinf((2.0f * M_PI * a) / ((float)SPIRAL_RESOL)) * (a / (float)SPIRAL_RESOL);
+ v[1] = cosf((2.0f * M_PI * a) / ((float)SPIRAL_RESOL)) * (a / (float)SPIRAL_RESOL);
+
+ GPU_vertbuf_attr_set(vbo, attr_id.pos, v_idx++, v);
+ }
+
+ for (int a = 1; a <= SPIRAL_RESOL; a++) {
+ v[0] = -sinf((2.0f * M_PI * a) / ((float)SPIRAL_RESOL)) * (a / (float)SPIRAL_RESOL);
+ v[1] = -cosf((2.0f * M_PI * a) / ((float)SPIRAL_RESOL)) * (a / (float)SPIRAL_RESOL);
+
+ GPU_vertbuf_attr_set(vbo, attr_id.pos, v_idx++, v);
+ }
+
+ SHC.drw_field_vortex = GPU_batch_create_ex(GPU_PRIM_LINE_STRIP, vbo, NULL, GPU_BATCH_OWNS_VBO);
+ }
+ return SHC.drw_field_vortex;
#undef SPIRAL_RESOL
}
GPUBatch *DRW_cache_field_tube_limit_get(void)
{
#define CIRCLE_RESOL 32
- if (!SHC.drw_field_tube_limit) {
- float v[3] = {0.0f, 0.0f, 0.0f};
- uint v_idx = 0;
-
- /* Position Only 3D format */
- static GPUVertFormat format = { 0 };
- static struct { uint pos; } attr_id;
- if (format.attr_len == 0) {
- attr_id.pos = GPU_vertformat_attr_add(&format, "pos", GPU_COMP_F32, 3, GPU_FETCH_FLOAT);
- }
-
- GPUVertBuf *vbo = GPU_vertbuf_create_with_format(&format);
- GPU_vertbuf_data_alloc(vbo, CIRCLE_RESOL * 2 * 2 + 8);
-
- /* Caps */
- for (int i = 0; i < 2; i++) {
- float z = (float)i * 2.0f - 1.0f;
- for (int a = 0; a < CIRCLE_RESOL; a++) {
- v[0] = sinf((2.0f * M_PI * a) / ((float)CIRCLE_RESOL));
- v[1] = cosf((2.0f * M_PI * a) / ((float)CIRCLE_RESOL));
- v[2] = z;
- GPU_vertbuf_attr_set(vbo, attr_id.pos, v_idx++, v);
-
- v[0] = sinf((2.0f * M_PI * (a + 1)) / ((float)CIRCLE_RESOL));
- v[1] = cosf((2.0f * M_PI * (a + 1)) / ((float)CIRCLE_RESOL));
- v[2] = z;
- GPU_vertbuf_attr_set(vbo, attr_id.pos, v_idx++, v);
- }
- }
- /* Side Edges */
- for (int a = 0; a < 4; a++) {
- for (int i = 0; i < 2; i++) {
- float z = (float)i * 2.0f - 1.0f;
- v[0] = sinf((2.0f * M_PI * a) / 4.0f);
- v[1] = cosf((2.0f * M_PI * a) / 4.0f);
- v[2] = z;
- GPU_vertbuf_attr_set(vbo, attr_id.pos, v_idx++, v);
- }
- }
-
- SHC.drw_field_tube_limit = GPU_batch_create_ex(GPU_PRIM_LINES, vbo, NULL, GPU_BATCH_OWNS_VBO);
- }
- return SHC.drw_field_tube_limit;
+ if (!SHC.drw_field_tube_limit) {
+ float v[3] = {0.0f, 0.0f, 0.0f};
+ uint v_idx = 0;
+
+ /* Position Only 3D format */
+ static GPUVertFormat format = {0};
+ static struct {
+ uint pos;
+ } attr_id;
+ if (format.attr_len == 0) {
+ attr_id.pos = GPU_vertformat_attr_add(&format, "pos", GPU_COMP_F32, 3, GPU_FETCH_FLOAT);
+ }
+
+ GPUVertBuf *vbo = GPU_vertbuf_create_with_format(&format);
+ GPU_vertbuf_data_alloc(vbo, CIRCLE_RESOL * 2 * 2 + 8);
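+ /* Two cap circles (CIRCLE_RESOL segments, 2 vertices each) plus 4 vertical side edges. */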
+
+ /* Caps */
+ for (int i = 0; i < 2; i++) {
+ float z = (float)i * 2.0f - 1.0f;
+ for (int a = 0; a < CIRCLE_RESOL; a++) {
+ v[0] = sinf((2.0f * M_PI * a) / ((float)CIRCLE_RESOL));
+ v[1] = cosf((2.0f * M_PI * a) / ((float)CIRCLE_RESOL));
+ v[2] = z;
+ GPU_vertbuf_attr_set(vbo, attr_id.pos, v_idx++, v);
+
+ v[0] = sinf((2.0f * M_PI * (a + 1)) / ((float)CIRCLE_RESOL));
+ v[1] = cosf((2.0f * M_PI * (a + 1)) / ((float)CIRCLE_RESOL));
+ v[2] = z;
+ GPU_vertbuf_attr_set(vbo, attr_id.pos, v_idx++, v);
+ }
+ }
+ /* Side Edges */
+ for (int a = 0; a < 4; a++) {
+ for (int i = 0; i < 2; i++) {
+ float z = (float)i * 2.0f - 1.0f;
+ v[0] = sinf((2.0f * M_PI * a) / 4.0f);
+ v[1] = cosf((2.0f * M_PI * a) / 4.0f);
+ v[2] = z;
+ GPU_vertbuf_attr_set(vbo, attr_id.pos, v_idx++, v);
+ }
+ }
+
+ SHC.drw_field_tube_limit = GPU_batch_create_ex(GPU_PRIM_LINES, vbo, NULL, GPU_BATCH_OWNS_VBO);
+ }
+ return SHC.drw_field_tube_limit;
#undef CIRCLE_RESOL
}
GPUBatch *DRW_cache_field_cone_limit_get(void)
{
#define CIRCLE_RESOL 32
- if (!SHC.drw_field_cone_limit) {
- float v[3] = {0.0f, 0.0f, 0.0f};
- uint v_idx = 0;
-
- /* Position Only 3D format */
- static GPUVertFormat format = { 0 };
- static struct { uint pos; } attr_id;
- if (format.attr_len == 0) {
- attr_id.pos = GPU_vertformat_attr_add(&format, "pos", GPU_COMP_F32, 3, GPU_FETCH_FLOAT);
- }
-
- GPUVertBuf *vbo = GPU_vertbuf_create_with_format(&format);
- GPU_vertbuf_data_alloc(vbo, CIRCLE_RESOL * 2 * 2 + 8);
-
- /* Caps */
- for (int i = 0; i < 2; i++) {
- float z = (float)i * 2.0f - 1.0f;
- for (int a = 0; a < CIRCLE_RESOL; a++) {
- v[0] = sinf((2.0f * M_PI * a) / ((float)CIRCLE_RESOL));
- v[1] = cosf((2.0f * M_PI * a) / ((float)CIRCLE_RESOL));
- v[2] = z;
- GPU_vertbuf_attr_set(vbo, attr_id.pos, v_idx++, v);
-
- v[0] = sinf((2.0f * M_PI * (a + 1)) / ((float)CIRCLE_RESOL));
- v[1] = cosf((2.0f * M_PI * (a + 1)) / ((float)CIRCLE_RESOL));
- v[2] = z;
- GPU_vertbuf_attr_set(vbo, attr_id.pos, v_idx++, v);
- }
- }
- /* Side Edges */
- for (int a = 0; a < 4; a++) {
- for (int i = 0; i < 2; i++) {
- float z = (float)i * 2.0f - 1.0f;
- v[0] = z * sinf((2.0f * M_PI * a) / 4.0f);
- v[1] = z * cosf((2.0f * M_PI * a) / 4.0f);
- v[2] = z;
- GPU_vertbuf_attr_set(vbo, attr_id.pos, v_idx++, v);
- }
- }
-
- SHC.drw_field_cone_limit = GPU_batch_create_ex(GPU_PRIM_LINES, vbo, NULL, GPU_BATCH_OWNS_VBO);
- }
- return SHC.drw_field_cone_limit;
+ if (!SHC.drw_field_cone_limit) {
+ float v[3] = {0.0f, 0.0f, 0.0f};
+ uint v_idx = 0;
+
+ /* Position Only 3D format */
+ static GPUVertFormat format = {0};
+ static struct {
+ uint pos;
+ } attr_id;
+ if (format.attr_len == 0) {
+ attr_id.pos = GPU_vertformat_attr_add(&format, "pos", GPU_COMP_F32, 3, GPU_FETCH_FLOAT);
+ }
+
+ GPUVertBuf *vbo = GPU_vertbuf_create_with_format(&format);
+ GPU_vertbuf_data_alloc(vbo, CIRCLE_RESOL * 2 * 2 + 8);
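+ /* Same layout as the tube limit, but the side edges are scaled by z (+/-1) so they
+ * cross at the origin, outlining a double cone. */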
+
+ /* Caps */
+ for (int i = 0; i < 2; i++) {
+ float z = (float)i * 2.0f - 1.0f;
+ for (int a = 0; a < CIRCLE_RESOL; a++) {
+ v[0] = sinf((2.0f * M_PI * a) / ((float)CIRCLE_RESOL));
+ v[1] = cosf((2.0f * M_PI * a) / ((float)CIRCLE_RESOL));
+ v[2] = z;
+ GPU_vertbuf_attr_set(vbo, attr_id.pos, v_idx++, v);
+
+ v[0] = sinf((2.0f * M_PI * (a + 1)) / ((float)CIRCLE_RESOL));
+ v[1] = cosf((2.0f * M_PI * (a + 1)) / ((float)CIRCLE_RESOL));
+ v[2] = z;
+ GPU_vertbuf_attr_set(vbo, attr_id.pos, v_idx++, v);
+ }
+ }
+ /* Side Edges */
+ for (int a = 0; a < 4; a++) {
+ for (int i = 0; i < 2; i++) {
+ float z = (float)i * 2.0f - 1.0f;
+ v[0] = z * sinf((2.0f * M_PI * a) / 4.0f);
+ v[1] = z * cosf((2.0f * M_PI * a) / 4.0f);
+ v[2] = z;
+ GPU_vertbuf_attr_set(vbo, attr_id.pos, v_idx++, v);
+ }
+ }
+
+ SHC.drw_field_cone_limit = GPU_batch_create_ex(GPU_PRIM_LINES, vbo, NULL, GPU_BATCH_OWNS_VBO);
+ }
+ return SHC.drw_field_cone_limit;
#undef CIRCLE_RESOL
}
@@ -1334,412 +1385,429 @@ GPUBatch *DRW_cache_field_cone_limit_get(void)
GPUBatch *DRW_cache_light_get(void)
{
#define NSEGMENTS 8
- if (!SHC.drw_light) {
- float v[2];
-
- /* Position Only 2D format */
- static GPUVertFormat format = { 0 };
- static struct { uint pos; } attr_id;
- if (format.attr_len == 0) {
- attr_id.pos = GPU_vertformat_attr_add(&format, "pos", GPU_COMP_F32, 2, GPU_FETCH_FLOAT);
- }
-
- GPUVertBuf *vbo = GPU_vertbuf_create_with_format(&format);
- GPU_vertbuf_data_alloc(vbo, NSEGMENTS * 2);
-
- for (int a = 0; a < NSEGMENTS * 2; a += 2) {
- v[0] = sinf((2.0f * M_PI * a) / ((float)NSEGMENTS * 2));
- v[1] = cosf((2.0f * M_PI * a) / ((float)NSEGMENTS * 2));
- GPU_vertbuf_attr_set(vbo, attr_id.pos, a, v);
-
- v[0] = sinf((2.0f * M_PI * (a + 1)) / ((float)NSEGMENTS * 2));
- v[1] = cosf((2.0f * M_PI * (a + 1)) / ((float)NSEGMENTS * 2));
- GPU_vertbuf_attr_set(vbo, attr_id.pos, a + 1, v);
- }
-
- SHC.drw_light = GPU_batch_create_ex(GPU_PRIM_LINES, vbo, NULL, GPU_BATCH_OWNS_VBO);
- }
- return SHC.drw_light;
+ if (!SHC.drw_light) {
+ float v[2];
+
+ /* Position Only 2D format */
+ static GPUVertFormat format = {0};
+ static struct {
+ uint pos;
+ } attr_id;
+ if (format.attr_len == 0) {
+ attr_id.pos = GPU_vertformat_attr_add(&format, "pos", GPU_COMP_F32, 2, GPU_FETCH_FLOAT);
+ }
+
+ GPUVertBuf *vbo = GPU_vertbuf_create_with_format(&format);
+ GPU_vertbuf_data_alloc(vbo, NSEGMENTS * 2);
+
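+ /* Dashed circle: sample a ring of NSEGMENTS * 2 points but only emit every other segment. */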
+ for (int a = 0; a < NSEGMENTS * 2; a += 2) {
+ v[0] = sinf((2.0f * M_PI * a) / ((float)NSEGMENTS * 2));
+ v[1] = cosf((2.0f * M_PI * a) / ((float)NSEGMENTS * 2));
+ GPU_vertbuf_attr_set(vbo, attr_id.pos, a, v);
+
+ v[0] = sinf((2.0f * M_PI * (a + 1)) / ((float)NSEGMENTS * 2));
+ v[1] = cosf((2.0f * M_PI * (a + 1)) / ((float)NSEGMENTS * 2));
+ GPU_vertbuf_attr_set(vbo, attr_id.pos, a + 1, v);
+ }
+
+ SHC.drw_light = GPU_batch_create_ex(GPU_PRIM_LINES, vbo, NULL, GPU_BATCH_OWNS_VBO);
+ }
+ return SHC.drw_light;
#undef NSEGMENTS
}
GPUBatch *DRW_cache_light_shadows_get(void)
{
#define NSEGMENTS 10
- if (!SHC.drw_light_shadows) {
- float v[2];
-
- /* Position Only 2D format */
- static GPUVertFormat format = { 0 };
- static struct { uint pos; } attr_id;
- if (format.attr_len == 0) {
- attr_id.pos = GPU_vertformat_attr_add(&format, "pos", GPU_COMP_F32, 2, GPU_FETCH_FLOAT);
- }
-
- GPUVertBuf *vbo = GPU_vertbuf_create_with_format(&format);
- GPU_vertbuf_data_alloc(vbo, NSEGMENTS * 2);
-
- for (int a = 0; a < NSEGMENTS * 2; a += 2) {
- v[0] = sinf((2.0f * M_PI * a) / ((float)NSEGMENTS * 2));
- v[1] = cosf((2.0f * M_PI * a) / ((float)NSEGMENTS * 2));
- GPU_vertbuf_attr_set(vbo, attr_id.pos, a, v);
-
- v[0] = sinf((2.0f * M_PI * (a + 1)) / ((float)NSEGMENTS * 2));
- v[1] = cosf((2.0f * M_PI * (a + 1)) / ((float)NSEGMENTS * 2));
- GPU_vertbuf_attr_set(vbo, attr_id.pos, a + 1, v);
- }
-
- SHC.drw_light_shadows = GPU_batch_create_ex(GPU_PRIM_LINES, vbo, NULL, GPU_BATCH_OWNS_VBO);
- }
- return SHC.drw_light_shadows;
+ if (!SHC.drw_light_shadows) {
+ float v[2];
+
+ /* Position Only 2D format */
+ static GPUVertFormat format = {0};
+ static struct {
+ uint pos;
+ } attr_id;
+ if (format.attr_len == 0) {
+ attr_id.pos = GPU_vertformat_attr_add(&format, "pos", GPU_COMP_F32, 2, GPU_FETCH_FLOAT);
+ }
+
+ GPUVertBuf *vbo = GPU_vertbuf_create_with_format(&format);
+ GPU_vertbuf_data_alloc(vbo, NSEGMENTS * 2);
+
+ for (int a = 0; a < NSEGMENTS * 2; a += 2) {
+ v[0] = sinf((2.0f * M_PI * a) / ((float)NSEGMENTS * 2));
+ v[1] = cosf((2.0f * M_PI * a) / ((float)NSEGMENTS * 2));
+ GPU_vertbuf_attr_set(vbo, attr_id.pos, a, v);
+
+ v[0] = sinf((2.0f * M_PI * (a + 1)) / ((float)NSEGMENTS * 2));
+ v[1] = cosf((2.0f * M_PI * (a + 1)) / ((float)NSEGMENTS * 2));
+ GPU_vertbuf_attr_set(vbo, attr_id.pos, a + 1, v);
+ }
+
+ SHC.drw_light_shadows = GPU_batch_create_ex(GPU_PRIM_LINES, vbo, NULL, GPU_BATCH_OWNS_VBO);
+ }
+ return SHC.drw_light_shadows;
#undef NSEGMENTS
}
GPUBatch *DRW_cache_light_sunrays_get(void)
{
- if (!SHC.drw_light_sunrays) {
- float v[2], v1[2], v2[2];
+ if (!SHC.drw_light_sunrays) {
+ float v[2], v1[2], v2[2];
- /* Position Only 2D format */
- static GPUVertFormat format = { 0 };
- static struct { uint pos; } attr_id;
- if (format.attr_len == 0) {
- attr_id.pos = GPU_vertformat_attr_add(&format, "pos", GPU_COMP_F32, 2, GPU_FETCH_FLOAT);
- }
+ /* Position Only 2D format */
+ static GPUVertFormat format = {0};
+ static struct {
+ uint pos;
+ } attr_id;
+ if (format.attr_len == 0) {
+ attr_id.pos = GPU_vertformat_attr_add(&format, "pos", GPU_COMP_F32, 2, GPU_FETCH_FLOAT);
+ }
- GPUVertBuf *vbo = GPU_vertbuf_create_with_format(&format);
- GPU_vertbuf_data_alloc(vbo, 32);
+ GPUVertBuf *vbo = GPU_vertbuf_create_with_format(&format);
+ GPU_vertbuf_data_alloc(vbo, 32);
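+ /* 8 rays, each drawn as two detached dashes (radius 1.6-1.9 and 2.2-2.5): 4 vertices per ray. */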
- for (int a = 0; a < 8; a++) {
- v[0] = sinf((2.0f * M_PI * a) / 8.0f);
- v[1] = cosf((2.0f * M_PI * a) / 8.0f);
+ for (int a = 0; a < 8; a++) {
+ v[0] = sinf((2.0f * M_PI * a) / 8.0f);
+ v[1] = cosf((2.0f * M_PI * a) / 8.0f);
- mul_v2_v2fl(v1, v, 1.6f);
- mul_v2_v2fl(v2, v, 1.9f);
- GPU_vertbuf_attr_set(vbo, attr_id.pos, a * 4, v1);
- GPU_vertbuf_attr_set(vbo, attr_id.pos, a * 4 + 1, v2);
+ mul_v2_v2fl(v1, v, 1.6f);
+ mul_v2_v2fl(v2, v, 1.9f);
+ GPU_vertbuf_attr_set(vbo, attr_id.pos, a * 4, v1);
+ GPU_vertbuf_attr_set(vbo, attr_id.pos, a * 4 + 1, v2);
- mul_v2_v2fl(v1, v, 2.2f);
- mul_v2_v2fl(v2, v, 2.5f);
- GPU_vertbuf_attr_set(vbo, attr_id.pos, a * 4 + 2, v1);
- GPU_vertbuf_attr_set(vbo, attr_id.pos, a * 4 + 3, v2);
- }
+ mul_v2_v2fl(v1, v, 2.2f);
+ mul_v2_v2fl(v2, v, 2.5f);
+ GPU_vertbuf_attr_set(vbo, attr_id.pos, a * 4 + 2, v1);
+ GPU_vertbuf_attr_set(vbo, attr_id.pos, a * 4 + 3, v2);
+ }
- SHC.drw_light_sunrays = GPU_batch_create_ex(GPU_PRIM_LINES, vbo, NULL, GPU_BATCH_OWNS_VBO);
- }
- return SHC.drw_light_sunrays;
+ SHC.drw_light_sunrays = GPU_batch_create_ex(GPU_PRIM_LINES, vbo, NULL, GPU_BATCH_OWNS_VBO);
+ }
+ return SHC.drw_light_sunrays;
}
GPUBatch *DRW_cache_light_area_square_get(void)
{
- if (!SHC.drw_light_area_square) {
- float v1[3] = {0.0f, 0.0f, 0.0f};
-
- /* Position Only 3D format */
- static GPUVertFormat format = { 0 };
- static struct { uint pos; } attr_id;
- if (format.attr_len == 0) {
- attr_id.pos = GPU_vertformat_attr_add(&format, "pos", GPU_COMP_F32, 3, GPU_FETCH_FLOAT);
- }
-
- GPUVertBuf *vbo = GPU_vertbuf_create_with_format(&format);
- GPU_vertbuf_data_alloc(vbo, 8);
-
- v1[0] = v1[1] = 0.5f;
- GPU_vertbuf_attr_set(vbo, attr_id.pos, 0, v1);
- v1[0] = -0.5f;
- GPU_vertbuf_attr_set(vbo, attr_id.pos, 1, v1);
- GPU_vertbuf_attr_set(vbo, attr_id.pos, 2, v1);
- v1[1] = -0.5f;
- GPU_vertbuf_attr_set(vbo, attr_id.pos, 3, v1);
- GPU_vertbuf_attr_set(vbo, attr_id.pos, 4, v1);
- v1[0] = 0.5f;
- GPU_vertbuf_attr_set(vbo, attr_id.pos, 5, v1);
- GPU_vertbuf_attr_set(vbo, attr_id.pos, 6, v1);
- v1[1] = 0.5f;
- GPU_vertbuf_attr_set(vbo, attr_id.pos, 7, v1);
-
- SHC.drw_light_area_square = GPU_batch_create_ex(GPU_PRIM_LINES, vbo, NULL, GPU_BATCH_OWNS_VBO);
- }
- return SHC.drw_light_area_square;
+ if (!SHC.drw_light_area_square) {
+ float v1[3] = {0.0f, 0.0f, 0.0f};
+
+ /* Position Only 3D format */
+ static GPUVertFormat format = {0};
+ static struct {
+ uint pos;
+ } attr_id;
+ if (format.attr_len == 0) {
+ attr_id.pos = GPU_vertformat_attr_add(&format, "pos", GPU_COMP_F32, 3, GPU_FETCH_FLOAT);
+ }
+
+ GPUVertBuf *vbo = GPU_vertbuf_create_with_format(&format);
+ GPU_vertbuf_data_alloc(vbo, 8);
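+ /* Square outline as 4 detached lines; every corner is written twice
+ * (end of one edge, start of the next). */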
+
+ v1[0] = v1[1] = 0.5f;
+ GPU_vertbuf_attr_set(vbo, attr_id.pos, 0, v1);
+ v1[0] = -0.5f;
+ GPU_vertbuf_attr_set(vbo, attr_id.pos, 1, v1);
+ GPU_vertbuf_attr_set(vbo, attr_id.pos, 2, v1);
+ v1[1] = -0.5f;
+ GPU_vertbuf_attr_set(vbo, attr_id.pos, 3, v1);
+ GPU_vertbuf_attr_set(vbo, attr_id.pos, 4, v1);
+ v1[0] = 0.5f;
+ GPU_vertbuf_attr_set(vbo, attr_id.pos, 5, v1);
+ GPU_vertbuf_attr_set(vbo, attr_id.pos, 6, v1);
+ v1[1] = 0.5f;
+ GPU_vertbuf_attr_set(vbo, attr_id.pos, 7, v1);
+
+ SHC.drw_light_area_square = GPU_batch_create_ex(GPU_PRIM_LINES, vbo, NULL, GPU_BATCH_OWNS_VBO);
+ }
+ return SHC.drw_light_area_square;
}
GPUBatch *DRW_cache_light_area_disk_get(void)
{
#define NSEGMENTS 32
- if (!SHC.drw_light_area_disk) {
- /* Position Only 3D format */
- static GPUVertFormat format = { 0 };
- static struct { uint pos; } attr_id;
- if (format.attr_len == 0) {
- attr_id.pos = GPU_vertformat_attr_add(&format, "pos", GPU_COMP_F32, 3, GPU_FETCH_FLOAT);
- }
-
- GPUVertBuf *vbo = GPU_vertbuf_create_with_format(&format);
- GPU_vertbuf_data_alloc(vbo, 2 * NSEGMENTS);
-
- float v[3] = {0.0f, 0.5f, 0.0f};
- GPU_vertbuf_attr_set(vbo, attr_id.pos, 0, v);
- for (int a = 1; a < NSEGMENTS; a++) {
- v[0] = 0.5f * sinf(2.0f * (float)M_PI * a / NSEGMENTS);
- v[1] = 0.5f * cosf(2.0f * (float)M_PI * a / NSEGMENTS);
- GPU_vertbuf_attr_set(vbo, attr_id.pos, 2 * a - 1, v);
- GPU_vertbuf_attr_set(vbo, attr_id.pos, 2 * a, v);
- }
- copy_v3_fl3(v, 0.0f, 0.5f, 0.0f);
- GPU_vertbuf_attr_set(vbo, attr_id.pos, (2 * NSEGMENTS) - 1, v);
-
- SHC.drw_light_area_disk = GPU_batch_create_ex(GPU_PRIM_LINES, vbo, NULL, GPU_BATCH_OWNS_VBO);
- }
- return SHC.drw_light_area_disk;
+ if (!SHC.drw_light_area_disk) {
+ /* Position Only 3D format */
+ static GPUVertFormat format = {0};
+ static struct {
+ uint pos;
+ } attr_id;
+ if (format.attr_len == 0) {
+ attr_id.pos = GPU_vertformat_attr_add(&format, "pos", GPU_COMP_F32, 3, GPU_FETCH_FLOAT);
+ }
+
+ GPUVertBuf *vbo = GPU_vertbuf_create_with_format(&format);
+ GPU_vertbuf_data_alloc(vbo, 2 * NSEGMENTS);
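+ /* Closed circle of NSEGMENTS detached lines; intermediate points are doubled and
+ * the first point is repeated in the last slot to close the loop. */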
+
+ float v[3] = {0.0f, 0.5f, 0.0f};
+ GPU_vertbuf_attr_set(vbo, attr_id.pos, 0, v);
+ for (int a = 1; a < NSEGMENTS; a++) {
+ v[0] = 0.5f * sinf(2.0f * (float)M_PI * a / NSEGMENTS);
+ v[1] = 0.5f * cosf(2.0f * (float)M_PI * a / NSEGMENTS);
+ GPU_vertbuf_attr_set(vbo, attr_id.pos, 2 * a - 1, v);
+ GPU_vertbuf_attr_set(vbo, attr_id.pos, 2 * a, v);
+ }
+ copy_v3_fl3(v, 0.0f, 0.5f, 0.0f);
+ GPU_vertbuf_attr_set(vbo, attr_id.pos, (2 * NSEGMENTS) - 1, v);
+
+ SHC.drw_light_area_disk = GPU_batch_create_ex(GPU_PRIM_LINES, vbo, NULL, GPU_BATCH_OWNS_VBO);
+ }
+ return SHC.drw_light_area_disk;
#undef NSEGMENTS
}
GPUBatch *DRW_cache_light_hemi_get(void)
{
#define CIRCLE_RESOL 32
- if (!SHC.drw_light_hemi) {
- float v[3];
- int vidx = 0;
-
- /* Position Only 3D format */
- static GPUVertFormat format = { 0 };
- static struct { uint pos; } attr_id;
- if (format.attr_len == 0) {
- attr_id.pos = GPU_vertformat_attr_add(&format, "pos", GPU_COMP_F32, 3, GPU_FETCH_FLOAT);
- }
-
- GPUVertBuf *vbo = GPU_vertbuf_create_with_format(&format);
- GPU_vertbuf_data_alloc(vbo, CIRCLE_RESOL * 2 * 2 - 6 * 2 * 2);
-
- /* XZ plane */
- for (int a = 3; a < CIRCLE_RESOL / 2 - 3; a++) {
- v[0] = sinf((2.0f * M_PI * a) / ((float)CIRCLE_RESOL) - M_PI / 2);
- v[2] = cosf((2.0f * M_PI * a) / ((float)CIRCLE_RESOL) - M_PI / 2) - 1.0f;
- v[1] = 0.0f;
- GPU_vertbuf_attr_set(vbo, attr_id.pos, vidx++, v);
-
- v[0] = sinf((2.0f * M_PI * (a + 1)) / ((float)CIRCLE_RESOL) - M_PI / 2);
- v[2] = cosf((2.0f * M_PI * (a + 1)) / ((float)CIRCLE_RESOL) - M_PI / 2) - 1.0f;
- v[1] = 0.0f;
- GPU_vertbuf_attr_set(vbo, attr_id.pos, vidx++, v);
- }
-
- /* XY plane */
- for (int a = 3; a < CIRCLE_RESOL / 2 - 3; a++) {
- v[2] = sinf((2.0f * M_PI * a) / ((float)CIRCLE_RESOL)) - 1.0f;
- v[1] = cosf((2.0f * M_PI * a) / ((float)CIRCLE_RESOL));
- v[0] = 0.0f;
- GPU_vertbuf_attr_set(vbo, attr_id.pos, vidx++, v);
-
- v[2] = sinf((2.0f * M_PI * (a + 1)) / ((float)CIRCLE_RESOL)) - 1.0f;
- v[1] = cosf((2.0f * M_PI * (a + 1)) / ((float)CIRCLE_RESOL));
- v[0] = 0.0f;
- GPU_vertbuf_attr_set(vbo, attr_id.pos, vidx++, v);
- }
-
- /* YZ plane full circle */
- /* leave v[2] as it is */
- const float rad = cosf((2.0f * M_PI * 3) / ((float)CIRCLE_RESOL));
- for (int a = 0; a < CIRCLE_RESOL; a++) {
- v[1] = rad * sinf((2.0f * M_PI * a) / ((float)CIRCLE_RESOL));
- v[0] = rad * cosf((2.0f * M_PI * a) / ((float)CIRCLE_RESOL));
- GPU_vertbuf_attr_set(vbo, attr_id.pos, vidx++, v);
-
- v[1] = rad * sinf((2.0f * M_PI * (a + 1)) / ((float)CIRCLE_RESOL));
- v[0] = rad * cosf((2.0f * M_PI * (a + 1)) / ((float)CIRCLE_RESOL));
- GPU_vertbuf_attr_set(vbo, attr_id.pos, vidx++, v);
- }
-
-
- SHC.drw_light_hemi = GPU_batch_create_ex(GPU_PRIM_LINES, vbo, NULL, GPU_BATCH_OWNS_VBO);
- }
- return SHC.drw_light_hemi;
+ if (!SHC.drw_light_hemi) {
+ float v[3];
+ int vidx = 0;
+
+ /* Position Only 3D format */
+ static GPUVertFormat format = {0};
+ static struct {
+ uint pos;
+ } attr_id;
+ if (format.attr_len == 0) {
+ attr_id.pos = GPU_vertformat_attr_add(&format, "pos", GPU_COMP_F32, 3, GPU_FETCH_FLOAT);
+ }
+
+ GPUVertBuf *vbo = GPU_vertbuf_create_with_format(&format);
+ GPU_vertbuf_data_alloc(vbo, CIRCLE_RESOL * 2 * 2 - 6 * 2 * 2);
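+ /* Two half-arcs that skip 3 segments at either end (hence the "- 6 * 2 * 2")
+ * plus one full circle of CIRCLE_RESOL segments in the YZ plane. */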
+
+ /* XZ plane */
+ for (int a = 3; a < CIRCLE_RESOL / 2 - 3; a++) {
+ v[0] = sinf((2.0f * M_PI * a) / ((float)CIRCLE_RESOL) - M_PI / 2);
+ v[2] = cosf((2.0f * M_PI * a) / ((float)CIRCLE_RESOL) - M_PI / 2) - 1.0f;
+ v[1] = 0.0f;
+ GPU_vertbuf_attr_set(vbo, attr_id.pos, vidx++, v);
+
+ v[0] = sinf((2.0f * M_PI * (a + 1)) / ((float)CIRCLE_RESOL) - M_PI / 2);
+ v[2] = cosf((2.0f * M_PI * (a + 1)) / ((float)CIRCLE_RESOL) - M_PI / 2) - 1.0f;
+ v[1] = 0.0f;
+ GPU_vertbuf_attr_set(vbo, attr_id.pos, vidx++, v);
+ }
+
+ /* XY plane */
+ for (int a = 3; a < CIRCLE_RESOL / 2 - 3; a++) {
+ v[2] = sinf((2.0f * M_PI * a) / ((float)CIRCLE_RESOL)) - 1.0f;
+ v[1] = cosf((2.0f * M_PI * a) / ((float)CIRCLE_RESOL));
+ v[0] = 0.0f;
+ GPU_vertbuf_attr_set(vbo, attr_id.pos, vidx++, v);
+
+ v[2] = sinf((2.0f * M_PI * (a + 1)) / ((float)CIRCLE_RESOL)) - 1.0f;
+ v[1] = cosf((2.0f * M_PI * (a + 1)) / ((float)CIRCLE_RESOL));
+ v[0] = 0.0f;
+ GPU_vertbuf_attr_set(vbo, attr_id.pos, vidx++, v);
+ }
+
+ /* YZ plane full circle */
+ /* leave v[2] as it is */
+ const float rad = cosf((2.0f * M_PI * 3) / ((float)CIRCLE_RESOL));
+ for (int a = 0; a < CIRCLE_RESOL; a++) {
+ v[1] = rad * sinf((2.0f * M_PI * a) / ((float)CIRCLE_RESOL));
+ v[0] = rad * cosf((2.0f * M_PI * a) / ((float)CIRCLE_RESOL));
+ GPU_vertbuf_attr_set(vbo, attr_id.pos, vidx++, v);
+
+ v[1] = rad * sinf((2.0f * M_PI * (a + 1)) / ((float)CIRCLE_RESOL));
+ v[0] = rad * cosf((2.0f * M_PI * (a + 1)) / ((float)CIRCLE_RESOL));
+ GPU_vertbuf_attr_set(vbo, attr_id.pos, vidx++, v);
+ }
+
+ SHC.drw_light_hemi = GPU_batch_create_ex(GPU_PRIM_LINES, vbo, NULL, GPU_BATCH_OWNS_VBO);
+ }
+ return SHC.drw_light_hemi;
#undef CIRCLE_RESOL
}
-
GPUBatch *DRW_cache_light_spot_get(void)
{
#define NSEGMENTS 32
- if (!SHC.drw_light_spot) {
- /* a single ring of vertices */
- float p[NSEGMENTS][2];
- float n[NSEGMENTS][3];
- float neg[NSEGMENTS][3];
- float half_angle = 2 * M_PI / ((float)NSEGMENTS * 2);
- for (int i = 0; i < NSEGMENTS; ++i) {
- float angle = 2 * M_PI * ((float)i / (float)NSEGMENTS);
- p[i][0] = cosf(angle);
- p[i][1] = sinf(angle);
-
- n[i][0] = cosf(angle - half_angle);
- n[i][1] = sinf(angle - half_angle);
- n[i][2] = cosf(M_PI / 16.0f); /* slope of the cone */
- normalize_v3(n[i]); /* necessary? */
- negate_v3_v3(neg[i], n[i]);
- }
-
- static GPUVertFormat format = { 0 };
- static struct { uint pos, n1, n2; } attr_id;
- if (format.attr_len == 0) {
- attr_id.pos = GPU_vertformat_attr_add(&format, "pos", GPU_COMP_F32, 3, GPU_FETCH_FLOAT);
- attr_id.n1 = GPU_vertformat_attr_add(&format, "N1", GPU_COMP_F32, 3, GPU_FETCH_FLOAT);
- attr_id.n2 = GPU_vertformat_attr_add(&format, "N2", GPU_COMP_F32, 3, GPU_FETCH_FLOAT);
- }
-
- GPUVertBuf *vbo = GPU_vertbuf_create_with_format(&format);
- GPU_vertbuf_data_alloc(vbo, NSEGMENTS * 4);
-
- for (int i = 0; i < NSEGMENTS; ++i) {
- float cv[2], v[3];
- cv[0] = p[i % NSEGMENTS][0];
- cv[1] = p[i % NSEGMENTS][1];
-
- /* cone sides */
- ARRAY_SET_ITEMS(v, cv[0], cv[1], -1.0f);
- GPU_vertbuf_attr_set(vbo, attr_id.pos, i * 4, v);
- ARRAY_SET_ITEMS(v, 0.0f, 0.0f, 0.0f);
- GPU_vertbuf_attr_set(vbo, attr_id.pos, i * 4 + 1, v);
-
- GPU_vertbuf_attr_set(vbo, attr_id.n1, i * 4, n[(i) % NSEGMENTS]);
- GPU_vertbuf_attr_set(vbo, attr_id.n1, i * 4 + 1, n[(i) % NSEGMENTS]);
- GPU_vertbuf_attr_set(vbo, attr_id.n2, i * 4, n[(i + 1) % NSEGMENTS]);
- GPU_vertbuf_attr_set(vbo, attr_id.n2, i * 4 + 1, n[(i + 1) % NSEGMENTS]);
-
- /* end ring */
- ARRAY_SET_ITEMS(v, cv[0], cv[1], -1.0f);
- GPU_vertbuf_attr_set(vbo, attr_id.pos, i * 4 + 2, v);
- cv[0] = p[(i + 1) % NSEGMENTS][0];
- cv[1] = p[(i + 1) % NSEGMENTS][1];
- ARRAY_SET_ITEMS(v, cv[0], cv[1], -1.0f);
- GPU_vertbuf_attr_set(vbo, attr_id.pos, i * 4 + 3, v);
-
- GPU_vertbuf_attr_set(vbo, attr_id.n1, i * 4 + 2, n[(i) % NSEGMENTS]);
- GPU_vertbuf_attr_set(vbo, attr_id.n1, i * 4 + 3, n[(i) % NSEGMENTS]);
- GPU_vertbuf_attr_set(vbo, attr_id.n2, i * 4 + 2, neg[(i) % NSEGMENTS]);
- GPU_vertbuf_attr_set(vbo, attr_id.n2, i * 4 + 3, neg[(i) % NSEGMENTS]);
- }
-
- SHC.drw_light_spot = GPU_batch_create_ex(GPU_PRIM_LINES, vbo, NULL, GPU_BATCH_OWNS_VBO);
- }
- return SHC.drw_light_spot;
+ if (!SHC.drw_light_spot) {
+ /* a single ring of vertices */
+ float p[NSEGMENTS][2];
+ float n[NSEGMENTS][3];
+ float neg[NSEGMENTS][3];
+ float half_angle = 2 * M_PI / ((float)NSEGMENTS * 2);
+ for (int i = 0; i < NSEGMENTS; ++i) {
+ float angle = 2 * M_PI * ((float)i / (float)NSEGMENTS);
+ p[i][0] = cosf(angle);
+ p[i][1] = sinf(angle);
+
+ n[i][0] = cosf(angle - half_angle);
+ n[i][1] = sinf(angle - half_angle);
+ n[i][2] = cosf(M_PI / 16.0f); /* slope of the cone */
+ normalize_v3(n[i]); /* necessary? */
+ negate_v3_v3(neg[i], n[i]);
+ }
+
+ static GPUVertFormat format = {0};
+ static struct {
+ uint pos, n1, n2;
+ } attr_id;
+ if (format.attr_len == 0) {
+ attr_id.pos = GPU_vertformat_attr_add(&format, "pos", GPU_COMP_F32, 3, GPU_FETCH_FLOAT);
+ attr_id.n1 = GPU_vertformat_attr_add(&format, "N1", GPU_COMP_F32, 3, GPU_FETCH_FLOAT);
+ attr_id.n2 = GPU_vertformat_attr_add(&format, "N2", GPU_COMP_F32, 3, GPU_FETCH_FLOAT);
+ }
+
+ GPUVertBuf *vbo = GPU_vertbuf_create_with_format(&format);
+ GPU_vertbuf_data_alloc(vbo, NSEGMENTS * 4);
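+ /* Each edge carries the normals of its two adjacent cone faces in N1/N2,
+ * presumably so the spot shader can detect and keep only silhouette edges. */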
+
+ for (int i = 0; i < NSEGMENTS; ++i) {
+ float cv[2], v[3];
+ cv[0] = p[i % NSEGMENTS][0];
+ cv[1] = p[i % NSEGMENTS][1];
+
+ /* cone sides */
+ ARRAY_SET_ITEMS(v, cv[0], cv[1], -1.0f);
+ GPU_vertbuf_attr_set(vbo, attr_id.pos, i * 4, v);
+ ARRAY_SET_ITEMS(v, 0.0f, 0.0f, 0.0f);
+ GPU_vertbuf_attr_set(vbo, attr_id.pos, i * 4 + 1, v);
+
+ GPU_vertbuf_attr_set(vbo, attr_id.n1, i * 4, n[(i) % NSEGMENTS]);
+ GPU_vertbuf_attr_set(vbo, attr_id.n1, i * 4 + 1, n[(i) % NSEGMENTS]);
+ GPU_vertbuf_attr_set(vbo, attr_id.n2, i * 4, n[(i + 1) % NSEGMENTS]);
+ GPU_vertbuf_attr_set(vbo, attr_id.n2, i * 4 + 1, n[(i + 1) % NSEGMENTS]);
+
+ /* end ring */
+ ARRAY_SET_ITEMS(v, cv[0], cv[1], -1.0f);
+ GPU_vertbuf_attr_set(vbo, attr_id.pos, i * 4 + 2, v);
+ cv[0] = p[(i + 1) % NSEGMENTS][0];
+ cv[1] = p[(i + 1) % NSEGMENTS][1];
+ ARRAY_SET_ITEMS(v, cv[0], cv[1], -1.0f);
+ GPU_vertbuf_attr_set(vbo, attr_id.pos, i * 4 + 3, v);
+
+ GPU_vertbuf_attr_set(vbo, attr_id.n1, i * 4 + 2, n[(i) % NSEGMENTS]);
+ GPU_vertbuf_attr_set(vbo, attr_id.n1, i * 4 + 3, n[(i) % NSEGMENTS]);
+ GPU_vertbuf_attr_set(vbo, attr_id.n2, i * 4 + 2, neg[(i) % NSEGMENTS]);
+ GPU_vertbuf_attr_set(vbo, attr_id.n2, i * 4 + 3, neg[(i) % NSEGMENTS]);
+ }
+
+ SHC.drw_light_spot = GPU_batch_create_ex(GPU_PRIM_LINES, vbo, NULL, GPU_BATCH_OWNS_VBO);
+ }
+ return SHC.drw_light_spot;
#undef NSEGMENTS
}
GPUBatch *DRW_cache_light_spot_volume_get(void)
{
#define NSEGMENTS 32
- if (!SHC.drw_light_spot_volume) {
- /* a single ring of vertices */
- float p[NSEGMENTS][2];
- for (int i = 0; i < NSEGMENTS; ++i) {
- float angle = 2 * M_PI * ((float)i / (float)NSEGMENTS);
- p[i][0] = cosf(angle);
- p[i][1] = sinf(angle);
- }
-
- static GPUVertFormat format = { 0 };
- static struct { uint pos; } attr_id;
- if (format.attr_len == 0) {
- attr_id.pos = GPU_vertformat_attr_add(&format, "pos", GPU_COMP_F32, 3, GPU_FETCH_FLOAT);
- }
-
- GPUVertBuf *vbo = GPU_vertbuf_create_with_format(&format);
- GPU_vertbuf_data_alloc(vbo, NSEGMENTS * 3);
-
- uint v_idx = 0;
- for (int i = 0; i < NSEGMENTS; ++i) {
- float cv[2], v[3];
-
- ARRAY_SET_ITEMS(v, 0.0f, 0.0f, 0.0f);
- GPU_vertbuf_attr_set(vbo, attr_id.pos, v_idx++, v);
-
- cv[0] = p[i % NSEGMENTS][0];
- cv[1] = p[i % NSEGMENTS][1];
- ARRAY_SET_ITEMS(v, cv[0], cv[1], -1.0f);
- GPU_vertbuf_attr_set(vbo, attr_id.pos, v_idx++, v);
-
- cv[0] = p[(i + 1) % NSEGMENTS][0];
- cv[1] = p[(i + 1) % NSEGMENTS][1];
- ARRAY_SET_ITEMS(v, cv[0], cv[1], -1.0f);
- GPU_vertbuf_attr_set(vbo, attr_id.pos, v_idx++, v);
- }
-
- SHC.drw_light_spot_volume = GPU_batch_create_ex(GPU_PRIM_TRIS, vbo, NULL, GPU_BATCH_OWNS_VBO);
- }
- return SHC.drw_light_spot_volume;
+ if (!SHC.drw_light_spot_volume) {
+ /* a single ring of vertices */
+ float p[NSEGMENTS][2];
+ for (int i = 0; i < NSEGMENTS; ++i) {
+ float angle = 2 * M_PI * ((float)i / (float)NSEGMENTS);
+ p[i][0] = cosf(angle);
+ p[i][1] = sinf(angle);
+ }
+
+ static GPUVertFormat format = {0};
+ static struct {
+ uint pos;
+ } attr_id;
+ if (format.attr_len == 0) {
+ attr_id.pos = GPU_vertformat_attr_add(&format, "pos", GPU_COMP_F32, 3, GPU_FETCH_FLOAT);
+ }
+
+ GPUVertBuf *vbo = GPU_vertbuf_create_with_format(&format);
+ GPU_vertbuf_data_alloc(vbo, NSEGMENTS * 3);
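+ /* One triangle per segment (apex plus two rim points), forming the solid cone. */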
+
+ uint v_idx = 0;
+ for (int i = 0; i < NSEGMENTS; ++i) {
+ float cv[2], v[3];
+
+ ARRAY_SET_ITEMS(v, 0.0f, 0.0f, 0.0f);
+ GPU_vertbuf_attr_set(vbo, attr_id.pos, v_idx++, v);
+
+ cv[0] = p[i % NSEGMENTS][0];
+ cv[1] = p[i % NSEGMENTS][1];
+ ARRAY_SET_ITEMS(v, cv[0], cv[1], -1.0f);
+ GPU_vertbuf_attr_set(vbo, attr_id.pos, v_idx++, v);
+
+ cv[0] = p[(i + 1) % NSEGMENTS][0];
+ cv[1] = p[(i + 1) % NSEGMENTS][1];
+ ARRAY_SET_ITEMS(v, cv[0], cv[1], -1.0f);
+ GPU_vertbuf_attr_set(vbo, attr_id.pos, v_idx++, v);
+ }
+
+ SHC.drw_light_spot_volume = GPU_batch_create_ex(GPU_PRIM_TRIS, vbo, NULL, GPU_BATCH_OWNS_VBO);
+ }
+ return SHC.drw_light_spot_volume;
#undef NSEGMENTS
}
GPUBatch *DRW_cache_light_spot_square_get(void)
{
- if (!SHC.drw_light_spot_square) {
- float p[5][3] = {
- { 0.0f, 0.0f, 0.0f},
- { 1.0f, 1.0f, -1.0f},
- { 1.0f, -1.0f, -1.0f},
- {-1.0f, -1.0f, -1.0f},
- {-1.0f, 1.0f, -1.0f}};
+ if (!SHC.drw_light_spot_square) {
+ float p[5][3] = {{0.0f, 0.0f, 0.0f},
+ {1.0f, 1.0f, -1.0f},
+ {1.0f, -1.0f, -1.0f},
+ {-1.0f, -1.0f, -1.0f},
+ {-1.0f, 1.0f, -1.0f}};
- uint v_idx = 0;
+ uint v_idx = 0;
- /* Position Only 3D format */
- static GPUVertFormat format = { 0 };
- static struct { uint pos; } attr_id;
- if (format.attr_len == 0) {
- attr_id.pos = GPU_vertformat_attr_add(&format, "pos", GPU_COMP_F32, 3, GPU_FETCH_FLOAT);
- }
+ /* Position Only 3D format */
+ static GPUVertFormat format = {0};
+ static struct {
+ uint pos;
+ } attr_id;
+ if (format.attr_len == 0) {
+ attr_id.pos = GPU_vertformat_attr_add(&format, "pos", GPU_COMP_F32, 3, GPU_FETCH_FLOAT);
+ }
- GPUVertBuf *vbo = GPU_vertbuf_create_with_format(&format);
- GPU_vertbuf_data_alloc(vbo, 16);
+ GPUVertBuf *vbo = GPU_vertbuf_create_with_format(&format);
+ GPU_vertbuf_data_alloc(vbo, 16);
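+ /* 16 vertices: 4 apex-to-corner edges plus 4 base edges, 2 vertices each. */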
- /* pyramid sides */
- for (int i = 1; i <= 4; ++i) {
- GPU_vertbuf_attr_set(vbo, attr_id.pos, v_idx++, p[0]);
- GPU_vertbuf_attr_set(vbo, attr_id.pos, v_idx++, p[i]);
+ /* pyramid sides */
+ for (int i = 1; i <= 4; ++i) {
+ GPU_vertbuf_attr_set(vbo, attr_id.pos, v_idx++, p[0]);
+ GPU_vertbuf_attr_set(vbo, attr_id.pos, v_idx++, p[i]);
- GPU_vertbuf_attr_set(vbo, attr_id.pos, v_idx++, p[(i % 4) + 1]);
- GPU_vertbuf_attr_set(vbo, attr_id.pos, v_idx++, p[((i + 1) % 4) + 1]);
- }
+ GPU_vertbuf_attr_set(vbo, attr_id.pos, v_idx++, p[(i % 4) + 1]);
+ GPU_vertbuf_attr_set(vbo, attr_id.pos, v_idx++, p[((i + 1) % 4) + 1]);
+ }
- SHC.drw_light_spot_square = GPU_batch_create_ex(GPU_PRIM_LINES, vbo, NULL, GPU_BATCH_OWNS_VBO);
- }
- return SHC.drw_light_spot_square;
+ SHC.drw_light_spot_square = GPU_batch_create_ex(GPU_PRIM_LINES, vbo, NULL, GPU_BATCH_OWNS_VBO);
+ }
+ return SHC.drw_light_spot_square;
}
GPUBatch *DRW_cache_light_spot_square_volume_get(void)
{
- if (!SHC.drw_light_spot_square_volume) {
- float p[5][3] = {
- { 0.0f, 0.0f, 0.0f},
- { 1.0f, 1.0f, -1.0f},
- { 1.0f, -1.0f, -1.0f},
- {-1.0f, -1.0f, -1.0f},
- {-1.0f, 1.0f, -1.0f}};
+ if (!SHC.drw_light_spot_square_volume) {
+ float p[5][3] = {{0.0f, 0.0f, 0.0f},
+ {1.0f, 1.0f, -1.0f},
+ {1.0f, -1.0f, -1.0f},
+ {-1.0f, -1.0f, -1.0f},
+ {-1.0f, 1.0f, -1.0f}};
- uint v_idx = 0;
+ uint v_idx = 0;
- /* Position Only 3D format */
- static GPUVertFormat format = { 0 };
- static struct { uint pos; } attr_id;
- if (format.attr_len == 0) {
- attr_id.pos = GPU_vertformat_attr_add(&format, "pos", GPU_COMP_F32, 3, GPU_FETCH_FLOAT);
- }
+ /* Position Only 3D format */
+ static GPUVertFormat format = {0};
+ static struct {
+ uint pos;
+ } attr_id;
+ if (format.attr_len == 0) {
+ attr_id.pos = GPU_vertformat_attr_add(&format, "pos", GPU_COMP_F32, 3, GPU_FETCH_FLOAT);
+ }
- GPUVertBuf *vbo = GPU_vertbuf_create_with_format(&format);
- GPU_vertbuf_data_alloc(vbo, 12);
+ GPUVertBuf *vbo = GPU_vertbuf_create_with_format(&format);
+ GPU_vertbuf_data_alloc(vbo, 12);
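+ /* 4 triangles (apex plus one base edge); corners are taken in reverse order
+ * compared to the wire version, presumably for outward-facing winding. */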
- /* pyramid sides */
- for (int i = 1; i <= 4; ++i) {
- GPU_vertbuf_attr_set(vbo, attr_id.pos, v_idx++, p[0]);
- GPU_vertbuf_attr_set(vbo, attr_id.pos, v_idx++, p[((i + 1) % 4) + 1]);
- GPU_vertbuf_attr_set(vbo, attr_id.pos, v_idx++, p[(i % 4) + 1]);
- }
+ /* pyramid sides */
+ for (int i = 1; i <= 4; ++i) {
+ GPU_vertbuf_attr_set(vbo, attr_id.pos, v_idx++, p[0]);
+ GPU_vertbuf_attr_set(vbo, attr_id.pos, v_idx++, p[((i + 1) % 4) + 1]);
+ GPU_vertbuf_attr_set(vbo, attr_id.pos, v_idx++, p[(i % 4) + 1]);
+ }
- SHC.drw_light_spot_square_volume = GPU_batch_create_ex(GPU_PRIM_TRIS, vbo, NULL, GPU_BATCH_OWNS_VBO);
- }
- return SHC.drw_light_spot_square_volume;
+ SHC.drw_light_spot_square_volume = GPU_batch_create_ex(
+ GPU_PRIM_TRIS, vbo, NULL, GPU_BATCH_OWNS_VBO);
+ }
+ return SHC.drw_light_spot_square_volume;
}
/** \} */
@@ -1750,59 +1818,61 @@ GPUBatch *DRW_cache_light_spot_square_volume_get(void)
GPUBatch *DRW_cache_speaker_get(void)
{
- if (!SHC.drw_speaker) {
- float v[3];
- const int segments = 16;
- int vidx = 0;
-
- /* Position Only 3D format */
- static GPUVertFormat format = { 0 };
- static struct { uint pos; } attr_id;
- if (format.attr_len == 0) {
- attr_id.pos = GPU_vertformat_attr_add(&format, "pos", GPU_COMP_F32, 3, GPU_FETCH_FLOAT);
- }
-
- GPUVertBuf *vbo = GPU_vertbuf_create_with_format(&format);
- GPU_vertbuf_data_alloc(vbo, 3 * segments * 2 + 4 * 4);
-
- for (int j = 0; j < 3; j++) {
- float z = 0.25f * j - 0.125f;
- float r = (j == 0 ? 0.5f : 0.25f);
-
- copy_v3_fl3(v, r, 0.0f, z);
- GPU_vertbuf_attr_set(vbo, attr_id.pos, vidx++, v);
- for (int i = 1; i < segments; i++) {
- float x = cosf(2.f * (float)M_PI * i / segments) * r;
- float y = sinf(2.f * (float)M_PI * i / segments) * r;
- copy_v3_fl3(v, x, y, z);
- GPU_vertbuf_attr_set(vbo, attr_id.pos, vidx++, v);
- GPU_vertbuf_attr_set(vbo, attr_id.pos, vidx++, v);
- }
- copy_v3_fl3(v, r, 0.0f, z);
- GPU_vertbuf_attr_set(vbo, attr_id.pos, vidx++, v);
- }
-
- for (int j = 0; j < 4; j++) {
- float x = (((j + 1) % 2) * (j - 1)) * 0.5f;
- float y = ((j % 2) * (j - 2)) * 0.5f;
- for (int i = 0; i < 3; i++) {
- if (i == 1) {
- x *= 0.5f;
- y *= 0.5f;
- }
-
- float z = 0.25f * i - 0.125f;
- copy_v3_fl3(v, x, y, z);
- GPU_vertbuf_attr_set(vbo, attr_id.pos, vidx++, v);
- if (i == 1) {
- GPU_vertbuf_attr_set(vbo, attr_id.pos, vidx++, v);
- }
- }
- }
-
- SHC.drw_speaker = GPU_batch_create_ex(GPU_PRIM_LINES, vbo, NULL, GPU_BATCH_OWNS_VBO);
- }
- return SHC.drw_speaker;
+ if (!SHC.drw_speaker) {
+ float v[3];
+ const int segments = 16;
+ int vidx = 0;
+
+ /* Position Only 3D format */
+ static GPUVertFormat format = {0};
+ static struct {
+ uint pos;
+ } attr_id;
+ if (format.attr_len == 0) {
+ attr_id.pos = GPU_vertformat_attr_add(&format, "pos", GPU_COMP_F32, 3, GPU_FETCH_FLOAT);
+ }
+
+ GPUVertBuf *vbo = GPU_vertbuf_create_with_format(&format);
+ GPU_vertbuf_data_alloc(vbo, 3 * segments * 2 + 4 * 4);
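+ /* Three circles drawn as detached lines (segments * 2 vertices each) plus four
+ * 3-point connector lines (4 vertices each, the middle point doubled). */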
+
+ for (int j = 0; j < 3; j++) {
+ float z = 0.25f * j - 0.125f;
+ float r = (j == 0 ? 0.5f : 0.25f);
+
+ copy_v3_fl3(v, r, 0.0f, z);
+ GPU_vertbuf_attr_set(vbo, attr_id.pos, vidx++, v);
+ for (int i = 1; i < segments; i++) {
+ float x = cosf(2.f * (float)M_PI * i / segments) * r;
+ float y = sinf(2.f * (float)M_PI * i / segments) * r;
+ copy_v3_fl3(v, x, y, z);
+ GPU_vertbuf_attr_set(vbo, attr_id.pos, vidx++, v);
+ GPU_vertbuf_attr_set(vbo, attr_id.pos, vidx++, v);
+ }
+ copy_v3_fl3(v, r, 0.0f, z);
+ GPU_vertbuf_attr_set(vbo, attr_id.pos, vidx++, v);
+ }
+
+ for (int j = 0; j < 4; j++) {
+ float x = (((j + 1) % 2) * (j - 1)) * 0.5f;
+ float y = ((j % 2) * (j - 2)) * 0.5f;
+ for (int i = 0; i < 3; i++) {
+ if (i == 1) {
+ x *= 0.5f;
+ y *= 0.5f;
+ }
+
+ float z = 0.25f * i - 0.125f;
+ copy_v3_fl3(v, x, y, z);
+ GPU_vertbuf_attr_set(vbo, attr_id.pos, vidx++, v);
+ if (i == 1) {
+ GPU_vertbuf_attr_set(vbo, attr_id.pos, vidx++, v);
+ }
+ }
+ }
+
+ SHC.drw_speaker = GPU_batch_create_ex(GPU_PRIM_LINES, vbo, NULL, GPU_BATCH_OWNS_VBO);
+ }
+ return SHC.drw_speaker;
}
/** \} */
@@ -1813,136 +1883,142 @@ GPUBatch *DRW_cache_speaker_get(void)
GPUBatch *DRW_cache_lightprobe_cube_get(void)
{
- if (!SHC.drw_lightprobe_cube) {
- int v_idx = 0;
- const float sin_pi_3 = 0.86602540378f;
- const float cos_pi_3 = 0.5f;
- float v[7][3] = {
- {0.0f, 1.0f, 0.0f},
- {sin_pi_3, cos_pi_3, 0.0f},
- {sin_pi_3, -cos_pi_3, 0.0f},
- {0.0f, -1.0f, 0.0f},
- {-sin_pi_3, -cos_pi_3, 0.0f},
- {-sin_pi_3, cos_pi_3, 0.0f},
- {0.0f, 0.0f, 0.0f},
- };
-
- /* Position Only 3D format */
- static GPUVertFormat format = { 0 };
- static struct { uint pos; } attr_id;
- if (format.attr_len == 0) {
- attr_id.pos = GPU_vertformat_attr_add(&format, "pos", GPU_COMP_F32, 3, GPU_FETCH_FLOAT);
- }
-
- GPUVertBuf *vbo = GPU_vertbuf_create_with_format(&format);
- GPU_vertbuf_data_alloc(vbo, (6 + 3) * 2);
-
- for (int i = 0; i < 6; ++i) {
- GPU_vertbuf_attr_set(vbo, attr_id.pos, v_idx++, v[i]);
- GPU_vertbuf_attr_set(vbo, attr_id.pos, v_idx++, v[(i + 1) % 6]);
- }
-
- GPU_vertbuf_attr_set(vbo, attr_id.pos, v_idx++, v[1]);
- GPU_vertbuf_attr_set(vbo, attr_id.pos, v_idx++, v[6]);
-
- GPU_vertbuf_attr_set(vbo, attr_id.pos, v_idx++, v[5]);
- GPU_vertbuf_attr_set(vbo, attr_id.pos, v_idx++, v[6]);
-
- GPU_vertbuf_attr_set(vbo, attr_id.pos, v_idx++, v[3]);
- GPU_vertbuf_attr_set(vbo, attr_id.pos, v_idx++, v[6]);
-
- SHC.drw_lightprobe_cube = GPU_batch_create_ex(GPU_PRIM_LINES, vbo, NULL, GPU_BATCH_OWNS_VBO);
- }
- return SHC.drw_lightprobe_cube;
+ if (!SHC.drw_lightprobe_cube) {
+ int v_idx = 0;
+ const float sin_pi_3 = 0.86602540378f;
+ const float cos_pi_3 = 0.5f;
+ float v[7][3] = {
+ {0.0f, 1.0f, 0.0f},
+ {sin_pi_3, cos_pi_3, 0.0f},
+ {sin_pi_3, -cos_pi_3, 0.0f},
+ {0.0f, -1.0f, 0.0f},
+ {-sin_pi_3, -cos_pi_3, 0.0f},
+ {-sin_pi_3, cos_pi_3, 0.0f},
+ {0.0f, 0.0f, 0.0f},
+ };
+
+ /* Position Only 3D format */
+ static GPUVertFormat format = {0};
+ static struct {
+ uint pos;
+ } attr_id;
+ if (format.attr_len == 0) {
+ attr_id.pos = GPU_vertformat_attr_add(&format, "pos", GPU_COMP_F32, 3, GPU_FETCH_FLOAT);
+ }
+
+ GPUVertBuf *vbo = GPU_vertbuf_create_with_format(&format);
+ GPU_vertbuf_data_alloc(vbo, (6 + 3) * 2);
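+ /* Hexagon outline (6 edges) plus 3 spokes from alternating corners to the center v[6]. */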
+
+ for (int i = 0; i < 6; ++i) {
+ GPU_vertbuf_attr_set(vbo, attr_id.pos, v_idx++, v[i]);
+ GPU_vertbuf_attr_set(vbo, attr_id.pos, v_idx++, v[(i + 1) % 6]);
+ }
+
+ GPU_vertbuf_attr_set(vbo, attr_id.pos, v_idx++, v[1]);
+ GPU_vertbuf_attr_set(vbo, attr_id.pos, v_idx++, v[6]);
+
+ GPU_vertbuf_attr_set(vbo, attr_id.pos, v_idx++, v[5]);
+ GPU_vertbuf_attr_set(vbo, attr_id.pos, v_idx++, v[6]);
+
+ GPU_vertbuf_attr_set(vbo, attr_id.pos, v_idx++, v[3]);
+ GPU_vertbuf_attr_set(vbo, attr_id.pos, v_idx++, v[6]);
+
+ SHC.drw_lightprobe_cube = GPU_batch_create_ex(GPU_PRIM_LINES, vbo, NULL, GPU_BATCH_OWNS_VBO);
+ }
+ return SHC.drw_lightprobe_cube;
}
GPUBatch *DRW_cache_lightprobe_grid_get(void)
{
- if (!SHC.drw_lightprobe_grid) {
- int v_idx = 0;
- const float sin_pi_3 = 0.86602540378f;
- const float cos_pi_3 = 0.5f;
- const float v[7][3] = {
- {0.0f, 1.0f, 0.0f},
- {sin_pi_3, cos_pi_3, 0.0f},
- {sin_pi_3, -cos_pi_3, 0.0f},
- {0.0f, -1.0f, 0.0f},
- {-sin_pi_3, -cos_pi_3, 0.0f},
- {-sin_pi_3, cos_pi_3, 0.0f},
- {0.0f, 0.0f, 0.0f},
- };
-
- /* Position Only 3D format */
- static GPUVertFormat format = { 0 };
- static struct { uint pos; } attr_id;
- if (format.attr_len == 0) {
- attr_id.pos = GPU_vertformat_attr_add(&format, "pos", GPU_COMP_F32, 3, GPU_FETCH_FLOAT);
- }
-
- GPUVertBuf *vbo = GPU_vertbuf_create_with_format(&format);
- GPU_vertbuf_data_alloc(vbo, (6 * 2 + 3) * 2);
-
- for (int i = 0; i < 6; ++i) {
- float tmp_v1[3], tmp_v2[3], tmp_tr[3];
- copy_v3_v3(tmp_v1, v[i]);
- copy_v3_v3(tmp_v2, v[(i + 1) % 6]);
- GPU_vertbuf_attr_set(vbo, attr_id.pos, v_idx++, tmp_v1);
- GPU_vertbuf_attr_set(vbo, attr_id.pos, v_idx++, tmp_v2);
-
- /* Internal wires. */
- for (int j = 1; j < 2; ++j) {
- mul_v3_v3fl(tmp_tr, v[(i / 2) * 2 + 1], -0.5f * j);
- add_v3_v3v3(tmp_v1, v[i], tmp_tr);
- add_v3_v3v3(tmp_v2, v[(i + 1) % 6], tmp_tr);
- GPU_vertbuf_attr_set(vbo, attr_id.pos, v_idx++, tmp_v1);
- GPU_vertbuf_attr_set(vbo, attr_id.pos, v_idx++, tmp_v2);
- }
- }
-
- GPU_vertbuf_attr_set(vbo, attr_id.pos, v_idx++, v[1]);
- GPU_vertbuf_attr_set(vbo, attr_id.pos, v_idx++, v[6]);
-
- GPU_vertbuf_attr_set(vbo, attr_id.pos, v_idx++, v[5]);
- GPU_vertbuf_attr_set(vbo, attr_id.pos, v_idx++, v[6]);
-
- GPU_vertbuf_attr_set(vbo, attr_id.pos, v_idx++, v[3]);
- GPU_vertbuf_attr_set(vbo, attr_id.pos, v_idx++, v[6]);
-
- SHC.drw_lightprobe_grid = GPU_batch_create_ex(GPU_PRIM_LINES, vbo, NULL, GPU_BATCH_OWNS_VBO);
- }
- return SHC.drw_lightprobe_grid;
+ if (!SHC.drw_lightprobe_grid) {
+ int v_idx = 0;
+ const float sin_pi_3 = 0.86602540378f;
+ const float cos_pi_3 = 0.5f;
+ const float v[7][3] = {
+ {0.0f, 1.0f, 0.0f},
+ {sin_pi_3, cos_pi_3, 0.0f},
+ {sin_pi_3, -cos_pi_3, 0.0f},
+ {0.0f, -1.0f, 0.0f},
+ {-sin_pi_3, -cos_pi_3, 0.0f},
+ {-sin_pi_3, cos_pi_3, 0.0f},
+ {0.0f, 0.0f, 0.0f},
+ };
+
+ /* Position Only 3D format */
+ static GPUVertFormat format = {0};
+ static struct {
+ uint pos;
+ } attr_id;
+ if (format.attr_len == 0) {
+ attr_id.pos = GPU_vertformat_attr_add(&format, "pos", GPU_COMP_F32, 3, GPU_FETCH_FLOAT);
+ }
+
+ GPUVertBuf *vbo = GPU_vertbuf_create_with_format(&format);
+ GPU_vertbuf_data_alloc(vbo, (6 * 2 + 3) * 2);
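+ /* Outer hexagon, one inset copy of each edge (the "internal wires" below),
+ * and 3 spokes from alternating corners to the center. */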
+
+ for (int i = 0; i < 6; ++i) {
+ float tmp_v1[3], tmp_v2[3], tmp_tr[3];
+ copy_v3_v3(tmp_v1, v[i]);
+ copy_v3_v3(tmp_v2, v[(i + 1) % 6]);
+ GPU_vertbuf_attr_set(vbo, attr_id.pos, v_idx++, tmp_v1);
+ GPU_vertbuf_attr_set(vbo, attr_id.pos, v_idx++, tmp_v2);
+
+ /* Internal wires. */
+ for (int j = 1; j < 2; ++j) {
+ mul_v3_v3fl(tmp_tr, v[(i / 2) * 2 + 1], -0.5f * j);
+ add_v3_v3v3(tmp_v1, v[i], tmp_tr);
+ add_v3_v3v3(tmp_v2, v[(i + 1) % 6], tmp_tr);
+ GPU_vertbuf_attr_set(vbo, attr_id.pos, v_idx++, tmp_v1);
+ GPU_vertbuf_attr_set(vbo, attr_id.pos, v_idx++, tmp_v2);
+ }
+ }
+
+ GPU_vertbuf_attr_set(vbo, attr_id.pos, v_idx++, v[1]);
+ GPU_vertbuf_attr_set(vbo, attr_id.pos, v_idx++, v[6]);
+
+ GPU_vertbuf_attr_set(vbo, attr_id.pos, v_idx++, v[5]);
+ GPU_vertbuf_attr_set(vbo, attr_id.pos, v_idx++, v[6]);
+
+ GPU_vertbuf_attr_set(vbo, attr_id.pos, v_idx++, v[3]);
+ GPU_vertbuf_attr_set(vbo, attr_id.pos, v_idx++, v[6]);
+
+ SHC.drw_lightprobe_grid = GPU_batch_create_ex(GPU_PRIM_LINES, vbo, NULL, GPU_BATCH_OWNS_VBO);
+ }
+ return SHC.drw_lightprobe_grid;
}
GPUBatch *DRW_cache_lightprobe_planar_get(void)
{
- if (!SHC.drw_lightprobe_planar) {
- int v_idx = 0;
- const float sin_pi_3 = 0.86602540378f;
- float v[4][3] = {
- {0.0f, 0.5f, 0.0f},
- {sin_pi_3, 0.0f, 0.0f},
- {0.0f, -0.5f, 0.0f},
- {-sin_pi_3, 0.0f, 0.0f},
- };
-
- /* Position Only 3D format */
- static GPUVertFormat format = { 0 };
- static struct { uint pos; } attr_id;
- if (format.attr_len == 0) {
- attr_id.pos = GPU_vertformat_attr_add(&format, "pos", GPU_COMP_F32, 3, GPU_FETCH_FLOAT);
- }
-
- GPUVertBuf *vbo = GPU_vertbuf_create_with_format(&format);
- GPU_vertbuf_data_alloc(vbo, 4 * 2);
-
- for (int i = 0; i < 4; ++i) {
- GPU_vertbuf_attr_set(vbo, attr_id.pos, v_idx++, v[i]);
- GPU_vertbuf_attr_set(vbo, attr_id.pos, v_idx++, v[(i + 1) % 4]);
- }
-
- SHC.drw_lightprobe_planar = GPU_batch_create_ex(GPU_PRIM_LINES, vbo, NULL, GPU_BATCH_OWNS_VBO);
- }
- return SHC.drw_lightprobe_planar;
+ if (!SHC.drw_lightprobe_planar) {
+ int v_idx = 0;
+ const float sin_pi_3 = 0.86602540378f;
+ float v[4][3] = {
+ {0.0f, 0.5f, 0.0f},
+ {sin_pi_3, 0.0f, 0.0f},
+ {0.0f, -0.5f, 0.0f},
+ {-sin_pi_3, 0.0f, 0.0f},
+ };
+
+ /* Position Only 3D format */
+ static GPUVertFormat format = {0};
+ static struct {
+ uint pos;
+ } attr_id;
+ if (format.attr_len == 0) {
+ attr_id.pos = GPU_vertformat_attr_add(&format, "pos", GPU_COMP_F32, 3, GPU_FETCH_FLOAT);
+ }
+
+ GPUVertBuf *vbo = GPU_vertbuf_create_with_format(&format);
+ GPU_vertbuf_data_alloc(vbo, 4 * 2);
+
+ for (int i = 0; i < 4; ++i) {
+ GPU_vertbuf_attr_set(vbo, attr_id.pos, v_idx++, v[i]);
+ GPU_vertbuf_attr_set(vbo, attr_id.pos, v_idx++, v[(i + 1) % 4]);
+ }
+
+ SHC.drw_lightprobe_planar = GPU_batch_create_ex(GPU_PRIM_LINES, vbo, NULL, GPU_BATCH_OWNS_VBO);
+ }
+ return SHC.drw_lightprobe_planar;
}
/** \} */
@@ -1952,57 +2028,57 @@ GPUBatch *DRW_cache_lightprobe_planar_get(void)
* \{ */
static const float bone_octahedral_verts[6][3] = {
- { 0.0f, 0.0f, 0.0f},
- { 0.1f, 0.1f, 0.1f},
- { 0.1f, 0.1f, -0.1f},
- {-0.1f, 0.1f, -0.1f},
- {-0.1f, 0.1f, 0.1f},
- { 0.0f, 1.0f, 0.0f},
+ {0.0f, 0.0f, 0.0f},
+ {0.1f, 0.1f, 0.1f},
+ {0.1f, 0.1f, -0.1f},
+ {-0.1f, 0.1f, -0.1f},
+ {-0.1f, 0.1f, 0.1f},
+ {0.0f, 1.0f, 0.0f},
};
static const float bone_octahedral_smooth_normals[6][3] = {
- { 0.0f, -1.0f, 0.0f},
+ {0.0f, -1.0f, 0.0f},
#if 0 /* creates problems for outlines when scaled */
- { 0.943608f * M_SQRT1_2, -0.331048f, 0.943608f * M_SQRT1_2},
- { 0.943608f * M_SQRT1_2, -0.331048f, -0.943608f * M_SQRT1_2},
- {-0.943608f * M_SQRT1_2, -0.331048f, -0.943608f * M_SQRT1_2},
- {-0.943608f * M_SQRT1_2, -0.331048f, 0.943608f * M_SQRT1_2},
+ { 0.943608f * M_SQRT1_2, -0.331048f, 0.943608f * M_SQRT1_2},
+ { 0.943608f * M_SQRT1_2, -0.331048f, -0.943608f * M_SQRT1_2},
+ {-0.943608f * M_SQRT1_2, -0.331048f, -0.943608f * M_SQRT1_2},
+ {-0.943608f * M_SQRT1_2, -0.331048f, 0.943608f * M_SQRT1_2},
#else
- { M_SQRT1_2, 0.0f, M_SQRT1_2},
- { M_SQRT1_2, 0.0f, -M_SQRT1_2},
- {-M_SQRT1_2, 0.0f, -M_SQRT1_2},
- {-M_SQRT1_2, 0.0f, M_SQRT1_2},
+ {M_SQRT1_2, 0.0f, M_SQRT1_2},
+ {M_SQRT1_2, 0.0f, -M_SQRT1_2},
+ {-M_SQRT1_2, 0.0f, -M_SQRT1_2},
+ {-M_SQRT1_2, 0.0f, M_SQRT1_2},
#endif
- { 0.0f, 1.0f, 0.0f},
+ {0.0f, 1.0f, 0.0f},
};
-#if 0 /* UNUSED */
+#if 0 /* UNUSED */
static const uint bone_octahedral_wire[24] = {
- 0, 1, 1, 5, 5, 3, 3, 0,
- 0, 4, 4, 5, 5, 2, 2, 0,
- 1, 2, 2, 3, 3, 4, 4, 1,
+ 0, 1, 1, 5, 5, 3, 3, 0,
+ 0, 4, 4, 5, 5, 2, 2, 0,
+ 1, 2, 2, 3, 3, 4, 4, 1,
};
/* aligned with bone_octahedral_wire
* Contains adjacent normal index */
static const uint bone_octahedral_wire_adjacent_face[24] = {
- 0, 3, 4, 7, 5, 6, 1, 2,
- 2, 3, 6, 7, 4, 5, 0, 1,
- 0, 4, 1, 5, 2, 6, 3, 7,
+ 0, 3, 4, 7, 5, 6, 1, 2,
+ 2, 3, 6, 7, 4, 5, 0, 1,
+ 0, 4, 1, 5, 2, 6, 3, 7,
};
#endif
static const uint bone_octahedral_solid_tris[8][3] = {
- {2, 1, 0}, /* bottom */
- {3, 2, 0},
- {4, 3, 0},
- {1, 4, 0},
-
- {5, 1, 2}, /* top */
- {5, 2, 3},
- {5, 3, 4},
- {5, 4, 1},
+ {2, 1, 0}, /* bottom */
+ {3, 2, 0},
+ {4, 3, 0},
+ {1, 4, 0},
+
+ {5, 1, 2}, /* top */
+ {5, 2, 3},
+ {5, 3, 4},
+ {5, 4, 1},
};
/**
@@ -2017,151 +2093,164 @@ static const uint bone_octahedral_solid_tris[8][3] = {
* {0, 12, 1, 10, 2, 3}
*/
static const uint bone_octahedral_wire_lines_adjacency[12][4] = {
- { 0, 1, 2, 6}, { 0, 12, 1, 6}, { 0, 3, 12, 6}, { 0, 2, 3, 6},
- { 1, 6, 2, 3}, { 1, 12, 6, 3}, { 1, 0, 12, 3}, { 1, 2, 0, 3},
- { 2, 0, 1, 12}, { 2, 3, 0, 12}, { 2, 6, 3, 12}, { 2, 1, 6, 12},
+ {0, 1, 2, 6},
+ {0, 12, 1, 6},
+ {0, 3, 12, 6},
+ {0, 2, 3, 6},
+ {1, 6, 2, 3},
+ {1, 12, 6, 3},
+ {1, 0, 12, 3},
+ {1, 2, 0, 3},
+ {2, 0, 1, 12},
+ {2, 3, 0, 12},
+ {2, 6, 3, 12},
+ {2, 1, 6, 12},
};
#if 0 /* UNUSED */
static const uint bone_octahedral_solid_tris_adjacency[8][6] = {
- { 0, 12, 1, 10, 2, 3},
- { 3, 15, 4, 1, 5, 6},
- { 6, 18, 7, 4, 8, 9},
- { 9, 21, 10, 7, 11, 0},
-
- {12, 22, 13, 2, 14, 17},
- {15, 13, 16, 5, 17, 20},
- {18, 16, 19, 8, 20, 23},
- {21, 19, 22, 11, 23, 14},
+ { 0, 12, 1, 10, 2, 3},
+ { 3, 15, 4, 1, 5, 6},
+ { 6, 18, 7, 4, 8, 9},
+ { 9, 21, 10, 7, 11, 0},
+
+ {12, 22, 13, 2, 14, 17},
+ {15, 13, 16, 5, 17, 20},
+ {18, 16, 19, 8, 20, 23},
+ {21, 19, 22, 11, 23, 14},
};
#endif
/* aligned with bone_octahedral_solid_tris */
static const float bone_octahedral_solid_normals[8][3] = {
- { M_SQRT1_2, -M_SQRT1_2, 0.00000000f},
- {-0.00000000f, -M_SQRT1_2, -M_SQRT1_2},
- {-M_SQRT1_2, -M_SQRT1_2, 0.00000000f},
- { 0.00000000f, -M_SQRT1_2, M_SQRT1_2},
- { 0.99388373f, 0.11043154f, -0.00000000f},
- { 0.00000000f, 0.11043154f, -0.99388373f},
- {-0.99388373f, 0.11043154f, 0.00000000f},
- { 0.00000000f, 0.11043154f, 0.99388373f},
+ {M_SQRT1_2, -M_SQRT1_2, 0.00000000f},
+ {-0.00000000f, -M_SQRT1_2, -M_SQRT1_2},
+ {-M_SQRT1_2, -M_SQRT1_2, 0.00000000f},
+ {0.00000000f, -M_SQRT1_2, M_SQRT1_2},
+ {0.99388373f, 0.11043154f, -0.00000000f},
+ {0.00000000f, 0.11043154f, -0.99388373f},
+ {-0.99388373f, 0.11043154f, 0.00000000f},
+ {0.00000000f, 0.11043154f, 0.99388373f},
};
GPUBatch *DRW_cache_bone_octahedral_get(void)
{
- if (!SHC.drw_bone_octahedral) {
- uint v_idx = 0;
-
- static GPUVertFormat format = { 0 };
- static struct { uint pos, nor, snor; } attr_id;
- if (format.attr_len == 0) {
- attr_id.pos = GPU_vertformat_attr_add(&format, "pos", GPU_COMP_F32, 3, GPU_FETCH_FLOAT);
- attr_id.nor = GPU_vertformat_attr_add(&format, "nor", GPU_COMP_F32, 3, GPU_FETCH_FLOAT);
- attr_id.snor = GPU_vertformat_attr_add(&format, "snor", GPU_COMP_F32, 3, GPU_FETCH_FLOAT);
- }
-
- /* Vertices */
- GPUVertBuf *vbo = GPU_vertbuf_create_with_format(&format);
- GPU_vertbuf_data_alloc(vbo, 24);
-
- for (int i = 0; i < 8; i++) {
- for (int j = 0; j < 3; ++j) {
- GPU_vertbuf_attr_set(vbo, attr_id.nor, v_idx, bone_octahedral_solid_normals[i]);
- GPU_vertbuf_attr_set(vbo, attr_id.snor, v_idx, bone_octahedral_smooth_normals[bone_octahedral_solid_tris[i][j]]);
- GPU_vertbuf_attr_set(vbo, attr_id.pos, v_idx++, bone_octahedral_verts[bone_octahedral_solid_tris[i][j]]);
- }
- }
-
- SHC.drw_bone_octahedral = GPU_batch_create_ex(
- GPU_PRIM_TRIS, vbo, NULL,
- GPU_BATCH_OWNS_VBO);
- }
- return SHC.drw_bone_octahedral;
+ if (!SHC.drw_bone_octahedral) {
+ uint v_idx = 0;
+
+ static GPUVertFormat format = {0};
+ static struct {
+ uint pos, nor, snor;
+ } attr_id;
+ if (format.attr_len == 0) {
+ attr_id.pos = GPU_vertformat_attr_add(&format, "pos", GPU_COMP_F32, 3, GPU_FETCH_FLOAT);
+ attr_id.nor = GPU_vertformat_attr_add(&format, "nor", GPU_COMP_F32, 3, GPU_FETCH_FLOAT);
+ attr_id.snor = GPU_vertformat_attr_add(&format, "snor", GPU_COMP_F32, 3, GPU_FETCH_FLOAT);
+ }
+
+ /* Vertices */
+ GPUVertBuf *vbo = GPU_vertbuf_create_with_format(&format);
+ GPU_vertbuf_data_alloc(vbo, 24);
+
+ for (int i = 0; i < 8; i++) {
+ for (int j = 0; j < 3; ++j) {
+ GPU_vertbuf_attr_set(vbo, attr_id.nor, v_idx, bone_octahedral_solid_normals[i]);
+ GPU_vertbuf_attr_set(vbo,
+ attr_id.snor,
+ v_idx,
+ bone_octahedral_smooth_normals[bone_octahedral_solid_tris[i][j]]);
+ GPU_vertbuf_attr_set(
+ vbo, attr_id.pos, v_idx++, bone_octahedral_verts[bone_octahedral_solid_tris[i][j]]);
+ }
+ }
+
+ SHC.drw_bone_octahedral = GPU_batch_create_ex(GPU_PRIM_TRIS, vbo, NULL, GPU_BATCH_OWNS_VBO);
+ }
+ return SHC.drw_bone_octahedral;
}
GPUBatch *DRW_cache_bone_octahedral_wire_get(void)
{
- if (!SHC.drw_bone_octahedral_wire) {
- GPUIndexBufBuilder elb;
- GPU_indexbuf_init(&elb, GPU_PRIM_LINES_ADJ, 12, 24);
+ if (!SHC.drw_bone_octahedral_wire) {
+ GPUIndexBufBuilder elb;
+ GPU_indexbuf_init(&elb, GPU_PRIM_LINES_ADJ, 12, 24);
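+ /* Indices address the 24-vertex solid VBO reused below, where each octahedron
+ * corner appears several times, so values like 6 and 12 map to duplicated
+ * corner positions. */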
- for (int i = 0; i < 12; i++) {
- GPU_indexbuf_add_line_adj_verts(
- &elb,
- bone_octahedral_wire_lines_adjacency[i][0],
- bone_octahedral_wire_lines_adjacency[i][1],
- bone_octahedral_wire_lines_adjacency[i][2],
- bone_octahedral_wire_lines_adjacency[i][3]);
- }
+ for (int i = 0; i < 12; i++) {
+ GPU_indexbuf_add_line_adj_verts(&elb,
+ bone_octahedral_wire_lines_adjacency[i][0],
+ bone_octahedral_wire_lines_adjacency[i][1],
+ bone_octahedral_wire_lines_adjacency[i][2],
+ bone_octahedral_wire_lines_adjacency[i][3]);
+ }
- /* HACK Reuse vertex buffer. */
- GPUBatch *pos_nor_batch = DRW_cache_bone_octahedral_get();
+ /* HACK Reuse vertex buffer. */
+ GPUBatch *pos_nor_batch = DRW_cache_bone_octahedral_get();
- SHC.drw_bone_octahedral_wire = GPU_batch_create_ex(
- GPU_PRIM_LINES_ADJ, pos_nor_batch->verts[0], GPU_indexbuf_build(&elb),
- GPU_BATCH_OWNS_INDEX);
- }
- return SHC.drw_bone_octahedral_wire;
+ SHC.drw_bone_octahedral_wire = GPU_batch_create_ex(GPU_PRIM_LINES_ADJ,
+ pos_nor_batch->verts[0],
+ GPU_indexbuf_build(&elb),
+ GPU_BATCH_OWNS_INDEX);
+ }
+ return SHC.drw_bone_octahedral_wire;
}
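/* A minimal sketch (not part of this diff) of the lines-adjacency encoding
 * used above, assuming standard GPU_PRIM_LINES_ADJ semantics: each index
 * quadruple {v0, v1, v2, v3} draws the edge v1-v2, while v0 and v3 are the
 * neighboring vertices a geometry shader can read for silhouette tests.
 * Emitting one such primitive with the same builder API: */
GPUIndexBufBuilder elb;
GPU_indexbuf_init(&elb, GPU_PRIM_LINES_ADJ, 1, 4); /* 1 primitive, 4 verts */
GPU_indexbuf_add_line_adj_verts(&elb, 0, 1, 2, 3); /* edge 1-2, neighbors 0, 3 */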
/* XXX TODO move that 1 unit cube to a more common/generic place? */
static const float bone_box_verts[8][3] = {
- { 1.0f, 0.0f, 1.0f},
- { 1.0f, 0.0f, -1.0f},
- {-1.0f, 0.0f, -1.0f},
- {-1.0f, 0.0f, 1.0f},
- { 1.0f, 1.0f, 1.0f},
- { 1.0f, 1.0f, -1.0f},
- {-1.0f, 1.0f, -1.0f},
- {-1.0f, 1.0f, 1.0f},
+ {1.0f, 0.0f, 1.0f},
+ {1.0f, 0.0f, -1.0f},
+ {-1.0f, 0.0f, -1.0f},
+ {-1.0f, 0.0f, 1.0f},
+ {1.0f, 1.0f, 1.0f},
+ {1.0f, 1.0f, -1.0f},
+ {-1.0f, 1.0f, -1.0f},
+ {-1.0f, 1.0f, 1.0f},
};
static const float bone_box_smooth_normals[8][3] = {
- { M_SQRT3, -M_SQRT3, M_SQRT3},
- { M_SQRT3, -M_SQRT3, -M_SQRT3},
- {-M_SQRT3, -M_SQRT3, -M_SQRT3},
- {-M_SQRT3, -M_SQRT3, M_SQRT3},
- { M_SQRT3, M_SQRT3, M_SQRT3},
- { M_SQRT3, M_SQRT3, -M_SQRT3},
- {-M_SQRT3, M_SQRT3, -M_SQRT3},
- {-M_SQRT3, M_SQRT3, M_SQRT3},
+ {M_SQRT3, -M_SQRT3, M_SQRT3},
+ {M_SQRT3, -M_SQRT3, -M_SQRT3},
+ {-M_SQRT3, -M_SQRT3, -M_SQRT3},
+ {-M_SQRT3, -M_SQRT3, M_SQRT3},
+ {M_SQRT3, M_SQRT3, M_SQRT3},
+ {M_SQRT3, M_SQRT3, -M_SQRT3},
+ {-M_SQRT3, M_SQRT3, -M_SQRT3},
+ {-M_SQRT3, M_SQRT3, M_SQRT3},
};
#if 0 /* UNUSED */
static const uint bone_box_wire[24] = {
- 0, 1, 1, 2, 2, 3, 3, 0,
- 4, 5, 5, 6, 6, 7, 7, 4,
- 0, 4, 1, 5, 2, 6, 3, 7,
+ 0, 1, 1, 2, 2, 3, 3, 0,
+ 4, 5, 5, 6, 6, 7, 7, 4,
+ 0, 4, 1, 5, 2, 6, 3, 7,
};
/* aligned with bone_octahedral_wire
 * Contains the adjacent normal index */
static const uint bone_box_wire_adjacent_face[24] = {
- 0, 2, 0, 4, 1, 6, 1, 8,
- 3, 10, 5, 10, 7, 11, 9, 11,
- 3, 8, 2, 5, 4, 7, 6, 9,
+ 0, 2, 0, 4, 1, 6, 1, 8,
+ 3, 10, 5, 10, 7, 11, 9, 11,
+ 3, 8, 2, 5, 4, 7, 6, 9,
};
#endif
static const uint bone_box_solid_tris[12][3] = {
- {0, 2, 1}, /* bottom */
- {0, 3, 2},
+ {0, 2, 1}, /* bottom */
+ {0, 3, 2},
- {0, 1, 5}, /* sides */
- {0, 5, 4},
+ {0, 1, 5}, /* sides */
+ {0, 5, 4},
- {1, 2, 6},
- {1, 6, 5},
+ {1, 2, 6},
+ {1, 6, 5},
- {2, 3, 7},
- {2, 7, 6},
+ {2, 3, 7},
+ {2, 7, 6},
- {3, 0, 4},
- {3, 4, 7},
+ {3, 0, 4},
+ {3, 4, 7},
- {4, 5, 6}, /* top */
- {4, 6, 7},
+ {4, 5, 6}, /* top */
+ {4, 6, 7},
};
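/* Worked example (a sketch, not part of this diff): each box face is split
 * into two triangles, e.g. the bottom pair {0, 2, 1} / {0, 3, 2} uses only
 * verts 0-3, which all have y == 0 in bone_box_verts. Its winding yields the
 * downward normal stored in bone_box_solid_normals[0] below: */
float e1[3], e2[3], n[3];
sub_v3_v3v3(e1, bone_box_verts[2], bone_box_verts[0]);
sub_v3_v3v3(e2, bone_box_verts[1], bone_box_verts[0]);
cross_v3_v3v3(n, e1, e2); /* (0, -4, 0), i.e. (0, -1, 0) once normalized */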
/**
@@ -2169,337 +2258,357 @@ static const uint bone_box_solid_tris[12][3] = {
 * See bone_octahedral_solid_tris for more info.
*/
static const uint bone_box_wire_lines_adjacency[12][4] = {
- { 4, 2, 0, 11}, { 0, 1, 2, 8}, { 2, 4, 1, 14}, { 1, 0, 4, 20}, /* bottom */
- { 0, 8, 11, 14}, { 2, 14, 8, 20}, { 1, 20, 14, 11}, { 4, 11, 20, 8}, /* top */
- { 20, 0, 11, 2}, { 11, 2, 8, 1}, { 8, 1, 14, 4}, { 14, 4, 20, 0}, /* sides */
+ {4, 2, 0, 11},
+ {0, 1, 2, 8},
+ {2, 4, 1, 14},
+ {1, 0, 4, 20}, /* bottom */
+ {0, 8, 11, 14},
+ {2, 14, 8, 20},
+ {1, 20, 14, 11},
+ {4, 11, 20, 8}, /* top */
+ {20, 0, 11, 2},
+ {11, 2, 8, 1},
+ {8, 1, 14, 4},
+ {14, 4, 20, 0}, /* sides */
};
#if 0 /* UNUSED */
static const uint bone_box_solid_tris_adjacency[12][6] = {
- { 0, 5, 1, 14, 2, 8},
- { 3, 26, 4, 20, 5, 1},
+ { 0, 5, 1, 14, 2, 8},
+ { 3, 26, 4, 20, 5, 1},
- { 6, 2, 7, 16, 8, 11},
- { 9, 7, 10, 32, 11, 24},
+ { 6, 2, 7, 16, 8, 11},
+ { 9, 7, 10, 32, 11, 24},
- {12, 0, 13, 22, 14, 17},
- {15, 13, 16, 30, 17, 6},
+ {12, 0, 13, 22, 14, 17},
+ {15, 13, 16, 30, 17, 6},
- {18, 3, 19, 28, 20, 23},
- {21, 19, 22, 33, 23, 12},
+ {18, 3, 19, 28, 20, 23},
+ {21, 19, 22, 33, 23, 12},
- {24, 4, 25, 10, 26, 29},
- {27, 25, 28, 34, 29, 18},
+ {24, 4, 25, 10, 26, 29},
+ {27, 25, 28, 34, 29, 18},
- {30, 9, 31, 15, 32, 35},
- {33, 31, 34, 21, 35, 27},
+ {30, 9, 31, 15, 32, 35},
+ {33, 31, 34, 21, 35, 27},
};
#endif
/* aligned with bone_box_solid_tris */
static const float bone_box_solid_normals[12][3] = {
- { 0.0f, -1.0f, 0.0f},
- { 0.0f, -1.0f, 0.0f},
+ {0.0f, -1.0f, 0.0f},
+ {0.0f, -1.0f, 0.0f},
- { 1.0f, 0.0f, 0.0f},
- { 1.0f, 0.0f, 0.0f},
+ {1.0f, 0.0f, 0.0f},
+ {1.0f, 0.0f, 0.0f},
- { 0.0f, 0.0f, -1.0f},
- { 0.0f, 0.0f, -1.0f},
+ {0.0f, 0.0f, -1.0f},
+ {0.0f, 0.0f, -1.0f},
- {-1.0f, 0.0f, 0.0f},
- {-1.0f, 0.0f, 0.0f},
+ {-1.0f, 0.0f, 0.0f},
+ {-1.0f, 0.0f, 0.0f},
- { 0.0f, 0.0f, 1.0f},
- { 0.0f, 0.0f, 1.0f},
+ {0.0f, 0.0f, 1.0f},
+ {0.0f, 0.0f, 1.0f},
- { 0.0f, 1.0f, 0.0f},
- { 0.0f, 1.0f, 0.0f},
+ {0.0f, 1.0f, 0.0f},
+ {0.0f, 1.0f, 0.0f},
};
GPUBatch *DRW_cache_bone_box_get(void)
{
- if (!SHC.drw_bone_box) {
- uint v_idx = 0;
-
- static GPUVertFormat format = { 0 };
- static struct { uint pos, nor, snor; } attr_id;
- if (format.attr_len == 0) {
- attr_id.pos = GPU_vertformat_attr_add(&format, "pos", GPU_COMP_F32, 3, GPU_FETCH_FLOAT);
- attr_id.nor = GPU_vertformat_attr_add(&format, "nor", GPU_COMP_F32, 3, GPU_FETCH_FLOAT);
- attr_id.snor = GPU_vertformat_attr_add(&format, "snor", GPU_COMP_F32, 3, GPU_FETCH_FLOAT);
- }
-
- /* Vertices */
- GPUVertBuf *vbo = GPU_vertbuf_create_with_format(&format);
- GPU_vertbuf_data_alloc(vbo, 36);
-
- for (int i = 0; i < 12; i++) {
- for (int j = 0; j < 3; j++) {
- GPU_vertbuf_attr_set(vbo, attr_id.nor, v_idx, bone_box_solid_normals[i]);
- GPU_vertbuf_attr_set(vbo, attr_id.snor, v_idx, bone_box_smooth_normals[bone_box_solid_tris[i][j]]);
- GPU_vertbuf_attr_set(vbo, attr_id.pos, v_idx++, bone_box_verts[bone_box_solid_tris[i][j]]);
- }
- }
-
- SHC.drw_bone_box = GPU_batch_create_ex(
- GPU_PRIM_TRIS, vbo, NULL,
- GPU_BATCH_OWNS_VBO);
- }
- return SHC.drw_bone_box;
+ if (!SHC.drw_bone_box) {
+ uint v_idx = 0;
+
+ static GPUVertFormat format = {0};
+ static struct {
+ uint pos, nor, snor;
+ } attr_id;
+ if (format.attr_len == 0) {
+ attr_id.pos = GPU_vertformat_attr_add(&format, "pos", GPU_COMP_F32, 3, GPU_FETCH_FLOAT);
+ attr_id.nor = GPU_vertformat_attr_add(&format, "nor", GPU_COMP_F32, 3, GPU_FETCH_FLOAT);
+ attr_id.snor = GPU_vertformat_attr_add(&format, "snor", GPU_COMP_F32, 3, GPU_FETCH_FLOAT);
+ }
+
+ /* Vertices */
+ GPUVertBuf *vbo = GPU_vertbuf_create_with_format(&format);
+ GPU_vertbuf_data_alloc(vbo, 36);
+
+ for (int i = 0; i < 12; i++) {
+ for (int j = 0; j < 3; j++) {
+ GPU_vertbuf_attr_set(vbo, attr_id.nor, v_idx, bone_box_solid_normals[i]);
+ GPU_vertbuf_attr_set(
+ vbo, attr_id.snor, v_idx, bone_box_smooth_normals[bone_box_solid_tris[i][j]]);
+ GPU_vertbuf_attr_set(vbo, attr_id.pos, v_idx++, bone_box_verts[bone_box_solid_tris[i][j]]);
+ }
+ }
+
+ SHC.drw_bone_box = GPU_batch_create_ex(GPU_PRIM_TRIS, vbo, NULL, GPU_BATCH_OWNS_VBO);
+ }
+ return SHC.drw_bone_box;
}
GPUBatch *DRW_cache_bone_box_wire_get(void)
{
- if (!SHC.drw_bone_box_wire) {
- GPUIndexBufBuilder elb;
- GPU_indexbuf_init(&elb, GPU_PRIM_LINES_ADJ, 12, 36);
+ if (!SHC.drw_bone_box_wire) {
+ GPUIndexBufBuilder elb;
+ GPU_indexbuf_init(&elb, GPU_PRIM_LINES_ADJ, 12, 36);
- for (int i = 0; i < 12; i++) {
- GPU_indexbuf_add_line_adj_verts(
- &elb,
- bone_box_wire_lines_adjacency[i][0],
- bone_box_wire_lines_adjacency[i][1],
- bone_box_wire_lines_adjacency[i][2],
- bone_box_wire_lines_adjacency[i][3]);
- }
+ for (int i = 0; i < 12; i++) {
+ GPU_indexbuf_add_line_adj_verts(&elb,
+ bone_box_wire_lines_adjacency[i][0],
+ bone_box_wire_lines_adjacency[i][1],
+ bone_box_wire_lines_adjacency[i][2],
+ bone_box_wire_lines_adjacency[i][3]);
+ }
- /* HACK Reuse vertex buffer. */
- GPUBatch *pos_nor_batch = DRW_cache_bone_box_get();
+ /* HACK Reuse vertex buffer. */
+ GPUBatch *pos_nor_batch = DRW_cache_bone_box_get();
- SHC.drw_bone_box_wire = GPU_batch_create_ex(
- GPU_PRIM_LINES_ADJ, pos_nor_batch->verts[0], GPU_indexbuf_build(&elb),
- GPU_BATCH_OWNS_INDEX);
- }
- return SHC.drw_bone_box_wire;
+ SHC.drw_bone_box_wire = GPU_batch_create_ex(GPU_PRIM_LINES_ADJ,
+ pos_nor_batch->verts[0],
+ GPU_indexbuf_build(&elb),
+ GPU_BATCH_OWNS_INDEX);
+ }
+ return SHC.drw_bone_box_wire;
}
/* Helpers for envelope bone's solid sphere-with-hidden-equatorial-cylinder.
 * Note that here we only encode head/tail in the fourth component of the vector. */
static void benv_lat_lon_to_co(const float lat, const float lon, float r_nor[3])
{
- r_nor[0] = sinf(lat) * cosf(lon);
- r_nor[1] = sinf(lat) * sinf(lon);
- r_nor[2] = cosf(lat);
+ r_nor[0] = sinf(lat) * cosf(lon);
+ r_nor[1] = sinf(lat) * sinf(lon);
+ r_nor[2] = cosf(lat);
}
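/* A hedged usage sketch: with latitude measured from the +Z pole, lat = 0
 * maps to the pole and lat = M_PI / 2, lon = 0 to a point on the equator: */
float nor[3];
benv_lat_lon_to_co(0.0f, 0.0f, nor);            /* -> {0.0f, 0.0f, 1.0f} */
benv_lat_lon_to_co((float)M_PI / 2, 0.0f, nor); /* -> {1.0f, 0.0f, 0.0f} */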
GPUBatch *DRW_cache_bone_envelope_solid_get(void)
{
- if (!SHC.drw_bone_envelope) {
- const int lon_res = 24;
- const int lat_res = 24;
- const float lon_inc = 2.0f * M_PI / lon_res;
- const float lat_inc = M_PI / lat_res;
- uint v_idx = 0;
+ if (!SHC.drw_bone_envelope) {
+ const int lon_res = 24;
+ const int lat_res = 24;
+ const float lon_inc = 2.0f * M_PI / lon_res;
+ const float lat_inc = M_PI / lat_res;
+ uint v_idx = 0;
- static GPUVertFormat format = { 0 };
- static struct { uint pos; } attr_id;
- if (format.attr_len == 0) {
- attr_id.pos = GPU_vertformat_attr_add(&format, "pos", GPU_COMP_F32, 3, GPU_FETCH_FLOAT);
- }
+ static GPUVertFormat format = {0};
+ static struct {
+ uint pos;
+ } attr_id;
+ if (format.attr_len == 0) {
+ attr_id.pos = GPU_vertformat_attr_add(&format, "pos", GPU_COMP_F32, 3, GPU_FETCH_FLOAT);
+ }
- /* Vertices */
- GPUVertBuf *vbo = GPU_vertbuf_create_with_format(&format);
- GPU_vertbuf_data_alloc(vbo, ((lat_res + 1) * 2) * lon_res * 1);
+ /* Vertices */
+ GPUVertBuf *vbo = GPU_vertbuf_create_with_format(&format);
+ GPU_vertbuf_data_alloc(vbo, ((lat_res + 1) * 2) * lon_res * 1);
- float lon = 0.0f;
- for (int i = 0; i < lon_res; i++, lon += lon_inc) {
- float lat = 0.0f;
- float co1[3], co2[3];
+ float lon = 0.0f;
+ for (int i = 0; i < lon_res; i++, lon += lon_inc) {
+ float lat = 0.0f;
+ float co1[3], co2[3];
- /* Note: the poles are duplicated on purpose, to restart the strip. */
+ /* Note: the poles are duplicated on purpose, to restart the strip. */
- /* 1st sphere */
- for (int j = 0; j < lat_res; j++, lat += lat_inc) {
- benv_lat_lon_to_co(lat, lon, co1);
- benv_lat_lon_to_co(lat, lon + lon_inc, co2);
+ /* 1st sphere */
+ for (int j = 0; j < lat_res; j++, lat += lat_inc) {
+ benv_lat_lon_to_co(lat, lon, co1);
+ benv_lat_lon_to_co(lat, lon + lon_inc, co2);
- GPU_vertbuf_attr_set(vbo, attr_id.pos, v_idx++, co1);
- GPU_vertbuf_attr_set(vbo, attr_id.pos, v_idx++, co2);
- }
+ GPU_vertbuf_attr_set(vbo, attr_id.pos, v_idx++, co1);
+ GPU_vertbuf_attr_set(vbo, attr_id.pos, v_idx++, co2);
+ }
- /* Closing the loop */
- benv_lat_lon_to_co(M_PI, lon, co1);
- benv_lat_lon_to_co(M_PI, lon + lon_inc, co2);
+ /* Closing the loop */
+ benv_lat_lon_to_co(M_PI, lon, co1);
+ benv_lat_lon_to_co(M_PI, lon + lon_inc, co2);
- GPU_vertbuf_attr_set(vbo, attr_id.pos, v_idx++, co1);
- GPU_vertbuf_attr_set(vbo, attr_id.pos, v_idx++, co2);
- }
+ GPU_vertbuf_attr_set(vbo, attr_id.pos, v_idx++, co1);
+ GPU_vertbuf_attr_set(vbo, attr_id.pos, v_idx++, co2);
+ }
- SHC.drw_bone_envelope = GPU_batch_create_ex(GPU_PRIM_TRI_STRIP, vbo, NULL, GPU_BATCH_OWNS_VBO);
- }
- return SHC.drw_bone_envelope;
+ SHC.drw_bone_envelope = GPU_batch_create_ex(GPU_PRIM_TRI_STRIP, vbo, NULL, GPU_BATCH_OWNS_VBO);
+ }
+ return SHC.drw_bone_envelope;
}
GPUBatch *DRW_cache_bone_envelope_outline_get(void)
{
- if (!SHC.drw_bone_envelope_outline) {
-# define CIRCLE_RESOL 64
- float v0[2], v1[2], v2[2];
- const float radius = 1.0f;
-
- /* Position Only 2D format */
- static GPUVertFormat format = { 0 };
- static struct { uint pos0, pos1, pos2; } attr_id;
- if (format.attr_len == 0) {
- attr_id.pos0 = GPU_vertformat_attr_add(&format, "pos0", GPU_COMP_F32, 2, GPU_FETCH_FLOAT);
- attr_id.pos1 = GPU_vertformat_attr_add(&format, "pos1", GPU_COMP_F32, 2, GPU_FETCH_FLOAT);
- attr_id.pos2 = GPU_vertformat_attr_add(&format, "pos2", GPU_COMP_F32, 2, GPU_FETCH_FLOAT);
- }
-
- GPUVertBuf *vbo = GPU_vertbuf_create_with_format(&format);
- GPU_vertbuf_data_alloc(vbo, (CIRCLE_RESOL + 1) * 2);
-
- v0[0] = radius * sinf((2.0f * M_PI * -2) / ((float)CIRCLE_RESOL));
- v0[1] = radius * cosf((2.0f * M_PI * -2) / ((float)CIRCLE_RESOL));
- v1[0] = radius * sinf((2.0f * M_PI * -1) / ((float)CIRCLE_RESOL));
- v1[1] = radius * cosf((2.0f * M_PI * -1) / ((float)CIRCLE_RESOL));
-
- /* Output 4 verts for each position. See shader for explanation. */
- uint v = 0;
- for (int a = 0; a < CIRCLE_RESOL; a++) {
- v2[0] = radius * sinf((2.0f * M_PI * a) / ((float)CIRCLE_RESOL));
- v2[1] = radius * cosf((2.0f * M_PI * a) / ((float)CIRCLE_RESOL));
- GPU_vertbuf_attr_set(vbo, attr_id.pos0, v, v0);
- GPU_vertbuf_attr_set(vbo, attr_id.pos1, v, v1);
- GPU_vertbuf_attr_set(vbo, attr_id.pos2, v++, v2);
- GPU_vertbuf_attr_set(vbo, attr_id.pos0, v, v0);
- GPU_vertbuf_attr_set(vbo, attr_id.pos1, v, v1);
- GPU_vertbuf_attr_set(vbo, attr_id.pos2, v++, v2);
- copy_v2_v2(v0, v1);
- copy_v2_v2(v1, v2);
- }
- v2[0] = 0.0f;
- v2[1] = radius;
- GPU_vertbuf_attr_set(vbo, attr_id.pos0, v, v0);
- GPU_vertbuf_attr_set(vbo, attr_id.pos1, v, v1);
- GPU_vertbuf_attr_set(vbo, attr_id.pos2, v++, v2);
- GPU_vertbuf_attr_set(vbo, attr_id.pos0, v, v0);
- GPU_vertbuf_attr_set(vbo, attr_id.pos1, v, v1);
- GPU_vertbuf_attr_set(vbo, attr_id.pos2, v++, v2);
-
- SHC.drw_bone_envelope_outline = GPU_batch_create_ex(GPU_PRIM_TRI_STRIP, vbo, NULL, GPU_BATCH_OWNS_VBO);
-# undef CIRCLE_RESOL
- }
- return SHC.drw_bone_envelope_outline;
+ if (!SHC.drw_bone_envelope_outline) {
+#define CIRCLE_RESOL 64
+ float v0[2], v1[2], v2[2];
+ const float radius = 1.0f;
+
+ /* Position Only 2D format */
+ static GPUVertFormat format = {0};
+ static struct {
+ uint pos0, pos1, pos2;
+ } attr_id;
+ if (format.attr_len == 0) {
+ attr_id.pos0 = GPU_vertformat_attr_add(&format, "pos0", GPU_COMP_F32, 2, GPU_FETCH_FLOAT);
+ attr_id.pos1 = GPU_vertformat_attr_add(&format, "pos1", GPU_COMP_F32, 2, GPU_FETCH_FLOAT);
+ attr_id.pos2 = GPU_vertformat_attr_add(&format, "pos2", GPU_COMP_F32, 2, GPU_FETCH_FLOAT);
+ }
+
+ GPUVertBuf *vbo = GPU_vertbuf_create_with_format(&format);
+ GPU_vertbuf_data_alloc(vbo, (CIRCLE_RESOL + 1) * 2);
+
+ v0[0] = radius * sinf((2.0f * M_PI * -2) / ((float)CIRCLE_RESOL));
+ v0[1] = radius * cosf((2.0f * M_PI * -2) / ((float)CIRCLE_RESOL));
+ v1[0] = radius * sinf((2.0f * M_PI * -1) / ((float)CIRCLE_RESOL));
+ v1[1] = radius * cosf((2.0f * M_PI * -1) / ((float)CIRCLE_RESOL));
+
+ /* Output 4 verts for each position. See shader for explanation. */
+ uint v = 0;
+ for (int a = 0; a < CIRCLE_RESOL; a++) {
+ v2[0] = radius * sinf((2.0f * M_PI * a) / ((float)CIRCLE_RESOL));
+ v2[1] = radius * cosf((2.0f * M_PI * a) / ((float)CIRCLE_RESOL));
+ GPU_vertbuf_attr_set(vbo, attr_id.pos0, v, v0);
+ GPU_vertbuf_attr_set(vbo, attr_id.pos1, v, v1);
+ GPU_vertbuf_attr_set(vbo, attr_id.pos2, v++, v2);
+ GPU_vertbuf_attr_set(vbo, attr_id.pos0, v, v0);
+ GPU_vertbuf_attr_set(vbo, attr_id.pos1, v, v1);
+ GPU_vertbuf_attr_set(vbo, attr_id.pos2, v++, v2);
+ copy_v2_v2(v0, v1);
+ copy_v2_v2(v1, v2);
+ }
+ v2[0] = 0.0f;
+ v2[1] = radius;
+ GPU_vertbuf_attr_set(vbo, attr_id.pos0, v, v0);
+ GPU_vertbuf_attr_set(vbo, attr_id.pos1, v, v1);
+ GPU_vertbuf_attr_set(vbo, attr_id.pos2, v++, v2);
+ GPU_vertbuf_attr_set(vbo, attr_id.pos0, v, v0);
+ GPU_vertbuf_attr_set(vbo, attr_id.pos1, v, v1);
+ GPU_vertbuf_attr_set(vbo, attr_id.pos2, v++, v2);
+
+ SHC.drw_bone_envelope_outline = GPU_batch_create_ex(
+ GPU_PRIM_TRI_STRIP, vbo, NULL, GPU_BATCH_OWNS_VBO);
+#undef CIRCLE_RESOL
+ }
+ return SHC.drw_bone_envelope_outline;
}
GPUBatch *DRW_cache_bone_point_get(void)
{
- if (!SHC.drw_bone_point) {
+ if (!SHC.drw_bone_point) {
#if 0 /* old style geometry sphere */
- const int lon_res = 16;
- const int lat_res = 8;
- const float rad = 0.05f;
- const float lon_inc = 2 * M_PI / lon_res;
- const float lat_inc = M_PI / lat_res;
- uint v_idx = 0;
-
- static GPUVertFormat format = { 0 };
- static struct { uint pos, nor; } attr_id;
- if (format.attr_len == 0) {
- attr_id.pos = GPU_vertformat_attr_add(&format, "pos", GPU_COMP_F32, 3, GPU_FETCH_FLOAT);
- attr_id.nor = GPU_vertformat_attr_add(&format, "nor", GPU_COMP_F32, 3, GPU_FETCH_FLOAT);
- }
-
- /* Vertices */
- GPUVertBuf *vbo = GPU_vertbuf_create_with_format(&format);
- GPU_vertbuf_data_alloc(vbo, (lat_res - 1) * lon_res * 6);
-
- float lon = 0.0f;
- for (int i = 0; i < lon_res; i++, lon += lon_inc) {
- float lat = 0.0f;
- for (int j = 0; j < lat_res; j++, lat += lat_inc) {
- if (j != lat_res - 1) { /* Pole */
- add_lat_lon_vert(vbo, attr_id.pos, attr_id.nor, &v_idx, rad, lat + lat_inc, lon + lon_inc);
- add_lat_lon_vert(vbo, attr_id.pos, attr_id.nor, &v_idx, rad, lat + lat_inc, lon);
- add_lat_lon_vert(vbo, attr_id.pos, attr_id.nor, &v_idx, rad, lat, lon);
- }
-
- if (j != 0) { /* Pole */
- add_lat_lon_vert(vbo, attr_id.pos, attr_id.nor, &v_idx, rad, lat, lon + lon_inc);
- add_lat_lon_vert(vbo, attr_id.pos, attr_id.nor, &v_idx, rad, lat + lat_inc, lon + lon_inc);
- add_lat_lon_vert(vbo, attr_id.pos, attr_id.nor, &v_idx, rad, lat, lon);
- }
- }
- }
-
- SHC.drw_bone_point = GPU_batch_create_ex(GPU_PRIM_TRIS, vbo, NULL, GPU_BATCH_OWNS_VBO);
+ const int lon_res = 16;
+ const int lat_res = 8;
+ const float rad = 0.05f;
+ const float lon_inc = 2 * M_PI / lon_res;
+ const float lat_inc = M_PI / lat_res;
+ uint v_idx = 0;
+
+ static GPUVertFormat format = { 0 };
+ static struct { uint pos, nor; } attr_id;
+ if (format.attr_len == 0) {
+ attr_id.pos = GPU_vertformat_attr_add(&format, "pos", GPU_COMP_F32, 3, GPU_FETCH_FLOAT);
+ attr_id.nor = GPU_vertformat_attr_add(&format, "nor", GPU_COMP_F32, 3, GPU_FETCH_FLOAT);
+ }
+
+ /* Vertices */
+ GPUVertBuf *vbo = GPU_vertbuf_create_with_format(&format);
+ GPU_vertbuf_data_alloc(vbo, (lat_res - 1) * lon_res * 6);
+
+ float lon = 0.0f;
+ for (int i = 0; i < lon_res; i++, lon += lon_inc) {
+ float lat = 0.0f;
+ for (int j = 0; j < lat_res; j++, lat += lat_inc) {
+ if (j != lat_res - 1) { /* Pole */
+ add_lat_lon_vert(vbo, attr_id.pos, attr_id.nor, &v_idx, rad, lat + lat_inc, lon + lon_inc);
+ add_lat_lon_vert(vbo, attr_id.pos, attr_id.nor, &v_idx, rad, lat + lat_inc, lon);
+ add_lat_lon_vert(vbo, attr_id.pos, attr_id.nor, &v_idx, rad, lat, lon);
+ }
+
+ if (j != 0) { /* Pole */
+ add_lat_lon_vert(vbo, attr_id.pos, attr_id.nor, &v_idx, rad, lat, lon + lon_inc);
+ add_lat_lon_vert(vbo, attr_id.pos, attr_id.nor, &v_idx, rad, lat + lat_inc, lon + lon_inc);
+ add_lat_lon_vert(vbo, attr_id.pos, attr_id.nor, &v_idx, rad, lat, lon);
+ }
+ }
+ }
+
+ SHC.drw_bone_point = GPU_batch_create_ex(GPU_PRIM_TRIS, vbo, NULL, GPU_BATCH_OWNS_VBO);
#else
# define CIRCLE_RESOL 64
- float v[2];
- const float radius = 0.05f;
-
- /* Position Only 2D format */
- static GPUVertFormat format = { 0 };
- static struct { uint pos; } attr_id;
- if (format.attr_len == 0) {
- attr_id.pos = GPU_vertformat_attr_add(&format, "pos", GPU_COMP_F32, 2, GPU_FETCH_FLOAT);
- }
-
- GPUVertBuf *vbo = GPU_vertbuf_create_with_format(&format);
- GPU_vertbuf_data_alloc(vbo, CIRCLE_RESOL);
-
- for (int a = 0; a < CIRCLE_RESOL; a++) {
- v[0] = radius * sinf((2.0f * M_PI * a) / ((float)CIRCLE_RESOL));
- v[1] = radius * cosf((2.0f * M_PI * a) / ((float)CIRCLE_RESOL));
- GPU_vertbuf_attr_set(vbo, attr_id.pos, a, v);
- }
-
- SHC.drw_bone_point = GPU_batch_create_ex(GPU_PRIM_TRI_FAN, vbo, NULL, GPU_BATCH_OWNS_VBO);
+ float v[2];
+ const float radius = 0.05f;
+
+ /* Position Only 2D format */
+ static GPUVertFormat format = {0};
+ static struct {
+ uint pos;
+ } attr_id;
+ if (format.attr_len == 0) {
+ attr_id.pos = GPU_vertformat_attr_add(&format, "pos", GPU_COMP_F32, 2, GPU_FETCH_FLOAT);
+ }
+
+ GPUVertBuf *vbo = GPU_vertbuf_create_with_format(&format);
+ GPU_vertbuf_data_alloc(vbo, CIRCLE_RESOL);
+
+ for (int a = 0; a < CIRCLE_RESOL; a++) {
+ v[0] = radius * sinf((2.0f * M_PI * a) / ((float)CIRCLE_RESOL));
+ v[1] = radius * cosf((2.0f * M_PI * a) / ((float)CIRCLE_RESOL));
+ GPU_vertbuf_attr_set(vbo, attr_id.pos, a, v);
+ }
+
+ SHC.drw_bone_point = GPU_batch_create_ex(GPU_PRIM_TRI_FAN, vbo, NULL, GPU_BATCH_OWNS_VBO);
# undef CIRCLE_RESOL
#endif
- }
- return SHC.drw_bone_point;
+ }
+ return SHC.drw_bone_point;
}
GPUBatch *DRW_cache_bone_point_wire_outline_get(void)
{
- if (!SHC.drw_bone_point_wire) {
+ if (!SHC.drw_bone_point_wire) {
#if 0 /* old style geometry sphere */
- GPUVertBuf *vbo = sphere_wire_vbo(0.05f);
- SHC.drw_bone_point_wire = GPU_batch_create_ex(GPU_PRIM_LINES, vbo, NULL, GPU_BATCH_OWNS_VBO);
+ GPUVertBuf *vbo = sphere_wire_vbo(0.05f);
+ SHC.drw_bone_point_wire = GPU_batch_create_ex(GPU_PRIM_LINES, vbo, NULL, GPU_BATCH_OWNS_VBO);
#else
# define CIRCLE_RESOL 64
- float v0[2], v1[2];
- const float radius = 0.05f;
-
- /* Position Only 2D format */
- static GPUVertFormat format = { 0 };
- static struct { uint pos0, pos1; } attr_id;
- if (format.attr_len == 0) {
- attr_id.pos0 = GPU_vertformat_attr_add(&format, "pos0", GPU_COMP_F32, 2, GPU_FETCH_FLOAT);
- attr_id.pos1 = GPU_vertformat_attr_add(&format, "pos1", GPU_COMP_F32, 2, GPU_FETCH_FLOAT);
- }
-
- GPUVertBuf *vbo = GPU_vertbuf_create_with_format(&format);
- GPU_vertbuf_data_alloc(vbo, (CIRCLE_RESOL + 1) * 2);
-
- v0[0] = radius * sinf((2.0f * M_PI * -1) / ((float)CIRCLE_RESOL));
- v0[1] = radius * cosf((2.0f * M_PI * -1) / ((float)CIRCLE_RESOL));
-
- uint v = 0;
- for (int a = 0; a < CIRCLE_RESOL; a++) {
- v1[0] = radius * sinf((2.0f * M_PI * a) / ((float)CIRCLE_RESOL));
- v1[1] = radius * cosf((2.0f * M_PI * a) / ((float)CIRCLE_RESOL));
- GPU_vertbuf_attr_set(vbo, attr_id.pos0, v, v0);
- GPU_vertbuf_attr_set(vbo, attr_id.pos1, v++, v1);
- GPU_vertbuf_attr_set(vbo, attr_id.pos0, v, v0);
- GPU_vertbuf_attr_set(vbo, attr_id.pos1, v++, v1);
- copy_v2_v2(v0, v1);
- }
- v1[0] = 0.0f;
- v1[1] = radius;
- GPU_vertbuf_attr_set(vbo, attr_id.pos0, v, v0);
- GPU_vertbuf_attr_set(vbo, attr_id.pos1, v++, v1);
- GPU_vertbuf_attr_set(vbo, attr_id.pos0, v, v0);
- GPU_vertbuf_attr_set(vbo, attr_id.pos1, v++, v1);
-
- SHC.drw_bone_point_wire = GPU_batch_create_ex(GPU_PRIM_TRI_STRIP, vbo, NULL, GPU_BATCH_OWNS_VBO);
+ float v0[2], v1[2];
+ const float radius = 0.05f;
+
+ /* Position Only 2D format */
+ static GPUVertFormat format = {0};
+ static struct {
+ uint pos0, pos1;
+ } attr_id;
+ if (format.attr_len == 0) {
+ attr_id.pos0 = GPU_vertformat_attr_add(&format, "pos0", GPU_COMP_F32, 2, GPU_FETCH_FLOAT);
+ attr_id.pos1 = GPU_vertformat_attr_add(&format, "pos1", GPU_COMP_F32, 2, GPU_FETCH_FLOAT);
+ }
+
+ GPUVertBuf *vbo = GPU_vertbuf_create_with_format(&format);
+ GPU_vertbuf_data_alloc(vbo, (CIRCLE_RESOL + 1) * 2);
+
+ v0[0] = radius * sinf((2.0f * M_PI * -1) / ((float)CIRCLE_RESOL));
+ v0[1] = radius * cosf((2.0f * M_PI * -1) / ((float)CIRCLE_RESOL));
+
+ uint v = 0;
+ for (int a = 0; a < CIRCLE_RESOL; a++) {
+ v1[0] = radius * sinf((2.0f * M_PI * a) / ((float)CIRCLE_RESOL));
+ v1[1] = radius * cosf((2.0f * M_PI * a) / ((float)CIRCLE_RESOL));
+ GPU_vertbuf_attr_set(vbo, attr_id.pos0, v, v0);
+ GPU_vertbuf_attr_set(vbo, attr_id.pos1, v++, v1);
+ GPU_vertbuf_attr_set(vbo, attr_id.pos0, v, v0);
+ GPU_vertbuf_attr_set(vbo, attr_id.pos1, v++, v1);
+ copy_v2_v2(v0, v1);
+ }
+ v1[0] = 0.0f;
+ v1[1] = radius;
+ GPU_vertbuf_attr_set(vbo, attr_id.pos0, v, v0);
+ GPU_vertbuf_attr_set(vbo, attr_id.pos1, v++, v1);
+ GPU_vertbuf_attr_set(vbo, attr_id.pos0, v, v0);
+ GPU_vertbuf_attr_set(vbo, attr_id.pos1, v++, v1);
+
+ SHC.drw_bone_point_wire = GPU_batch_create_ex(
+ GPU_PRIM_TRI_STRIP, vbo, NULL, GPU_BATCH_OWNS_VBO);
# undef CIRCLE_RESOL
#endif
- }
- return SHC.drw_bone_point_wire;
+ }
+ return SHC.drw_bone_point_wire;
}
/* keep in sync with armature_stick_vert.glsl */
@@ -2514,87 +2623,97 @@ GPUBatch *DRW_cache_bone_point_wire_outline_get(void)
GPUBatch *DRW_cache_bone_stick_get(void)
{
- if (!SHC.drw_bone_stick) {
+ if (!SHC.drw_bone_stick) {
#define CIRCLE_RESOL 12
- uint v = 0;
- uint flag;
- const float radius = 2.0f; /* head/tail radius */
- float pos[2];
-
- /* Position Only 2D format */
- static GPUVertFormat format = { 0 };
- static struct { uint pos, flag; } attr_id;
- if (format.attr_len == 0) {
- attr_id.pos = GPU_vertformat_attr_add(&format, "pos", GPU_COMP_F32, 2, GPU_FETCH_FLOAT);
- attr_id.flag = GPU_vertformat_attr_add(&format, "flag", GPU_COMP_U32, 1, GPU_FETCH_INT);
- }
-
- const uint vcount = (CIRCLE_RESOL + 1) * 2 + 6;
-
- GPUVertBuf *vbo = GPU_vertbuf_create_with_format(&format);
- GPU_vertbuf_data_alloc(vbo, vcount);
-
- GPUIndexBufBuilder elb;
- GPU_indexbuf_init_ex(&elb, GPU_PRIM_TRI_FAN, (CIRCLE_RESOL + 2) * 2 + 6 + 2, vcount, true);
-
- /* head/tail points */
- for (int i = 0; i < 2; ++i) {
- /* center vertex */
- copy_v2_fl(pos, 0.0f);
- flag = (i == 0) ? POS_HEAD : POS_TAIL;
- flag |= (i == 0) ? COL_HEAD : COL_TAIL;
- GPU_vertbuf_attr_set(vbo, attr_id.pos, v, pos);
- GPU_vertbuf_attr_set(vbo, attr_id.flag, v, &flag);
- GPU_indexbuf_add_generic_vert(&elb, v++);
- /* circle vertices */
- flag |= COL_WIRE;
- for (int a = 0; a < CIRCLE_RESOL; a++) {
- pos[0] = radius * sinf((2.0f * M_PI * a) / ((float)CIRCLE_RESOL));
- pos[1] = radius * cosf((2.0f * M_PI * a) / ((float)CIRCLE_RESOL));
- GPU_vertbuf_attr_set(vbo, attr_id.pos, v, pos);
- GPU_vertbuf_attr_set(vbo, attr_id.flag, v, &flag);
- GPU_indexbuf_add_generic_vert(&elb, v++);
- }
- /* Close the circle */
- GPU_indexbuf_add_generic_vert(&elb, v - CIRCLE_RESOL);
-
- GPU_indexbuf_add_primitive_restart(&elb);
- }
-
- /* Bone rectangle */
- pos[0] = 0.0f;
- for (int i = 0; i < 6; ++i) {
- pos[1] = (i == 0 || i == 3) ? 0.0f : ((i < 3) ? 1.0f : -1.0f);
- flag = ((i < 2 || i > 4) ? POS_HEAD : POS_TAIL) |
- ((i == 0 || i == 3) ? 0 : COL_WIRE) | COL_BONE | POS_BONE;
- GPU_vertbuf_attr_set(vbo, attr_id.pos, v, pos);
- GPU_vertbuf_attr_set(vbo, attr_id.flag, v, &flag);
- GPU_indexbuf_add_generic_vert(&elb, v++);
- }
-
- SHC.drw_bone_stick = GPU_batch_create_ex(
- GPU_PRIM_TRI_FAN, vbo, GPU_indexbuf_build(&elb),
- GPU_BATCH_OWNS_VBO | GPU_BATCH_OWNS_INDEX);
+ uint v = 0;
+ uint flag;
+ const float radius = 2.0f; /* head/tail radius */
+ float pos[2];
+
+ /* Position Only 2D format */
+ static GPUVertFormat format = {0};
+ static struct {
+ uint pos, flag;
+ } attr_id;
+ if (format.attr_len == 0) {
+ attr_id.pos = GPU_vertformat_attr_add(&format, "pos", GPU_COMP_F32, 2, GPU_FETCH_FLOAT);
+ attr_id.flag = GPU_vertformat_attr_add(&format, "flag", GPU_COMP_U32, 1, GPU_FETCH_INT);
+ }
+
+ const uint vcount = (CIRCLE_RESOL + 1) * 2 + 6;
+
+ GPUVertBuf *vbo = GPU_vertbuf_create_with_format(&format);
+ GPU_vertbuf_data_alloc(vbo, vcount);
+
+ GPUIndexBufBuilder elb;
+ GPU_indexbuf_init_ex(&elb, GPU_PRIM_TRI_FAN, (CIRCLE_RESOL + 2) * 2 + 6 + 2, vcount, true);
+
+ /* head/tail points */
+ for (int i = 0; i < 2; ++i) {
+ /* center vertex */
+ copy_v2_fl(pos, 0.0f);
+ flag = (i == 0) ? POS_HEAD : POS_TAIL;
+ flag |= (i == 0) ? COL_HEAD : COL_TAIL;
+ GPU_vertbuf_attr_set(vbo, attr_id.pos, v, pos);
+ GPU_vertbuf_attr_set(vbo, attr_id.flag, v, &flag);
+ GPU_indexbuf_add_generic_vert(&elb, v++);
+ /* circle vertices */
+ flag |= COL_WIRE;
+ for (int a = 0; a < CIRCLE_RESOL; a++) {
+ pos[0] = radius * sinf((2.0f * M_PI * a) / ((float)CIRCLE_RESOL));
+ pos[1] = radius * cosf((2.0f * M_PI * a) / ((float)CIRCLE_RESOL));
+ GPU_vertbuf_attr_set(vbo, attr_id.pos, v, pos);
+ GPU_vertbuf_attr_set(vbo, attr_id.flag, v, &flag);
+ GPU_indexbuf_add_generic_vert(&elb, v++);
+ }
+ /* Close the circle */
+ GPU_indexbuf_add_generic_vert(&elb, v - CIRCLE_RESOL);
+
+ GPU_indexbuf_add_primitive_restart(&elb);
+ }
+
+ /* Bone rectangle */
+ pos[0] = 0.0f;
+ for (int i = 0; i < 6; ++i) {
+ pos[1] = (i == 0 || i == 3) ? 0.0f : ((i < 3) ? 1.0f : -1.0f);
+ flag = ((i < 2 || i > 4) ? POS_HEAD : POS_TAIL) | ((i == 0 || i == 3) ? 0 : COL_WIRE) |
+ COL_BONE | POS_BONE;
+ GPU_vertbuf_attr_set(vbo, attr_id.pos, v, pos);
+ GPU_vertbuf_attr_set(vbo, attr_id.flag, v, &flag);
+ GPU_indexbuf_add_generic_vert(&elb, v++);
+ }
+
+ SHC.drw_bone_stick = GPU_batch_create_ex(GPU_PRIM_TRI_FAN,
+ vbo,
+ GPU_indexbuf_build(&elb),
+ GPU_BATCH_OWNS_VBO | GPU_BATCH_OWNS_INDEX);
#undef CIRCLE_RESOL
- }
- return SHC.drw_bone_stick;
+ }
+ return SHC.drw_bone_stick;
}
-static void set_bone_axis_vert(
- GPUVertBuf *vbo, uint axis, uint pos, uint col,
- uint *v, const float *a, const float *p, const float *c)
+static void set_bone_axis_vert(GPUVertBuf *vbo,
+ uint axis,
+ uint pos,
+ uint col,
+ uint *v,
+ const float *a,
+ const float *p,
+ const float *c)
{
- GPU_vertbuf_attr_set(vbo, axis, *v, a);
- GPU_vertbuf_attr_set(vbo, pos, *v, p);
- GPU_vertbuf_attr_set(vbo, col, *v, c);
- *v += 1;
+ GPU_vertbuf_attr_set(vbo, axis, *v, a);
+ GPU_vertbuf_attr_set(vbo, pos, *v, p);
+ GPU_vertbuf_attr_set(vbo, col, *v, c);
+ *v += 1;
}
#define S_X 0.0215f
#define S_Y 0.025f
static float x_axis_name[4][2] = {
- { 0.9f * S_X, 1.0f * S_Y}, {-1.0f * S_X, -1.0f * S_Y},
- {-0.9f * S_X, 1.0f * S_Y}, { 1.0f * S_X, -1.0f * S_Y},
+ {0.9f * S_X, 1.0f * S_Y},
+ {-1.0f * S_X, -1.0f * S_Y},
+ {-0.9f * S_X, 1.0f * S_Y},
+ {1.0f * S_X, -1.0f * S_Y},
};
#define X_LEN (sizeof(x_axis_name) / (sizeof(float) * 2))
#undef S_X
@@ -2603,9 +2722,12 @@ static float x_axis_name[4][2] = {
#define S_X 0.0175f
#define S_Y 0.025f
static float y_axis_name[6][2] = {
- {-1.0f * S_X, 1.0f * S_Y}, { 0.0f * S_X, -0.1f * S_Y},
- { 1.0f * S_X, 1.0f * S_Y}, { 0.0f * S_X, -0.1f * S_Y},
- { 0.0f * S_X, -0.1f * S_Y}, { 0.0f * S_X, -1.0f * S_Y},
+ {-1.0f * S_X, 1.0f * S_Y},
+ {0.0f * S_X, -0.1f * S_Y},
+ {1.0f * S_X, 1.0f * S_Y},
+ {0.0f * S_X, -0.1f * S_Y},
+ {0.0f * S_X, -0.1f * S_Y},
+ {0.0f * S_X, -1.0f * S_Y},
};
#define Y_LEN (sizeof(y_axis_name) / (sizeof(float) * 2))
#undef S_X
@@ -2614,11 +2736,16 @@ static float y_axis_name[6][2] = {
#define S_X 0.02f
#define S_Y 0.025f
static float z_axis_name[10][2] = {
- {-0.95f * S_X, 1.00f * S_Y}, { 0.95f * S_X, 1.00f * S_Y},
- { 0.95f * S_X, 1.00f * S_Y}, { 0.95f * S_X, 0.90f * S_Y},
- { 0.95f * S_X, 0.90f * S_Y}, {-1.00f * S_X, -0.90f * S_Y},
- {-1.00f * S_X, -0.90f * S_Y}, {-1.00f * S_X, -1.00f * S_Y},
- {-1.00f * S_X, -1.00f * S_Y}, { 1.00f * S_X, -1.00f * S_Y},
+ {-0.95f * S_X, 1.00f * S_Y},
+ {0.95f * S_X, 1.00f * S_Y},
+ {0.95f * S_X, 1.00f * S_Y},
+ {0.95f * S_X, 0.90f * S_Y},
+ {0.95f * S_X, 0.90f * S_Y},
+ {-1.00f * S_X, -0.90f * S_Y},
+ {-1.00f * S_X, -0.90f * S_Y},
+ {-1.00f * S_X, -1.00f * S_Y},
+ {-1.00f * S_X, -1.00f * S_Y},
+ {1.00f * S_X, -1.00f * S_Y},
};
#define Z_LEN (sizeof(z_axis_name) / (sizeof(float) * 2))
#undef S_X
@@ -2628,15 +2755,19 @@ static float z_axis_name[10][2] = {
#define S_Y 0.007f
static float axis_marker[8][2] = {
#if 0 /* square */
- {-1.0f * S_X, 1.0f * S_Y}, { 1.0f * S_X, 1.0f * S_Y},
- { 1.0f * S_X, 1.0f * S_Y}, { 1.0f * S_X, -1.0f * S_Y},
- { 1.0f * S_X, -1.0f * S_Y}, {-1.0f * S_X, -1.0f * S_Y},
- {-1.0f * S_X, -1.0f * S_Y}, {-1.0f * S_X, 1.0f * S_Y}
+ {-1.0f * S_X, 1.0f * S_Y}, { 1.0f * S_X, 1.0f * S_Y},
+ { 1.0f * S_X, 1.0f * S_Y}, { 1.0f * S_X, -1.0f * S_Y},
+ { 1.0f * S_X, -1.0f * S_Y}, {-1.0f * S_X, -1.0f * S_Y},
+ {-1.0f * S_X, -1.0f * S_Y}, {-1.0f * S_X, 1.0f * S_Y}
#else /* diamond */
- {-S_X, 0.f}, { 0.f, S_Y},
- { 0.f, S_Y}, { S_X, 0.f},
- { S_X, 0.f}, { 0.f, -S_Y},
- { 0.f, -S_Y}, {-S_X, 0.f}
+ {-S_X, 0.f},
+ {0.f, S_Y},
+ {0.f, S_Y},
+ {S_X, 0.f},
+ {S_X, 0.f},
+ {0.f, -S_Y},
+ {0.f, -S_Y},
+ {-S_X, 0.f}
#endif
};
#define MARKER_LEN (sizeof(axis_marker) / (sizeof(float) * 2))
@@ -2646,13 +2777,17 @@ static float axis_marker[8][2] = {
#define S_X 0.0007f
#define S_Y 0.0007f
-#define O_X 0.001f
+#define O_X 0.001f
#define O_Y -0.001f
static float axis_name_shadow[8][2] = {
- {-S_X + O_X, S_Y + O_Y}, { S_X + O_X, S_Y + O_Y},
- { S_X + O_X, S_Y + O_Y}, { S_X + O_X, -S_Y + O_Y},
- { S_X + O_X, -S_Y + O_Y}, {-S_X + O_X, -S_Y + O_Y},
- {-S_X + O_X, -S_Y + O_Y}, {-S_X + O_X, S_Y + O_Y},
+ {-S_X + O_X, S_Y + O_Y},
+ {S_X + O_X, S_Y + O_Y},
+ {S_X + O_X, S_Y + O_Y},
+ {S_X + O_X, -S_Y + O_Y},
+ {S_X + O_X, -S_Y + O_Y},
+ {-S_X + O_X, -S_Y + O_Y},
+ {-S_X + O_X, -S_Y + O_Y},
+ {-S_X + O_X, S_Y + O_Y},
};
// #define SHADOW_RES (sizeof(axis_name_shadow) / (sizeof(float) * 2))
#define SHADOW_RES 0
@@ -2663,177 +2798,197 @@ static float axis_name_shadow[8][2] = {
GPUBatch *DRW_cache_bone_arrows_get(void)
{
- if (!SHC.drw_bone_arrows) {
- /* Position Only 3D format */
- static GPUVertFormat format = { 0 };
- static struct { uint axis, pos, col; } attr_id;
- if (format.attr_len == 0) {
- attr_id.axis = GPU_vertformat_attr_add(&format, "axis", GPU_COMP_F32, 1, GPU_FETCH_FLOAT);
- attr_id.pos = GPU_vertformat_attr_add(&format, "screenPos", GPU_COMP_F32, 2, GPU_FETCH_FLOAT);
- attr_id.col = GPU_vertformat_attr_add(&format, "colorAxis", GPU_COMP_F32, 3, GPU_FETCH_FLOAT);
- }
-
- /* Line */
- GPUVertBuf *vbo = GPU_vertbuf_create_with_format(&format);
- GPU_vertbuf_data_alloc(vbo, (2 + MARKER_LEN * MARKER_FILL_LAYER) * 3 +
- (X_LEN + Y_LEN + Z_LEN) * (1 + SHADOW_RES));
-
- uint v = 0;
-
- for (int axis = 0; axis < 3; axis++) {
- float pos[2] = {0.0f, 0.0f};
- float c[3] = {0.0f, 0.0f, 0.0f};
- float a = 0.0f;
- /* center to axis line */
- set_bone_axis_vert(vbo, attr_id.axis, attr_id.pos, attr_id.col, &v, &a, pos, c);
- c[axis] = 0.5f;
- a = axis + 0.25f;
- set_bone_axis_vert(vbo, attr_id.axis, attr_id.pos, attr_id.col, &v, &a, pos, c);
-
- /* Axis end marker */
- for (int j = 1; j < MARKER_FILL_LAYER + 1; ++j) {
- for (int i = 0; i < MARKER_LEN; ++i) {
- float tmp[2];
- mul_v2_v2fl(tmp, axis_marker[i], j / (float)MARKER_FILL_LAYER);
- set_bone_axis_vert(vbo, attr_id.axis, attr_id.pos, attr_id.col,
- &v, &a, tmp, c);
- }
- }
-
- a = axis + 0.31f;
- /* Axis name */
- int axis_v_len;
- float (*axis_verts)[2];
- if (axis == 0) {
- axis_verts = x_axis_name;
- axis_v_len = X_LEN;
- }
- else if (axis == 1) {
- axis_verts = y_axis_name;
- axis_v_len = Y_LEN;
- }
- else {
- axis_verts = z_axis_name;
- axis_v_len = Z_LEN;
- }
-
- /* Axis name shadows */
- copy_v3_fl(c, 0.0f);
- c[axis] = 0.3f;
- for (int j = 0; j < SHADOW_RES; ++j) {
- for (int i = 0; i < axis_v_len; ++i) {
- float tmp[2];
- add_v2_v2v2(tmp, axis_verts[i], axis_name_shadow[j]);
- set_bone_axis_vert(vbo, attr_id.axis, attr_id.pos, attr_id.col,
- &v, &a, tmp, c);
- }
- }
-
- /* Axis name */
- copy_v3_fl(c, 0.1f);
- c[axis] = 1.0f;
- for (int i = 0; i < axis_v_len; ++i) {
- set_bone_axis_vert(vbo, attr_id.axis, attr_id.pos, attr_id.col,
- &v, &a, axis_verts[i], c);
- }
- }
-
- SHC.drw_bone_arrows = GPU_batch_create_ex(GPU_PRIM_LINES, vbo, NULL, GPU_BATCH_OWNS_VBO);
- }
- return SHC.drw_bone_arrows;
+ if (!SHC.drw_bone_arrows) {
+ /* Position Only 3D format */
+ static GPUVertFormat format = {0};
+ static struct {
+ uint axis, pos, col;
+ } attr_id;
+ if (format.attr_len == 0) {
+ attr_id.axis = GPU_vertformat_attr_add(&format, "axis", GPU_COMP_F32, 1, GPU_FETCH_FLOAT);
+ attr_id.pos = GPU_vertformat_attr_add(
+ &format, "screenPos", GPU_COMP_F32, 2, GPU_FETCH_FLOAT);
+ attr_id.col = GPU_vertformat_attr_add(
+ &format, "colorAxis", GPU_COMP_F32, 3, GPU_FETCH_FLOAT);
+ }
+
+ /* Line */
+ GPUVertBuf *vbo = GPU_vertbuf_create_with_format(&format);
+ GPU_vertbuf_data_alloc(vbo,
+ (2 + MARKER_LEN * MARKER_FILL_LAYER) * 3 +
+ (X_LEN + Y_LEN + Z_LEN) * (1 + SHADOW_RES));
+
+ uint v = 0;
+
+ for (int axis = 0; axis < 3; axis++) {
+ float pos[2] = {0.0f, 0.0f};
+ float c[3] = {0.0f, 0.0f, 0.0f};
+ float a = 0.0f;
+ /* center to axis line */
+ set_bone_axis_vert(vbo, attr_id.axis, attr_id.pos, attr_id.col, &v, &a, pos, c);
+ c[axis] = 0.5f;
+ a = axis + 0.25f;
+ set_bone_axis_vert(vbo, attr_id.axis, attr_id.pos, attr_id.col, &v, &a, pos, c);
+
+ /* Axis end marker */
+ for (int j = 1; j < MARKER_FILL_LAYER + 1; ++j) {
+ for (int i = 0; i < MARKER_LEN; ++i) {
+ float tmp[2];
+ mul_v2_v2fl(tmp, axis_marker[i], j / (float)MARKER_FILL_LAYER);
+ set_bone_axis_vert(vbo, attr_id.axis, attr_id.pos, attr_id.col, &v, &a, tmp, c);
+ }
+ }
+
+ a = axis + 0.31f;
+ /* Axis name */
+ int axis_v_len;
+ float(*axis_verts)[2];
+ if (axis == 0) {
+ axis_verts = x_axis_name;
+ axis_v_len = X_LEN;
+ }
+ else if (axis == 1) {
+ axis_verts = y_axis_name;
+ axis_v_len = Y_LEN;
+ }
+ else {
+ axis_verts = z_axis_name;
+ axis_v_len = Z_LEN;
+ }
+
+ /* Axis name shadows */
+ copy_v3_fl(c, 0.0f);
+ c[axis] = 0.3f;
+ for (int j = 0; j < SHADOW_RES; ++j) {
+ for (int i = 0; i < axis_v_len; ++i) {
+ float tmp[2];
+ add_v2_v2v2(tmp, axis_verts[i], axis_name_shadow[j]);
+ set_bone_axis_vert(vbo, attr_id.axis, attr_id.pos, attr_id.col, &v, &a, tmp, c);
+ }
+ }
+
+ /* Axis name */
+ copy_v3_fl(c, 0.1f);
+ c[axis] = 1.0f;
+ for (int i = 0; i < axis_v_len; ++i) {
+ set_bone_axis_vert(vbo, attr_id.axis, attr_id.pos, attr_id.col, &v, &a, axis_verts[i], c);
+ }
+ }
+
+ SHC.drw_bone_arrows = GPU_batch_create_ex(GPU_PRIM_LINES, vbo, NULL, GPU_BATCH_OWNS_VBO);
+ }
+ return SHC.drw_bone_arrows;
}
static const float staticSine[16] = {
- 0.0f, 0.104528463268f, 0.207911690818f, 0.309016994375f,
- 0.406736643076f, 0.5f, 0.587785252292f, 0.669130606359f,
- 0.743144825477f, 0.809016994375f, 0.866025403784f,
- 0.913545457643f, 0.951056516295f, 0.978147600734f,
- 0.994521895368f, 1.0f,
+ 0.0f,
+ 0.104528463268f,
+ 0.207911690818f,
+ 0.309016994375f,
+ 0.406736643076f,
+ 0.5f,
+ 0.587785252292f,
+ 0.669130606359f,
+ 0.743144825477f,
+ 0.809016994375f,
+ 0.866025403784f,
+ 0.913545457643f,
+ 0.951056516295f,
+ 0.978147600734f,
+ 0.994521895368f,
+ 1.0f,
};
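/* The table above is a quarter sine wave sampled in 6-degree steps, i.e.
 * staticSine[i] == sinf(i * M_PI / 30) for i = 0..15. A sketch (not part of
 * this diff) that would regenerate it: */
for (int i = 0; i < 16; i++) {
  const float s = sinf(i * (float)M_PI / 30.0f); /* equals staticSine[i] */
  (void)s;
}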
-#define set_vert(a, b, quarter) { \
- copy_v2_fl2(pos, (quarter % 2 == 0) ? -(a) : (a), (quarter < 2) ? -(b) : (b)); \
- GPU_vertbuf_attr_set(vbo, attr_id.pos, v++, pos); \
- } ((void)0)
+#define set_vert(a, b, quarter) \
+ { \
+ copy_v2_fl2(pos, (quarter % 2 == 0) ? -(a) : (a), (quarter < 2) ? -(b) : (b)); \
+ GPU_vertbuf_attr_set(vbo, attr_id.pos, v++, pos); \
+ } \
+ ((void)0)
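/* The trailing ((void)0) is the usual semicolon-swallowing macro idiom: the
 * macro expands to a block followed by a no-op expression, so every call must
 * be written as a statement, set_vert(a, b, quarter); and a missing ';' is a
 * compile error. A standalone sketch of the pattern (hypothetical INC2 macro,
 * not from the source): */
#define INC2(x) \
  { \
    (x)++; \
    (x)++; \
  } \
  ((void)0)
/* usage: INC2(n);  <- the ';' completes the trailing expression statement */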
GPUBatch *DRW_cache_bone_dof_sphere_get(void)
{
- if (!SHC.drw_bone_dof_sphere) {
- int i, j, q, n = ARRAY_SIZE(staticSine);
- float x, z, px, pz, pos[2];
-
- /* Position Only 3D format */
- static GPUVertFormat format = { 0 };
- static struct { uint pos; } attr_id;
- if (format.attr_len == 0) {
- attr_id.pos = GPU_vertformat_attr_add(&format, "pos", GPU_COMP_F32, 2, GPU_FETCH_FLOAT);
- }
-
- GPUVertBuf *vbo = GPU_vertbuf_create_with_format(&format);
- GPU_vertbuf_data_alloc(vbo, n * n * 6 * 4);
-
- uint v = 0;
- for (q = 0; q < 4; ++q) {
- pz = 0.0f;
- for (i = 1; i < n; ++i) {
- z = staticSine[i];
- px = 0.0f;
- for (j = 1; j <= (n - i); ++j) {
- x = staticSine[j];
- if (j == n - i) {
- set_vert(px, z, q);
- set_vert(px, pz, q);
- set_vert(x, pz, q);
- }
- else {
- set_vert(x, z, q);
- set_vert(x, pz, q);
- set_vert(px, z, q);
-
- set_vert(x, pz, q);
- set_vert(px, pz, q);
- set_vert(px, z, q);
- }
- px = x;
- }
- pz = z;
- }
- }
- /* TODO alloc right count from the begining. */
- GPU_vertbuf_data_resize(vbo, v);
-
- SHC.drw_bone_dof_sphere = GPU_batch_create_ex(GPU_PRIM_TRIS, vbo, NULL, GPU_BATCH_OWNS_VBO);
- }
- return SHC.drw_bone_dof_sphere;
+ if (!SHC.drw_bone_dof_sphere) {
+ int i, j, q, n = ARRAY_SIZE(staticSine);
+ float x, z, px, pz, pos[2];
+
+ /* Position Only 3D format */
+ static GPUVertFormat format = {0};
+ static struct {
+ uint pos;
+ } attr_id;
+ if (format.attr_len == 0) {
+ attr_id.pos = GPU_vertformat_attr_add(&format, "pos", GPU_COMP_F32, 2, GPU_FETCH_FLOAT);
+ }
+
+ GPUVertBuf *vbo = GPU_vertbuf_create_with_format(&format);
+ GPU_vertbuf_data_alloc(vbo, n * n * 6 * 4);
+
+ uint v = 0;
+ for (q = 0; q < 4; ++q) {
+ pz = 0.0f;
+ for (i = 1; i < n; ++i) {
+ z = staticSine[i];
+ px = 0.0f;
+ for (j = 1; j <= (n - i); ++j) {
+ x = staticSine[j];
+ if (j == n - i) {
+ set_vert(px, z, q);
+ set_vert(px, pz, q);
+ set_vert(x, pz, q);
+ }
+ else {
+ set_vert(x, z, q);
+ set_vert(x, pz, q);
+ set_vert(px, z, q);
+
+ set_vert(x, pz, q);
+ set_vert(px, pz, q);
+ set_vert(px, z, q);
+ }
+ px = x;
+ }
+ pz = z;
+ }
+ }
+ /* TODO: allocate the right count from the beginning. */
+ GPU_vertbuf_data_resize(vbo, v);
+
+ SHC.drw_bone_dof_sphere = GPU_batch_create_ex(GPU_PRIM_TRIS, vbo, NULL, GPU_BATCH_OWNS_VBO);
+ }
+ return SHC.drw_bone_dof_sphere;
}
GPUBatch *DRW_cache_bone_dof_lines_get(void)
{
- if (!SHC.drw_bone_dof_lines) {
- int i, n = ARRAY_SIZE(staticSine);
- float pos[2];
+ if (!SHC.drw_bone_dof_lines) {
+ int i, n = ARRAY_SIZE(staticSine);
+ float pos[2];
- /* Position Only 3D format */
- static GPUVertFormat format = { 0 };
- static struct { uint pos; } attr_id;
- if (format.attr_len == 0) {
- attr_id.pos = GPU_vertformat_attr_add(&format, "pos", GPU_COMP_F32, 2, GPU_FETCH_FLOAT);
- }
+ /* Position Only 3D format */
+ static GPUVertFormat format = {0};
+ static struct {
+ uint pos;
+ } attr_id;
+ if (format.attr_len == 0) {
+ attr_id.pos = GPU_vertformat_attr_add(&format, "pos", GPU_COMP_F32, 2, GPU_FETCH_FLOAT);
+ }
- GPUVertBuf *vbo = GPU_vertbuf_create_with_format(&format);
- GPU_vertbuf_data_alloc(vbo, n * 4);
+ GPUVertBuf *vbo = GPU_vertbuf_create_with_format(&format);
+ GPU_vertbuf_data_alloc(vbo, n * 4);
- uint v = 0;
- for (i = 0; i < n * 4; i++) {
- float a = (1.0f - (i / (float)(n * 4))) * 2.0f * M_PI;
- float x = cosf(a);
- float y = sinf(a);
- set_vert(x, y, 0);
- }
+ uint v = 0;
+ for (i = 0; i < n * 4; i++) {
+ float a = (1.0f - (i / (float)(n * 4))) * 2.0f * M_PI;
+ float x = cosf(a);
+ float y = sinf(a);
+ set_vert(x, y, 0);
+ }
- SHC.drw_bone_dof_lines = GPU_batch_create_ex(GPU_PRIM_LINE_LOOP, vbo, NULL, GPU_BATCH_OWNS_VBO);
- }
- return SHC.drw_bone_dof_lines;
+ SHC.drw_bone_dof_lines = GPU_batch_create_ex(
+ GPU_PRIM_LINE_LOOP, vbo, NULL, GPU_BATCH_OWNS_VBO);
+ }
+ return SHC.drw_bone_dof_lines;
}
#undef set_vert
@@ -2852,135 +3007,145 @@ GPUBatch *DRW_cache_bone_dof_lines_get(void)
*/
static const float camera_coords_frame_bounds[5] = {
- 0.0f, /* center point */
- 1.0f, /* + X + Y */
- 2.0f, /* + X - Y */
- 3.0f, /* - X - Y */
- 4.0f, /* - X + Y */
+ 0.0f, /* center point */
+ 1.0f, /* + X + Y */
+ 2.0f, /* + X - Y */
+ 3.0f, /* - X - Y */
+ 4.0f, /* - X + Y */
};
static const float camera_coords_frame_tri[3] = {
- 5.0f, /* tria + X */
- 6.0f, /* tria - X */
- 7.0f, /* tria + Y */
+ 5.0f, /* tria + X */
+ 6.0f, /* tria - X */
+ 7.0f, /* tria + Y */
};
/** Draw a loop of lines. */
-static void camera_fill_lines_loop_fl_v1(
- GPUVertBufRaw *pos_step,
- const float *coords, const uint coords_len)
+static void camera_fill_lines_loop_fl_v1(GPUVertBufRaw *pos_step,
+ const float *coords,
+ const uint coords_len)
{
- for (uint i = 0, i_prev = coords_len - 1; i < coords_len; i_prev = i++) {
- *((float *)GPU_vertbuf_raw_step(pos_step)) = coords[i_prev];
- *((float *)GPU_vertbuf_raw_step(pos_step)) = coords[i];
- }
+ for (uint i = 0, i_prev = coords_len - 1; i < coords_len; i_prev = i++) {
+ *((float *)GPU_vertbuf_raw_step(pos_step)) = coords[i_prev];
+ *((float *)GPU_vertbuf_raw_step(pos_step)) = coords[i];
+ }
}
/** Fan lines out from the first vertex. */
-static void camera_fill_lines_fan_fl_v1(
- GPUVertBufRaw *pos_step,
- const float *coords, const uint coords_len)
+static void camera_fill_lines_fan_fl_v1(GPUVertBufRaw *pos_step,
+ const float *coords,
+ const uint coords_len)
{
- for (uint i = 1; i < coords_len; i++) {
- *((float *)GPU_vertbuf_raw_step(pos_step)) = coords[0];
- *((float *)GPU_vertbuf_raw_step(pos_step)) = coords[i];
- }
+ for (uint i = 1; i < coords_len; i++) {
+ *((float *)GPU_vertbuf_raw_step(pos_step)) = coords[0];
+ *((float *)GPU_vertbuf_raw_step(pos_step)) = coords[i];
+ }
}
/** Simply fill the array. */
-static void camera_fill_array_fl_v1(
- GPUVertBufRaw *pos_step,
- const float *coords, const uint coords_len)
+static void camera_fill_array_fl_v1(GPUVertBufRaw *pos_step,
+ const float *coords,
+ const uint coords_len)
{
- for (uint i = 0; i < coords_len; i++) {
- *((float *)GPU_vertbuf_raw_step(pos_step)) = coords[i];
- }
+ for (uint i = 0; i < coords_len; i++) {
+ *((float *)GPU_vertbuf_raw_step(pos_step)) = coords[i];
+ }
}
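/* Worked example (a sketch, not part of this diff): for coords = {A, B, C, D}
 * (coords_len = 4) the helpers above emit the vertex pairs
 *   loop: D-A, A-B, B-C, C-D (closes the ring)
 *   fan:  A-B, A-C, A-D (every vertex paired with the first)
 * as used for the camera frame and cone below, assuming pos_step was set up
 * as in DRW_cache_camera_get(): */
float coords[4] = {0.0f, 1.0f, 2.0f, 3.0f};
camera_fill_lines_loop_fl_v1(&pos_step, coords, ARRAY_SIZE(coords));
/* pos_step now holds the pairs 3,0  0,1  1,2  2,3 */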
-
GPUBatch *DRW_cache_camera_get(void)
{
- if (!SHC.drw_camera) {
- static GPUVertFormat format = { 0 };
- static struct { uint pos; } attr_id;
- if (format.attr_len == 0) {
- attr_id.pos = GPU_vertformat_attr_add(&format, "pos", GPU_COMP_F32, 1, GPU_FETCH_FLOAT);
- }
+ if (!SHC.drw_camera) {
+ static GPUVertFormat format = {0};
+ static struct {
+ uint pos;
+ } attr_id;
+ if (format.attr_len == 0) {
+ attr_id.pos = GPU_vertformat_attr_add(&format, "pos", GPU_COMP_F32, 1, GPU_FETCH_FLOAT);
+ }
- /* Vertices */
- GPUVertBuf *vbo = GPU_vertbuf_create_with_format(&format);
- const int vbo_len_capacity = 22;
- GPU_vertbuf_data_alloc(vbo, vbo_len_capacity);
- GPUVertBufRaw pos_step;
- GPU_vertbuf_attr_get_raw_data(vbo, attr_id.pos, &pos_step);
+ /* Vertices */
+ GPUVertBuf *vbo = GPU_vertbuf_create_with_format(&format);
+ const int vbo_len_capacity = 22;
+ GPU_vertbuf_data_alloc(vbo, vbo_len_capacity);
+ GPUVertBufRaw pos_step;
+ GPU_vertbuf_attr_get_raw_data(vbo, attr_id.pos, &pos_step);
- /* camera cone (from center to frame) */
- camera_fill_lines_fan_fl_v1(&pos_step, camera_coords_frame_bounds, ARRAY_SIZE(camera_coords_frame_bounds));
+ /* camera cone (from center to frame) */
+ camera_fill_lines_fan_fl_v1(
+ &pos_step, camera_coords_frame_bounds, ARRAY_SIZE(camera_coords_frame_bounds));
- /* camera frame (skip center) */
- camera_fill_lines_loop_fl_v1(&pos_step, &camera_coords_frame_bounds[1], ARRAY_SIZE(camera_coords_frame_bounds) - 1);
+ /* camera frame (skip center) */
+ camera_fill_lines_loop_fl_v1(
+ &pos_step, &camera_coords_frame_bounds[1], ARRAY_SIZE(camera_coords_frame_bounds) - 1);
- /* camera triangle (above the frame) */
- camera_fill_lines_loop_fl_v1(&pos_step, camera_coords_frame_tri, ARRAY_SIZE(camera_coords_frame_tri));
+ /* camera triangle (above the frame) */
+ camera_fill_lines_loop_fl_v1(
+ &pos_step, camera_coords_frame_tri, ARRAY_SIZE(camera_coords_frame_tri));
- BLI_assert(vbo_len_capacity == GPU_vertbuf_raw_used(&pos_step));
+ BLI_assert(vbo_len_capacity == GPU_vertbuf_raw_used(&pos_step));
- SHC.drw_camera = GPU_batch_create_ex(GPU_PRIM_LINES, vbo, NULL, GPU_BATCH_OWNS_VBO);
- }
- return SHC.drw_camera;
+ SHC.drw_camera = GPU_batch_create_ex(GPU_PRIM_LINES, vbo, NULL, GPU_BATCH_OWNS_VBO);
+ }
+ return SHC.drw_camera;
}
GPUBatch *DRW_cache_camera_frame_get(void)
{
- if (!SHC.drw_camera_frame) {
+ if (!SHC.drw_camera_frame) {
- static GPUVertFormat format = { 0 };
- static struct { uint pos; } attr_id;
- if (format.attr_len == 0) {
- attr_id.pos = GPU_vertformat_attr_add(&format, "pos", GPU_COMP_F32, 1, GPU_FETCH_FLOAT);
- }
+ static GPUVertFormat format = {0};
+ static struct {
+ uint pos;
+ } attr_id;
+ if (format.attr_len == 0) {
+ attr_id.pos = GPU_vertformat_attr_add(&format, "pos", GPU_COMP_F32, 1, GPU_FETCH_FLOAT);
+ }
- /* Vertices */
- GPUVertBuf *vbo = GPU_vertbuf_create_with_format(&format);
- const int vbo_len_capacity = 8;
- GPU_vertbuf_data_alloc(vbo, vbo_len_capacity);
- GPUVertBufRaw pos_step;
- GPU_vertbuf_attr_get_raw_data(vbo, attr_id.pos, &pos_step);
+ /* Vertices */
+ GPUVertBuf *vbo = GPU_vertbuf_create_with_format(&format);
+ const int vbo_len_capacity = 8;
+ GPU_vertbuf_data_alloc(vbo, vbo_len_capacity);
+ GPUVertBufRaw pos_step;
+ GPU_vertbuf_attr_get_raw_data(vbo, attr_id.pos, &pos_step);
- /* camera frame (skip center) */
- camera_fill_lines_loop_fl_v1(&pos_step, &camera_coords_frame_bounds[1], ARRAY_SIZE(camera_coords_frame_bounds) - 1);
+ /* camera frame (skip center) */
+ camera_fill_lines_loop_fl_v1(
+ &pos_step, &camera_coords_frame_bounds[1], ARRAY_SIZE(camera_coords_frame_bounds) - 1);
- BLI_assert(vbo_len_capacity == GPU_vertbuf_raw_used(&pos_step));
+ BLI_assert(vbo_len_capacity == GPU_vertbuf_raw_used(&pos_step));
- SHC.drw_camera_frame = GPU_batch_create_ex(GPU_PRIM_LINES, vbo, NULL, GPU_BATCH_OWNS_VBO);
- }
- return SHC.drw_camera_frame;
+ SHC.drw_camera_frame = GPU_batch_create_ex(GPU_PRIM_LINES, vbo, NULL, GPU_BATCH_OWNS_VBO);
+ }
+ return SHC.drw_camera_frame;
}
GPUBatch *DRW_cache_camera_tria_get(void)
{
- if (!SHC.drw_camera_tria) {
- static GPUVertFormat format = { 0 };
- static struct { uint pos; } attr_id;
- if (format.attr_len == 0) {
- attr_id.pos = GPU_vertformat_attr_add(&format, "pos", GPU_COMP_F32, 1, GPU_FETCH_FLOAT);
- }
+ if (!SHC.drw_camera_tria) {
+ static GPUVertFormat format = {0};
+ static struct {
+ uint pos;
+ } attr_id;
+ if (format.attr_len == 0) {
+ attr_id.pos = GPU_vertformat_attr_add(&format, "pos", GPU_COMP_F32, 1, GPU_FETCH_FLOAT);
+ }
- /* Vertices */
- GPUVertBuf *vbo = GPU_vertbuf_create_with_format(&format);
- const int vbo_len_capacity = 3;
- GPU_vertbuf_data_alloc(vbo, vbo_len_capacity);
- GPUVertBufRaw pos_step;
- GPU_vertbuf_attr_get_raw_data(vbo, attr_id.pos, &pos_step);
+ /* Vertices */
+ GPUVertBuf *vbo = GPU_vertbuf_create_with_format(&format);
+ const int vbo_len_capacity = 3;
+ GPU_vertbuf_data_alloc(vbo, vbo_len_capacity);
+ GPUVertBufRaw pos_step;
+ GPU_vertbuf_attr_get_raw_data(vbo, attr_id.pos, &pos_step);
- /* camera triangle (above the frame) */
- camera_fill_array_fl_v1(&pos_step, camera_coords_frame_tri, ARRAY_SIZE(camera_coords_frame_tri));
+ /* camera triangle (above the frame) */
+ camera_fill_array_fl_v1(
+ &pos_step, camera_coords_frame_tri, ARRAY_SIZE(camera_coords_frame_tri));
- BLI_assert(vbo_len_capacity == GPU_vertbuf_raw_used(&pos_step));
+ BLI_assert(vbo_len_capacity == GPU_vertbuf_raw_used(&pos_step));
- SHC.drw_camera_tria = GPU_batch_create_ex(GPU_PRIM_TRIS, vbo, NULL, GPU_BATCH_OWNS_VBO);
- }
- return SHC.drw_camera_tria;
+ SHC.drw_camera_tria = GPU_batch_create_ex(GPU_PRIM_TRIS, vbo, NULL, GPU_BATCH_OWNS_VBO);
+ }
+ return SHC.drw_camera_tria;
}
/** \} */
@@ -2992,24 +3157,26 @@ GPUBatch *DRW_cache_camera_tria_get(void)
/* Object Center */
GPUBatch *DRW_cache_single_vert_get(void)
{
- if (!SHC.drw_single_vertice) {
- float v1[3] = {0.0f, 0.0f, 0.0f};
+ if (!SHC.drw_single_vertice) {
+ float v1[3] = {0.0f, 0.0f, 0.0f};
- /* Position Only 3D format */
- static GPUVertFormat format = { 0 };
- static struct { uint pos; } attr_id;
- if (format.attr_len == 0) {
- attr_id.pos = GPU_vertformat_attr_add(&format, "pos", GPU_COMP_F32, 3, GPU_FETCH_FLOAT);
- }
+ /* Position Only 3D format */
+ static GPUVertFormat format = {0};
+ static struct {
+ uint pos;
+ } attr_id;
+ if (format.attr_len == 0) {
+ attr_id.pos = GPU_vertformat_attr_add(&format, "pos", GPU_COMP_F32, 3, GPU_FETCH_FLOAT);
+ }
- GPUVertBuf *vbo = GPU_vertbuf_create_with_format(&format);
- GPU_vertbuf_data_alloc(vbo, 1);
+ GPUVertBuf *vbo = GPU_vertbuf_create_with_format(&format);
+ GPU_vertbuf_data_alloc(vbo, 1);
- GPU_vertbuf_attr_set(vbo, attr_id.pos, 0, v1);
+ GPU_vertbuf_attr_set(vbo, attr_id.pos, 0, v1);
- SHC.drw_single_vertice = GPU_batch_create_ex(GPU_PRIM_POINTS, vbo, NULL, GPU_BATCH_OWNS_VBO);
- }
- return SHC.drw_single_vertice;
+ SHC.drw_single_vertice = GPU_batch_create_ex(GPU_PRIM_POINTS, vbo, NULL, GPU_BATCH_OWNS_VBO);
+ }
+ return SHC.drw_single_vertice;
}
/** \} */
@@ -3020,88 +3187,94 @@ GPUBatch *DRW_cache_single_vert_get(void)
GPUBatch *DRW_cache_mesh_all_verts_get(Object *ob)
{
- BLI_assert(ob->type == OB_MESH);
- return DRW_mesh_batch_cache_get_all_verts(ob->data);
+ BLI_assert(ob->type == OB_MESH);
+ return DRW_mesh_batch_cache_get_all_verts(ob->data);
}
GPUBatch *DRW_cache_mesh_all_edges_get(Object *ob)
{
- BLI_assert(ob->type == OB_MESH);
- return DRW_mesh_batch_cache_get_all_edges(ob->data);
+ BLI_assert(ob->type == OB_MESH);
+ return DRW_mesh_batch_cache_get_all_edges(ob->data);
}
GPUBatch *DRW_cache_mesh_loose_edges_get(Object *ob)
{
- BLI_assert(ob->type == OB_MESH);
- return DRW_mesh_batch_cache_get_loose_edges(ob->data);
+ BLI_assert(ob->type == OB_MESH);
+ return DRW_mesh_batch_cache_get_loose_edges(ob->data);
}
GPUBatch *DRW_cache_mesh_edge_detection_get(Object *ob, bool *r_is_manifold)
{
- BLI_assert(ob->type == OB_MESH);
- return DRW_mesh_batch_cache_get_edge_detection(ob->data, r_is_manifold);
+ BLI_assert(ob->type == OB_MESH);
+ return DRW_mesh_batch_cache_get_edge_detection(ob->data, r_is_manifold);
}
GPUBatch *DRW_cache_mesh_surface_get(Object *ob)
{
- BLI_assert(ob->type == OB_MESH);
- return DRW_mesh_batch_cache_get_surface(ob->data);
+ BLI_assert(ob->type == OB_MESH);
+ return DRW_mesh_batch_cache_get_surface(ob->data);
}
GPUBatch *DRW_cache_mesh_surface_edges_get(Object *ob)
{
- BLI_assert(ob->type == OB_MESH);
- return DRW_mesh_batch_cache_get_surface_edges(ob->data);
+ BLI_assert(ob->type == OB_MESH);
+ return DRW_mesh_batch_cache_get_surface_edges(ob->data);
}
/* Return list of batches with length equal to max(1, totcol). */
-GPUBatch **DRW_cache_mesh_surface_shaded_get(
- Object *ob, struct GPUMaterial **gpumat_array, uint gpumat_array_len,
- char **auto_layer_names, int **auto_layer_is_srgb, int *auto_layer_count)
-{
- BLI_assert(ob->type == OB_MESH);
- return DRW_mesh_batch_cache_get_surface_shaded(
- ob->data, gpumat_array, gpumat_array_len,
- auto_layer_names, auto_layer_is_srgb, auto_layer_count);
+GPUBatch **DRW_cache_mesh_surface_shaded_get(Object *ob,
+ struct GPUMaterial **gpumat_array,
+ uint gpumat_array_len,
+ char **auto_layer_names,
+ int **auto_layer_is_srgb,
+ int *auto_layer_count)
+{
+ BLI_assert(ob->type == OB_MESH);
+ return DRW_mesh_batch_cache_get_surface_shaded(ob->data,
+ gpumat_array,
+ gpumat_array_len,
+ auto_layer_names,
+ auto_layer_is_srgb,
+ auto_layer_count);
}
/* Return list of batches with length equal to max(1, totcol). */
GPUBatch **DRW_cache_mesh_surface_texpaint_get(Object *ob)
{
- BLI_assert(ob->type == OB_MESH);
- return DRW_mesh_batch_cache_get_surface_texpaint(ob->data);
+ BLI_assert(ob->type == OB_MESH);
+ return DRW_mesh_batch_cache_get_surface_texpaint(ob->data);
}
GPUBatch *DRW_cache_mesh_surface_texpaint_single_get(Object *ob)
{
- BLI_assert(ob->type == OB_MESH);
- return DRW_mesh_batch_cache_get_surface_texpaint_single(ob->data);
+ BLI_assert(ob->type == OB_MESH);
+ return DRW_mesh_batch_cache_get_surface_texpaint_single(ob->data);
}
GPUBatch *DRW_cache_mesh_surface_vertpaint_get(Object *ob)
{
- BLI_assert(ob->type == OB_MESH);
- return DRW_mesh_batch_cache_get_surface_vertpaint(ob->data);
+ BLI_assert(ob->type == OB_MESH);
+ return DRW_mesh_batch_cache_get_surface_vertpaint(ob->data);
}
GPUBatch *DRW_cache_mesh_surface_weights_get(Object *ob)
{
- BLI_assert(ob->type == OB_MESH);
- return DRW_mesh_batch_cache_get_surface_weights(ob->data);
+ BLI_assert(ob->type == OB_MESH);
+ return DRW_mesh_batch_cache_get_surface_weights(ob->data);
}
GPUBatch *DRW_cache_mesh_face_wireframe_get(Object *ob)
{
- BLI_assert(ob->type == OB_MESH);
- return DRW_mesh_batch_cache_get_wireframes_face(ob->data);
+ BLI_assert(ob->type == OB_MESH);
+ return DRW_mesh_batch_cache_get_wireframes_face(ob->data);
}
void DRW_cache_mesh_sculpt_coords_ensure(Object *ob)
{
- BLI_assert(ob->type == OB_MESH);
+ BLI_assert(ob->type == OB_MESH);
- Mesh *me = ob->data;
- DRW_mesh_cache_sculpt_coords_ensure(me);
+ Mesh *me = ob->data;
+ DRW_mesh_cache_sculpt_coords_ensure(me);
}
/** \} */
@@ -3112,107 +3285,109 @@ void DRW_cache_mesh_sculpt_coords_ensure(Object *ob)
GPUBatch *DRW_cache_curve_edge_wire_get(Object *ob)
{
- BLI_assert(ob->type == OB_CURVE);
+ BLI_assert(ob->type == OB_CURVE);
- struct Curve *cu = ob->data;
- return DRW_curve_batch_cache_get_wire_edge(cu);
+ struct Curve *cu = ob->data;
+ return DRW_curve_batch_cache_get_wire_edge(cu);
}
GPUBatch *DRW_cache_curve_edge_normal_get(Object *ob)
{
- BLI_assert(ob->type == OB_CURVE);
+ BLI_assert(ob->type == OB_CURVE);
- struct Curve *cu = ob->data;
- return DRW_curve_batch_cache_get_normal_edge(cu);
+ struct Curve *cu = ob->data;
+ return DRW_curve_batch_cache_get_normal_edge(cu);
}
GPUBatch *DRW_cache_curve_edge_overlay_get(Object *ob)
{
- BLI_assert(ELEM(ob->type, OB_CURVE, OB_SURF));
+ BLI_assert(ELEM(ob->type, OB_CURVE, OB_SURF));
- struct Curve *cu = ob->data;
- return DRW_curve_batch_cache_get_edit_edges(cu);
+ struct Curve *cu = ob->data;
+ return DRW_curve_batch_cache_get_edit_edges(cu);
}
GPUBatch *DRW_cache_curve_vert_overlay_get(Object *ob, bool handles)
{
- BLI_assert(ELEM(ob->type, OB_CURVE, OB_SURF));
+ BLI_assert(ELEM(ob->type, OB_CURVE, OB_SURF));
- struct Curve *cu = ob->data;
- return DRW_curve_batch_cache_get_edit_verts(cu, handles);
+ struct Curve *cu = ob->data;
+ return DRW_curve_batch_cache_get_edit_verts(cu, handles);
}
GPUBatch *DRW_cache_curve_surface_get(Object *ob)
{
- BLI_assert(ob->type == OB_CURVE);
+ BLI_assert(ob->type == OB_CURVE);
- struct Curve *cu = ob->data;
- struct Mesh *mesh_eval = ob->runtime.mesh_eval;
- if (mesh_eval != NULL) {
- return DRW_mesh_batch_cache_get_surface(mesh_eval);
- }
- else {
- return DRW_curve_batch_cache_get_triangles_with_normals(cu);
- }
+ struct Curve *cu = ob->data;
+ struct Mesh *mesh_eval = ob->runtime.mesh_eval;
+ if (mesh_eval != NULL) {
+ return DRW_mesh_batch_cache_get_surface(mesh_eval);
+ }
+ else {
+ return DRW_curve_batch_cache_get_triangles_with_normals(cu);
+ }
}
GPUBatch *DRW_cache_curve_loose_edges_get(Object *ob)
{
- BLI_assert(ob->type == OB_CURVE);
+ BLI_assert(ob->type == OB_CURVE);
- struct Curve *cu = ob->data;
- struct Mesh *mesh_eval = ob->runtime.mesh_eval;
- if (mesh_eval != NULL) {
- return DRW_mesh_batch_cache_get_loose_edges(mesh_eval);
- }
- else {
- /* TODO */
- UNUSED_VARS(cu);
- return NULL;
- }
+ struct Curve *cu = ob->data;
+ struct Mesh *mesh_eval = ob->runtime.mesh_eval;
+ if (mesh_eval != NULL) {
+ return DRW_mesh_batch_cache_get_loose_edges(mesh_eval);
+ }
+ else {
+ /* TODO */
+ UNUSED_VARS(cu);
+ return NULL;
+ }
}
GPUBatch *DRW_cache_curve_face_wireframe_get(Object *ob)
{
- BLI_assert(ob->type == OB_CURVE);
+ BLI_assert(ob->type == OB_CURVE);
- struct Curve *cu = ob->data;
- struct Mesh *mesh_eval = ob->runtime.mesh_eval;
- if (mesh_eval != NULL) {
- return DRW_mesh_batch_cache_get_wireframes_face(mesh_eval);
- }
- else {
- return DRW_curve_batch_cache_get_wireframes_face(cu);
- }
+ struct Curve *cu = ob->data;
+ struct Mesh *mesh_eval = ob->runtime.mesh_eval;
+ if (mesh_eval != NULL) {
+ return DRW_mesh_batch_cache_get_wireframes_face(mesh_eval);
+ }
+ else {
+ return DRW_curve_batch_cache_get_wireframes_face(cu);
+ }
}
GPUBatch *DRW_cache_curve_edge_detection_get(Object *ob, bool *r_is_manifold)
{
- BLI_assert(ob->type == OB_CURVE);
- struct Curve *cu = ob->data;
- struct Mesh *mesh_eval = ob->runtime.mesh_eval;
- if (mesh_eval != NULL) {
- return DRW_mesh_batch_cache_get_edge_detection(mesh_eval, r_is_manifold);
- }
- else {
- return DRW_curve_batch_cache_get_edge_detection(cu, r_is_manifold);
- }
+ BLI_assert(ob->type == OB_CURVE);
+ struct Curve *cu = ob->data;
+ struct Mesh *mesh_eval = ob->runtime.mesh_eval;
+ if (mesh_eval != NULL) {
+ return DRW_mesh_batch_cache_get_edge_detection(mesh_eval, r_is_manifold);
+ }
+ else {
+ return DRW_curve_batch_cache_get_edge_detection(cu, r_is_manifold);
+ }
}
/* Return list of batches */
-GPUBatch **DRW_cache_curve_surface_shaded_get(
- Object *ob, struct GPUMaterial **gpumat_array, uint gpumat_array_len)
+GPUBatch **DRW_cache_curve_surface_shaded_get(Object *ob,
+ struct GPUMaterial **gpumat_array,
+ uint gpumat_array_len)
{
- BLI_assert(ob->type == OB_CURVE);
+ BLI_assert(ob->type == OB_CURVE);
- struct Curve *cu = ob->data;
- struct Mesh *mesh_eval = ob->runtime.mesh_eval;
- if (mesh_eval != NULL) {
- return DRW_mesh_batch_cache_get_surface_shaded(mesh_eval, gpumat_array, gpumat_array_len, NULL, NULL, NULL);
- }
- else {
- return DRW_curve_batch_cache_get_surface_shaded(cu, gpumat_array, gpumat_array_len);
- }
+ struct Curve *cu = ob->data;
+ struct Mesh *mesh_eval = ob->runtime.mesh_eval;
+ if (mesh_eval != NULL) {
+ return DRW_mesh_batch_cache_get_surface_shaded(
+ mesh_eval, gpumat_array, gpumat_array_len, NULL, NULL, NULL);
+ }
+ else {
+ return DRW_curve_batch_cache_get_surface_shaded(cu, gpumat_array, gpumat_array_len);
+ }
}
/** \} */
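
Aside: every curve getter in the hunk above repeats the same evaluated-mesh fallback. As a rough sketch of the pattern only (the helper name is hypothetical and not part of this patch; it uses only calls that appear in the diff, and assumes the headers already included by draw_cache.c):

    /* Sketch of the fallback idiom used by the DRW_cache_curve_*_get()
     * functions above: if modifiers produced an evaluated mesh, draw from
     * the mesh batch cache; otherwise tessellate the curve's own data. */
    static GPUBatch *example_curve_surface_get(Object *ob)
    {
      BLI_assert(ob->type == OB_CURVE);
      struct Curve *cu = ob->data;
      struct Mesh *mesh_eval = ob->runtime.mesh_eval;
      if (mesh_eval != NULL) {
        return DRW_mesh_batch_cache_get_surface(mesh_eval);
      }
      return DRW_curve_batch_cache_get_triangles_with_normals(cu);
    }
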
@@ -3223,28 +3398,29 @@ GPUBatch **DRW_cache_curve_surface_shaded_get(
GPUBatch *DRW_cache_mball_surface_get(Object *ob)
{
- BLI_assert(ob->type == OB_MBALL);
- return DRW_metaball_batch_cache_get_triangles_with_normals(ob);
+ BLI_assert(ob->type == OB_MBALL);
+ return DRW_metaball_batch_cache_get_triangles_with_normals(ob);
}
GPUBatch *DRW_cache_mball_edge_detection_get(Object *ob, bool *r_is_manifold)
{
- BLI_assert(ob->type == OB_MBALL);
- return DRW_metaball_batch_cache_get_edge_detection(ob, r_is_manifold);
+ BLI_assert(ob->type == OB_MBALL);
+ return DRW_metaball_batch_cache_get_edge_detection(ob, r_is_manifold);
}
GPUBatch *DRW_cache_mball_face_wireframe_get(Object *ob)
{
- BLI_assert(ob->type == OB_MBALL);
- return DRW_metaball_batch_cache_get_wireframes_face(ob);
+ BLI_assert(ob->type == OB_MBALL);
+ return DRW_metaball_batch_cache_get_wireframes_face(ob);
}
-GPUBatch **DRW_cache_mball_surface_shaded_get(
- Object *ob, struct GPUMaterial **gpumat_array, uint gpumat_array_len)
+GPUBatch **DRW_cache_mball_surface_shaded_get(Object *ob,
+ struct GPUMaterial **gpumat_array,
+ uint gpumat_array_len)
{
- BLI_assert(ob->type == OB_MBALL);
- MetaBall *mb = ob->data;
- return DRW_metaball_batch_cache_get_surface_shaded(ob, mb, gpumat_array, gpumat_array_len);
+ BLI_assert(ob->type == OB_MBALL);
+ MetaBall *mb = ob->data;
+ return DRW_metaball_batch_cache_get_surface_shaded(ob, mb, gpumat_array, gpumat_array_len);
}
/** \} */
@@ -3255,92 +3431,94 @@ GPUBatch **DRW_cache_mball_surface_shaded_get(
GPUBatch *DRW_cache_text_edge_wire_get(Object *ob)
{
- BLI_assert(ob->type == OB_FONT);
+ BLI_assert(ob->type == OB_FONT);
- struct Curve *cu = ob->data;
- return DRW_curve_batch_cache_get_wire_edge(cu);
+ struct Curve *cu = ob->data;
+ return DRW_curve_batch_cache_get_wire_edge(cu);
}
GPUBatch *DRW_cache_text_surface_get(Object *ob)
{
- BLI_assert(ob->type == OB_FONT);
- struct Curve *cu = ob->data;
- struct Mesh *mesh_eval = ob->runtime.mesh_eval;
- if (cu->editfont && (cu->flag & CU_FAST)) {
- return NULL;
- }
- if (mesh_eval != NULL) {
- return DRW_mesh_batch_cache_get_surface(mesh_eval);
- }
- else {
- return DRW_curve_batch_cache_get_triangles_with_normals(cu);
- }
+ BLI_assert(ob->type == OB_FONT);
+ struct Curve *cu = ob->data;
+ struct Mesh *mesh_eval = ob->runtime.mesh_eval;
+ if (cu->editfont && (cu->flag & CU_FAST)) {
+ return NULL;
+ }
+ if (mesh_eval != NULL) {
+ return DRW_mesh_batch_cache_get_surface(mesh_eval);
+ }
+ else {
+ return DRW_curve_batch_cache_get_triangles_with_normals(cu);
+ }
}
GPUBatch *DRW_cache_text_edge_detection_get(Object *ob, bool *r_is_manifold)
{
- BLI_assert(ob->type == OB_FONT);
- struct Curve *cu = ob->data;
- struct Mesh *mesh_eval = ob->runtime.mesh_eval;
- if (cu->editfont && (cu->flag & CU_FAST)) {
- return NULL;
- }
- if (mesh_eval != NULL) {
- return DRW_mesh_batch_cache_get_edge_detection(mesh_eval, r_is_manifold);
- }
- else {
- return DRW_curve_batch_cache_get_edge_detection(cu, r_is_manifold);
- }
+ BLI_assert(ob->type == OB_FONT);
+ struct Curve *cu = ob->data;
+ struct Mesh *mesh_eval = ob->runtime.mesh_eval;
+ if (cu->editfont && (cu->flag & CU_FAST)) {
+ return NULL;
+ }
+ if (mesh_eval != NULL) {
+ return DRW_mesh_batch_cache_get_edge_detection(mesh_eval, r_is_manifold);
+ }
+ else {
+ return DRW_curve_batch_cache_get_edge_detection(cu, r_is_manifold);
+ }
}
GPUBatch *DRW_cache_text_loose_edges_get(Object *ob)
{
- BLI_assert(ob->type == OB_FONT);
- struct Curve *cu = ob->data;
- struct Mesh *mesh_eval = ob->runtime.mesh_eval;
- if (cu->editfont && (cu->flag & CU_FAST)) {
- return NULL;
- }
- if (mesh_eval != NULL) {
- return DRW_mesh_batch_cache_get_loose_edges(mesh_eval);
- }
- else {
- /* TODO */
- return NULL;
- }
+ BLI_assert(ob->type == OB_FONT);
+ struct Curve *cu = ob->data;
+ struct Mesh *mesh_eval = ob->runtime.mesh_eval;
+ if (cu->editfont && (cu->flag & CU_FAST)) {
+ return NULL;
+ }
+ if (mesh_eval != NULL) {
+ return DRW_mesh_batch_cache_get_loose_edges(mesh_eval);
+ }
+ else {
+ /* TODO */
+ return NULL;
+ }
}
GPUBatch *DRW_cache_text_face_wireframe_get(Object *ob)
{
- BLI_assert(ob->type == OB_FONT);
- struct Curve *cu = ob->data;
- struct Mesh *mesh_eval = ob->runtime.mesh_eval;
- if (cu->editfont && (cu->flag & CU_FAST)) {
- return NULL;
- }
- if (mesh_eval != NULL) {
- return DRW_mesh_batch_cache_get_wireframes_face(mesh_eval);
- }
- else {
- return DRW_curve_batch_cache_get_wireframes_face(cu);
- }
-}
-
-GPUBatch **DRW_cache_text_surface_shaded_get(
- Object *ob, struct GPUMaterial **gpumat_array, uint gpumat_array_len)
-{
- BLI_assert(ob->type == OB_FONT);
- struct Curve *cu = ob->data;
- struct Mesh *mesh_eval = ob->runtime.mesh_eval;
- if (cu->editfont && (cu->flag & CU_FAST)) {
- return NULL;
- }
- if (mesh_eval != NULL) {
- return DRW_mesh_batch_cache_get_surface_shaded(mesh_eval, gpumat_array, gpumat_array_len, NULL, NULL, NULL);
- }
- else {
- return DRW_curve_batch_cache_get_surface_shaded(cu, gpumat_array, gpumat_array_len);
- }
+ BLI_assert(ob->type == OB_FONT);
+ struct Curve *cu = ob->data;
+ struct Mesh *mesh_eval = ob->runtime.mesh_eval;
+ if (cu->editfont && (cu->flag & CU_FAST)) {
+ return NULL;
+ }
+ if (mesh_eval != NULL) {
+ return DRW_mesh_batch_cache_get_wireframes_face(mesh_eval);
+ }
+ else {
+ return DRW_curve_batch_cache_get_wireframes_face(cu);
+ }
+}
+
+GPUBatch **DRW_cache_text_surface_shaded_get(Object *ob,
+ struct GPUMaterial **gpumat_array,
+ uint gpumat_array_len)
+{
+ BLI_assert(ob->type == OB_FONT);
+ struct Curve *cu = ob->data;
+ struct Mesh *mesh_eval = ob->runtime.mesh_eval;
+ if (cu->editfont && (cu->flag & CU_FAST)) {
+ return NULL;
+ }
+ if (mesh_eval != NULL) {
+ return DRW_mesh_batch_cache_get_surface_shaded(
+ mesh_eval, gpumat_array, gpumat_array_len, NULL, NULL, NULL);
+ }
+ else {
+ return DRW_curve_batch_cache_get_surface_shaded(cu, gpumat_array, gpumat_array_len);
+ }
}
/** \} */
@@ -3351,83 +3529,85 @@ GPUBatch **DRW_cache_text_surface_shaded_get(
GPUBatch *DRW_cache_surf_surface_get(Object *ob)
{
- BLI_assert(ob->type == OB_SURF);
+ BLI_assert(ob->type == OB_SURF);
- struct Curve *cu = ob->data;
- struct Mesh *mesh_eval = ob->runtime.mesh_eval;
- if (mesh_eval != NULL) {
- return DRW_mesh_batch_cache_get_surface(mesh_eval);
- }
- else {
- return DRW_curve_batch_cache_get_triangles_with_normals(cu);
- }
+ struct Curve *cu = ob->data;
+ struct Mesh *mesh_eval = ob->runtime.mesh_eval;
+ if (mesh_eval != NULL) {
+ return DRW_mesh_batch_cache_get_surface(mesh_eval);
+ }
+ else {
+ return DRW_curve_batch_cache_get_triangles_with_normals(cu);
+ }
}
GPUBatch *DRW_cache_surf_edge_wire_get(Object *ob)
{
- BLI_assert(ob->type == OB_SURF);
+ BLI_assert(ob->type == OB_SURF);
- struct Curve *cu = ob->data;
- return DRW_curve_batch_cache_get_wire_edge(cu);
+ struct Curve *cu = ob->data;
+ return DRW_curve_batch_cache_get_wire_edge(cu);
}
GPUBatch *DRW_cache_surf_face_wireframe_get(Object *ob)
{
- BLI_assert(ob->type == OB_SURF);
+ BLI_assert(ob->type == OB_SURF);
- struct Curve *cu = ob->data;
- struct Mesh *mesh_eval = ob->runtime.mesh_eval;
- if (mesh_eval != NULL) {
- return DRW_mesh_batch_cache_get_wireframes_face(mesh_eval);
- }
- else {
- return DRW_curve_batch_cache_get_wireframes_face(cu);
- }
+ struct Curve *cu = ob->data;
+ struct Mesh *mesh_eval = ob->runtime.mesh_eval;
+ if (mesh_eval != NULL) {
+ return DRW_mesh_batch_cache_get_wireframes_face(mesh_eval);
+ }
+ else {
+ return DRW_curve_batch_cache_get_wireframes_face(cu);
+ }
}
GPUBatch *DRW_cache_surf_edge_detection_get(Object *ob, bool *r_is_manifold)
{
- BLI_assert(ob->type == OB_SURF);
- struct Curve *cu = ob->data;
- struct Mesh *mesh_eval = ob->runtime.mesh_eval;
- if (mesh_eval != NULL) {
- return DRW_mesh_batch_cache_get_edge_detection(mesh_eval, r_is_manifold);
- }
- else {
- return DRW_curve_batch_cache_get_edge_detection(cu, r_is_manifold);
- }
+ BLI_assert(ob->type == OB_SURF);
+ struct Curve *cu = ob->data;
+ struct Mesh *mesh_eval = ob->runtime.mesh_eval;
+ if (mesh_eval != NULL) {
+ return DRW_mesh_batch_cache_get_edge_detection(mesh_eval, r_is_manifold);
+ }
+ else {
+ return DRW_curve_batch_cache_get_edge_detection(cu, r_is_manifold);
+ }
}
GPUBatch *DRW_cache_surf_loose_edges_get(Object *ob)
{
- BLI_assert(ob->type == OB_SURF);
+ BLI_assert(ob->type == OB_SURF);
- struct Curve *cu = ob->data;
- struct Mesh *mesh_eval = ob->runtime.mesh_eval;
- if (mesh_eval != NULL) {
- return DRW_mesh_batch_cache_get_loose_edges(mesh_eval);
- }
- else {
- /* TODO */
- UNUSED_VARS(cu);
- return NULL;
- }
+ struct Curve *cu = ob->data;
+ struct Mesh *mesh_eval = ob->runtime.mesh_eval;
+ if (mesh_eval != NULL) {
+ return DRW_mesh_batch_cache_get_loose_edges(mesh_eval);
+ }
+ else {
+ /* TODO */
+ UNUSED_VARS(cu);
+ return NULL;
+ }
}
/* Return list of batches */
-GPUBatch **DRW_cache_surf_surface_shaded_get(
- Object *ob, struct GPUMaterial **gpumat_array, uint gpumat_array_len)
+GPUBatch **DRW_cache_surf_surface_shaded_get(Object *ob,
+ struct GPUMaterial **gpumat_array,
+ uint gpumat_array_len)
{
- BLI_assert(ob->type == OB_SURF);
+ BLI_assert(ob->type == OB_SURF);
- struct Curve *cu = ob->data;
- struct Mesh *mesh_eval = ob->runtime.mesh_eval;
- if (mesh_eval != NULL) {
- return DRW_mesh_batch_cache_get_surface_shaded(mesh_eval, gpumat_array, gpumat_array_len, NULL, NULL, NULL);
- }
- else {
- return DRW_curve_batch_cache_get_surface_shaded(cu, gpumat_array, gpumat_array_len);
- }
+ struct Curve *cu = ob->data;
+ struct Mesh *mesh_eval = ob->runtime.mesh_eval;
+ if (mesh_eval != NULL) {
+ return DRW_mesh_batch_cache_get_surface_shaded(
+ mesh_eval, gpumat_array, gpumat_array_len, NULL, NULL, NULL);
+ }
+ else {
+ return DRW_curve_batch_cache_get_surface_shaded(cu, gpumat_array, gpumat_array_len);
+ }
}
/** \} */
@@ -3438,32 +3618,32 @@ GPUBatch **DRW_cache_surf_surface_shaded_get(
GPUBatch *DRW_cache_lattice_verts_get(Object *ob)
{
- BLI_assert(ob->type == OB_LATTICE);
+ BLI_assert(ob->type == OB_LATTICE);
- struct Lattice *lt = ob->data;
- return DRW_lattice_batch_cache_get_all_verts(lt);
+ struct Lattice *lt = ob->data;
+ return DRW_lattice_batch_cache_get_all_verts(lt);
}
GPUBatch *DRW_cache_lattice_wire_get(Object *ob, bool use_weight)
{
- BLI_assert(ob->type == OB_LATTICE);
+ BLI_assert(ob->type == OB_LATTICE);
- Lattice *lt = ob->data;
- int actdef = -1;
+ Lattice *lt = ob->data;
+ int actdef = -1;
- if (use_weight && ob->defbase.first && lt->editlatt->latt->dvert) {
- actdef = ob->actdef - 1;
- }
+ if (use_weight && ob->defbase.first && lt->editlatt->latt->dvert) {
+ actdef = ob->actdef - 1;
+ }
- return DRW_lattice_batch_cache_get_all_edges(lt, use_weight, actdef);
+ return DRW_lattice_batch_cache_get_all_edges(lt, use_weight, actdef);
}
GPUBatch *DRW_cache_lattice_vert_overlay_get(Object *ob)
{
- BLI_assert(ob->type == OB_LATTICE);
+ BLI_assert(ob->type == OB_LATTICE);
- struct Lattice *lt = ob->data;
- return DRW_lattice_batch_cache_get_edit_verts(lt);
+ struct Lattice *lt = ob->data;
+ return DRW_lattice_batch_cache_get_edit_verts(lt);
}
/** \} */
@@ -3474,263 +3654,266 @@ GPUBatch *DRW_cache_lattice_vert_overlay_get(Object *ob)
GPUBatch *DRW_cache_particles_get_hair(Object *object, ParticleSystem *psys, ModifierData *md)
{
- return DRW_particles_batch_cache_get_hair(object, psys, md);
+ return DRW_particles_batch_cache_get_hair(object, psys, md);
}
GPUBatch *DRW_cache_particles_get_dots(Object *object, ParticleSystem *psys)
{
- return DRW_particles_batch_cache_get_dots(object, psys);
+ return DRW_particles_batch_cache_get_dots(object, psys);
}
-GPUBatch *DRW_cache_particles_get_edit_strands(
- Object *object,
- ParticleSystem *psys,
- struct PTCacheEdit *edit,
- bool use_weight)
+GPUBatch *DRW_cache_particles_get_edit_strands(Object *object,
+ ParticleSystem *psys,
+ struct PTCacheEdit *edit,
+ bool use_weight)
{
- return DRW_particles_batch_cache_get_edit_strands(object, psys, edit, use_weight);
+ return DRW_particles_batch_cache_get_edit_strands(object, psys, edit, use_weight);
}
-GPUBatch *DRW_cache_particles_get_edit_inner_points(
- Object *object,
- ParticleSystem *psys,
- struct PTCacheEdit *edit)
+GPUBatch *DRW_cache_particles_get_edit_inner_points(Object *object,
+ ParticleSystem *psys,
+ struct PTCacheEdit *edit)
{
- return DRW_particles_batch_cache_get_edit_inner_points(object, psys, edit);
+ return DRW_particles_batch_cache_get_edit_inner_points(object, psys, edit);
}
-GPUBatch *DRW_cache_particles_get_edit_tip_points(
- Object *object,
- ParticleSystem *psys,
- struct PTCacheEdit *edit)
+GPUBatch *DRW_cache_particles_get_edit_tip_points(Object *object,
+ ParticleSystem *psys,
+ struct PTCacheEdit *edit)
{
- return DRW_particles_batch_cache_get_edit_tip_points(object, psys, edit);
+ return DRW_particles_batch_cache_get_edit_tip_points(object, psys, edit);
}
GPUBatch *DRW_cache_particles_get_prim(int type)
{
- switch (type) {
- case PART_DRAW_CROSS:
- if (!SHC.drw_particle_cross) {
- static GPUVertFormat format = { 0 };
- static uint pos_id, axis_id;
-
- if (format.attr_len == 0) {
- pos_id = GPU_vertformat_attr_add(&format, "inst_pos", GPU_COMP_F32, 3, GPU_FETCH_FLOAT);
- axis_id = GPU_vertformat_attr_add(&format, "axis", GPU_COMP_I32, 1, GPU_FETCH_INT);
- }
-
- GPUVertBuf *vbo = GPU_vertbuf_create_with_format(&format);
- GPU_vertbuf_data_alloc(vbo, 6);
-
- /* X axis */
- float co[3] = {-1.0f, 0.0f, 0.0f};
- int axis = -1;
- GPU_vertbuf_attr_set(vbo, pos_id, 0, co);
- GPU_vertbuf_attr_set(vbo, axis_id, 0, &axis);
-
- co[0] = 1.0f;
- GPU_vertbuf_attr_set(vbo, pos_id, 1, co);
- GPU_vertbuf_attr_set(vbo, axis_id, 1, &axis);
-
- /* Y axis */
- co[0] = 0.0f;
- co[1] = -1.0f;
- GPU_vertbuf_attr_set(vbo, pos_id, 2, co);
- GPU_vertbuf_attr_set(vbo, axis_id, 2, &axis);
-
- co[1] = 1.0f;
- GPU_vertbuf_attr_set(vbo, pos_id, 3, co);
- GPU_vertbuf_attr_set(vbo, axis_id, 3, &axis);
-
- /* Z axis */
- co[1] = 0.0f;
- co[2] = -1.0f;
- GPU_vertbuf_attr_set(vbo, pos_id, 4, co);
- GPU_vertbuf_attr_set(vbo, axis_id, 4, &axis);
-
- co[2] = 1.0f;
- GPU_vertbuf_attr_set(vbo, pos_id, 5, co);
- GPU_vertbuf_attr_set(vbo, axis_id, 5, &axis);
-
- SHC.drw_particle_cross = GPU_batch_create_ex(GPU_PRIM_LINES, vbo, NULL, GPU_BATCH_OWNS_VBO);
- }
-
- return SHC.drw_particle_cross;
- case PART_DRAW_AXIS:
- if (!SHC.drw_particle_axis) {
- static GPUVertFormat format = { 0 };
- static uint pos_id, axis_id;
-
- if (format.attr_len == 0) {
- pos_id = GPU_vertformat_attr_add(&format, "inst_pos", GPU_COMP_F32, 3, GPU_FETCH_FLOAT);
- axis_id = GPU_vertformat_attr_add(&format, "axis", GPU_COMP_I32, 1, GPU_FETCH_INT);
- }
-
- GPUVertBuf *vbo = GPU_vertbuf_create_with_format(&format);
- GPU_vertbuf_data_alloc(vbo, 6);
-
- /* X axis */
- float co[3] = {0.0f, 0.0f, 0.0f};
- int axis = 0;
- GPU_vertbuf_attr_set(vbo, pos_id, 0, co);
- GPU_vertbuf_attr_set(vbo, axis_id, 0, &axis);
-
- co[0] = 1.0f;
- GPU_vertbuf_attr_set(vbo, pos_id, 1, co);
- GPU_vertbuf_attr_set(vbo, axis_id, 1, &axis);
-
- /* Y axis */
- co[0] = 0.0f;
- axis = 1;
- GPU_vertbuf_attr_set(vbo, pos_id, 2, co);
- GPU_vertbuf_attr_set(vbo, axis_id, 2, &axis);
-
- co[1] = 1.0f;
- GPU_vertbuf_attr_set(vbo, pos_id, 3, co);
- GPU_vertbuf_attr_set(vbo, axis_id, 3, &axis);
-
- /* Z axis */
- co[1] = 0.0f;
- axis = 2;
- GPU_vertbuf_attr_set(vbo, pos_id, 4, co);
- GPU_vertbuf_attr_set(vbo, axis_id, 4, &axis);
-
- co[2] = 1.0f;
- GPU_vertbuf_attr_set(vbo, pos_id, 5, co);
- GPU_vertbuf_attr_set(vbo, axis_id, 5, &axis);
-
- SHC.drw_particle_axis = GPU_batch_create_ex(GPU_PRIM_LINES, vbo, NULL, GPU_BATCH_OWNS_VBO);
- }
-
- return SHC.drw_particle_axis;
- case PART_DRAW_CIRC:
+ switch (type) {
+ case PART_DRAW_CROSS:
+ if (!SHC.drw_particle_cross) {
+ static GPUVertFormat format = {0};
+ static uint pos_id, axis_id;
+
+ if (format.attr_len == 0) {
+ pos_id = GPU_vertformat_attr_add(&format, "inst_pos", GPU_COMP_F32, 3, GPU_FETCH_FLOAT);
+ axis_id = GPU_vertformat_attr_add(&format, "axis", GPU_COMP_I32, 1, GPU_FETCH_INT);
+ }
+
+ GPUVertBuf *vbo = GPU_vertbuf_create_with_format(&format);
+ GPU_vertbuf_data_alloc(vbo, 6);
+
+ /* X axis */
+ float co[3] = {-1.0f, 0.0f, 0.0f};
+ int axis = -1;
+ GPU_vertbuf_attr_set(vbo, pos_id, 0, co);
+ GPU_vertbuf_attr_set(vbo, axis_id, 0, &axis);
+
+ co[0] = 1.0f;
+ GPU_vertbuf_attr_set(vbo, pos_id, 1, co);
+ GPU_vertbuf_attr_set(vbo, axis_id, 1, &axis);
+
+ /* Y axis */
+ co[0] = 0.0f;
+ co[1] = -1.0f;
+ GPU_vertbuf_attr_set(vbo, pos_id, 2, co);
+ GPU_vertbuf_attr_set(vbo, axis_id, 2, &axis);
+
+ co[1] = 1.0f;
+ GPU_vertbuf_attr_set(vbo, pos_id, 3, co);
+ GPU_vertbuf_attr_set(vbo, axis_id, 3, &axis);
+
+ /* Z axis */
+ co[1] = 0.0f;
+ co[2] = -1.0f;
+ GPU_vertbuf_attr_set(vbo, pos_id, 4, co);
+ GPU_vertbuf_attr_set(vbo, axis_id, 4, &axis);
+
+ co[2] = 1.0f;
+ GPU_vertbuf_attr_set(vbo, pos_id, 5, co);
+ GPU_vertbuf_attr_set(vbo, axis_id, 5, &axis);
+
+ SHC.drw_particle_cross = GPU_batch_create_ex(
+ GPU_PRIM_LINES, vbo, NULL, GPU_BATCH_OWNS_VBO);
+ }
+
+ return SHC.drw_particle_cross;
+ case PART_DRAW_AXIS:
+ if (!SHC.drw_particle_axis) {
+ static GPUVertFormat format = {0};
+ static uint pos_id, axis_id;
+
+ if (format.attr_len == 0) {
+ pos_id = GPU_vertformat_attr_add(&format, "inst_pos", GPU_COMP_F32, 3, GPU_FETCH_FLOAT);
+ axis_id = GPU_vertformat_attr_add(&format, "axis", GPU_COMP_I32, 1, GPU_FETCH_INT);
+ }
+
+ GPUVertBuf *vbo = GPU_vertbuf_create_with_format(&format);
+ GPU_vertbuf_data_alloc(vbo, 6);
+
+ /* X axis */
+ float co[3] = {0.0f, 0.0f, 0.0f};
+ int axis = 0;
+ GPU_vertbuf_attr_set(vbo, pos_id, 0, co);
+ GPU_vertbuf_attr_set(vbo, axis_id, 0, &axis);
+
+ co[0] = 1.0f;
+ GPU_vertbuf_attr_set(vbo, pos_id, 1, co);
+ GPU_vertbuf_attr_set(vbo, axis_id, 1, &axis);
+
+ /* Y axis */
+ co[0] = 0.0f;
+ axis = 1;
+ GPU_vertbuf_attr_set(vbo, pos_id, 2, co);
+ GPU_vertbuf_attr_set(vbo, axis_id, 2, &axis);
+
+ co[1] = 1.0f;
+ GPU_vertbuf_attr_set(vbo, pos_id, 3, co);
+ GPU_vertbuf_attr_set(vbo, axis_id, 3, &axis);
+
+ /* Z axis */
+ co[1] = 0.0f;
+ axis = 2;
+ GPU_vertbuf_attr_set(vbo, pos_id, 4, co);
+ GPU_vertbuf_attr_set(vbo, axis_id, 4, &axis);
+
+ co[2] = 1.0f;
+ GPU_vertbuf_attr_set(vbo, pos_id, 5, co);
+ GPU_vertbuf_attr_set(vbo, axis_id, 5, &axis);
+
+ SHC.drw_particle_axis = GPU_batch_create_ex(GPU_PRIM_LINES, vbo, NULL, GPU_BATCH_OWNS_VBO);
+ }
+
+ return SHC.drw_particle_axis;
+ case PART_DRAW_CIRC:
#define CIRCLE_RESOL 32
- if (!SHC.drw_particle_circle) {
- float v[3] = {0.0f, 0.0f, 0.0f};
- int axis = -1;
-
- static GPUVertFormat format = { 0 };
- static uint pos_id, axis_id;
-
- if (format.attr_len == 0) {
- pos_id = GPU_vertformat_attr_add(&format, "inst_pos", GPU_COMP_F32, 3, GPU_FETCH_FLOAT);
- axis_id = GPU_vertformat_attr_add(&format, "axis", GPU_COMP_I32, 1, GPU_FETCH_INT);
- }
-
- GPUVertBuf *vbo = GPU_vertbuf_create_with_format(&format);
- GPU_vertbuf_data_alloc(vbo, CIRCLE_RESOL);
-
- for (int a = 0; a < CIRCLE_RESOL; a++) {
- v[0] = sinf((2.0f * M_PI * a) / ((float)CIRCLE_RESOL));
- v[1] = cosf((2.0f * M_PI * a) / ((float)CIRCLE_RESOL));
- v[2] = 0.0f;
- GPU_vertbuf_attr_set(vbo, pos_id, a, v);
- GPU_vertbuf_attr_set(vbo, axis_id, a, &axis);
- }
-
- SHC.drw_particle_circle = GPU_batch_create_ex(GPU_PRIM_LINE_LOOP, vbo, NULL, GPU_BATCH_OWNS_VBO);
- }
-
- return SHC.drw_particle_circle;
+ if (!SHC.drw_particle_circle) {
+ float v[3] = {0.0f, 0.0f, 0.0f};
+ int axis = -1;
+
+ static GPUVertFormat format = {0};
+ static uint pos_id, axis_id;
+
+ if (format.attr_len == 0) {
+ pos_id = GPU_vertformat_attr_add(&format, "inst_pos", GPU_COMP_F32, 3, GPU_FETCH_FLOAT);
+ axis_id = GPU_vertformat_attr_add(&format, "axis", GPU_COMP_I32, 1, GPU_FETCH_INT);
+ }
+
+ GPUVertBuf *vbo = GPU_vertbuf_create_with_format(&format);
+ GPU_vertbuf_data_alloc(vbo, CIRCLE_RESOL);
+
+ for (int a = 0; a < CIRCLE_RESOL; a++) {
+ v[0] = sinf((2.0f * M_PI * a) / ((float)CIRCLE_RESOL));
+ v[1] = cosf((2.0f * M_PI * a) / ((float)CIRCLE_RESOL));
+ v[2] = 0.0f;
+ GPU_vertbuf_attr_set(vbo, pos_id, a, v);
+ GPU_vertbuf_attr_set(vbo, axis_id, a, &axis);
+ }
+
+ SHC.drw_particle_circle = GPU_batch_create_ex(
+ GPU_PRIM_LINE_LOOP, vbo, NULL, GPU_BATCH_OWNS_VBO);
+ }
+
+ return SHC.drw_particle_circle;
#undef CIRCLE_RESOL
- default:
- BLI_assert(false);
- break;
- }
+ default:
+ BLI_assert(false);
+ break;
+ }
- return NULL;
+ return NULL;
}
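
Aside: the three cases above (PART_DRAW_CROSS/AXIS/CIRC) share one construction idiom that is easy to miss in the reformatting noise. A stripped-down sketch, restricted to APIs that appear in this patch (helper name and attribute choice are illustrative only; assumes the GPU_* headers included by draw_cache.c):

    /* Build-once pattern: a static GPUVertFormat is initialized on first
     * use, vertices are written one attribute at a time, and the batch
     * takes ownership of the VBO. */
    static GPUBatch *example_unit_line(void)
    {
      static GPUVertFormat format = {0};
      static uint pos_id;
      if (format.attr_len == 0) {
        pos_id = GPU_vertformat_attr_add(&format, "pos", GPU_COMP_F32, 3, GPU_FETCH_FLOAT);
      }
      GPUVertBuf *vbo = GPU_vertbuf_create_with_format(&format);
      GPU_vertbuf_data_alloc(vbo, 2);
      GPU_vertbuf_attr_set(vbo, pos_id, 0, (const float[3]){0.0f, 0.0f, 0.0f});
      GPU_vertbuf_attr_set(vbo, pos_id, 1, (const float[3]){0.0f, 0.0f, 1.0f});
      /* GPU_BATCH_OWNS_VBO: the batch frees the VBO when it is discarded. */
      return GPU_batch_create_ex(GPU_PRIM_LINES, vbo, NULL, GPU_BATCH_OWNS_VBO);
    }
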
/* 3D cursor */
GPUBatch *DRW_cache_cursor_get(bool crosshair_lines)
{
- GPUBatch **drw_cursor = crosshair_lines ? &SHC.drw_cursor : &SHC.drw_cursor_only_circle;
+ GPUBatch **drw_cursor = crosshair_lines ? &SHC.drw_cursor : &SHC.drw_cursor_only_circle;
- if (*drw_cursor == NULL) {
- const float f5 = 0.25f;
- const float f10 = 0.5f;
- const float f20 = 1.0f;
+ if (*drw_cursor == NULL) {
+ const float f5 = 0.25f;
+ const float f10 = 0.5f;
+ const float f20 = 1.0f;
- const int segments = 16;
- const int vert_len = segments + 8;
- const int index_len = vert_len + 5;
+ const int segments = 16;
+ const int vert_len = segments + 8;
+ const int index_len = vert_len + 5;
- uchar red[3] = {255, 0, 0};
- uchar white[3] = {255, 255, 255};
+ uchar red[3] = {255, 0, 0};
+ uchar white[3] = {255, 255, 255};
- static GPUVertFormat format = { 0 };
- static struct { uint pos, color; } attr_id;
- if (format.attr_len == 0) {
- attr_id.pos = GPU_vertformat_attr_add(&format, "pos", GPU_COMP_F32, 2, GPU_FETCH_FLOAT);
- attr_id.color = GPU_vertformat_attr_add(&format, "color", GPU_COMP_U8, 3, GPU_FETCH_INT_TO_FLOAT_UNIT);
- }
+ static GPUVertFormat format = {0};
+ static struct {
+ uint pos, color;
+ } attr_id;
+ if (format.attr_len == 0) {
+ attr_id.pos = GPU_vertformat_attr_add(&format, "pos", GPU_COMP_F32, 2, GPU_FETCH_FLOAT);
+ attr_id.color = GPU_vertformat_attr_add(
+ &format, "color", GPU_COMP_U8, 3, GPU_FETCH_INT_TO_FLOAT_UNIT);
+ }
- GPUIndexBufBuilder elb;
- GPU_indexbuf_init_ex(&elb, GPU_PRIM_LINE_STRIP, index_len, vert_len, true);
+ GPUIndexBufBuilder elb;
+ GPU_indexbuf_init_ex(&elb, GPU_PRIM_LINE_STRIP, index_len, vert_len, true);
- GPUVertBuf *vbo = GPU_vertbuf_create_with_format(&format);
- GPU_vertbuf_data_alloc(vbo, vert_len);
+ GPUVertBuf *vbo = GPU_vertbuf_create_with_format(&format);
+ GPU_vertbuf_data_alloc(vbo, vert_len);
- int v = 0;
- for (int i = 0; i < segments; ++i) {
- float angle = (float)(2 * M_PI) * ((float)i / (float)segments);
- float x = f10 * cosf(angle);
- float y = f10 * sinf(angle);
+ int v = 0;
+ for (int i = 0; i < segments; ++i) {
+ float angle = (float)(2 * M_PI) * ((float)i / (float)segments);
+ float x = f10 * cosf(angle);
+ float y = f10 * sinf(angle);
- GPU_vertbuf_attr_set(vbo, attr_id.color, v, (i % 2 == 0) ? red : white);
+ GPU_vertbuf_attr_set(vbo, attr_id.color, v, (i % 2 == 0) ? red : white);
- GPU_vertbuf_attr_set(vbo, attr_id.pos, v, (const float[2]){x, y});
- GPU_indexbuf_add_generic_vert(&elb, v++);
- }
- GPU_indexbuf_add_generic_vert(&elb, 0);
+ GPU_vertbuf_attr_set(vbo, attr_id.pos, v, (const float[2]){x, y});
+ GPU_indexbuf_add_generic_vert(&elb, v++);
+ }
+ GPU_indexbuf_add_generic_vert(&elb, 0);
- if (crosshair_lines) {
- uchar crosshair_color[3];
- UI_GetThemeColor3ubv(TH_VIEW_OVERLAY, crosshair_color);
+ if (crosshair_lines) {
+ uchar crosshair_color[3];
+ UI_GetThemeColor3ubv(TH_VIEW_OVERLAY, crosshair_color);
- GPU_indexbuf_add_primitive_restart(&elb);
+ GPU_indexbuf_add_primitive_restart(&elb);
- GPU_vertbuf_attr_set(vbo, attr_id.pos, v, (const float[2]){-f20, 0});
- GPU_vertbuf_attr_set(vbo, attr_id.color, v, crosshair_color);
- GPU_indexbuf_add_generic_vert(&elb, v++);
- GPU_vertbuf_attr_set(vbo, attr_id.pos, v, (const float[2]){-f5, 0});
- GPU_vertbuf_attr_set(vbo, attr_id.color, v, crosshair_color);
- GPU_indexbuf_add_generic_vert(&elb, v++);
+ GPU_vertbuf_attr_set(vbo, attr_id.pos, v, (const float[2]){-f20, 0});
+ GPU_vertbuf_attr_set(vbo, attr_id.color, v, crosshair_color);
+ GPU_indexbuf_add_generic_vert(&elb, v++);
+ GPU_vertbuf_attr_set(vbo, attr_id.pos, v, (const float[2]){-f5, 0});
+ GPU_vertbuf_attr_set(vbo, attr_id.color, v, crosshair_color);
+ GPU_indexbuf_add_generic_vert(&elb, v++);
- GPU_indexbuf_add_primitive_restart(&elb);
+ GPU_indexbuf_add_primitive_restart(&elb);
- GPU_vertbuf_attr_set(vbo, attr_id.pos, v, (const float[2]){+f5, 0});
- GPU_vertbuf_attr_set(vbo, attr_id.color, v, crosshair_color);
- GPU_indexbuf_add_generic_vert(&elb, v++);
- GPU_vertbuf_attr_set(vbo, attr_id.pos, v, (const float[2]){+f20, 0});
- GPU_vertbuf_attr_set(vbo, attr_id.color, v, crosshair_color);
- GPU_indexbuf_add_generic_vert(&elb, v++);
+ GPU_vertbuf_attr_set(vbo, attr_id.pos, v, (const float[2]){+f5, 0});
+ GPU_vertbuf_attr_set(vbo, attr_id.color, v, crosshair_color);
+ GPU_indexbuf_add_generic_vert(&elb, v++);
+ GPU_vertbuf_attr_set(vbo, attr_id.pos, v, (const float[2]){+f20, 0});
+ GPU_vertbuf_attr_set(vbo, attr_id.color, v, crosshair_color);
+ GPU_indexbuf_add_generic_vert(&elb, v++);
- GPU_indexbuf_add_primitive_restart(&elb);
+ GPU_indexbuf_add_primitive_restart(&elb);
- GPU_vertbuf_attr_set(vbo, attr_id.pos, v, (const float[2]){0, -f20});
- GPU_vertbuf_attr_set(vbo, attr_id.color, v, crosshair_color);
- GPU_indexbuf_add_generic_vert(&elb, v++);
- GPU_vertbuf_attr_set(vbo, attr_id.pos, v, (const float[2]){0, -f5});
- GPU_vertbuf_attr_set(vbo, attr_id.color, v, crosshair_color);
- GPU_indexbuf_add_generic_vert(&elb, v++);
+ GPU_vertbuf_attr_set(vbo, attr_id.pos, v, (const float[2]){0, -f20});
+ GPU_vertbuf_attr_set(vbo, attr_id.color, v, crosshair_color);
+ GPU_indexbuf_add_generic_vert(&elb, v++);
+ GPU_vertbuf_attr_set(vbo, attr_id.pos, v, (const float[2]){0, -f5});
+ GPU_vertbuf_attr_set(vbo, attr_id.color, v, crosshair_color);
+ GPU_indexbuf_add_generic_vert(&elb, v++);
- GPU_indexbuf_add_primitive_restart(&elb);
+ GPU_indexbuf_add_primitive_restart(&elb);
- GPU_vertbuf_attr_set(vbo, attr_id.pos, v, (const float[2]){0, +f5});
- GPU_vertbuf_attr_set(vbo, attr_id.color, v, crosshair_color);
- GPU_indexbuf_add_generic_vert(&elb, v++);
- GPU_vertbuf_attr_set(vbo, attr_id.pos, v, (const float[2]){0, +f20});
- GPU_vertbuf_attr_set(vbo, attr_id.color, v, crosshair_color);
- GPU_indexbuf_add_generic_vert(&elb, v++);
- }
+ GPU_vertbuf_attr_set(vbo, attr_id.pos, v, (const float[2]){0, +f5});
+ GPU_vertbuf_attr_set(vbo, attr_id.color, v, crosshair_color);
+ GPU_indexbuf_add_generic_vert(&elb, v++);
+ GPU_vertbuf_attr_set(vbo, attr_id.pos, v, (const float[2]){0, +f20});
+ GPU_vertbuf_attr_set(vbo, attr_id.color, v, crosshair_color);
+ GPU_indexbuf_add_generic_vert(&elb, v++);
+ }
- GPUIndexBuf *ibo = GPU_indexbuf_build(&elb);
+ GPUIndexBuf *ibo = GPU_indexbuf_build(&elb);
- *drw_cursor = GPU_batch_create_ex(GPU_PRIM_LINE_STRIP, vbo, ibo, GPU_BATCH_OWNS_VBO | GPU_BATCH_OWNS_INDEX);
- }
- return *drw_cursor;
+ *drw_cursor = GPU_batch_create_ex(
+ GPU_PRIM_LINE_STRIP, vbo, ibo, GPU_BATCH_OWNS_VBO | GPU_BATCH_OWNS_INDEX);
+ }
+ return *drw_cursor;
}
/** \} */
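
Aside: the cursor batch above is the one place in this hunk that stitches several disjoint strokes into a single GPU_PRIM_LINE_STRIP draw by inserting primitive-restart indices. A minimal sketch of just that trick, using only calls visible in this patch (helper name hypothetical):

    /* Two separate 2D segments drawn as one indexed line strip. */
    static GPUBatch *example_broken_strip(void)
    {
      static GPUVertFormat format = {0};
      static uint pos_id;
      if (format.attr_len == 0) {
        pos_id = GPU_vertformat_attr_add(&format, "pos", GPU_COMP_F32, 2, GPU_FETCH_FLOAT);
      }
      GPUVertBuf *vbo = GPU_vertbuf_create_with_format(&format);
      GPU_vertbuf_data_alloc(vbo, 4);
      GPU_vertbuf_attr_set(vbo, pos_id, 0, (const float[2]){-1.0f, 0.0f});
      GPU_vertbuf_attr_set(vbo, pos_id, 1, (const float[2]){-0.5f, 0.0f});
      GPU_vertbuf_attr_set(vbo, pos_id, 2, (const float[2]){+0.5f, 0.0f});
      GPU_vertbuf_attr_set(vbo, pos_id, 3, (const float[2]){+1.0f, 0.0f});

      GPUIndexBufBuilder elb;
      /* 4 vertex indices + 1 restart index; final 'true' enables restart. */
      GPU_indexbuf_init_ex(&elb, GPU_PRIM_LINE_STRIP, 5, 4, true);
      GPU_indexbuf_add_generic_vert(&elb, 0);
      GPU_indexbuf_add_generic_vert(&elb, 1);
      GPU_indexbuf_add_primitive_restart(&elb); /* break the strip here */
      GPU_indexbuf_add_generic_vert(&elb, 2);
      GPU_indexbuf_add_generic_vert(&elb, 3);

      GPUIndexBuf *ibo = GPU_indexbuf_build(&elb);
      return GPU_batch_create_ex(
          GPU_PRIM_LINE_STRIP, vbo, ibo, GPU_BATCH_OWNS_VBO | GPU_BATCH_OWNS_INDEX);
    }
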
@@ -3741,97 +3924,97 @@ GPUBatch *DRW_cache_cursor_get(bool crosshair_lines)
GPUBatch *DRW_batch_request(GPUBatch **batch)
{
- /* XXX TODO(fclem): We are writting to batch cache here. Need to make this thread safe. */
- if (*batch == NULL) {
- *batch = MEM_callocN(sizeof(GPUBatch), "GPUBatch");
- }
- return *batch;
+  /* XXX TODO(fclem): We are writing to batch cache here. Need to make this thread safe. */
+ if (*batch == NULL) {
+ *batch = MEM_callocN(sizeof(GPUBatch), "GPUBatch");
+ }
+ return *batch;
}
bool DRW_batch_requested(GPUBatch *batch, int prim_type)
{
- /* Batch has been requested if it has been created but not initialized. */
- if (batch != NULL && batch->verts[0] == NULL) {
- /* HACK. We init without a valid VBO and let the first vbo binding
- * fill verts[0]. */
- GPU_batch_init_ex(batch, prim_type, (GPUVertBuf *)1, NULL, 0);
- batch->verts[0] = NULL;
- return true;
- }
- return false;
+ /* Batch has been requested if it has been created but not initialized. */
+ if (batch != NULL && batch->verts[0] == NULL) {
+ /* HACK. We init without a valid VBO and let the first vbo binding
+ * fill verts[0]. */
+ GPU_batch_init_ex(batch, prim_type, (GPUVertBuf *)1, NULL, 0);
+ batch->verts[0] = NULL;
+ return true;
+ }
+ return false;
}
void DRW_ibo_request(GPUBatch *batch, GPUIndexBuf **ibo)
{
- if (*ibo == NULL) {
- *ibo = MEM_callocN(sizeof(GPUIndexBuf), "GPUIndexBuf");
- }
- GPU_batch_vao_cache_clear(batch);
- batch->elem = *ibo;
+ if (*ibo == NULL) {
+ *ibo = MEM_callocN(sizeof(GPUIndexBuf), "GPUIndexBuf");
+ }
+ GPU_batch_vao_cache_clear(batch);
+ batch->elem = *ibo;
}
bool DRW_ibo_requested(GPUIndexBuf *ibo)
{
- /* TODO do not rely on data uploaded. This prevents multithreading.
- * (need access to a gl context) */
- return (ibo != NULL && ibo->ibo_id == 0 && ibo->data == NULL);
+  /* TODO: do not rely on whether data was uploaded; that dependency prevents
+   * multithreading (uploading needs access to a GL context). */
+ return (ibo != NULL && ibo->ibo_id == 0 && ibo->data == NULL);
}
void DRW_vbo_request(GPUBatch *batch, GPUVertBuf **vbo)
{
- if (*vbo == NULL) {
- *vbo = MEM_callocN(sizeof(GPUVertBuf), "GPUVertBuf");
- }
- /* HACK set first vbo if not init. */
- if (batch->verts[0] == NULL) {
- GPU_batch_vao_cache_clear(batch);
- batch->verts[0] = *vbo;
- }
- else {
- /* HACK: bypass assert */
- int vbo_vert_len = (*vbo)->vertex_len;
- (*vbo)->vertex_len = batch->verts[0]->vertex_len;
- GPU_batch_vertbuf_add(batch, *vbo);
- (*vbo)->vertex_len = vbo_vert_len;
- }
+ if (*vbo == NULL) {
+ *vbo = MEM_callocN(sizeof(GPUVertBuf), "GPUVertBuf");
+ }
+ /* HACK set first vbo if not init. */
+ if (batch->verts[0] == NULL) {
+ GPU_batch_vao_cache_clear(batch);
+ batch->verts[0] = *vbo;
+ }
+ else {
+ /* HACK: bypass assert */
+ int vbo_vert_len = (*vbo)->vertex_len;
+ (*vbo)->vertex_len = batch->verts[0]->vertex_len;
+ GPU_batch_vertbuf_add(batch, *vbo);
+ (*vbo)->vertex_len = vbo_vert_len;
+ }
}
bool DRW_vbo_requested(GPUVertBuf *vbo)
{
- return (vbo != NULL && vbo->format.attr_len == 0);
+ return (vbo != NULL && vbo->format.attr_len == 0);
}
void drw_batch_cache_generate_requested(Object *ob)
{
- const DRWContextState *draw_ctx = DRW_context_state_get();
- const ToolSettings *ts = draw_ctx->scene->toolsettings;
- const enum eContextObjectMode mode = CTX_data_mode_enum_ex(
- draw_ctx->object_edit, draw_ctx->obact, draw_ctx->object_mode);
- const bool is_paint_mode = ELEM(mode, CTX_MODE_PAINT_TEXTURE, CTX_MODE_PAINT_VERTEX, CTX_MODE_PAINT_WEIGHT);
-
- const bool use_hide = (
- (ob->type == OB_MESH) &&
- ((is_paint_mode && (ob == draw_ctx->obact) &&
- DRW_object_use_hide_faces(ob)) ||
- ((mode == CTX_MODE_EDIT_MESH) && BKE_object_is_in_editmode(ob))));
-
- struct Mesh *mesh_eval = ob->runtime.mesh_eval;
- switch (ob->type) {
- case OB_MESH:
- DRW_mesh_batch_cache_create_requested(ob, (Mesh *)ob->data, ts, is_paint_mode, use_hide);
- break;
- case OB_CURVE:
- case OB_FONT:
- case OB_SURF:
- if (mesh_eval) {
- DRW_mesh_batch_cache_create_requested(ob, mesh_eval, ts, is_paint_mode, use_hide);
- }
- DRW_curve_batch_cache_create_requested(ob);
- break;
- /* TODO all cases */
- default:
- break;
- }
+ const DRWContextState *draw_ctx = DRW_context_state_get();
+ const ToolSettings *ts = draw_ctx->scene->toolsettings;
+ const enum eContextObjectMode mode = CTX_data_mode_enum_ex(
+ draw_ctx->object_edit, draw_ctx->obact, draw_ctx->object_mode);
+ const bool is_paint_mode = ELEM(
+ mode, CTX_MODE_PAINT_TEXTURE, CTX_MODE_PAINT_VERTEX, CTX_MODE_PAINT_WEIGHT);
+
+ const bool use_hide = ((ob->type == OB_MESH) &&
+ ((is_paint_mode && (ob == draw_ctx->obact) &&
+ DRW_object_use_hide_faces(ob)) ||
+ ((mode == CTX_MODE_EDIT_MESH) && BKE_object_is_in_editmode(ob))));
+
+ struct Mesh *mesh_eval = ob->runtime.mesh_eval;
+ switch (ob->type) {
+ case OB_MESH:
+ DRW_mesh_batch_cache_create_requested(ob, (Mesh *)ob->data, ts, is_paint_mode, use_hide);
+ break;
+ case OB_CURVE:
+ case OB_FONT:
+ case OB_SURF:
+ if (mesh_eval) {
+ DRW_mesh_batch_cache_create_requested(ob, mesh_eval, ts, is_paint_mode, use_hide);
+ }
+ DRW_curve_batch_cache_create_requested(ob);
+ break;
+ /* TODO all cases */
+ default:
+ break;
+ }
}
/** \} */
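
Aside: DRW_batch_request() through drw_batch_cache_generate_requested() above form a two-phase protocol: getters only hand out empty placeholders, and a later pass builds exactly the geometry that was requested. A compressed sketch of a hypothetical cache driving that protocol (the struct and all names are illustrative, not from this patch):

    typedef struct ExampleBatchCache {
      GPUBatch *surface;
      GPUVertBuf *pos_nor;
    } ExampleBatchCache;

    /* Phase 1: called any number of times while building draw passes;
     * allocates a placeholder batch but uploads nothing. */
    static GPUBatch *example_cache_get_surface(ExampleBatchCache *cache)
    {
      return DRW_batch_request(&cache->surface);
    }

    /* Phase 2: called once before drawing; fills only what phase 1
     * actually handed out. */
    static void example_cache_generate(ExampleBatchCache *cache)
    {
      if (DRW_batch_requested(cache->surface, GPU_PRIM_TRIS)) {
        DRW_vbo_request(cache->surface, &cache->pos_nor);
      }
      if (DRW_vbo_requested(cache->pos_nor)) {
        /* ...extract positions/normals into cache->pos_nor here... */
      }
    }
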
diff --git a/source/blender/draw/intern/draw_cache.h b/source/blender/draw/intern/draw_cache.h
index 2940a2c89f8..c9ae0a01cb9 100644
--- a/source/blender/draw/intern/draw_cache.h
+++ b/source/blender/draw/intern/draw_cache.h
@@ -52,9 +52,12 @@ struct GPUBatch *DRW_cache_object_all_edges_get(struct Object *ob);
struct GPUBatch *DRW_cache_object_edge_detection_get(struct Object *ob, bool *r_is_manifold);
struct GPUBatch *DRW_cache_object_surface_get(struct Object *ob);
struct GPUBatch *DRW_cache_object_loose_edges_get(struct Object *ob);
-struct GPUBatch **DRW_cache_object_surface_material_get(
- struct Object *ob, struct GPUMaterial **gpumat_array, uint gpumat_array_len,
- char **auto_layer_names, int **auto_layer_is_srgb, int *auto_layer_count);
+struct GPUBatch **DRW_cache_object_surface_material_get(struct Object *ob,
+ struct GPUMaterial **gpumat_array,
+ uint gpumat_array_len,
+ char **auto_layer_names,
+ int **auto_layer_is_srgb,
+ int *auto_layer_count);
struct GPUBatch *DRW_cache_object_face_wireframe_get(Object *ob);
/* Empties */
@@ -127,9 +130,12 @@ struct GPUBatch *DRW_cache_mesh_loose_edges_get(struct Object *ob);
struct GPUBatch *DRW_cache_mesh_edge_detection_get(struct Object *ob, bool *r_is_manifold);
struct GPUBatch *DRW_cache_mesh_surface_get(struct Object *ob);
struct GPUBatch *DRW_cache_mesh_surface_edges_get(struct Object *ob);
-struct GPUBatch **DRW_cache_mesh_surface_shaded_get(
- struct Object *ob, struct GPUMaterial **gpumat_array, uint gpumat_array_len,
- char **auto_layer_names, int **auto_layer_is_srgb, int *auto_layer_count);
+struct GPUBatch **DRW_cache_mesh_surface_shaded_get(struct Object *ob,
+ struct GPUMaterial **gpumat_array,
+ uint gpumat_array_len,
+ char **auto_layer_names,
+ int **auto_layer_is_srgb,
+ int *auto_layer_count);
struct GPUBatch **DRW_cache_mesh_surface_texpaint_get(struct Object *ob);
struct GPUBatch *DRW_cache_mesh_surface_texpaint_single_get(struct Object *ob);
struct GPUBatch *DRW_cache_mesh_surface_vertpaint_get(struct Object *ob);
@@ -140,8 +146,9 @@ void DRW_cache_mesh_sculpt_coords_ensure(struct Object *ob);
/* Curve */
struct GPUBatch *DRW_cache_curve_surface_get(struct Object *ob);
-struct GPUBatch **DRW_cache_curve_surface_shaded_get(
- struct Object *ob, struct GPUMaterial **gpumat_array, uint gpumat_array_len);
+struct GPUBatch **DRW_cache_curve_surface_shaded_get(struct Object *ob,
+ struct GPUMaterial **gpumat_array,
+ uint gpumat_array_len);
struct GPUBatch *DRW_cache_curve_loose_edges_get(struct Object *ob);
struct GPUBatch *DRW_cache_curve_edge_wire_get(struct Object *ob);
struct GPUBatch *DRW_cache_curve_face_wireframe_get(Object *ob);
@@ -156,16 +163,18 @@ struct GPUBatch *DRW_cache_text_surface_get(struct Object *ob);
struct GPUBatch *DRW_cache_text_edge_detection_get(Object *ob, bool *r_is_manifold);
struct GPUBatch *DRW_cache_text_loose_edges_get(struct Object *ob);
struct GPUBatch *DRW_cache_text_edge_wire_get(struct Object *ob);
-struct GPUBatch **DRW_cache_text_surface_shaded_get(
- struct Object *ob, struct GPUMaterial **gpumat_array, uint gpumat_array_len);
+struct GPUBatch **DRW_cache_text_surface_shaded_get(struct Object *ob,
+ struct GPUMaterial **gpumat_array,
+ uint gpumat_array_len);
struct GPUBatch *DRW_cache_text_face_wireframe_get(Object *ob);
/* Surface */
struct GPUBatch *DRW_cache_surf_surface_get(struct Object *ob);
struct GPUBatch *DRW_cache_surf_edge_wire_get(struct Object *ob);
struct GPUBatch *DRW_cache_surf_loose_edges_get(struct Object *ob);
-struct GPUBatch **DRW_cache_surf_surface_shaded_get(
- struct Object *ob, struct GPUMaterial **gpumat_array, uint gpumat_array_len);
+struct GPUBatch **DRW_cache_surf_surface_shaded_get(struct Object *ob,
+ struct GPUMaterial **gpumat_array,
+ uint gpumat_array_len);
struct GPUBatch *DRW_cache_surf_face_wireframe_get(Object *ob);
struct GPUBatch *DRW_cache_surf_edge_detection_get(struct Object *ob, bool *r_is_manifold);
@@ -175,21 +184,27 @@ struct GPUBatch *DRW_cache_lattice_wire_get(struct Object *ob, bool use_weight);
struct GPUBatch *DRW_cache_lattice_vert_overlay_get(struct Object *ob);
/* Particles */
-struct GPUBatch *DRW_cache_particles_get_hair(
- struct Object *object, struct ParticleSystem *psys, struct ModifierData *md);
-struct GPUBatch *DRW_cache_particles_get_dots(
- struct Object *object, struct ParticleSystem *psys);
-struct GPUBatch *DRW_cache_particles_get_edit_strands(
- struct Object *object, struct ParticleSystem *psys, struct PTCacheEdit *edit, bool use_weight);
-struct GPUBatch *DRW_cache_particles_get_edit_inner_points(
- struct Object *object, struct ParticleSystem *psys, struct PTCacheEdit *edit);
-struct GPUBatch *DRW_cache_particles_get_edit_tip_points(
- struct Object *object, struct ParticleSystem *psys, struct PTCacheEdit *edit);
+struct GPUBatch *DRW_cache_particles_get_hair(struct Object *object,
+ struct ParticleSystem *psys,
+ struct ModifierData *md);
+struct GPUBatch *DRW_cache_particles_get_dots(struct Object *object, struct ParticleSystem *psys);
+struct GPUBatch *DRW_cache_particles_get_edit_strands(struct Object *object,
+ struct ParticleSystem *psys,
+ struct PTCacheEdit *edit,
+ bool use_weight);
+struct GPUBatch *DRW_cache_particles_get_edit_inner_points(struct Object *object,
+ struct ParticleSystem *psys,
+ struct PTCacheEdit *edit);
+struct GPUBatch *DRW_cache_particles_get_edit_tip_points(struct Object *object,
+ struct ParticleSystem *psys,
+ struct PTCacheEdit *edit);
struct GPUBatch *DRW_cache_particles_get_prim(int type);
/* Metaball */
struct GPUBatch *DRW_cache_mball_surface_get(struct Object *ob);
-struct GPUBatch **DRW_cache_mball_surface_shaded_get(struct Object *ob, struct GPUMaterial **gpumat_array, uint gpumat_array_len);
+struct GPUBatch **DRW_cache_mball_surface_shaded_get(struct Object *ob,
+ struct GPUMaterial **gpumat_array,
+ uint gpumat_array_len);
struct GPUBatch *DRW_cache_mball_face_wireframe_get(Object *ob);
struct GPUBatch *DRW_cache_mball_edge_detection_get(struct Object *ob, bool *r_is_manifold);
diff --git a/source/blender/draw/intern/draw_cache_impl.h b/source/blender/draw/intern/draw_cache_impl.h
index 4e014711245..94d8a82f2e4 100644
--- a/source/blender/draw/intern/draw_cache_impl.h
+++ b/source/blender/draw/intern/draw_cache_impl.h
@@ -71,36 +71,48 @@ struct GPUBatch *DRW_curve_batch_cache_get_edit_edges(struct Curve *cu);
struct GPUBatch *DRW_curve_batch_cache_get_edit_verts(struct Curve *cu, bool handles);
struct GPUBatch *DRW_curve_batch_cache_get_triangles_with_normals(struct Curve *cu);
-struct GPUBatch **DRW_curve_batch_cache_get_surface_shaded(
- struct Curve *cu, struct GPUMaterial **gpumat_array, uint gpumat_array_len);
+struct GPUBatch **DRW_curve_batch_cache_get_surface_shaded(struct Curve *cu,
+ struct GPUMaterial **gpumat_array,
+ uint gpumat_array_len);
struct GPUBatch *DRW_curve_batch_cache_get_wireframes_face(struct Curve *cu);
/* Metaball */
struct GPUBatch *DRW_metaball_batch_cache_get_triangles_with_normals(struct Object *ob);
-struct GPUBatch **DRW_metaball_batch_cache_get_surface_shaded(
- struct Object *ob, struct MetaBall *mb, struct GPUMaterial **gpumat_array, uint gpumat_array_len);
+struct GPUBatch **DRW_metaball_batch_cache_get_surface_shaded(struct Object *ob,
+ struct MetaBall *mb,
+ struct GPUMaterial **gpumat_array,
+ uint gpumat_array_len);
struct GPUBatch *DRW_metaball_batch_cache_get_wireframes_face(struct Object *ob);
-struct GPUBatch *DRW_metaball_batch_cache_get_edge_detection(struct Object *ob, bool *r_is_manifold);
+struct GPUBatch *DRW_metaball_batch_cache_get_edge_detection(struct Object *ob,
+ bool *r_is_manifold);
/* DispList */
void DRW_displist_vertbuf_create_pos_and_nor(struct ListBase *lb, struct GPUVertBuf *vbo);
void DRW_displist_vertbuf_create_wiredata(struct ListBase *lb, struct GPUVertBuf *vbo);
-void DRW_displist_vertbuf_create_loop_pos_and_nor_and_uv(
- struct ListBase *lb, struct GPUVertBuf *vbo_pos_nor, struct GPUVertBuf *vbo_uv);
+void DRW_displist_vertbuf_create_loop_pos_and_nor_and_uv(struct ListBase *lb,
+ struct GPUVertBuf *vbo_pos_nor,
+ struct GPUVertBuf *vbo_uv);
void DRW_displist_indexbuf_create_lines_in_order(struct ListBase *lb, struct GPUIndexBuf *ibo);
void DRW_displist_indexbuf_create_triangles_in_order(struct ListBase *lb, struct GPUIndexBuf *ibo);
-void DRW_displist_indexbuf_create_triangles_loop_split_by_material(
- struct ListBase *lb, struct GPUIndexBuf **ibo_mat, uint mat_len);
-void DRW_displist_indexbuf_create_edges_adjacency_lines(struct ListBase *lb, struct GPUIndexBuf *ibo, bool *r_is_manifold);
+void DRW_displist_indexbuf_create_triangles_loop_split_by_material(struct ListBase *lb,
+ struct GPUIndexBuf **ibo_mat,
+ uint mat_len);
+void DRW_displist_indexbuf_create_edges_adjacency_lines(struct ListBase *lb,
+ struct GPUIndexBuf *ibo,
+ bool *r_is_manifold);
/* Lattice */
-struct GPUBatch *DRW_lattice_batch_cache_get_all_edges(struct Lattice *lt, bool use_weight, const int actdef);
+struct GPUBatch *DRW_lattice_batch_cache_get_all_edges(struct Lattice *lt,
+ bool use_weight,
+ const int actdef);
struct GPUBatch *DRW_lattice_batch_cache_get_all_verts(struct Lattice *lt);
struct GPUBatch *DRW_lattice_batch_cache_get_edit_verts(struct Lattice *lt);
/* Mesh */
-void DRW_mesh_batch_cache_create_requested(
- struct Object *ob, struct Mesh *me,
- const struct ToolSettings *ts, const bool is_paint_mode, const bool use_hide);
+void DRW_mesh_batch_cache_create_requested(struct Object *ob,
+ struct Mesh *me,
+ const struct ToolSettings *ts,
+ const bool is_paint_mode,
+ const bool use_hide);
struct GPUBatch *DRW_mesh_batch_cache_get_all_verts(struct Mesh *me);
struct GPUBatch *DRW_mesh_batch_cache_get_all_edges(struct Mesh *me);
@@ -108,9 +120,12 @@ struct GPUBatch *DRW_mesh_batch_cache_get_loose_edges(struct Mesh *me);
struct GPUBatch *DRW_mesh_batch_cache_get_edge_detection(struct Mesh *me, bool *r_is_manifold);
struct GPUBatch *DRW_mesh_batch_cache_get_surface(struct Mesh *me);
struct GPUBatch *DRW_mesh_batch_cache_get_surface_edges(struct Mesh *me);
-struct GPUBatch **DRW_mesh_batch_cache_get_surface_shaded(
- struct Mesh *me, struct GPUMaterial **gpumat_array, uint gpumat_array_len,
- char **auto_layer_names, int **auto_layer_is_srgb, int *auto_layer_count);
+struct GPUBatch **DRW_mesh_batch_cache_get_surface_shaded(struct Mesh *me,
+ struct GPUMaterial **gpumat_array,
+ uint gpumat_array_len,
+ char **auto_layer_names,
+ int **auto_layer_is_srgb,
+ int *auto_layer_count);
struct GPUBatch **DRW_mesh_batch_cache_get_surface_texpaint(struct Mesh *me);
struct GPUBatch *DRW_mesh_batch_cache_get_surface_texpaint_single(struct Mesh *me);
struct GPUBatch *DRW_mesh_batch_cache_get_surface_vertpaint(struct Mesh *me);
@@ -142,45 +157,52 @@ void DRW_mesh_cache_sculpt_coords_ensure(struct Mesh *me);
/* Edit mesh bitflags (is this the right place?) */
enum {
- VFLAG_VERT_ACTIVE = 1 << 0,
- VFLAG_VERT_SELECTED = 1 << 1,
- VFLAG_EDGE_ACTIVE = 1 << 2,
- VFLAG_EDGE_SELECTED = 1 << 3,
- VFLAG_EDGE_SEAM = 1 << 4,
- VFLAG_EDGE_SHARP = 1 << 5,
- VFLAG_EDGE_FREESTYLE = 1 << 6,
- /* Beware to not go over 1 << 7 (it's a byte flag)
- * (see gpu_shader_edit_mesh_overlay_geom.glsl) */
+ VFLAG_VERT_ACTIVE = 1 << 0,
+ VFLAG_VERT_SELECTED = 1 << 1,
+ VFLAG_EDGE_ACTIVE = 1 << 2,
+ VFLAG_EDGE_SELECTED = 1 << 3,
+ VFLAG_EDGE_SEAM = 1 << 4,
+ VFLAG_EDGE_SHARP = 1 << 5,
+ VFLAG_EDGE_FREESTYLE = 1 << 6,
+  /* Beware not to go over 1 << 7 (it's a byte flag)
+ * (see gpu_shader_edit_mesh_overlay_geom.glsl) */
};
enum {
- VFLAG_FACE_ACTIVE = 1 << 0,
- VFLAG_FACE_SELECTED = 1 << 1,
- VFLAG_FACE_FREESTYLE = 1 << 2,
- VFLAG_VERT_UV_SELECT = 1 << 3,
- VFLAG_VERT_UV_PINNED = 1 << 4,
- VFLAG_EDGE_UV_SELECT = 1 << 5,
- VFLAG_FACE_UV_ACTIVE = 1 << 6,
- VFLAG_FACE_UV_SELECT = 1 << 7,
- /* Beware to not go over 1 << 7 (it's a byte flag)
- * (see gpu_shader_edit_mesh_overlay_geom.glsl) */
+ VFLAG_FACE_ACTIVE = 1 << 0,
+ VFLAG_FACE_SELECTED = 1 << 1,
+ VFLAG_FACE_FREESTYLE = 1 << 2,
+ VFLAG_VERT_UV_SELECT = 1 << 3,
+ VFLAG_VERT_UV_PINNED = 1 << 4,
+ VFLAG_EDGE_UV_SELECT = 1 << 5,
+ VFLAG_FACE_UV_ACTIVE = 1 << 6,
+ VFLAG_FACE_UV_SELECT = 1 << 7,
+  /* Beware not to go over 1 << 7 (it's a byte flag)
+ * (see gpu_shader_edit_mesh_overlay_geom.glsl) */
};
/* Particles */
-struct GPUBatch *DRW_particles_batch_cache_get_hair(
- struct Object *object, struct ParticleSystem *psys, struct ModifierData *md);
-struct GPUBatch *DRW_particles_batch_cache_get_dots(
- struct Object *object, struct ParticleSystem *psys);
-struct GPUBatch *DRW_particles_batch_cache_get_edit_strands(
- struct Object *object, struct ParticleSystem *psys, struct PTCacheEdit *edit, bool use_weight);
-struct GPUBatch *DRW_particles_batch_cache_get_edit_inner_points(
- struct Object *object, struct ParticleSystem *psys, struct PTCacheEdit *edit);
-struct GPUBatch *DRW_particles_batch_cache_get_edit_tip_points(
- struct Object *object, struct ParticleSystem *psys, struct PTCacheEdit *edit);
+struct GPUBatch *DRW_particles_batch_cache_get_hair(struct Object *object,
+ struct ParticleSystem *psys,
+ struct ModifierData *md);
+struct GPUBatch *DRW_particles_batch_cache_get_dots(struct Object *object,
+ struct ParticleSystem *psys);
+struct GPUBatch *DRW_particles_batch_cache_get_edit_strands(struct Object *object,
+ struct ParticleSystem *psys,
+ struct PTCacheEdit *edit,
+ bool use_weight);
+struct GPUBatch *DRW_particles_batch_cache_get_edit_inner_points(struct Object *object,
+ struct ParticleSystem *psys,
+ struct PTCacheEdit *edit);
+struct GPUBatch *DRW_particles_batch_cache_get_edit_tip_points(struct Object *object,
+ struct ParticleSystem *psys,
+ struct PTCacheEdit *edit);
/* Common */
-#define DRW_ADD_FLAG_FROM_VBO_REQUEST(flag, vbo, value) (flag |= DRW_vbo_requested(vbo) ? (value) : 0)
-#define DRW_ADD_FLAG_FROM_IBO_REQUEST(flag, ibo, value) (flag |= DRW_ibo_requested(ibo) ? (value) : 0)
+#define DRW_ADD_FLAG_FROM_VBO_REQUEST(flag, vbo, value) \
+ (flag |= DRW_vbo_requested(vbo) ? (value) : 0)
+#define DRW_ADD_FLAG_FROM_IBO_REQUEST(flag, ibo, value) \
+ (flag |= DRW_ibo_requested(ibo) ? (value) : 0)
/* Test and assign NULL if test fails */
#define DRW_TEST_ASSIGN_VBO(v) (v = (DRW_vbo_requested(v) ? (v) : NULL))
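
Aside: the two request macros above (plus DRW_TEST_ASSIGN_VBO) appear to be how extraction code turns outstanding VBO/IBO requests into a work bitmask. A hedged sketch of typical use, with hypothetical flag and buffer names:

    enum { EX_BUILD_POS_NOR = 1 << 0, EX_BUILD_UV = 1 << 1 };

    static int example_gather_requests(struct GPUVertBuf *vbo_pos_nor,
                                       struct GPUVertBuf *vbo_uv)
    {
      int flag = 0;
      /* Each macro ORs in its flag only if that buffer was requested. */
      DRW_ADD_FLAG_FROM_VBO_REQUEST(flag, vbo_pos_nor, EX_BUILD_POS_NOR);
      DRW_ADD_FLAG_FROM_VBO_REQUEST(flag, vbo_uv, EX_BUILD_UV);
      return flag;
    }
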
diff --git a/source/blender/draw/intern/draw_cache_impl_curve.c b/source/blender/draw/intern/draw_cache_impl_curve.c
index eecb6c3dd6a..8e7a2253e21 100644
--- a/source/blender/draw/intern/draw_cache_impl_curve.c
+++ b/source/blender/draw/intern/draw_cache_impl_curve.c
@@ -42,17 +42,17 @@
#include "DRW_render.h"
-#include "draw_cache_impl.h" /* own include */
+#include "draw_cache_impl.h" /* own include */
-#define SELECT 1
-#define ACTIVE_NURB 1 << 2
-#define EVEN_U_BIT 1 << 3 /* Alternate this bit for every U vert. */
+#define SELECT 1
+#define ACTIVE_NURB 1 << 2
+#define EVEN_U_BIT 1 << 3 /* Alternate this bit for every U vert. */
/* Used as values of `color_id` in `edit_curve_overlay_handle_geom.glsl` */
enum {
- COLOR_NURB_ULINE_ID = TH_HANDLE_AUTOCLAMP - TH_HANDLE_FREE + 2,
+ COLOR_NURB_ULINE_ID = TH_HANDLE_AUTOCLAMP - TH_HANDLE_FREE + 2,
- TOT_HANDLE_COL,
+ TOT_HANDLE_COL,
};
/**
@@ -67,474 +67,480 @@ static void curve_batch_cache_clear(Curve *cu);
/* ---------------------------------------------------------------------- */
/* Curve Interface, direct access to basic data. */
-static void curve_render_overlay_verts_edges_len_get(
- ListBase *lb, int *r_vert_len, int *r_edge_len)
+static void curve_render_overlay_verts_edges_len_get(ListBase *lb,
+ int *r_vert_len,
+ int *r_edge_len)
{
- BLI_assert(r_vert_len || r_edge_len);
- int vert_len = 0;
- int edge_len = 0;
- for (Nurb *nu = lb->first; nu; nu = nu->next) {
- if (nu->bezt) {
- vert_len += nu->pntsu * 3;
- /* 2x handles per point*/
- edge_len += 2 * nu->pntsu;
- }
- else if (nu->bp) {
- vert_len += nu->pntsu * nu->pntsv;
- /* segments between points */
- edge_len += (nu->pntsu - 1) * nu->pntsv;
- edge_len += (nu->pntsv - 1) * nu->pntsu;
- }
- }
- if (r_vert_len) {
- *r_vert_len = vert_len;
- }
- if (r_edge_len) {
- *r_edge_len = edge_len;
- }
+ BLI_assert(r_vert_len || r_edge_len);
+ int vert_len = 0;
+ int edge_len = 0;
+ for (Nurb *nu = lb->first; nu; nu = nu->next) {
+ if (nu->bezt) {
+ vert_len += nu->pntsu * 3;
+      /* 2x handles per point */
+ edge_len += 2 * nu->pntsu;
+ }
+ else if (nu->bp) {
+ vert_len += nu->pntsu * nu->pntsv;
+ /* segments between points */
+ edge_len += (nu->pntsu - 1) * nu->pntsv;
+ edge_len += (nu->pntsv - 1) * nu->pntsu;
+ }
+ }
+ if (r_vert_len) {
+ *r_vert_len = vert_len;
+ }
+ if (r_edge_len) {
+ *r_edge_len = edge_len;
+ }
}
-static void curve_render_wire_verts_edges_len_get(
- const CurveCache *ob_curve_cache,
- int *r_curve_len, int *r_vert_len, int *r_edge_len)
+static void curve_render_wire_verts_edges_len_get(const CurveCache *ob_curve_cache,
+ int *r_curve_len,
+ int *r_vert_len,
+ int *r_edge_len)
{
- BLI_assert(r_vert_len || r_edge_len);
- int vert_len = 0;
- int edge_len = 0;
- int curve_len = 0;
- for (const BevList *bl = ob_curve_cache->bev.first; bl; bl = bl->next) {
- if (bl->nr > 0) {
- const bool is_cyclic = bl->poly != -1;
- edge_len += (is_cyclic) ? bl->nr : bl->nr - 1;
- vert_len += bl->nr;
- curve_len += 1;
- }
- }
- for (const DispList *dl = ob_curve_cache->disp.first; dl; dl = dl->next) {
- if (ELEM(dl->type, DL_SEGM, DL_POLY)) {
- BLI_assert(dl->parts == 1);
- const bool is_cyclic = dl->type == DL_POLY;
- edge_len += (is_cyclic) ? dl->nr : dl->nr - 1;
- vert_len += dl->nr;
- curve_len += 1;
- }
- }
- if (r_vert_len) {
- *r_vert_len = vert_len;
- }
- if (r_edge_len) {
- *r_edge_len = edge_len;
- }
- if (r_curve_len) {
- *r_curve_len = curve_len;
- }
+ BLI_assert(r_vert_len || r_edge_len);
+ int vert_len = 0;
+ int edge_len = 0;
+ int curve_len = 0;
+ for (const BevList *bl = ob_curve_cache->bev.first; bl; bl = bl->next) {
+ if (bl->nr > 0) {
+ const bool is_cyclic = bl->poly != -1;
+ edge_len += (is_cyclic) ? bl->nr : bl->nr - 1;
+ vert_len += bl->nr;
+ curve_len += 1;
+ }
+ }
+ for (const DispList *dl = ob_curve_cache->disp.first; dl; dl = dl->next) {
+ if (ELEM(dl->type, DL_SEGM, DL_POLY)) {
+ BLI_assert(dl->parts == 1);
+ const bool is_cyclic = dl->type == DL_POLY;
+ edge_len += (is_cyclic) ? dl->nr : dl->nr - 1;
+ vert_len += dl->nr;
+ curve_len += 1;
+ }
+ }
+ if (r_vert_len) {
+ *r_vert_len = vert_len;
+ }
+ if (r_edge_len) {
+ *r_edge_len = edge_len;
+ }
+ if (r_curve_len) {
+ *r_curve_len = curve_len;
+ }
}
static int curve_render_normal_len_get(const ListBase *lb, const CurveCache *ob_curve_cache)
{
- int normal_len = 0;
- const BevList *bl;
- const Nurb *nu;
- for (bl = ob_curve_cache->bev.first, nu = lb->first; nu && bl; bl = bl->next, nu = nu->next) {
- int nr = bl->nr;
- int skip = nu->resolu / 16;
+ int normal_len = 0;
+ const BevList *bl;
+ const Nurb *nu;
+ for (bl = ob_curve_cache->bev.first, nu = lb->first; nu && bl; bl = bl->next, nu = nu->next) {
+ int nr = bl->nr;
+ int skip = nu->resolu / 16;
#if 0
- while (nr-- > 0) { /* accounts for empty bevel lists */
- normal_len += 1;
- nr -= skip;
- }
+ while (nr-- > 0) { /* accounts for empty bevel lists */
+ normal_len += 1;
+ nr -= skip;
+ }
#else
- /* Same as loop above */
- normal_len += (nr / (skip + 1)) + ((nr % (skip + 1)) != 0);
+ /* Same as loop above */
+ normal_len += (nr / (skip + 1)) + ((nr % (skip + 1)) != 0);
#endif
- }
- return normal_len;
+ }
+ return normal_len;
}
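
Aside: the closed form above is a ceiling division, counting one normal per (skip + 1) bevel points: for example, with nr = 10 and skip = 3 it gives 10 / 4 + (10 % 4 != 0) = 2 + 1 = 3, matching the loop kept in the #if 0 branch.
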
/* ---------------------------------------------------------------------- */
/* Curve Interface, indirect, partially cached access to complex data. */
typedef struct CurveRenderData {
- int types;
-
- struct {
- int vert_len;
- int edge_len;
- } overlay;
-
- struct {
- int curve_len;
- int vert_len;
- int edge_len;
- } wire;
-
- /* edit mode normal's */
- struct {
- /* 'edge_len == len * 2'
- * 'vert_len == len * 3' */
- int len;
- } normal;
-
- struct {
- EditFont *edit_font;
- } text;
-
- /* borrow from 'Object' */
- CurveCache *ob_curve_cache;
-
- /* borrow from 'Curve' */
- ListBase *nurbs;
-
- /* edit, index in nurb list */
- int actnu;
- /* edit, index in active nurb (BPoint or BezTriple) */
- int actvert;
+ int types;
+
+ struct {
+ int vert_len;
+ int edge_len;
+ } overlay;
+
+ struct {
+ int curve_len;
+ int vert_len;
+ int edge_len;
+ } wire;
+
+  /* edit-mode normals */
+ struct {
+ /* 'edge_len == len * 2'
+ * 'vert_len == len * 3' */
+ int len;
+ } normal;
+
+ struct {
+ EditFont *edit_font;
+ } text;
+
+ /* borrow from 'Object' */
+ CurveCache *ob_curve_cache;
+
+ /* borrow from 'Curve' */
+ ListBase *nurbs;
+
+ /* edit, index in nurb list */
+ int actnu;
+ /* edit, index in active nurb (BPoint or BezTriple) */
+ int actvert;
} CurveRenderData;
enum {
- /* Wire center-line */
- CU_DATATYPE_WIRE = 1 << 0,
- /* Edit-mode verts and optionally handles */
- CU_DATATYPE_OVERLAY = 1 << 1,
- /* Edit-mode normals */
- CU_DATATYPE_NORMAL = 1 << 2,
- /* Geometry */
- CU_DATATYPE_SURFACE = 1 << 3,
- /* Text */
- CU_DATATYPE_TEXT_SELECT = 1 << 4,
+ /* Wire center-line */
+ CU_DATATYPE_WIRE = 1 << 0,
+ /* Edit-mode verts and optionally handles */
+ CU_DATATYPE_OVERLAY = 1 << 1,
+ /* Edit-mode normals */
+ CU_DATATYPE_NORMAL = 1 << 2,
+ /* Geometry */
+ CU_DATATYPE_SURFACE = 1 << 3,
+ /* Text */
+ CU_DATATYPE_TEXT_SELECT = 1 << 4,
};
/*
* ob_curve_cache can be NULL, only needed for CU_DATATYPE_WIRE
*/
-static CurveRenderData *curve_render_data_create(Curve *cu, CurveCache *ob_curve_cache, const int types)
+static CurveRenderData *curve_render_data_create(Curve *cu,
+ CurveCache *ob_curve_cache,
+ const int types)
{
- CurveRenderData *rdata = MEM_callocN(sizeof(*rdata), __func__);
- rdata->types = types;
- ListBase *nurbs;
-
- rdata->actnu = cu->actnu;
- rdata->actvert = cu->actvert;
-
- rdata->ob_curve_cache = ob_curve_cache;
-
- if (types & CU_DATATYPE_WIRE) {
- curve_render_wire_verts_edges_len_get(
- rdata->ob_curve_cache,
- &rdata->wire.curve_len, &rdata->wire.vert_len, &rdata->wire.edge_len);
- }
-
- if (cu->editnurb) {
- EditNurb *editnurb = cu->editnurb;
- nurbs = &editnurb->nurbs;
-
- if (types & CU_DATATYPE_OVERLAY) {
- curve_render_overlay_verts_edges_len_get(
- nurbs,
- &rdata->overlay.vert_len,
- &rdata->overlay.edge_len);
-
- rdata->actnu = cu->actnu;
- rdata->actvert = cu->actvert;
- }
- if (types & CU_DATATYPE_NORMAL) {
- rdata->normal.len = curve_render_normal_len_get(nurbs, rdata->ob_curve_cache);
- }
- }
- else {
- nurbs = &cu->nurb;
- }
-
- rdata->nurbs = nurbs;
-
- rdata->text.edit_font = cu->editfont;
-
- return rdata;
+ CurveRenderData *rdata = MEM_callocN(sizeof(*rdata), __func__);
+ rdata->types = types;
+ ListBase *nurbs;
+
+ rdata->actnu = cu->actnu;
+ rdata->actvert = cu->actvert;
+
+ rdata->ob_curve_cache = ob_curve_cache;
+
+ if (types & CU_DATATYPE_WIRE) {
+ curve_render_wire_verts_edges_len_get(rdata->ob_curve_cache,
+ &rdata->wire.curve_len,
+ &rdata->wire.vert_len,
+ &rdata->wire.edge_len);
+ }
+
+ if (cu->editnurb) {
+ EditNurb *editnurb = cu->editnurb;
+ nurbs = &editnurb->nurbs;
+
+ if (types & CU_DATATYPE_OVERLAY) {
+ curve_render_overlay_verts_edges_len_get(
+ nurbs, &rdata->overlay.vert_len, &rdata->overlay.edge_len);
+
+ rdata->actnu = cu->actnu;
+ rdata->actvert = cu->actvert;
+ }
+ if (types & CU_DATATYPE_NORMAL) {
+ rdata->normal.len = curve_render_normal_len_get(nurbs, rdata->ob_curve_cache);
+ }
+ }
+ else {
+ nurbs = &cu->nurb;
+ }
+
+ rdata->nurbs = nurbs;
+
+ rdata->text.edit_font = cu->editfont;
+
+ return rdata;
}
static void curve_render_data_free(CurveRenderData *rdata)
{
#if 0
- if (rdata->loose_verts) {
- MEM_freeN(rdata->loose_verts);
- }
+ if (rdata->loose_verts) {
+ MEM_freeN(rdata->loose_verts);
+ }
#endif
- MEM_freeN(rdata);
+ MEM_freeN(rdata);
}
static int curve_render_data_overlay_verts_len_get(const CurveRenderData *rdata)
{
- BLI_assert(rdata->types & CU_DATATYPE_OVERLAY);
- return rdata->overlay.vert_len;
+ BLI_assert(rdata->types & CU_DATATYPE_OVERLAY);
+ return rdata->overlay.vert_len;
}
static int curve_render_data_overlay_edges_len_get(const CurveRenderData *rdata)
{
- BLI_assert(rdata->types & CU_DATATYPE_OVERLAY);
- return rdata->overlay.edge_len;
+ BLI_assert(rdata->types & CU_DATATYPE_OVERLAY);
+ return rdata->overlay.edge_len;
}
static int curve_render_data_wire_verts_len_get(const CurveRenderData *rdata)
{
- BLI_assert(rdata->types & CU_DATATYPE_WIRE);
- return rdata->wire.vert_len;
+ BLI_assert(rdata->types & CU_DATATYPE_WIRE);
+ return rdata->wire.vert_len;
}
static int curve_render_data_wire_edges_len_get(const CurveRenderData *rdata)
{
- BLI_assert(rdata->types & CU_DATATYPE_WIRE);
- return rdata->wire.edge_len;
+ BLI_assert(rdata->types & CU_DATATYPE_WIRE);
+ return rdata->wire.edge_len;
}
static int curve_render_data_wire_curve_len_get(const CurveRenderData *rdata)
{
- BLI_assert(rdata->types & CU_DATATYPE_WIRE);
- return rdata->wire.curve_len;
+ BLI_assert(rdata->types & CU_DATATYPE_WIRE);
+ return rdata->wire.curve_len;
}
static int curve_render_data_normal_len_get(const CurveRenderData *rdata)
{
- BLI_assert(rdata->types & CU_DATATYPE_NORMAL);
- return rdata->normal.len;
+ BLI_assert(rdata->types & CU_DATATYPE_NORMAL);
+ return rdata->normal.len;
}
-static void curve_cd_calc_used_gpu_layers(int *cd_layers, struct GPUMaterial **gpumat_array, int gpumat_array_len)
+static void curve_cd_calc_used_gpu_layers(int *cd_layers,
+ struct GPUMaterial **gpumat_array,
+ int gpumat_array_len)
{
- GPUVertAttrLayers gpu_attrs = {{{0}}};
- for (int i = 0; i < gpumat_array_len; i++) {
- struct GPUMaterial *gpumat = gpumat_array[i];
- if (gpumat == NULL) {
- continue;
- }
- GPU_material_vertex_attrs(gpumat, &gpu_attrs);
- for (int j = 0; j < gpu_attrs.totlayer; j++) {
- const char *name = gpu_attrs.layer[j].name;
- int type = gpu_attrs.layer[j].type;
-
- /* Curves cannot have named layers.
- * Note: We could relax this assumption later. */
- if (name[0] != '\0') {
- continue;
- }
-
- if (type == CD_AUTO_FROM_NAME) {
- type = CD_MTFACE;
- }
-
- switch (type) {
- case CD_MTFACE:
- *cd_layers |= CD_MLOOPUV;
- break;
- case CD_TANGENT:
- /* Currently unsupported */
- // *cd_layers |= CD_TANGENT;
- break;
- case CD_MCOL:
- /* Curve objects don't have color data. */
- break;
- case CD_ORCO:
- *cd_layers |= CD_ORCO;
- break;
- }
- }
- }
+ GPUVertAttrLayers gpu_attrs = {{{0}}};
+ for (int i = 0; i < gpumat_array_len; i++) {
+ struct GPUMaterial *gpumat = gpumat_array[i];
+ if (gpumat == NULL) {
+ continue;
+ }
+ GPU_material_vertex_attrs(gpumat, &gpu_attrs);
+ for (int j = 0; j < gpu_attrs.totlayer; j++) {
+ const char *name = gpu_attrs.layer[j].name;
+ int type = gpu_attrs.layer[j].type;
+
+ /* Curves cannot have named layers.
+ * Note: We could relax this assumption later. */
+ if (name[0] != '\0') {
+ continue;
+ }
+
+ if (type == CD_AUTO_FROM_NAME) {
+ type = CD_MTFACE;
+ }
+
+ switch (type) {
+ case CD_MTFACE:
+ *cd_layers |= CD_MLOOPUV;
+ break;
+ case CD_TANGENT:
+ /* Currently unsupported */
+ // *cd_layers |= CD_TANGENT;
+ break;
+ case CD_MCOL:
+ /* Curve objects don't have color data. */
+ break;
+ case CD_ORCO:
+ *cd_layers |= CD_ORCO;
+ break;
+ }
+ }
+ }
}
/* ---------------------------------------------------------------------- */
/* Curve GPUBatch Cache */
typedef struct CurveBatchCache {
- struct {
- GPUVertBuf *pos_nor;
- GPUVertBuf *edge_fac;
- GPUVertBuf *curves_pos;
-
- GPUVertBuf *loop_pos_nor;
- GPUVertBuf *loop_uv;
- } ordered;
-
- struct {
- /* Curve points. Aligned with ordered.pos_nor */
- GPUVertBuf *curves_nor;
- GPUVertBuf *curves_weight; /* TODO. */
- /* Edit points (beztriples and bpoints) */
- GPUVertBuf *pos;
- GPUVertBuf *data;
- } edit;
-
- struct {
- GPUIndexBuf *surfaces_tris;
- GPUIndexBuf *surfaces_lines;
- GPUIndexBuf *curves_lines;
- GPUIndexBuf *edges_adj_lines;
- /* Edit mode */
- GPUIndexBuf *edit_verts_points; /* Only control points. Not handles. */
- GPUIndexBuf *edit_lines;
- } ibo;
-
- struct {
- GPUBatch *surfaces;
- GPUBatch *surfaces_edges;
- GPUBatch *curves;
- /* control handles and vertices */
- GPUBatch *edit_edges;
- GPUBatch *edit_verts;
- GPUBatch *edit_handles_verts;
- GPUBatch *edit_normals;
- GPUBatch *edge_detection;
- } batch;
-
- GPUIndexBuf **surf_per_mat_tris;
- GPUBatch **surf_per_mat;
- int mat_len;
- int cd_used, cd_needed;
-
- /* settings to determine if cache is invalid */
- bool is_dirty;
- bool is_editmode;
-
- /* Valid only if edge_detection is up to date. */
- bool is_manifold;
+ struct {
+ GPUVertBuf *pos_nor;
+ GPUVertBuf *edge_fac;
+ GPUVertBuf *curves_pos;
+
+ GPUVertBuf *loop_pos_nor;
+ GPUVertBuf *loop_uv;
+ } ordered;
+
+ struct {
+ /* Curve points. Aligned with ordered.pos_nor */
+ GPUVertBuf *curves_nor;
+ GPUVertBuf *curves_weight; /* TODO. */
+ /* Edit points (beztriples and bpoints) */
+ GPUVertBuf *pos;
+ GPUVertBuf *data;
+ } edit;
+
+ struct {
+ GPUIndexBuf *surfaces_tris;
+ GPUIndexBuf *surfaces_lines;
+ GPUIndexBuf *curves_lines;
+ GPUIndexBuf *edges_adj_lines;
+ /* Edit mode */
+ GPUIndexBuf *edit_verts_points; /* Only control points. Not handles. */
+ GPUIndexBuf *edit_lines;
+ } ibo;
+
+ struct {
+ GPUBatch *surfaces;
+ GPUBatch *surfaces_edges;
+ GPUBatch *curves;
+ /* control handles and vertices */
+ GPUBatch *edit_edges;
+ GPUBatch *edit_verts;
+ GPUBatch *edit_handles_verts;
+ GPUBatch *edit_normals;
+ GPUBatch *edge_detection;
+ } batch;
+
+ GPUIndexBuf **surf_per_mat_tris;
+ GPUBatch **surf_per_mat;
+ int mat_len;
+ int cd_used, cd_needed;
+
+ /* settings to determine if cache is invalid */
+ bool is_dirty;
+ bool is_editmode;
+
+ /* Valid only if edge_detection is up to date. */
+ bool is_manifold;
} CurveBatchCache;
/* GPUBatch cache management. */
static bool curve_batch_cache_valid(Curve *cu)
{
- CurveBatchCache *cache = cu->batch_cache;
+ CurveBatchCache *cache = cu->batch_cache;
- if (cache == NULL) {
- return false;
- }
+ if (cache == NULL) {
+ return false;
+ }
- if (cache->mat_len != max_ii(1, cu->totcol)) {
- return false;
- }
+ if (cache->mat_len != max_ii(1, cu->totcol)) {
+ return false;
+ }
- if (cache->is_dirty) {
- return false;
- }
+ if (cache->is_dirty) {
+ return false;
+ }
- if (cache->is_editmode != ((cu->editnurb != NULL) || (cu->editfont != NULL))) {
- return false;
- }
+ if (cache->is_editmode != ((cu->editnurb != NULL) || (cu->editfont != NULL))) {
+ return false;
+ }
- if (cache->is_editmode) {
- if (cu->editfont) {
- /* TODO */
- }
- }
+ if (cache->is_editmode) {
+ if (cu->editfont) {
+ /* TODO */
+ }
+ }
- return true;
+ return true;
}
static void curve_batch_cache_init(Curve *cu)
{
- CurveBatchCache *cache = cu->batch_cache;
+ CurveBatchCache *cache = cu->batch_cache;
- if (!cache) {
- cache = cu->batch_cache = MEM_callocN(sizeof(*cache), __func__);
- }
- else {
- memset(cache, 0, sizeof(*cache));
- }
+ if (!cache) {
+ cache = cu->batch_cache = MEM_callocN(sizeof(*cache), __func__);
+ }
+ else {
+ memset(cache, 0, sizeof(*cache));
+ }
#if 0
- ListBase *nurbs;
- if (cu->editnurb) {
- EditNurb *editnurb = cu->editnurb;
- nurbs = &editnurb->nurbs;
- }
- else {
- nurbs = &cu->nurb;
- }
+ ListBase *nurbs;
+ if (cu->editnurb) {
+ EditNurb *editnurb = cu->editnurb;
+ nurbs = &editnurb->nurbs;
+ }
+ else {
+ nurbs = &cu->nurb;
+ }
#endif
- cache->cd_used = 0;
- cache->mat_len = max_ii(1, cu->totcol);
- cache->surf_per_mat_tris = MEM_mallocN(sizeof(*cache->surf_per_mat_tris) * cache->mat_len, __func__);
- cache->surf_per_mat = MEM_mallocN(sizeof(*cache->surf_per_mat) * cache->mat_len, __func__);
+ cache->cd_used = 0;
+ cache->mat_len = max_ii(1, cu->totcol);
+ cache->surf_per_mat_tris = MEM_mallocN(sizeof(*cache->surf_per_mat_tris) * cache->mat_len,
+ __func__);
+ cache->surf_per_mat = MEM_mallocN(sizeof(*cache->surf_per_mat) * cache->mat_len, __func__);
- /* TODO Might be wiser to alloc in one chunk. */
- for (int i = 0; i < cache->mat_len; ++i) {
- cache->surf_per_mat_tris[i] = MEM_callocN(sizeof(GPUIndexBuf), "GPUIndexBuf");
- cache->surf_per_mat[i] = MEM_callocN(sizeof(GPUBatch), "GPUBatch");
- }
+ /* TODO Might be wiser to alloc in one chunk. */
+ for (int i = 0; i < cache->mat_len; ++i) {
+ cache->surf_per_mat_tris[i] = MEM_callocN(sizeof(GPUIndexBuf), "GPUIndexBuf");
+ cache->surf_per_mat[i] = MEM_callocN(sizeof(GPUBatch), "GPUBatch");
+ }
- cache->is_editmode = (cu->editnurb != NULL) || (cu->editfont != NULL);
+ cache->is_editmode = (cu->editnurb != NULL) || (cu->editfont != NULL);
- cache->is_dirty = false;
+ cache->is_dirty = false;
}
static CurveBatchCache *curve_batch_cache_get(Curve *cu)
{
- if (!curve_batch_cache_valid(cu)) {
- curve_batch_cache_clear(cu);
- curve_batch_cache_init(cu);
- }
- return cu->batch_cache;
+ if (!curve_batch_cache_valid(cu)) {
+ curve_batch_cache_clear(cu);
+ curve_batch_cache_init(cu);
+ }
+ return cu->batch_cache;
}
void DRW_curve_batch_cache_dirty_tag(Curve *cu, int mode)
{
- CurveBatchCache *cache = cu->batch_cache;
- if (cache == NULL) {
- return;
- }
- switch (mode) {
- case BKE_CURVE_BATCH_DIRTY_ALL:
- cache->is_dirty = true;
- break;
- case BKE_CURVE_BATCH_DIRTY_SELECT:
- GPU_VERTBUF_DISCARD_SAFE(cache->edit.data);
-
- GPU_BATCH_DISCARD_SAFE(cache->batch.edit_edges);
- GPU_BATCH_DISCARD_SAFE(cache->batch.edit_verts);
- GPU_BATCH_DISCARD_SAFE(cache->batch.edit_handles_verts);
- break;
- default:
- BLI_assert(0);
- }
+ CurveBatchCache *cache = cu->batch_cache;
+ if (cache == NULL) {
+ return;
+ }
+ switch (mode) {
+ case BKE_CURVE_BATCH_DIRTY_ALL:
+ cache->is_dirty = true;
+ break;
+ case BKE_CURVE_BATCH_DIRTY_SELECT:
+ GPU_VERTBUF_DISCARD_SAFE(cache->edit.data);
+
+ GPU_BATCH_DISCARD_SAFE(cache->batch.edit_edges);
+ GPU_BATCH_DISCARD_SAFE(cache->batch.edit_verts);
+ GPU_BATCH_DISCARD_SAFE(cache->batch.edit_handles_verts);
+ break;
+ default:
+ BLI_assert(0);
+ }
}
static void curve_batch_cache_clear(Curve *cu)
{
- CurveBatchCache *cache = cu->batch_cache;
- if (!cache) {
- return;
- }
-
- for (int i = 0; i < sizeof(cache->ordered) / sizeof(void *); ++i) {
- GPUVertBuf **vbo = (GPUVertBuf **)&cache->ordered;
- GPU_VERTBUF_DISCARD_SAFE(vbo[i]);
- }
- for (int i = 0; i < sizeof(cache->edit) / sizeof(void *); ++i) {
- GPUVertBuf **vbo = (GPUVertBuf **)&cache->edit;
- GPU_VERTBUF_DISCARD_SAFE(vbo[i]);
- }
- for (int i = 0; i < sizeof(cache->ibo) / sizeof(void *); ++i) {
- GPUIndexBuf **ibo = (GPUIndexBuf **)&cache->ibo;
- GPU_INDEXBUF_DISCARD_SAFE(ibo[i]);
- }
- for (int i = 0; i < sizeof(cache->batch) / sizeof(void *); ++i) {
- GPUBatch **batch = (GPUBatch **)&cache->batch;
- GPU_BATCH_DISCARD_SAFE(batch[i]);
- }
-
- for (int i = 0; i < cache->mat_len; ++i) {
- GPU_INDEXBUF_DISCARD_SAFE(cache->surf_per_mat_tris[i]);
- GPU_BATCH_DISCARD_SAFE(cache->surf_per_mat[i]);
- }
- MEM_SAFE_FREE(cache->surf_per_mat_tris);
- MEM_SAFE_FREE(cache->surf_per_mat);
- cache->mat_len = 0;
- cache->cd_used = 0;
+ CurveBatchCache *cache = cu->batch_cache;
+ if (!cache) {
+ return;
+ }
+
+ for (int i = 0; i < sizeof(cache->ordered) / sizeof(void *); ++i) {
+ GPUVertBuf **vbo = (GPUVertBuf **)&cache->ordered;
+ GPU_VERTBUF_DISCARD_SAFE(vbo[i]);
+ }
+ for (int i = 0; i < sizeof(cache->edit) / sizeof(void *); ++i) {
+ GPUVertBuf **vbo = (GPUVertBuf **)&cache->edit;
+ GPU_VERTBUF_DISCARD_SAFE(vbo[i]);
+ }
+ for (int i = 0; i < sizeof(cache->ibo) / sizeof(void *); ++i) {
+ GPUIndexBuf **ibo = (GPUIndexBuf **)&cache->ibo;
+ GPU_INDEXBUF_DISCARD_SAFE(ibo[i]);
+ }
+ for (int i = 0; i < sizeof(cache->batch) / sizeof(void *); ++i) {
+ GPUBatch **batch = (GPUBatch **)&cache->batch;
+ GPU_BATCH_DISCARD_SAFE(batch[i]);
+ }
+
+ for (int i = 0; i < cache->mat_len; ++i) {
+ GPU_INDEXBUF_DISCARD_SAFE(cache->surf_per_mat_tris[i]);
+ GPU_BATCH_DISCARD_SAFE(cache->surf_per_mat[i]);
+ }
+ MEM_SAFE_FREE(cache->surf_per_mat_tris);
+ MEM_SAFE_FREE(cache->surf_per_mat);
+ cache->mat_len = 0;
+ cache->cd_used = 0;
}
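curve_batch_cache_clear() above frees each sub-struct by walking it as a flat array of pointers (`sizeof(...) / sizeof(void *)`), so the loops stay correct when members are added, as long as every member is a pointer. A standalone sketch of that pattern with illustrative types (`Buf` stands in for GPUVertBuf; this relies on an all-pointer struct having no internal padding, which holds on common ABIs):

#include <stddef.h>
#include <stdio.h>

typedef struct Buf Buf; /* opaque stand-in */

struct Bufs {
  Buf *a;
  Buf *b;
  Buf *c;
};

int main(void)
{
  struct Bufs bufs = {NULL, NULL, NULL};
  /* Walk the struct as if it were `Buf *slots[3]`. */
  for (size_t i = 0; i < sizeof(bufs) / sizeof(void *); i++) {
    Buf **slots = (Buf **)&bufs;
    printf("slot %zu: %p\n", i, (void *)slots[i]);
  }
  return 0;
}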
void DRW_curve_batch_cache_free(Curve *cu)
{
- curve_batch_cache_clear(cu);
- MEM_SAFE_FREE(cu->batch_cache);
+ curve_batch_cache_clear(cu);
+ MEM_SAFE_FREE(cu->batch_cache);
}
/* -------------------------------------------------------------------- */
@@ -544,282 +550,291 @@ void DRW_curve_batch_cache_free(Curve *cu)
/* GPUBatch cache usage. */
static void curve_create_curves_pos(CurveRenderData *rdata, GPUVertBuf *vbo_curves_pos)
{
- BLI_assert(rdata->ob_curve_cache != NULL);
-
- static GPUVertFormat format = { 0 };
- static struct { uint pos; } attr_id;
- if (format.attr_len == 0) {
- attr_id.pos = GPU_vertformat_attr_add(&format, "pos", GPU_COMP_F32, 3, GPU_FETCH_FLOAT);
- }
-
- const int vert_len = curve_render_data_wire_verts_len_get(rdata);
- GPU_vertbuf_init_with_format(vbo_curves_pos, &format);
- GPU_vertbuf_data_alloc(vbo_curves_pos, vert_len);
-
- int v_idx = 0;
- for (const BevList *bl = rdata->ob_curve_cache->bev.first; bl; bl = bl->next) {
- if (bl->nr <= 0) {
- continue;
- }
- const int i_end = v_idx + bl->nr;
- for (const BevPoint *bevp = bl->bevpoints; v_idx < i_end; v_idx++, bevp++) {
- GPU_vertbuf_attr_set(vbo_curves_pos, attr_id.pos, v_idx, bevp->vec);
- }
- }
- for (const DispList *dl = rdata->ob_curve_cache->disp.first; dl; dl = dl->next) {
- if (ELEM(dl->type, DL_SEGM, DL_POLY)) {
- for (int i = 0; i < dl->nr; v_idx++, i++) {
- GPU_vertbuf_attr_set(vbo_curves_pos, attr_id.pos, v_idx, &((float(*)[3])dl->verts)[i]);
- }
- }
- }
- BLI_assert(v_idx == vert_len);
+ BLI_assert(rdata->ob_curve_cache != NULL);
+
+ static GPUVertFormat format = {0};
+ static struct {
+ uint pos;
+ } attr_id;
+ if (format.attr_len == 0) {
+ attr_id.pos = GPU_vertformat_attr_add(&format, "pos", GPU_COMP_F32, 3, GPU_FETCH_FLOAT);
+ }
+
+ const int vert_len = curve_render_data_wire_verts_len_get(rdata);
+ GPU_vertbuf_init_with_format(vbo_curves_pos, &format);
+ GPU_vertbuf_data_alloc(vbo_curves_pos, vert_len);
+
+ int v_idx = 0;
+ for (const BevList *bl = rdata->ob_curve_cache->bev.first; bl; bl = bl->next) {
+ if (bl->nr <= 0) {
+ continue;
+ }
+ const int i_end = v_idx + bl->nr;
+ for (const BevPoint *bevp = bl->bevpoints; v_idx < i_end; v_idx++, bevp++) {
+ GPU_vertbuf_attr_set(vbo_curves_pos, attr_id.pos, v_idx, bevp->vec);
+ }
+ }
+ for (const DispList *dl = rdata->ob_curve_cache->disp.first; dl; dl = dl->next) {
+ if (ELEM(dl->type, DL_SEGM, DL_POLY)) {
+ for (int i = 0; i < dl->nr; v_idx++, i++) {
+ GPU_vertbuf_attr_set(vbo_curves_pos, attr_id.pos, v_idx, &((float(*)[3])dl->verts)[i]);
+ }
+ }
+ }
+ BLI_assert(v_idx == vert_len);
}
static void curve_create_curves_lines(CurveRenderData *rdata, GPUIndexBuf *ibo_curve_lines)
{
- BLI_assert(rdata->ob_curve_cache != NULL);
-
- const int vert_len = curve_render_data_wire_verts_len_get(rdata);
- const int edge_len = curve_render_data_wire_edges_len_get(rdata);
- const int curve_len = curve_render_data_wire_curve_len_get(rdata);
- /* Count the last vertex of each strip and the primitive restart. */
- const int index_len = edge_len + curve_len * 2;
-
- GPUIndexBufBuilder elb;
- GPU_indexbuf_init_ex(&elb, GPU_PRIM_LINE_STRIP, index_len, vert_len, true);
-
- int v_idx = 0;
- for (const BevList *bl = rdata->ob_curve_cache->bev.first; bl; bl = bl->next) {
- if (bl->nr <= 0) {
- continue;
- }
- const bool is_cyclic = bl->poly != -1;
- if (is_cyclic) {
- GPU_indexbuf_add_generic_vert(&elb, v_idx + (bl->nr - 1));
- }
- for (int i = 0; i < bl->nr; i++) {
- GPU_indexbuf_add_generic_vert(&elb, v_idx + i);
- }
- GPU_indexbuf_add_primitive_restart(&elb);
- v_idx += bl->nr;
- }
- for (const DispList *dl = rdata->ob_curve_cache->disp.first; dl; dl = dl->next) {
- if (ELEM(dl->type, DL_SEGM, DL_POLY)) {
- const bool is_cyclic = dl->type == DL_POLY;
- if (is_cyclic) {
- GPU_indexbuf_add_generic_vert(&elb, v_idx + (dl->nr - 1));
- }
- for (int i = 0; i < dl->nr; i++) {
- GPU_indexbuf_add_generic_vert(&elb, v_idx + i);
- }
- GPU_indexbuf_add_primitive_restart(&elb);
- v_idx += dl->nr;
- }
- }
- GPU_indexbuf_build_in_place(&elb, ibo_curve_lines);
+ BLI_assert(rdata->ob_curve_cache != NULL);
+
+ const int vert_len = curve_render_data_wire_verts_len_get(rdata);
+ const int edge_len = curve_render_data_wire_edges_len_get(rdata);
+ const int curve_len = curve_render_data_wire_curve_len_get(rdata);
+ /* Count the last vertex of each strip and the primitive restart. */
+ const int index_len = edge_len + curve_len * 2;
+
+ GPUIndexBufBuilder elb;
+ GPU_indexbuf_init_ex(&elb, GPU_PRIM_LINE_STRIP, index_len, vert_len, true);
+
+ int v_idx = 0;
+ for (const BevList *bl = rdata->ob_curve_cache->bev.first; bl; bl = bl->next) {
+ if (bl->nr <= 0) {
+ continue;
+ }
+ const bool is_cyclic = bl->poly != -1;
+ if (is_cyclic) {
+ GPU_indexbuf_add_generic_vert(&elb, v_idx + (bl->nr - 1));
+ }
+ for (int i = 0; i < bl->nr; i++) {
+ GPU_indexbuf_add_generic_vert(&elb, v_idx + i);
+ }
+ GPU_indexbuf_add_primitive_restart(&elb);
+ v_idx += bl->nr;
+ }
+ for (const DispList *dl = rdata->ob_curve_cache->disp.first; dl; dl = dl->next) {
+ if (ELEM(dl->type, DL_SEGM, DL_POLY)) {
+ const bool is_cyclic = dl->type == DL_POLY;
+ if (is_cyclic) {
+ GPU_indexbuf_add_generic_vert(&elb, v_idx + (dl->nr - 1));
+ }
+ for (int i = 0; i < dl->nr; i++) {
+ GPU_indexbuf_add_generic_vert(&elb, v_idx + i);
+ }
+ GPU_indexbuf_add_primitive_restart(&elb);
+ v_idx += dl->nr;
+ }
+ }
+ GPU_indexbuf_build_in_place(&elb, ibo_curve_lines);
}
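The `index_len` bound above works out per strip: a non-cyclic strip of nr points has nr - 1 edges and the builder emits nr vertex indices plus one restart, i.e. edges + 2 entries; a cyclic strip has nr edges and emits one extra leading index, nr indices, and one restart, again edges + 2. Summed over curve_len strips, that is exactly edge_len + curve_len * 2.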
static void curve_create_edit_curves_nor(CurveRenderData *rdata, GPUVertBuf *vbo_curves_nor)
{
- static GPUVertFormat format = { 0 };
- static struct { uint pos, nor, tan, rad; } attr_id;
- if (format.attr_len == 0) {
- /* initialize vertex formats */
- attr_id.pos = GPU_vertformat_attr_add(&format, "pos", GPU_COMP_F32, 3, GPU_FETCH_FLOAT);
- attr_id.rad = GPU_vertformat_attr_add(&format, "rad", GPU_COMP_F32, 1, GPU_FETCH_FLOAT);
- attr_id.nor = GPU_vertformat_attr_add(&format, "nor", GPU_COMP_I10, 3, GPU_FETCH_INT_TO_FLOAT_UNIT);
- attr_id.tan = GPU_vertformat_attr_add(&format, "tan", GPU_COMP_I10, 3, GPU_FETCH_INT_TO_FLOAT_UNIT);
- }
-
- int verts_len_capacity = curve_render_data_normal_len_get(rdata) * 2;
- int vbo_len_used = 0;
-
- GPU_vertbuf_init_with_format(vbo_curves_nor, &format);
- GPU_vertbuf_data_alloc(vbo_curves_nor, verts_len_capacity);
-
- const BevList *bl;
- const Nurb *nu;
-
- for (bl = rdata->ob_curve_cache->bev.first, nu = rdata->nurbs->first;
- nu && bl;
- bl = bl->next, nu = nu->next)
- {
- const BevPoint *bevp = bl->bevpoints;
- int nr = bl->nr;
- int skip = nu->resolu / 16;
-
- while (nr-- > 0) { /* accounts for empty bevel lists */
- float nor[3] = {1.0f, 0.0f, 0.0f};
- mul_qt_v3(bevp->quat, nor);
-
- GPUPackedNormal pnor = GPU_normal_convert_i10_v3(nor);
- GPUPackedNormal ptan = GPU_normal_convert_i10_v3(bevp->dir);
-
- /* Only set attributes for one vertex. */
- GPU_vertbuf_attr_set(vbo_curves_nor, attr_id.pos, vbo_len_used, bevp->vec);
- GPU_vertbuf_attr_set(vbo_curves_nor, attr_id.rad, vbo_len_used, &bevp->radius);
- GPU_vertbuf_attr_set(vbo_curves_nor, attr_id.nor, vbo_len_used, &pnor);
- GPU_vertbuf_attr_set(vbo_curves_nor, attr_id.tan, vbo_len_used, &ptan);
- vbo_len_used++;
-
- /* Skip the other vertex (it does not need to be offset). */
- GPU_vertbuf_attr_set(vbo_curves_nor, attr_id.pos, vbo_len_used, bevp->vec);
- vbo_len_used++;
-
- bevp += skip + 1;
- nr -= skip;
- }
- }
- BLI_assert(vbo_len_used == verts_len_capacity);
+ static GPUVertFormat format = {0};
+ static struct {
+ uint pos, nor, tan, rad;
+ } attr_id;
+ if (format.attr_len == 0) {
+ /* initialize vertex formats */
+ attr_id.pos = GPU_vertformat_attr_add(&format, "pos", GPU_COMP_F32, 3, GPU_FETCH_FLOAT);
+ attr_id.rad = GPU_vertformat_attr_add(&format, "rad", GPU_COMP_F32, 1, GPU_FETCH_FLOAT);
+ attr_id.nor = GPU_vertformat_attr_add(
+ &format, "nor", GPU_COMP_I10, 3, GPU_FETCH_INT_TO_FLOAT_UNIT);
+ attr_id.tan = GPU_vertformat_attr_add(
+ &format, "tan", GPU_COMP_I10, 3, GPU_FETCH_INT_TO_FLOAT_UNIT);
+ }
+
+ int verts_len_capacity = curve_render_data_normal_len_get(rdata) * 2;
+ int vbo_len_used = 0;
+
+ GPU_vertbuf_init_with_format(vbo_curves_nor, &format);
+ GPU_vertbuf_data_alloc(vbo_curves_nor, verts_len_capacity);
+
+ const BevList *bl;
+ const Nurb *nu;
+
+ for (bl = rdata->ob_curve_cache->bev.first, nu = rdata->nurbs->first; nu && bl;
+ bl = bl->next, nu = nu->next) {
+ const BevPoint *bevp = bl->bevpoints;
+ int nr = bl->nr;
+ int skip = nu->resolu / 16;
+
+ while (nr-- > 0) { /* accounts for empty bevel lists */
+ float nor[3] = {1.0f, 0.0f, 0.0f};
+ mul_qt_v3(bevp->quat, nor);
+
+ GPUPackedNormal pnor = GPU_normal_convert_i10_v3(nor);
+ GPUPackedNormal ptan = GPU_normal_convert_i10_v3(bevp->dir);
+
+ /* Only set attributes for one vertex. */
+ GPU_vertbuf_attr_set(vbo_curves_nor, attr_id.pos, vbo_len_used, bevp->vec);
+ GPU_vertbuf_attr_set(vbo_curves_nor, attr_id.rad, vbo_len_used, &bevp->radius);
+ GPU_vertbuf_attr_set(vbo_curves_nor, attr_id.nor, vbo_len_used, &pnor);
+ GPU_vertbuf_attr_set(vbo_curves_nor, attr_id.tan, vbo_len_used, &ptan);
+ vbo_len_used++;
+
+ /* Skip the other vertex (it does not need to be offset). */
+ GPU_vertbuf_attr_set(vbo_curves_nor, attr_id.pos, vbo_len_used, bevp->vec);
+ vbo_len_used++;
+
+ bevp += skip + 1;
+ nr -= skip;
+ }
+ }
+ BLI_assert(vbo_len_used == verts_len_capacity);
}
-static char beztriple_vflag_get(CurveRenderData *rdata, char flag, char col_id, int v_idx, int nu_id)
+static char beztriple_vflag_get(
+ CurveRenderData *rdata, char flag, char col_id, int v_idx, int nu_id)
{
- char vflag = 0;
- SET_FLAG_FROM_TEST(vflag, (flag & SELECT), VFLAG_VERT_SELECTED);
- SET_FLAG_FROM_TEST(vflag, (v_idx == rdata->actvert), VFLAG_VERT_ACTIVE);
- SET_FLAG_FROM_TEST(vflag, (nu_id == rdata->actnu), ACTIVE_NURB);
- /* handle color id */
- vflag |= col_id << 4; /* << 4 because of EVEN_U_BIT */
- return vflag;
+ char vflag = 0;
+ SET_FLAG_FROM_TEST(vflag, (flag & SELECT), VFLAG_VERT_SELECTED);
+ SET_FLAG_FROM_TEST(vflag, (v_idx == rdata->actvert), VFLAG_VERT_ACTIVE);
+ SET_FLAG_FROM_TEST(vflag, (nu_id == rdata->actnu), ACTIVE_NURB);
+ /* handle color id */
+ vflag |= col_id << 4; /* << 4 because of EVEN_U_BIT */
+ return vflag;
}
static char bpoint_vflag_get(CurveRenderData *rdata, char flag, int v_idx, int nu_id, int u)
{
- char vflag = 0;
- SET_FLAG_FROM_TEST(vflag, (flag & SELECT), VFLAG_VERT_SELECTED);
- SET_FLAG_FROM_TEST(vflag, (v_idx == rdata->actvert), VFLAG_VERT_ACTIVE);
- SET_FLAG_FROM_TEST(vflag, (nu_id == rdata->actnu), ACTIVE_NURB);
- SET_FLAG_FROM_TEST(vflag, ((u % 2) == 0), EVEN_U_BIT);
- vflag |= COLOR_NURB_ULINE_ID << 4; /* << 4 because of EVEN_U_BIT */
- return vflag;
+ char vflag = 0;
+ SET_FLAG_FROM_TEST(vflag, (flag & SELECT), VFLAG_VERT_SELECTED);
+ SET_FLAG_FROM_TEST(vflag, (v_idx == rdata->actvert), VFLAG_VERT_ACTIVE);
+ SET_FLAG_FROM_TEST(vflag, (nu_id == rdata->actnu), ACTIVE_NURB);
+ SET_FLAG_FROM_TEST(vflag, ((u % 2) == 0), EVEN_U_BIT);
+ vflag |= COLOR_NURB_ULINE_ID << 4; /* << 4 because of EVEN_U_BIT */
+ return vflag;
}
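Both helpers above pack per-vertex state into a single byte: selection/active/nurb bits in the low nibble, and a color id shifted past EVEN_U_BIT into the high nibble. A standalone sketch of the scheme using placeholder bit values (not Blender's actual defines):

#include <stdio.h>

enum {
  DEMO_VERT_SELECTED = 1 << 0, /* stands in for VFLAG_VERT_SELECTED */
  DEMO_VERT_ACTIVE = 1 << 1,   /* stands in for VFLAG_VERT_ACTIVE */
  DEMO_ACTIVE_NURB = 1 << 2,   /* stands in for ACTIVE_NURB */
  DEMO_EVEN_U_BIT = 1 << 3,    /* stands in for EVEN_U_BIT */
};

int main(void)
{
  char vflag = DEMO_VERT_SELECTED | DEMO_ACTIVE_NURB;
  vflag |= 5 << 4; /* color id 5, shifted past the flag nibble */

  printf("flags: 0x%x, color id: %d\n", vflag & 0xF, (vflag >> 4) & 0xF);
  return 0;
}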
-static void curve_create_edit_data_and_handles(
- CurveRenderData *rdata,
- GPUVertBuf *vbo_pos, GPUVertBuf *vbo_data, GPUIndexBuf *ibo_edit_verts_points, GPUIndexBuf *ibo_edit_lines)
+static void curve_create_edit_data_and_handles(CurveRenderData *rdata,
+ GPUVertBuf *vbo_pos,
+ GPUVertBuf *vbo_data,
+ GPUIndexBuf *ibo_edit_verts_points,
+ GPUIndexBuf *ibo_edit_lines)
{
- static GPUVertFormat format_pos = { 0 };
- static GPUVertFormat format_data = { 0 };
- static struct { uint pos, data; } attr_id;
- if (format_pos.attr_len == 0) {
- /* initialize vertex formats */
- attr_id.pos = GPU_vertformat_attr_add(&format_pos, "pos", GPU_COMP_F32, 3, GPU_FETCH_FLOAT);
- attr_id.data = GPU_vertformat_attr_add(&format_data, "data", GPU_COMP_U8, 1, GPU_FETCH_INT);
- }
-
- int verts_len_capacity = curve_render_data_overlay_verts_len_get(rdata);
- int edges_len_capacity = curve_render_data_overlay_edges_len_get(rdata) * 2;
- int vbo_len_used = 0;
-
- if (DRW_TEST_ASSIGN_VBO(vbo_pos)) {
- GPU_vertbuf_init_with_format(vbo_pos, &format_pos);
- GPU_vertbuf_data_alloc(vbo_pos, verts_len_capacity);
- }
- if (DRW_TEST_ASSIGN_VBO(vbo_data)) {
- GPU_vertbuf_init_with_format(vbo_data, &format_data);
- GPU_vertbuf_data_alloc(vbo_data, verts_len_capacity);
- }
-
- GPUIndexBufBuilder elb_verts, *elbp_verts = NULL;
- GPUIndexBufBuilder elb_lines, *elbp_lines = NULL;
- if (DRW_TEST_ASSIGN_IBO(ibo_edit_verts_points)) {
- elbp_verts = &elb_verts;
- GPU_indexbuf_init(elbp_verts, GPU_PRIM_POINTS, verts_len_capacity, verts_len_capacity);
- }
- if (DRW_TEST_ASSIGN_IBO(ibo_edit_lines)) {
- elbp_lines = &elb_lines;
- GPU_indexbuf_init(elbp_lines, GPU_PRIM_LINES, edges_len_capacity, verts_len_capacity);
- }
-
- int v_idx = 0, nu_id = 0;
- for (Nurb *nu = rdata->nurbs->first; nu; nu = nu->next, nu_id++) {
- const BezTriple *bezt = nu->bezt;
- const BPoint *bp = nu->bp;
- if (bezt) {
- for (int a = 0; a < nu->pntsu; a++, bezt++) {
- if (bezt->hide == true) {
- continue;
- }
-
- if (elbp_verts) {
- GPU_indexbuf_add_point_vert(elbp_verts, vbo_len_used + 1);
- }
- if (elbp_lines) {
- GPU_indexbuf_add_line_verts(elbp_lines, vbo_len_used + 1, vbo_len_used + 0);
- GPU_indexbuf_add_line_verts(elbp_lines, vbo_len_used + 1, vbo_len_used + 2);
- }
- if (vbo_data) {
- char vflag[3] = {
- beztriple_vflag_get(rdata, bezt->f1, bezt->h1, v_idx, nu_id),
- beztriple_vflag_get(rdata, bezt->f2, bezt->h1, v_idx, nu_id),
- beztriple_vflag_get(rdata, bezt->f3, bezt->h2, v_idx, nu_id),
- };
- for (int j = 0; j < 3; j++) {
- GPU_vertbuf_attr_set(vbo_data, attr_id.data, vbo_len_used + j, &vflag[j]);
- }
- }
- if (vbo_pos) {
- for (int j = 0; j < 3; j++) {
- GPU_vertbuf_attr_set(vbo_pos, attr_id.pos, vbo_len_used + j, bezt->vec[j]);
- }
- }
- vbo_len_used += 3;
- v_idx += 1;
- }
- }
- else if (bp) {
- int pt_len = nu->pntsu * nu->pntsv;
- for (int a = 0; a < pt_len; a++, bp++) {
- if (bp->hide == true) {
- continue;
- }
- int u = (a % nu->pntsu);
- int v = (a / nu->pntsu);
- /* Use indexed rendering for bezier.
- * Specify all points and use indices to hide/show. */
- if (elbp_verts) {
- GPU_indexbuf_add_point_vert(elbp_verts, vbo_len_used);
- }
- if (elbp_lines) {
- const BPoint *bp_next_u = (u < (nu->pntsu - 1)) ? &nu->bp[a + 1] : NULL;
- const BPoint *bp_next_v = (v < (nu->pntsv - 1)) ? &nu->bp[a + nu->pntsu] : NULL;
- if (bp_next_u && (bp_next_u->hide == false)) {
- GPU_indexbuf_add_line_verts(elbp_lines, vbo_len_used, vbo_len_used + 1);
- }
- if (bp_next_v && (bp_next_v->hide == false)) {
- GPU_indexbuf_add_line_verts(elbp_lines, vbo_len_used, vbo_len_used + nu->pntsu);
- }
- }
- if (vbo_data) {
- char vflag = bpoint_vflag_get(rdata, bp->f1, v_idx, nu_id, u);
- GPU_vertbuf_attr_set(vbo_data, attr_id.data, vbo_len_used, &vflag);
- }
- if (vbo_pos) {
- GPU_vertbuf_attr_set(vbo_pos, attr_id.pos, vbo_len_used, bp->vec);
- }
- vbo_len_used += 1;
- v_idx += 1;
- }
- }
- }
-
- /* Resize & Finish */
- if (elbp_verts != NULL) {
- GPU_indexbuf_build_in_place(elbp_verts, ibo_edit_verts_points);
- }
- if (elbp_lines != NULL) {
- GPU_indexbuf_build_in_place(elbp_lines, ibo_edit_lines);
- }
- if (vbo_len_used != verts_len_capacity) {
- if (vbo_pos != NULL) {
- GPU_vertbuf_data_resize(vbo_pos, vbo_len_used);
- }
- if (vbo_data != NULL) {
- GPU_vertbuf_data_resize(vbo_data, vbo_len_used);
- }
- }
+ static GPUVertFormat format_pos = {0};
+ static GPUVertFormat format_data = {0};
+ static struct {
+ uint pos, data;
+ } attr_id;
+ if (format_pos.attr_len == 0) {
+ /* initialize vertex formats */
+ attr_id.pos = GPU_vertformat_attr_add(&format_pos, "pos", GPU_COMP_F32, 3, GPU_FETCH_FLOAT);
+ attr_id.data = GPU_vertformat_attr_add(&format_data, "data", GPU_COMP_U8, 1, GPU_FETCH_INT);
+ }
+
+ int verts_len_capacity = curve_render_data_overlay_verts_len_get(rdata);
+ int edges_len_capacity = curve_render_data_overlay_edges_len_get(rdata) * 2;
+ int vbo_len_used = 0;
+
+ if (DRW_TEST_ASSIGN_VBO(vbo_pos)) {
+ GPU_vertbuf_init_with_format(vbo_pos, &format_pos);
+ GPU_vertbuf_data_alloc(vbo_pos, verts_len_capacity);
+ }
+ if (DRW_TEST_ASSIGN_VBO(vbo_data)) {
+ GPU_vertbuf_init_with_format(vbo_data, &format_data);
+ GPU_vertbuf_data_alloc(vbo_data, verts_len_capacity);
+ }
+
+ GPUIndexBufBuilder elb_verts, *elbp_verts = NULL;
+ GPUIndexBufBuilder elb_lines, *elbp_lines = NULL;
+ if (DRW_TEST_ASSIGN_IBO(ibo_edit_verts_points)) {
+ elbp_verts = &elb_verts;
+ GPU_indexbuf_init(elbp_verts, GPU_PRIM_POINTS, verts_len_capacity, verts_len_capacity);
+ }
+ if (DRW_TEST_ASSIGN_IBO(ibo_edit_lines)) {
+ elbp_lines = &elb_lines;
+ GPU_indexbuf_init(elbp_lines, GPU_PRIM_LINES, edges_len_capacity, verts_len_capacity);
+ }
+
+ int v_idx = 0, nu_id = 0;
+ for (Nurb *nu = rdata->nurbs->first; nu; nu = nu->next, nu_id++) {
+ const BezTriple *bezt = nu->bezt;
+ const BPoint *bp = nu->bp;
+ if (bezt) {
+ for (int a = 0; a < nu->pntsu; a++, bezt++) {
+ if (bezt->hide == true) {
+ continue;
+ }
+
+ if (elbp_verts) {
+ GPU_indexbuf_add_point_vert(elbp_verts, vbo_len_used + 1);
+ }
+ if (elbp_lines) {
+ GPU_indexbuf_add_line_verts(elbp_lines, vbo_len_used + 1, vbo_len_used + 0);
+ GPU_indexbuf_add_line_verts(elbp_lines, vbo_len_used + 1, vbo_len_used + 2);
+ }
+ if (vbo_data) {
+ char vflag[3] = {
+ beztriple_vflag_get(rdata, bezt->f1, bezt->h1, v_idx, nu_id),
+ beztriple_vflag_get(rdata, bezt->f2, bezt->h1, v_idx, nu_id),
+ beztriple_vflag_get(rdata, bezt->f3, bezt->h2, v_idx, nu_id),
+ };
+ for (int j = 0; j < 3; j++) {
+ GPU_vertbuf_attr_set(vbo_data, attr_id.data, vbo_len_used + j, &vflag[j]);
+ }
+ }
+ if (vbo_pos) {
+ for (int j = 0; j < 3; j++) {
+ GPU_vertbuf_attr_set(vbo_pos, attr_id.pos, vbo_len_used + j, bezt->vec[j]);
+ }
+ }
+ vbo_len_used += 3;
+ v_idx += 1;
+ }
+ }
+ else if (bp) {
+ int pt_len = nu->pntsu * nu->pntsv;
+ for (int a = 0; a < pt_len; a++, bp++) {
+ if (bp->hide == true) {
+ continue;
+ }
+ int u = (a % nu->pntsu);
+ int v = (a / nu->pntsu);
+ /* Use indexed rendering for bezier.
+ * Specify all points and use indices to hide/show. */
+ if (elbp_verts) {
+ GPU_indexbuf_add_point_vert(elbp_verts, vbo_len_used);
+ }
+ if (elbp_lines) {
+ const BPoint *bp_next_u = (u < (nu->pntsu - 1)) ? &nu->bp[a + 1] : NULL;
+ const BPoint *bp_next_v = (v < (nu->pntsv - 1)) ? &nu->bp[a + nu->pntsu] : NULL;
+ if (bp_next_u && (bp_next_u->hide == false)) {
+ GPU_indexbuf_add_line_verts(elbp_lines, vbo_len_used, vbo_len_used + 1);
+ }
+ if (bp_next_v && (bp_next_v->hide == false)) {
+ GPU_indexbuf_add_line_verts(elbp_lines, vbo_len_used, vbo_len_used + nu->pntsu);
+ }
+ }
+ if (vbo_data) {
+ char vflag = bpoint_vflag_get(rdata, bp->f1, v_idx, nu_id, u);
+ GPU_vertbuf_attr_set(vbo_data, attr_id.data, vbo_len_used, &vflag);
+ }
+ if (vbo_pos) {
+ GPU_vertbuf_attr_set(vbo_pos, attr_id.pos, vbo_len_used, bp->vec);
+ }
+ vbo_len_used += 1;
+ v_idx += 1;
+ }
+ }
+ }
+
+ /* Resize & Finish */
+ if (elbp_verts != NULL) {
+ GPU_indexbuf_build_in_place(elbp_verts, ibo_edit_verts_points);
+ }
+ if (elbp_lines != NULL) {
+ GPU_indexbuf_build_in_place(elbp_lines, ibo_edit_lines);
+ }
+ if (vbo_len_used != verts_len_capacity) {
+ if (vbo_pos != NULL) {
+ GPU_vertbuf_data_resize(vbo_pos, vbo_len_used);
+ }
+ if (vbo_data != NULL) {
+ GPU_vertbuf_data_resize(vbo_data, vbo_len_used);
+ }
+ }
}
/** \} */
@@ -830,70 +845,70 @@ static void curve_create_edit_data_and_handles(
GPUBatch *DRW_curve_batch_cache_get_wire_edge(Curve *cu)
{
- CurveBatchCache *cache = curve_batch_cache_get(cu);
- return DRW_batch_request(&cache->batch.curves);
+ CurveBatchCache *cache = curve_batch_cache_get(cu);
+ return DRW_batch_request(&cache->batch.curves);
}
GPUBatch *DRW_curve_batch_cache_get_normal_edge(Curve *cu)
{
- CurveBatchCache *cache = curve_batch_cache_get(cu);
- return DRW_batch_request(&cache->batch.edit_normals);
+ CurveBatchCache *cache = curve_batch_cache_get(cu);
+ return DRW_batch_request(&cache->batch.edit_normals);
}
GPUBatch *DRW_curve_batch_cache_get_edit_edges(Curve *cu)
{
- CurveBatchCache *cache = curve_batch_cache_get(cu);
- return DRW_batch_request(&cache->batch.edit_edges);
+ CurveBatchCache *cache = curve_batch_cache_get(cu);
+ return DRW_batch_request(&cache->batch.edit_edges);
}
GPUBatch *DRW_curve_batch_cache_get_edit_verts(Curve *cu, bool handles)
{
- CurveBatchCache *cache = curve_batch_cache_get(cu);
- if (handles) {
- return DRW_batch_request(&cache->batch.edit_handles_verts);
- }
- else {
- return DRW_batch_request(&cache->batch.edit_verts);
- }
+ CurveBatchCache *cache = curve_batch_cache_get(cu);
+ if (handles) {
+ return DRW_batch_request(&cache->batch.edit_handles_verts);
+ }
+ else {
+ return DRW_batch_request(&cache->batch.edit_verts);
+ }
}
GPUBatch *DRW_curve_batch_cache_get_triangles_with_normals(struct Curve *cu)
{
- CurveBatchCache *cache = curve_batch_cache_get(cu);
- return DRW_batch_request(&cache->batch.surfaces);
+ CurveBatchCache *cache = curve_batch_cache_get(cu);
+ return DRW_batch_request(&cache->batch.surfaces);
}
-GPUBatch **DRW_curve_batch_cache_get_surface_shaded(
- struct Curve *cu,
- struct GPUMaterial **gpumat_array, uint gpumat_array_len)
+GPUBatch **DRW_curve_batch_cache_get_surface_shaded(struct Curve *cu,
+ struct GPUMaterial **gpumat_array,
+ uint gpumat_array_len)
{
- CurveBatchCache *cache = curve_batch_cache_get(cu);
+ CurveBatchCache *cache = curve_batch_cache_get(cu);
- BLI_assert(gpumat_array_len == cache->mat_len);
+ BLI_assert(gpumat_array_len == cache->mat_len);
- curve_cd_calc_used_gpu_layers(&cache->cd_needed, gpumat_array, gpumat_array_len);
+ curve_cd_calc_used_gpu_layers(&cache->cd_needed, gpumat_array, gpumat_array_len);
- for (int i = 0; i < cache->mat_len; ++i) {
- DRW_batch_request(&cache->surf_per_mat[i]);
- }
- return cache->surf_per_mat;
+ for (int i = 0; i < cache->mat_len; ++i) {
+ DRW_batch_request(&cache->surf_per_mat[i]);
+ }
+ return cache->surf_per_mat;
}
GPUBatch *DRW_curve_batch_cache_get_wireframes_face(Curve *cu)
{
- CurveBatchCache *cache = curve_batch_cache_get(cu);
- return DRW_batch_request(&cache->batch.surfaces_edges);
+ CurveBatchCache *cache = curve_batch_cache_get(cu);
+ return DRW_batch_request(&cache->batch.surfaces_edges);
}
GPUBatch *DRW_curve_batch_cache_get_edge_detection(Curve *cu, bool *r_is_manifold)
{
- CurveBatchCache *cache = curve_batch_cache_get(cu);
- /* Even if is_manifold is not correct (not updated),
- * the default (not manifold) is just the worst case. */
- if (r_is_manifold) {
- *r_is_manifold = cache->is_manifold;
- }
- return DRW_batch_request(&cache->batch.edge_detection);
+ CurveBatchCache *cache = curve_batch_cache_get(cu);
+ /* Even if is_manifold is not correct (not updated),
+ * the default (not manifold) is just the worst case. */
+ if (r_is_manifold) {
+ *r_is_manifold = cache->is_manifold;
+ }
+ return DRW_batch_request(&cache->batch.edge_detection);
}
/** \} */
@@ -904,156 +919,159 @@ GPUBatch *DRW_curve_batch_cache_get_edge_detection(Curve *cu, bool *r_is_manifol
void DRW_curve_batch_cache_create_requested(Object *ob)
{
- BLI_assert(ELEM(ob->type, OB_CURVE, OB_SURF, OB_FONT));
-
- Curve *cu = ob->data;
- CurveBatchCache *cache = curve_batch_cache_get(cu);
-
- /* Verify that all surface batches have needed attribute layers. */
- /* TODO(fclem): We could be a bit smarter here and only do it per material. */
- for (int i = 0; i < cache->mat_len; ++i) {
- if ((cache->cd_used & cache->cd_needed) != cache->cd_needed) {
- /* We can't discard batches at this point as they have been
- * referenced for drawing. Just clear them in place. */
- GPU_batch_clear(cache->surf_per_mat[i]);
- memset(cache->surf_per_mat[i], 0, sizeof(*cache->surf_per_mat[i]));
- }
- }
- if ((cache->cd_used & cache->cd_needed) != cache->cd_needed) {
- cache->cd_used |= cache->cd_needed;
- cache->cd_needed = 0;
- }
-
- /* Init batches and request VBOs & IBOs */
- if (DRW_batch_requested(cache->batch.surfaces, GPU_PRIM_TRIS)) {
- DRW_ibo_request(cache->batch.surfaces, &cache->ibo.surfaces_tris);
- DRW_vbo_request(cache->batch.surfaces, &cache->ordered.pos_nor);
- }
- if (DRW_batch_requested(cache->batch.surfaces_edges, GPU_PRIM_LINES)) {
- DRW_ibo_request(cache->batch.surfaces_edges, &cache->ibo.surfaces_lines);
- DRW_vbo_request(cache->batch.surfaces_edges, &cache->ordered.pos_nor);
- DRW_vbo_request(cache->batch.surfaces_edges, &cache->ordered.edge_fac);
- }
- if (DRW_batch_requested(cache->batch.curves, GPU_PRIM_LINE_STRIP)) {
- DRW_ibo_request(cache->batch.curves, &cache->ibo.curves_lines);
- DRW_vbo_request(cache->batch.curves, &cache->ordered.curves_pos);
- }
- if (DRW_batch_requested(cache->batch.edge_detection, GPU_PRIM_LINES_ADJ)) {
- DRW_ibo_request(cache->batch.edge_detection, &cache->ibo.edges_adj_lines);
- DRW_vbo_request(cache->batch.edge_detection, &cache->ordered.pos_nor);
- }
-
- /* Edit mode */
- if (DRW_batch_requested(cache->batch.edit_edges, GPU_PRIM_LINES)) {
- DRW_ibo_request(cache->batch.edit_edges, &cache->ibo.edit_lines);
- DRW_vbo_request(cache->batch.edit_edges, &cache->edit.pos);
- DRW_vbo_request(cache->batch.edit_edges, &cache->edit.data);
- }
- if (DRW_batch_requested(cache->batch.edit_verts, GPU_PRIM_POINTS)) {
- DRW_ibo_request(cache->batch.edit_verts, &cache->ibo.edit_verts_points);
- DRW_vbo_request(cache->batch.edit_verts, &cache->edit.pos);
- DRW_vbo_request(cache->batch.edit_verts, &cache->edit.data);
- }
- if (DRW_batch_requested(cache->batch.edit_handles_verts, GPU_PRIM_POINTS)) {
- DRW_vbo_request(cache->batch.edit_handles_verts, &cache->edit.pos);
- DRW_vbo_request(cache->batch.edit_handles_verts, &cache->edit.data);
- }
- if (DRW_batch_requested(cache->batch.edit_normals, GPU_PRIM_LINES)) {
- DRW_vbo_request(cache->batch.edit_normals, &cache->edit.curves_nor);
- }
- for (int i = 0; i < cache->mat_len; ++i) {
- if (DRW_batch_requested(cache->surf_per_mat[i], GPU_PRIM_TRIS)) {
- if (cache->mat_len > 1) {
- DRW_ibo_request(cache->surf_per_mat[i], &cache->surf_per_mat_tris[i]);
- }
- if (cache->cd_used & CD_MLOOPUV) {
- DRW_vbo_request(cache->surf_per_mat[i], &cache->ordered.loop_uv);
- }
- DRW_vbo_request(cache->surf_per_mat[i], &cache->ordered.loop_pos_nor);
- }
- }
-
- /* Generate MeshRenderData flags */
- int mr_flag = 0;
- DRW_ADD_FLAG_FROM_VBO_REQUEST(mr_flag, cache->ordered.pos_nor, CU_DATATYPE_SURFACE);
- DRW_ADD_FLAG_FROM_VBO_REQUEST(mr_flag, cache->ordered.edge_fac, CU_DATATYPE_SURFACE);
- DRW_ADD_FLAG_FROM_VBO_REQUEST(mr_flag, cache->ordered.curves_pos, CU_DATATYPE_WIRE);
- DRW_ADD_FLAG_FROM_VBO_REQUEST(mr_flag, cache->ordered.loop_pos_nor, CU_DATATYPE_SURFACE);
- DRW_ADD_FLAG_FROM_VBO_REQUEST(mr_flag, cache->ordered.loop_uv, CU_DATATYPE_SURFACE);
- DRW_ADD_FLAG_FROM_IBO_REQUEST(mr_flag, cache->ibo.surfaces_tris, CU_DATATYPE_SURFACE);
- DRW_ADD_FLAG_FROM_IBO_REQUEST(mr_flag, cache->ibo.surfaces_lines, CU_DATATYPE_SURFACE);
- DRW_ADD_FLAG_FROM_IBO_REQUEST(mr_flag, cache->ibo.curves_lines, CU_DATATYPE_WIRE);
- DRW_ADD_FLAG_FROM_IBO_REQUEST(mr_flag, cache->ibo.edges_adj_lines, CU_DATATYPE_SURFACE);
-
- DRW_ADD_FLAG_FROM_VBO_REQUEST(mr_flag, cache->edit.pos, CU_DATATYPE_OVERLAY);
- DRW_ADD_FLAG_FROM_VBO_REQUEST(mr_flag, cache->edit.data, CU_DATATYPE_OVERLAY);
- DRW_ADD_FLAG_FROM_VBO_REQUEST(mr_flag, cache->edit.curves_nor, CU_DATATYPE_NORMAL);
- DRW_ADD_FLAG_FROM_VBO_REQUEST(mr_flag, cache->edit.curves_weight, CU_DATATYPE_OVERLAY);
- DRW_ADD_FLAG_FROM_IBO_REQUEST(mr_flag, cache->ibo.edit_verts_points, CU_DATATYPE_OVERLAY);
- DRW_ADD_FLAG_FROM_IBO_REQUEST(mr_flag, cache->ibo.edit_lines, CU_DATATYPE_OVERLAY);
-
- for (int i = 0; i < cache->mat_len; ++i) {
- DRW_ADD_FLAG_FROM_IBO_REQUEST(mr_flag, cache->surf_per_mat_tris[i], CU_DATATYPE_SURFACE);
- }
-
- CurveRenderData *rdata = curve_render_data_create(cu, ob->runtime.curve_cache, mr_flag);
-
- /* DispLists */
- ListBase *lb = &rdata->ob_curve_cache->disp;
-
- /* Generate VBOs */
- if (DRW_vbo_requested(cache->ordered.pos_nor)) {
- DRW_displist_vertbuf_create_pos_and_nor(lb, cache->ordered.pos_nor);
- }
- if (DRW_vbo_requested(cache->ordered.edge_fac)) {
- DRW_displist_vertbuf_create_wiredata(lb, cache->ordered.edge_fac);
- }
- if (DRW_vbo_requested(cache->ordered.curves_pos)) {
- curve_create_curves_pos(rdata, cache->ordered.curves_pos);
- }
-
- if (DRW_vbo_requested(cache->ordered.loop_pos_nor) ||
- DRW_vbo_requested(cache->ordered.loop_uv))
- {
- DRW_displist_vertbuf_create_loop_pos_and_nor_and_uv(lb, cache->ordered.loop_pos_nor, cache->ordered.loop_uv);
- }
-
- if (DRW_ibo_requested(cache->surf_per_mat_tris[0])) {
- DRW_displist_indexbuf_create_triangles_loop_split_by_material(lb, cache->surf_per_mat_tris, cache->mat_len);
- }
-
- if (DRW_ibo_requested(cache->ibo.curves_lines)) {
- curve_create_curves_lines(rdata, cache->ibo.curves_lines);
- }
- if (DRW_ibo_requested(cache->ibo.surfaces_tris)) {
- DRW_displist_indexbuf_create_triangles_in_order(lb, cache->ibo.surfaces_tris);
- }
- if (DRW_ibo_requested(cache->ibo.surfaces_lines)) {
- DRW_displist_indexbuf_create_lines_in_order(lb, cache->ibo.surfaces_lines);
- }
- if (DRW_ibo_requested(cache->ibo.edges_adj_lines)) {
- DRW_displist_indexbuf_create_edges_adjacency_lines(lb, cache->ibo.edges_adj_lines, &cache->is_manifold);
- }
-
- if (DRW_vbo_requested(cache->edit.pos) ||
- DRW_vbo_requested(cache->edit.data) ||
- DRW_ibo_requested(cache->ibo.edit_verts_points) ||
- DRW_ibo_requested(cache->ibo.edit_lines))
- {
- curve_create_edit_data_and_handles(rdata, cache->edit.pos, cache->edit.data,
- cache->ibo.edit_verts_points, cache->ibo.edit_lines);
- }
- if (DRW_vbo_requested(cache->edit.curves_nor)) {
- curve_create_edit_curves_nor(rdata, cache->edit.curves_nor);
- }
-
- curve_render_data_free(rdata);
+ BLI_assert(ELEM(ob->type, OB_CURVE, OB_SURF, OB_FONT));
+
+ Curve *cu = ob->data;
+ CurveBatchCache *cache = curve_batch_cache_get(cu);
+
+ /* Verify that all surface batches have needed attribute layers. */
+ /* TODO(fclem): We could be a bit smarter here and only do it per material. */
+ for (int i = 0; i < cache->mat_len; ++i) {
+ if ((cache->cd_used & cache->cd_needed) != cache->cd_needed) {
+ /* We can't discard batches at this point as they have been
+ * referenced for drawing. Just clear them in place. */
+ GPU_batch_clear(cache->surf_per_mat[i]);
+ memset(cache->surf_per_mat[i], 0, sizeof(*cache->surf_per_mat[i]));
+ }
+ }
+ if ((cache->cd_used & cache->cd_needed) != cache->cd_needed) {
+ cache->cd_used |= cache->cd_needed;
+ cache->cd_needed = 0;
+ }
+
+ /* Init batches and request VBOs & IBOs */
+ if (DRW_batch_requested(cache->batch.surfaces, GPU_PRIM_TRIS)) {
+ DRW_ibo_request(cache->batch.surfaces, &cache->ibo.surfaces_tris);
+ DRW_vbo_request(cache->batch.surfaces, &cache->ordered.pos_nor);
+ }
+ if (DRW_batch_requested(cache->batch.surfaces_edges, GPU_PRIM_LINES)) {
+ DRW_ibo_request(cache->batch.surfaces_edges, &cache->ibo.surfaces_lines);
+ DRW_vbo_request(cache->batch.surfaces_edges, &cache->ordered.pos_nor);
+ DRW_vbo_request(cache->batch.surfaces_edges, &cache->ordered.edge_fac);
+ }
+ if (DRW_batch_requested(cache->batch.curves, GPU_PRIM_LINE_STRIP)) {
+ DRW_ibo_request(cache->batch.curves, &cache->ibo.curves_lines);
+ DRW_vbo_request(cache->batch.curves, &cache->ordered.curves_pos);
+ }
+ if (DRW_batch_requested(cache->batch.edge_detection, GPU_PRIM_LINES_ADJ)) {
+ DRW_ibo_request(cache->batch.edge_detection, &cache->ibo.edges_adj_lines);
+ DRW_vbo_request(cache->batch.edge_detection, &cache->ordered.pos_nor);
+ }
+
+ /* Edit mode */
+ if (DRW_batch_requested(cache->batch.edit_edges, GPU_PRIM_LINES)) {
+ DRW_ibo_request(cache->batch.edit_edges, &cache->ibo.edit_lines);
+ DRW_vbo_request(cache->batch.edit_edges, &cache->edit.pos);
+ DRW_vbo_request(cache->batch.edit_edges, &cache->edit.data);
+ }
+ if (DRW_batch_requested(cache->batch.edit_verts, GPU_PRIM_POINTS)) {
+ DRW_ibo_request(cache->batch.edit_verts, &cache->ibo.edit_verts_points);
+ DRW_vbo_request(cache->batch.edit_verts, &cache->edit.pos);
+ DRW_vbo_request(cache->batch.edit_verts, &cache->edit.data);
+ }
+ if (DRW_batch_requested(cache->batch.edit_handles_verts, GPU_PRIM_POINTS)) {
+ DRW_vbo_request(cache->batch.edit_handles_verts, &cache->edit.pos);
+ DRW_vbo_request(cache->batch.edit_handles_verts, &cache->edit.data);
+ }
+ if (DRW_batch_requested(cache->batch.edit_normals, GPU_PRIM_LINES)) {
+ DRW_vbo_request(cache->batch.edit_normals, &cache->edit.curves_nor);
+ }
+ for (int i = 0; i < cache->mat_len; ++i) {
+ if (DRW_batch_requested(cache->surf_per_mat[i], GPU_PRIM_TRIS)) {
+ if (cache->mat_len > 1) {
+ DRW_ibo_request(cache->surf_per_mat[i], &cache->surf_per_mat_tris[i]);
+ }
+ if (cache->cd_used & CD_MLOOPUV) {
+ DRW_vbo_request(cache->surf_per_mat[i], &cache->ordered.loop_uv);
+ }
+ DRW_vbo_request(cache->surf_per_mat[i], &cache->ordered.loop_pos_nor);
+ }
+ }
+
+ /* Generate MeshRenderData flags */
+ int mr_flag = 0;
+ DRW_ADD_FLAG_FROM_VBO_REQUEST(mr_flag, cache->ordered.pos_nor, CU_DATATYPE_SURFACE);
+ DRW_ADD_FLAG_FROM_VBO_REQUEST(mr_flag, cache->ordered.edge_fac, CU_DATATYPE_SURFACE);
+ DRW_ADD_FLAG_FROM_VBO_REQUEST(mr_flag, cache->ordered.curves_pos, CU_DATATYPE_WIRE);
+ DRW_ADD_FLAG_FROM_VBO_REQUEST(mr_flag, cache->ordered.loop_pos_nor, CU_DATATYPE_SURFACE);
+ DRW_ADD_FLAG_FROM_VBO_REQUEST(mr_flag, cache->ordered.loop_uv, CU_DATATYPE_SURFACE);
+ DRW_ADD_FLAG_FROM_IBO_REQUEST(mr_flag, cache->ibo.surfaces_tris, CU_DATATYPE_SURFACE);
+ DRW_ADD_FLAG_FROM_IBO_REQUEST(mr_flag, cache->ibo.surfaces_lines, CU_DATATYPE_SURFACE);
+ DRW_ADD_FLAG_FROM_IBO_REQUEST(mr_flag, cache->ibo.curves_lines, CU_DATATYPE_WIRE);
+ DRW_ADD_FLAG_FROM_IBO_REQUEST(mr_flag, cache->ibo.edges_adj_lines, CU_DATATYPE_SURFACE);
+
+ DRW_ADD_FLAG_FROM_VBO_REQUEST(mr_flag, cache->edit.pos, CU_DATATYPE_OVERLAY);
+ DRW_ADD_FLAG_FROM_VBO_REQUEST(mr_flag, cache->edit.data, CU_DATATYPE_OVERLAY);
+ DRW_ADD_FLAG_FROM_VBO_REQUEST(mr_flag, cache->edit.curves_nor, CU_DATATYPE_NORMAL);
+ DRW_ADD_FLAG_FROM_VBO_REQUEST(mr_flag, cache->edit.curves_weight, CU_DATATYPE_OVERLAY);
+ DRW_ADD_FLAG_FROM_IBO_REQUEST(mr_flag, cache->ibo.edit_verts_points, CU_DATATYPE_OVERLAY);
+ DRW_ADD_FLAG_FROM_IBO_REQUEST(mr_flag, cache->ibo.edit_lines, CU_DATATYPE_OVERLAY);
+
+ for (int i = 0; i < cache->mat_len; ++i) {
+ DRW_ADD_FLAG_FROM_IBO_REQUEST(mr_flag, cache->surf_per_mat_tris[i], CU_DATATYPE_SURFACE);
+ }
+
+ CurveRenderData *rdata = curve_render_data_create(cu, ob->runtime.curve_cache, mr_flag);
+
+ /* DispLists */
+ ListBase *lb = &rdata->ob_curve_cache->disp;
+
+ /* Generate VBOs */
+ if (DRW_vbo_requested(cache->ordered.pos_nor)) {
+ DRW_displist_vertbuf_create_pos_and_nor(lb, cache->ordered.pos_nor);
+ }
+ if (DRW_vbo_requested(cache->ordered.edge_fac)) {
+ DRW_displist_vertbuf_create_wiredata(lb, cache->ordered.edge_fac);
+ }
+ if (DRW_vbo_requested(cache->ordered.curves_pos)) {
+ curve_create_curves_pos(rdata, cache->ordered.curves_pos);
+ }
+
+ if (DRW_vbo_requested(cache->ordered.loop_pos_nor) ||
+ DRW_vbo_requested(cache->ordered.loop_uv)) {
+ DRW_displist_vertbuf_create_loop_pos_and_nor_and_uv(
+ lb, cache->ordered.loop_pos_nor, cache->ordered.loop_uv);
+ }
+
+ if (DRW_ibo_requested(cache->surf_per_mat_tris[0])) {
+ DRW_displist_indexbuf_create_triangles_loop_split_by_material(
+ lb, cache->surf_per_mat_tris, cache->mat_len);
+ }
+
+ if (DRW_ibo_requested(cache->ibo.curves_lines)) {
+ curve_create_curves_lines(rdata, cache->ibo.curves_lines);
+ }
+ if (DRW_ibo_requested(cache->ibo.surfaces_tris)) {
+ DRW_displist_indexbuf_create_triangles_in_order(lb, cache->ibo.surfaces_tris);
+ }
+ if (DRW_ibo_requested(cache->ibo.surfaces_lines)) {
+ DRW_displist_indexbuf_create_lines_in_order(lb, cache->ibo.surfaces_lines);
+ }
+ if (DRW_ibo_requested(cache->ibo.edges_adj_lines)) {
+ DRW_displist_indexbuf_create_edges_adjacency_lines(
+ lb, cache->ibo.edges_adj_lines, &cache->is_manifold);
+ }
+
+ if (DRW_vbo_requested(cache->edit.pos) || DRW_vbo_requested(cache->edit.data) ||
+ DRW_ibo_requested(cache->ibo.edit_verts_points) ||
+ DRW_ibo_requested(cache->ibo.edit_lines)) {
+ curve_create_edit_data_and_handles(rdata,
+ cache->edit.pos,
+ cache->edit.data,
+ cache->ibo.edit_verts_points,
+ cache->ibo.edit_lines);
+ }
+ if (DRW_vbo_requested(cache->edit.curves_nor)) {
+ curve_create_edit_curves_nor(rdata, cache->edit.curves_nor);
+ }
+
+ curve_render_data_free(rdata);
#ifdef DEBUG
- /* Make sure all requested batches have been set up. */
- for (int i = 0; i < sizeof(cache->batch) / sizeof(void *); ++i) {
- BLI_assert(!DRW_batch_requested(((GPUBatch **)&cache->batch)[i], 0));
- }
+ /* Make sure all requested batches have been set up. */
+ for (int i = 0; i < sizeof(cache->batch) / sizeof(void *); ++i) {
+ BLI_assert(!DRW_batch_requested(((GPUBatch **)&cache->batch)[i], 0));
+ }
#endif
}
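For orientation: this function is the fill half of a two-phase scheme. The DRW_curve_batch_cache_get_*() accessors above only register interest via DRW_batch_request(); this function then checks what was requested (DRW_batch_requested), attaches the VBO/IBO dependencies of each requested batch (DRW_vbo_request / DRW_ibo_request), folds those requests into the CU_DATATYPE_* flags used to build a CurveRenderData, and finally fills only the buffers that were actually asked for (DRW_vbo_requested / DRW_ibo_requested) before freeing the render data.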
diff --git a/source/blender/draw/intern/draw_cache_impl_displist.c b/source/blender/draw/intern/draw_cache_impl_displist.c
index 9dbf8af0372..ea394359b05 100644
--- a/source/blender/draw/intern/draw_cache_impl_displist.c
+++ b/source/blender/draw/intern/draw_cache_impl_displist.c
@@ -25,7 +25,6 @@
* \note DispList may be removed soon! This is a utility for object types that use render.
*/
-
#include "BLI_alloca.h"
#include "BLI_utildefines.h"
#include "BLI_edgehash.h"
@@ -38,621 +37,681 @@
#include "GPU_batch.h"
#include "GPU_extensions.h"
-#include "draw_cache_impl.h" /* own include */
+#include "draw_cache_impl.h" /* own include */
static int dl_vert_len(const DispList *dl)
{
- switch (dl->type) {
- case DL_INDEX3:
- case DL_INDEX4:
- return dl->nr;
- case DL_SURF:
- return dl->parts * dl->nr;
- }
- return 0;
+ switch (dl->type) {
+ case DL_INDEX3:
+ case DL_INDEX4:
+ return dl->nr;
+ case DL_SURF:
+ return dl->parts * dl->nr;
+ }
+ return 0;
}
static int dl_tri_len(const DispList *dl)
{
- switch (dl->type) {
- case DL_INDEX3:
- return dl->parts;
- case DL_INDEX4:
- return dl->parts * 2;
- case DL_SURF:
- return dl->totindex * 2;
- }
- return 0;
+ switch (dl->type) {
+ case DL_INDEX3:
+ return dl->parts;
+ case DL_INDEX4:
+ return dl->parts * 2;
+ case DL_SURF:
+ return dl->totindex * 2;
+ }
+ return 0;
}
/* see: displist_get_allverts */
static int curve_render_surface_vert_len_get(const ListBase *lb)
{
- int vert_len = 0;
- for (const DispList *dl = lb->first; dl; dl = dl->next) {
- vert_len += dl_vert_len(dl);
- }
- return vert_len;
+ int vert_len = 0;
+ for (const DispList *dl = lb->first; dl; dl = dl->next) {
+ vert_len += dl_vert_len(dl);
+ }
+ return vert_len;
}
static int curve_render_surface_tri_len_get(const ListBase *lb)
{
- int tri_len = 0;
- for (const DispList *dl = lb->first; dl; dl = dl->next) {
- tri_len += dl_tri_len(dl);
- }
- return tri_len;
+ int tri_len = 0;
+ for (const DispList *dl = lb->first; dl; dl = dl->next) {
+ tri_len += dl_tri_len(dl);
+ }
+ return tri_len;
}
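As a worked example of the two counters above: a DL_SURF display list with parts = 4 rows of nr = 8 points contributes 4 * 8 = 32 vertices and totindex * 2 triangles (one quad per index entry, split in two), while a DL_INDEX3 list contributes dl->nr vertices and dl->parts triangles.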
-typedef void (SetTriIndicesFn)(void *thunk, uint v1, uint v2, uint v3);
+typedef void(SetTriIndicesFn)(void *thunk, uint v1, uint v2, uint v3);
static void displist_indexbufbuilder_set(
- SetTriIndicesFn *set_tri_indices,
- SetTriIndicesFn *set_quad_tri_indices, /* meh, find a better solution. */
- void *thunk, const DispList *dl, const int ofs)
+ SetTriIndicesFn *set_tri_indices,
+ SetTriIndicesFn *set_quad_tri_indices, /* meh, find a better solution. */
+ void *thunk,
+ const DispList *dl,
+ const int ofs)
{
- if (ELEM(dl->type, DL_INDEX3, DL_INDEX4, DL_SURF)) {
- const int *idx = dl->index;
- if (dl->type == DL_INDEX3) {
- const int i_end = dl->parts;
- for (int i = 0; i < i_end; i++, idx += 3) {
- set_tri_indices(thunk, idx[0] + ofs, idx[2] + ofs, idx[1] + ofs);
- }
- }
- else if (dl->type == DL_SURF) {
- const int i_end = dl->totindex;
- for (int i = 0; i < i_end; i++, idx += 4) {
- set_quad_tri_indices(thunk, idx[0] + ofs, idx[2] + ofs, idx[1] + ofs);
- set_quad_tri_indices(thunk, idx[2] + ofs, idx[0] + ofs, idx[3] + ofs);
- }
- }
- else {
- BLI_assert(dl->type == DL_INDEX4);
- const int i_end = dl->parts;
- for (int i = 0; i < i_end; i++, idx += 4) {
- if (idx[2] != idx[3]) {
- set_quad_tri_indices(thunk, idx[2] + ofs, idx[0] + ofs, idx[1] + ofs);
- set_quad_tri_indices(thunk, idx[0] + ofs, idx[2] + ofs, idx[3] + ofs);
- }
- else {
- set_tri_indices(thunk, idx[2] + ofs, idx[0] + ofs, idx[1] + ofs);
- }
- }
- }
- }
+ if (ELEM(dl->type, DL_INDEX3, DL_INDEX4, DL_SURF)) {
+ const int *idx = dl->index;
+ if (dl->type == DL_INDEX3) {
+ const int i_end = dl->parts;
+ for (int i = 0; i < i_end; i++, idx += 3) {
+ set_tri_indices(thunk, idx[0] + ofs, idx[2] + ofs, idx[1] + ofs);
+ }
+ }
+ else if (dl->type == DL_SURF) {
+ const int i_end = dl->totindex;
+ for (int i = 0; i < i_end; i++, idx += 4) {
+ set_quad_tri_indices(thunk, idx[0] + ofs, idx[2] + ofs, idx[1] + ofs);
+ set_quad_tri_indices(thunk, idx[2] + ofs, idx[0] + ofs, idx[3] + ofs);
+ }
+ }
+ else {
+ BLI_assert(dl->type == DL_INDEX4);
+ const int i_end = dl->parts;
+ for (int i = 0; i < i_end; i++, idx += 4) {
+ if (idx[2] != idx[3]) {
+ set_quad_tri_indices(thunk, idx[2] + ofs, idx[0] + ofs, idx[1] + ofs);
+ set_quad_tri_indices(thunk, idx[0] + ofs, idx[2] + ofs, idx[3] + ofs);
+ }
+ else {
+ set_tri_indices(thunk, idx[2] + ofs, idx[0] + ofs, idx[1] + ofs);
+ }
+ }
+ }
+ }
}
static int displist_indexbufbuilder_tess_set(
- SetTriIndicesFn *set_tri_indices,
- SetTriIndicesFn *set_quad_tri_indices, /* meh, find a better solution. */
- void *thunk, const DispList *dl, const int ofs)
+ SetTriIndicesFn *set_tri_indices,
+ SetTriIndicesFn *set_quad_tri_indices, /* meh, find a better solution. */
+ void *thunk,
+ const DispList *dl,
+ const int ofs)
{
- int v_idx = ofs;
- if (ELEM(dl->type, DL_INDEX3, DL_INDEX4, DL_SURF)) {
- if (dl->type == DL_INDEX3) {
- for (int i = 0; i < dl->parts; i++) {
- set_tri_indices(thunk, v_idx + 0, v_idx + 1, v_idx + 2);
- v_idx += 3;
- }
- }
- else if (dl->type == DL_SURF) {
- for (int a = 0; a < dl->parts; a++) {
- if ((dl->flag & DL_CYCL_V) == 0 && a == dl->parts - 1) {
- break;
- }
- int b = (dl->flag & DL_CYCL_U) ? 0 : 1;
- for (; b < dl->nr; b++) {
- set_quad_tri_indices(thunk, v_idx + 0, v_idx + 1, v_idx + 2);
- set_quad_tri_indices(thunk, v_idx + 3, v_idx + 4, v_idx + 5);
- v_idx += 6;
- }
- }
- }
- else {
- BLI_assert(dl->type == DL_INDEX4);
- const int *idx = dl->index;
- for (int i = 0; i < dl->parts; i++, idx += 4) {
- if (idx[2] != idx[3]) {
- set_quad_tri_indices(thunk, v_idx + 0, v_idx + 1, v_idx + 2);
- set_quad_tri_indices(thunk, v_idx + 3, v_idx + 4, v_idx + 5);
- v_idx += 6;
- }
- else {
- set_tri_indices(thunk, v_idx + 0, v_idx + 1, v_idx + 2);
- v_idx += 3;
- }
- }
- }
- }
- return v_idx;
+ int v_idx = ofs;
+ if (ELEM(dl->type, DL_INDEX3, DL_INDEX4, DL_SURF)) {
+ if (dl->type == DL_INDEX3) {
+ for (int i = 0; i < dl->parts; i++) {
+ set_tri_indices(thunk, v_idx + 0, v_idx + 1, v_idx + 2);
+ v_idx += 3;
+ }
+ }
+ else if (dl->type == DL_SURF) {
+ for (int a = 0; a < dl->parts; a++) {
+ if ((dl->flag & DL_CYCL_V) == 0 && a == dl->parts - 1) {
+ break;
+ }
+ int b = (dl->flag & DL_CYCL_U) ? 0 : 1;
+ for (; b < dl->nr; b++) {
+ set_quad_tri_indices(thunk, v_idx + 0, v_idx + 1, v_idx + 2);
+ set_quad_tri_indices(thunk, v_idx + 3, v_idx + 4, v_idx + 5);
+ v_idx += 6;
+ }
+ }
+ }
+ else {
+ BLI_assert(dl->type == DL_INDEX4);
+ const int *idx = dl->index;
+ for (int i = 0; i < dl->parts; i++, idx += 4) {
+ if (idx[2] != idx[3]) {
+ set_quad_tri_indices(thunk, v_idx + 0, v_idx + 1, v_idx + 2);
+ set_quad_tri_indices(thunk, v_idx + 3, v_idx + 4, v_idx + 5);
+ v_idx += 6;
+ }
+ else {
+ set_tri_indices(thunk, v_idx + 0, v_idx + 1, v_idx + 2);
+ v_idx += 3;
+ }
+ }
+ }
+ }
+ return v_idx;
}
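Unlike displist_indexbufbuilder_set() above, this tessellated variant assumes vertices are duplicated per corner: each quad consumes six consecutive vertices (two triangles), a degenerate DL_INDEX4 quad with idx[2] == idx[3] consumes three, and the running offset v_idx is returned so the caller can chain several display lists into one buffer.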
void DRW_displist_vertbuf_create_pos_and_nor(ListBase *lb, GPUVertBuf *vbo)
{
- static GPUVertFormat format = { 0 };
- static struct { uint pos, nor; } attr_id;
- if (format.attr_len == 0) {
- /* initialize vertex format */
- attr_id.pos = GPU_vertformat_attr_add(&format, "pos", GPU_COMP_F32, 3, GPU_FETCH_FLOAT);
- attr_id.nor = GPU_vertformat_attr_add(&format, "nor", GPU_COMP_I10, 4, GPU_FETCH_INT_TO_FLOAT_UNIT);
- }
-
- GPU_vertbuf_init_with_format(vbo, &format);
- GPU_vertbuf_data_alloc(vbo, curve_render_surface_vert_len_get(lb));
-
- BKE_displist_normals_add(lb);
-
- int vbo_len_used = 0;
- for (const DispList *dl = lb->first; dl; dl = dl->next) {
- const bool ndata_is_single = dl->type == DL_INDEX3;
- if (ELEM(dl->type, DL_INDEX3, DL_INDEX4, DL_SURF)) {
- const float *fp_co = dl->verts;
- const float *fp_no = dl->nors;
- const int vbo_end = vbo_len_used + dl_vert_len(dl);
- while (vbo_len_used < vbo_end) {
- GPU_vertbuf_attr_set(vbo, attr_id.pos, vbo_len_used, fp_co);
- if (fp_no) {
- GPUPackedNormal vnor_pack = GPU_normal_convert_i10_v3(fp_no);
- GPU_vertbuf_attr_set(vbo, attr_id.nor, vbo_len_used, &vnor_pack);
- if (ndata_is_single == false) {
- fp_no += 3;
- }
- }
- fp_co += 3;
- vbo_len_used += 1;
- }
- }
- }
+ static GPUVertFormat format = {0};
+ static struct {
+ uint pos, nor;
+ } attr_id;
+ if (format.attr_len == 0) {
+ /* initialize vertex format */
+ attr_id.pos = GPU_vertformat_attr_add(&format, "pos", GPU_COMP_F32, 3, GPU_FETCH_FLOAT);
+ attr_id.nor = GPU_vertformat_attr_add(
+ &format, "nor", GPU_COMP_I10, 4, GPU_FETCH_INT_TO_FLOAT_UNIT);
+ }
+
+ GPU_vertbuf_init_with_format(vbo, &format);
+ GPU_vertbuf_data_alloc(vbo, curve_render_surface_vert_len_get(lb));
+
+ BKE_displist_normals_add(lb);
+
+ int vbo_len_used = 0;
+ for (const DispList *dl = lb->first; dl; dl = dl->next) {
+ const bool ndata_is_single = dl->type == DL_INDEX3;
+ if (ELEM(dl->type, DL_INDEX3, DL_INDEX4, DL_SURF)) {
+ const float *fp_co = dl->verts;
+ const float *fp_no = dl->nors;
+ const int vbo_end = vbo_len_used + dl_vert_len(dl);
+ while (vbo_len_used < vbo_end) {
+ GPU_vertbuf_attr_set(vbo, attr_id.pos, vbo_len_used, fp_co);
+ if (fp_no) {
+ GPUPackedNormal vnor_pack = GPU_normal_convert_i10_v3(fp_no);
+ GPU_vertbuf_attr_set(vbo, attr_id.nor, vbo_len_used, &vnor_pack);
+ if (ndata_is_single == false) {
+ fp_no += 3;
+ }
+ }
+ fp_co += 3;
+ vbo_len_used += 1;
+ }
+ }
+ }
}
void DRW_displist_vertbuf_create_wiredata(ListBase *lb, GPUVertBuf *vbo)
{
- static GPUVertFormat format = { 0 };
- static struct { uint wd; } attr_id;
- if (format.attr_len == 0) {
- /* initialize vertex format */
- if (!GPU_crappy_amd_driver()) {
-		/* Some AMD drivers strangely crash with a VBO using this format. */
- attr_id.wd = GPU_vertformat_attr_add(&format, "wd", GPU_COMP_U8, 1, GPU_FETCH_INT_TO_FLOAT_UNIT);
- }
- else {
- attr_id.wd = GPU_vertformat_attr_add(&format, "wd", GPU_COMP_F32, 1, GPU_FETCH_FLOAT);
- }
- }
-
- int vbo_len_used = curve_render_surface_vert_len_get(lb);
-
- GPU_vertbuf_init_with_format(vbo, &format);
- GPU_vertbuf_data_alloc(vbo, vbo_len_used);
-
- if (vbo->format.stride == 1) {
- memset(vbo->data, 0xFF, (size_t)vbo_len_used);
- }
- else {
- GPUVertBufRaw wd_step;
- GPU_vertbuf_attr_get_raw_data(vbo, attr_id.wd, &wd_step);
- for (int i = 0; i < vbo_len_used; i++) {
- *((float *)GPU_vertbuf_raw_step(&wd_step)) = 1.0f;
- }
- }
+ static GPUVertFormat format = {0};
+ static struct {
+ uint wd;
+ } attr_id;
+ if (format.attr_len == 0) {
+ /* initialize vertex format */
+ if (!GPU_crappy_amd_driver()) {
+      /* Some AMD drivers strangely crash with a VBO using this format. */
+ attr_id.wd = GPU_vertformat_attr_add(
+ &format, "wd", GPU_COMP_U8, 1, GPU_FETCH_INT_TO_FLOAT_UNIT);
+ }
+ else {
+ attr_id.wd = GPU_vertformat_attr_add(&format, "wd", GPU_COMP_F32, 1, GPU_FETCH_FLOAT);
+ }
+ }
+
+ int vbo_len_used = curve_render_surface_vert_len_get(lb);
+
+ GPU_vertbuf_init_with_format(vbo, &format);
+ GPU_vertbuf_data_alloc(vbo, vbo_len_used);
+
+ if (vbo->format.stride == 1) {
+ memset(vbo->data, 0xFF, (size_t)vbo_len_used);
+ }
+ else {
+ GPUVertBufRaw wd_step;
+ GPU_vertbuf_attr_get_raw_data(vbo, attr_id.wd, &wd_step);
+ for (int i = 0; i < vbo_len_used; i++) {
+ *((float *)GPU_vertbuf_raw_step(&wd_step)) = 1.0f;
+ }
+ }
}
void DRW_displist_indexbuf_create_triangles_in_order(ListBase *lb, GPUIndexBuf *ibo)
{
- const int tri_len = curve_render_surface_tri_len_get(lb);
- const int vert_len = curve_render_surface_vert_len_get(lb);
-
- GPUIndexBufBuilder elb;
- GPU_indexbuf_init(&elb, GPU_PRIM_TRIS, tri_len, vert_len);
-
- int ofs = 0;
- for (const DispList *dl = lb->first; dl; dl = dl->next) {
- displist_indexbufbuilder_set(
- (SetTriIndicesFn *)GPU_indexbuf_add_tri_verts,
- (SetTriIndicesFn *)GPU_indexbuf_add_tri_verts,
- &elb, dl, ofs);
- ofs += dl_vert_len(dl);
- }
-
- GPU_indexbuf_build_in_place(&elb, ibo);
+ const int tri_len = curve_render_surface_tri_len_get(lb);
+ const int vert_len = curve_render_surface_vert_len_get(lb);
+
+ GPUIndexBufBuilder elb;
+ GPU_indexbuf_init(&elb, GPU_PRIM_TRIS, tri_len, vert_len);
+
+ int ofs = 0;
+ for (const DispList *dl = lb->first; dl; dl = dl->next) {
+ displist_indexbufbuilder_set((SetTriIndicesFn *)GPU_indexbuf_add_tri_verts,
+ (SetTriIndicesFn *)GPU_indexbuf_add_tri_verts,
+ &elb,
+ dl,
+ ofs);
+ ofs += dl_vert_len(dl);
+ }
+
+ GPU_indexbuf_build_in_place(&elb, ibo);
}
-void DRW_displist_indexbuf_create_triangles_loop_split_by_material(
- ListBase *lb,
- GPUIndexBuf **ibo_mats, uint mat_len)
+void DRW_displist_indexbuf_create_triangles_loop_split_by_material(ListBase *lb,
+ GPUIndexBuf **ibo_mats,
+ uint mat_len)
{
- GPUIndexBufBuilder *elb = BLI_array_alloca(elb, mat_len);
-
- const int tri_len = curve_render_surface_tri_len_get(lb);
-
- /* Init each index buffer builder */
- for (int i = 0; i < mat_len; i++) {
- GPU_indexbuf_init(&elb[i], GPU_PRIM_TRIS, tri_len * 3, tri_len * 3);
- }
-
- /* calc each index buffer builder */
- uint v_idx = 0;
- for (const DispList *dl = lb->first; dl; dl = dl->next) {
- v_idx = displist_indexbufbuilder_tess_set(
- (SetTriIndicesFn *)GPU_indexbuf_add_tri_verts,
- (SetTriIndicesFn *)GPU_indexbuf_add_tri_verts,
- &elb[dl->col], dl, v_idx);
- }
-
- /* build each indexbuf */
- for (int i = 0; i < mat_len; i++) {
- GPU_indexbuf_build_in_place(&elb[i], ibo_mats[i]);
- }
+ GPUIndexBufBuilder *elb = BLI_array_alloca(elb, mat_len);
+
+ const int tri_len = curve_render_surface_tri_len_get(lb);
+
+ /* Init each index buffer builder */
+ for (int i = 0; i < mat_len; i++) {
+ GPU_indexbuf_init(&elb[i], GPU_PRIM_TRIS, tri_len * 3, tri_len * 3);
+ }
+
+ /* calc each index buffer builder */
+ uint v_idx = 0;
+ for (const DispList *dl = lb->first; dl; dl = dl->next) {
+ v_idx = displist_indexbufbuilder_tess_set((SetTriIndicesFn *)GPU_indexbuf_add_tri_verts,
+ (SetTriIndicesFn *)GPU_indexbuf_add_tri_verts,
+ &elb[dl->col],
+ dl,
+ v_idx);
+ }
+
+ /* build each indexbuf */
+ for (int i = 0; i < mat_len; i++) {
+ GPU_indexbuf_build_in_place(&elb[i], ibo_mats[i]);
+ }
}
static void set_overlay_wires_tri_indices(void *thunk, uint v1, uint v2, uint v3)
{
- GPUIndexBufBuilder *eld = (GPUIndexBufBuilder *)thunk;
- GPU_indexbuf_add_line_verts(eld, v1, v2);
- GPU_indexbuf_add_line_verts(eld, v2, v3);
- GPU_indexbuf_add_line_verts(eld, v3, v1);
+ GPUIndexBufBuilder *eld = (GPUIndexBufBuilder *)thunk;
+ GPU_indexbuf_add_line_verts(eld, v1, v2);
+ GPU_indexbuf_add_line_verts(eld, v2, v3);
+ GPU_indexbuf_add_line_verts(eld, v3, v1);
}
static void set_overlay_wires_quad_tri_indices(void *thunk, uint v1, uint v2, uint v3)
{
- GPUIndexBufBuilder *eld = (GPUIndexBufBuilder *)thunk;
- GPU_indexbuf_add_line_verts(eld, v1, v3);
- GPU_indexbuf_add_line_verts(eld, v3, v2);
+ GPUIndexBufBuilder *eld = (GPUIndexBufBuilder *)thunk;
+ GPU_indexbuf_add_line_verts(eld, v1, v3);
+ GPU_indexbuf_add_line_verts(eld, v3, v2);
}
void DRW_displist_indexbuf_create_lines_in_order(ListBase *lb, GPUIndexBuf *ibo)
{
- const int tri_len = curve_render_surface_tri_len_get(lb);
- const int vert_len = curve_render_surface_vert_len_get(lb);
-
- GPUIndexBufBuilder elb;
- GPU_indexbuf_init(&elb, GPU_PRIM_LINES, tri_len * 3, vert_len);
-
- int ofs = 0;
- for (const DispList *dl = lb->first; dl; dl = dl->next) {
- displist_indexbufbuilder_set(
- set_overlay_wires_tri_indices,
- set_overlay_wires_quad_tri_indices,
- &elb, dl, ofs);
- ofs += dl_vert_len(dl);
- }
-
- GPU_indexbuf_build_in_place(&elb, ibo);
+ const int tri_len = curve_render_surface_tri_len_get(lb);
+ const int vert_len = curve_render_surface_vert_len_get(lb);
+
+ GPUIndexBufBuilder elb;
+ GPU_indexbuf_init(&elb, GPU_PRIM_LINES, tri_len * 3, vert_len);
+
+ int ofs = 0;
+ for (const DispList *dl = lb->first; dl; dl = dl->next) {
+ displist_indexbufbuilder_set(
+ set_overlay_wires_tri_indices, set_overlay_wires_quad_tri_indices, &elb, dl, ofs);
+ ofs += dl_vert_len(dl);
+ }
+
+ GPU_indexbuf_build_in_place(&elb, ibo);
}
static void surf_uv_quad(const DispList *dl, const uint quad[4], float r_uv[4][2])
{
- int orco_sizeu = dl->nr - 1;
- int orco_sizev = dl->parts - 1;
-
- /* exception as handled in convertblender.c too */
- if (dl->flag & DL_CYCL_U) {
- orco_sizeu++;
- }
- if (dl->flag & DL_CYCL_V) {
- orco_sizev++;
- }
-
- for (int i = 0; i < 4; i++) {
- /* find uv based on vertex index into grid array */
- r_uv[i][0] = (quad[i] / dl->nr) / (float)orco_sizev;
- r_uv[i][1] = (quad[i] % dl->nr) / (float)orco_sizeu;
-
- /* cyclic correction */
- if ((i == 1 || i == 2) && r_uv[i][0] == 0.0f) {
- r_uv[i][0] = 1.0f;
- }
- if ((i == 0 || i == 1) && r_uv[i][1] == 0.0f) {
- r_uv[i][1] = 1.0f;
- }
- }
+ int orco_sizeu = dl->nr - 1;
+ int orco_sizev = dl->parts - 1;
+
+ /* exception as handled in convertblender.c too */
+ if (dl->flag & DL_CYCL_U) {
+ orco_sizeu++;
+ }
+ if (dl->flag & DL_CYCL_V) {
+ orco_sizev++;
+ }
+
+ for (int i = 0; i < 4; i++) {
+ /* find uv based on vertex index into grid array */
+ r_uv[i][0] = (quad[i] / dl->nr) / (float)orco_sizev;
+ r_uv[i][1] = (quad[i] % dl->nr) / (float)orco_sizeu;
+
+ /* cyclic correction */
+ if ((i == 1 || i == 2) && r_uv[i][0] == 0.0f) {
+ r_uv[i][0] = 1.0f;
+ }
+ if ((i == 0 || i == 1) && r_uv[i][1] == 0.0f) {
+ r_uv[i][1] = 1.0f;
+ }
+ }
}
-static void displist_vertbuf_attr_set_tri_pos_nor_uv(
- GPUVertBufRaw *pos_step, GPUVertBufRaw *nor_step, GPUVertBufRaw *uv_step,
- const float v1[3], const float v2[3], const float v3[3],
- const GPUPackedNormal *n1, const GPUPackedNormal *n2, const GPUPackedNormal *n3,
- const float uv1[2], const float uv2[2], const float uv3[2])
+static void displist_vertbuf_attr_set_tri_pos_nor_uv(GPUVertBufRaw *pos_step,
+ GPUVertBufRaw *nor_step,
+ GPUVertBufRaw *uv_step,
+ const float v1[3],
+ const float v2[3],
+ const float v3[3],
+ const GPUPackedNormal *n1,
+ const GPUPackedNormal *n2,
+ const GPUPackedNormal *n3,
+ const float uv1[2],
+ const float uv2[2],
+ const float uv3[2])
{
- if (pos_step->size != 0) {
- copy_v3_v3(GPU_vertbuf_raw_step(pos_step), v1);
- copy_v3_v3(GPU_vertbuf_raw_step(pos_step), v2);
- copy_v3_v3(GPU_vertbuf_raw_step(pos_step), v3);
-
- *(GPUPackedNormal *)GPU_vertbuf_raw_step(nor_step) = *n1;
- *(GPUPackedNormal *)GPU_vertbuf_raw_step(nor_step) = *n2;
- *(GPUPackedNormal *)GPU_vertbuf_raw_step(nor_step) = *n3;
- }
-
- if (uv_step->size != 0) {
- normal_float_to_short_v2(GPU_vertbuf_raw_step(uv_step), uv1);
- normal_float_to_short_v2(GPU_vertbuf_raw_step(uv_step), uv2);
- normal_float_to_short_v2(GPU_vertbuf_raw_step(uv_step), uv3);
- }
+ if (pos_step->size != 0) {
+ copy_v3_v3(GPU_vertbuf_raw_step(pos_step), v1);
+ copy_v3_v3(GPU_vertbuf_raw_step(pos_step), v2);
+ copy_v3_v3(GPU_vertbuf_raw_step(pos_step), v3);
+
+ *(GPUPackedNormal *)GPU_vertbuf_raw_step(nor_step) = *n1;
+ *(GPUPackedNormal *)GPU_vertbuf_raw_step(nor_step) = *n2;
+ *(GPUPackedNormal *)GPU_vertbuf_raw_step(nor_step) = *n3;
+ }
+
+ if (uv_step->size != 0) {
+ normal_float_to_short_v2(GPU_vertbuf_raw_step(uv_step), uv1);
+ normal_float_to_short_v2(GPU_vertbuf_raw_step(uv_step), uv2);
+ normal_float_to_short_v2(GPU_vertbuf_raw_step(uv_step), uv3);
+ }
}
-void DRW_displist_vertbuf_create_loop_pos_and_nor_and_uv(
- ListBase *lb,
- GPUVertBuf *vbo_pos_nor, GPUVertBuf *vbo_uv)
+void DRW_displist_vertbuf_create_loop_pos_and_nor_and_uv(ListBase *lb,
+ GPUVertBuf *vbo_pos_nor,
+ GPUVertBuf *vbo_uv)
{
- static GPUVertFormat format_pos_nor = { 0 };
- static GPUVertFormat format_uv = { 0 };
- static struct { uint pos, nor, uv; } attr_id;
- if (format_pos_nor.attr_len == 0) {
- /* initialize vertex format */
- attr_id.pos = GPU_vertformat_attr_add(&format_pos_nor, "pos", GPU_COMP_F32, 3, GPU_FETCH_FLOAT);
- attr_id.nor = GPU_vertformat_attr_add(&format_pos_nor, "nor", GPU_COMP_I10, 3, GPU_FETCH_INT_TO_FLOAT_UNIT);
- GPU_vertformat_triple_load(&format_pos_nor);
- /* UVs are in [0..1] range. We can compress them. */
- attr_id.uv = GPU_vertformat_attr_add(&format_uv, "u", GPU_COMP_I16, 2, GPU_FETCH_INT_TO_FLOAT_UNIT);
- }
-
- int vbo_len_capacity = curve_render_surface_tri_len_get(lb) * 3;
-
- GPUVertBufRaw pos_step = {0};
- GPUVertBufRaw nor_step = {0};
- GPUVertBufRaw uv_step = {0};
-
- if (DRW_TEST_ASSIGN_VBO(vbo_pos_nor)) {
- GPU_vertbuf_init_with_format(vbo_pos_nor, &format_pos_nor);
- GPU_vertbuf_data_alloc(vbo_pos_nor, vbo_len_capacity);
- GPU_vertbuf_attr_get_raw_data(vbo_pos_nor, attr_id.pos, &pos_step);
- GPU_vertbuf_attr_get_raw_data(vbo_pos_nor, attr_id.nor, &nor_step);
- }
- if (DRW_TEST_ASSIGN_VBO(vbo_uv)) {
- GPU_vertbuf_init_with_format(vbo_uv, &format_uv);
- GPU_vertbuf_data_alloc(vbo_uv, vbo_len_capacity);
- GPU_vertbuf_attr_get_raw_data(vbo_uv, attr_id.uv, &uv_step);
- }
-
- BKE_displist_normals_add(lb);
-
- for (const DispList *dl = lb->first; dl; dl = dl->next) {
- const bool is_smooth = (dl->rt & CU_SMOOTH) != 0;
- if (ELEM(dl->type, DL_INDEX3, DL_INDEX4, DL_SURF)) {
- const float(*verts)[3] = (float(*)[3])dl->verts;
- const float(*nors)[3] = (float(*)[3])dl->nors;
- const int *idx = dl->index;
- float uv[4][2];
-
- if (dl->type == DL_INDEX3) {
- /* Currently 'DL_INDEX3' is always a flat surface with a single normal. */
- const GPUPackedNormal pnor = GPU_normal_convert_i10_v3(dl->nors);
- const float x_max = (float)(dl->nr - 1);
- uv[0][1] = uv[1][1] = uv[2][1] = 0.0f;
- const int i_end = dl->parts;
- for (int i = 0; i < i_end; i++, idx += 3) {
- if (vbo_uv) {
- uv[0][0] = idx[0] / x_max;
- uv[1][0] = idx[1] / x_max;
- uv[2][0] = idx[2] / x_max;
- }
-
- displist_vertbuf_attr_set_tri_pos_nor_uv(
- &pos_step, &nor_step, &uv_step,
- verts[idx[0]], verts[idx[2]], verts[idx[1]],
- &pnor, &pnor, &pnor,
- uv[0], uv[2], uv[1]);
- }
- }
- else if (dl->type == DL_SURF) {
- uint quad[4];
- for (int a = 0; a < dl->parts; a++) {
- if ((dl->flag & DL_CYCL_V) == 0 && a == dl->parts - 1) {
- break;
- }
-
- int b;
- if (dl->flag & DL_CYCL_U) {
- quad[0] = dl->nr * a;
- quad[3] = quad[0] + dl->nr - 1;
- quad[1] = quad[0] + dl->nr;
- quad[2] = quad[3] + dl->nr;
- b = 0;
- }
- else {
- quad[3] = dl->nr * a;
- quad[0] = quad[3] + 1;
- quad[2] = quad[3] + dl->nr;
- quad[1] = quad[0] + dl->nr;
- b = 1;
- }
- if ((dl->flag & DL_CYCL_V) && a == dl->parts - 1) {
- quad[1] -= dl->parts * dl->nr;
- quad[2] -= dl->parts * dl->nr;
- }
-
- for (; b < dl->nr; b++) {
- if (vbo_uv) {
- surf_uv_quad(dl, quad, uv);
- }
-
- GPUPackedNormal pnors_quad[4];
- if (is_smooth) {
- for (int j = 0; j < 4; j++) {
- pnors_quad[j] = GPU_normal_convert_i10_v3(nors[quad[j]]);
- }
- }
- else {
- float nor_flat[3];
- normal_quad_v3(nor_flat, verts[quad[0]], verts[quad[1]], verts[quad[2]], verts[quad[3]]);
- pnors_quad[0] = GPU_normal_convert_i10_v3(nor_flat);
- pnors_quad[1] = pnors_quad[0];
- pnors_quad[2] = pnors_quad[0];
- pnors_quad[3] = pnors_quad[0];
- }
-
- displist_vertbuf_attr_set_tri_pos_nor_uv(
- &pos_step, &nor_step, &uv_step,
- verts[quad[2]], verts[quad[0]], verts[quad[1]],
- &pnors_quad[2], &pnors_quad[0], &pnors_quad[1],
- uv[2], uv[0], uv[1]);
-
- displist_vertbuf_attr_set_tri_pos_nor_uv(
- &pos_step, &nor_step, &uv_step,
- verts[quad[0]], verts[quad[2]], verts[quad[3]],
- &pnors_quad[0], &pnors_quad[2], &pnors_quad[3],
- uv[0], uv[2], uv[3]);
-
- quad[2] = quad[1];
- quad[1]++;
- quad[3] = quad[0];
- quad[0]++;
- }
- }
- }
- else {
- BLI_assert(dl->type == DL_INDEX4);
- uv[0][0] = uv[0][1] = uv[1][0] = uv[3][1] = 0.0f;
- uv[1][1] = uv[2][0] = uv[2][1] = uv[3][0] = 1.0f;
-
- const int i_end = dl->parts;
- for (int i = 0; i < i_end; i++, idx += 4) {
- const bool is_tri = idx[2] != idx[3];
-
- GPUPackedNormal pnors_idx[4];
- if (is_smooth) {
- int idx_len = is_tri ? 3 : 4;
- for (int j = 0; j < idx_len; j++) {
- pnors_idx[j] = GPU_normal_convert_i10_v3(nors[idx[j]]);
- }
- }
- else {
- float nor_flat[3];
- if (is_tri) {
- normal_tri_v3(nor_flat, verts[idx[0]], verts[idx[1]], verts[idx[2]]);
- }
- else {
- normal_quad_v3(nor_flat, verts[idx[0]], verts[idx[1]], verts[idx[2]], verts[idx[3]]);
- }
- pnors_idx[0] = GPU_normal_convert_i10_v3(nor_flat);
- pnors_idx[1] = pnors_idx[0];
- pnors_idx[2] = pnors_idx[0];
- pnors_idx[3] = pnors_idx[0];
- }
-
- displist_vertbuf_attr_set_tri_pos_nor_uv(
- &pos_step, &nor_step, &uv_step,
- verts[idx[0]], verts[idx[2]], verts[idx[1]],
- &pnors_idx[0], &pnors_idx[2], &pnors_idx[1],
- uv[0], uv[2], uv[1]);
-
- if (idx[2] != idx[3]) {
- displist_vertbuf_attr_set_tri_pos_nor_uv(
- &pos_step, &nor_step, &uv_step,
- verts[idx[2]], verts[idx[0]], verts[idx[3]],
- &pnors_idx[2], &pnors_idx[0], &pnors_idx[3],
- uv[2], uv[0], uv[3]);
- }
- }
- }
- }
- }
- /* Resize and finish. */
- if (pos_step.size != 0) {
- int vbo_len_used = GPU_vertbuf_raw_used(&pos_step);
- if (vbo_len_used < vbo_len_capacity) {
- GPU_vertbuf_data_resize(vbo_pos_nor, vbo_len_used);
- }
- }
- if (uv_step.size != 0) {
- int vbo_len_used = GPU_vertbuf_raw_used(&uv_step);
- if (vbo_len_used < vbo_len_capacity) {
- GPU_vertbuf_data_resize(vbo_uv, vbo_len_used);
- }
- }
+ static GPUVertFormat format_pos_nor = {0};
+ static GPUVertFormat format_uv = {0};
+ static struct {
+ uint pos, nor, uv;
+ } attr_id;
+ if (format_pos_nor.attr_len == 0) {
+ /* initialize vertex format */
+ attr_id.pos = GPU_vertformat_attr_add(
+ &format_pos_nor, "pos", GPU_COMP_F32, 3, GPU_FETCH_FLOAT);
+ attr_id.nor = GPU_vertformat_attr_add(
+ &format_pos_nor, "nor", GPU_COMP_I10, 3, GPU_FETCH_INT_TO_FLOAT_UNIT);
+ GPU_vertformat_triple_load(&format_pos_nor);
+ /* UVs are in [0..1] range. We can compress them. */
+ attr_id.uv = GPU_vertformat_attr_add(
+ &format_uv, "u", GPU_COMP_I16, 2, GPU_FETCH_INT_TO_FLOAT_UNIT);
+ }
+
+ int vbo_len_capacity = curve_render_surface_tri_len_get(lb) * 3;
+
+ GPUVertBufRaw pos_step = {0};
+ GPUVertBufRaw nor_step = {0};
+ GPUVertBufRaw uv_step = {0};
+
+ if (DRW_TEST_ASSIGN_VBO(vbo_pos_nor)) {
+ GPU_vertbuf_init_with_format(vbo_pos_nor, &format_pos_nor);
+ GPU_vertbuf_data_alloc(vbo_pos_nor, vbo_len_capacity);
+ GPU_vertbuf_attr_get_raw_data(vbo_pos_nor, attr_id.pos, &pos_step);
+ GPU_vertbuf_attr_get_raw_data(vbo_pos_nor, attr_id.nor, &nor_step);
+ }
+ if (DRW_TEST_ASSIGN_VBO(vbo_uv)) {
+ GPU_vertbuf_init_with_format(vbo_uv, &format_uv);
+ GPU_vertbuf_data_alloc(vbo_uv, vbo_len_capacity);
+ GPU_vertbuf_attr_get_raw_data(vbo_uv, attr_id.uv, &uv_step);
+ }
+
+ BKE_displist_normals_add(lb);
+
+ for (const DispList *dl = lb->first; dl; dl = dl->next) {
+ const bool is_smooth = (dl->rt & CU_SMOOTH) != 0;
+ if (ELEM(dl->type, DL_INDEX3, DL_INDEX4, DL_SURF)) {
+ const float(*verts)[3] = (float(*)[3])dl->verts;
+ const float(*nors)[3] = (float(*)[3])dl->nors;
+ const int *idx = dl->index;
+ float uv[4][2];
+
+ if (dl->type == DL_INDEX3) {
+ /* Currently 'DL_INDEX3' is always a flat surface with a single normal. */
+ const GPUPackedNormal pnor = GPU_normal_convert_i10_v3(dl->nors);
+ const float x_max = (float)(dl->nr - 1);
+ uv[0][1] = uv[1][1] = uv[2][1] = 0.0f;
+ const int i_end = dl->parts;
+ for (int i = 0; i < i_end; i++, idx += 3) {
+ if (vbo_uv) {
+ uv[0][0] = idx[0] / x_max;
+ uv[1][0] = idx[1] / x_max;
+ uv[2][0] = idx[2] / x_max;
+ }
+
+ displist_vertbuf_attr_set_tri_pos_nor_uv(&pos_step,
+ &nor_step,
+ &uv_step,
+ verts[idx[0]],
+ verts[idx[2]],
+ verts[idx[1]],
+ &pnor,
+ &pnor,
+ &pnor,
+ uv[0],
+ uv[2],
+ uv[1]);
+ }
+ }
+ else if (dl->type == DL_SURF) {
+ uint quad[4];
+ for (int a = 0; a < dl->parts; a++) {
+ if ((dl->flag & DL_CYCL_V) == 0 && a == dl->parts - 1) {
+ break;
+ }
+
+ int b;
+ if (dl->flag & DL_CYCL_U) {
+ quad[0] = dl->nr * a;
+ quad[3] = quad[0] + dl->nr - 1;
+ quad[1] = quad[0] + dl->nr;
+ quad[2] = quad[3] + dl->nr;
+ b = 0;
+ }
+ else {
+ quad[3] = dl->nr * a;
+ quad[0] = quad[3] + 1;
+ quad[2] = quad[3] + dl->nr;
+ quad[1] = quad[0] + dl->nr;
+ b = 1;
+ }
+ if ((dl->flag & DL_CYCL_V) && a == dl->parts - 1) {
+ quad[1] -= dl->parts * dl->nr;
+ quad[2] -= dl->parts * dl->nr;
+ }
+
+ for (; b < dl->nr; b++) {
+ if (vbo_uv) {
+ surf_uv_quad(dl, quad, uv);
+ }
+
+ GPUPackedNormal pnors_quad[4];
+ if (is_smooth) {
+ for (int j = 0; j < 4; j++) {
+ pnors_quad[j] = GPU_normal_convert_i10_v3(nors[quad[j]]);
+ }
+ }
+ else {
+ float nor_flat[3];
+ normal_quad_v3(
+ nor_flat, verts[quad[0]], verts[quad[1]], verts[quad[2]], verts[quad[3]]);
+ pnors_quad[0] = GPU_normal_convert_i10_v3(nor_flat);
+ pnors_quad[1] = pnors_quad[0];
+ pnors_quad[2] = pnors_quad[0];
+ pnors_quad[3] = pnors_quad[0];
+ }
+
+ displist_vertbuf_attr_set_tri_pos_nor_uv(&pos_step,
+ &nor_step,
+ &uv_step,
+ verts[quad[2]],
+ verts[quad[0]],
+ verts[quad[1]],
+ &pnors_quad[2],
+ &pnors_quad[0],
+ &pnors_quad[1],
+ uv[2],
+ uv[0],
+ uv[1]);
+
+ displist_vertbuf_attr_set_tri_pos_nor_uv(&pos_step,
+ &nor_step,
+ &uv_step,
+ verts[quad[0]],
+ verts[quad[2]],
+ verts[quad[3]],
+ &pnors_quad[0],
+ &pnors_quad[2],
+ &pnors_quad[3],
+ uv[0],
+ uv[2],
+ uv[3]);
+
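+          /* Advance the quad indices one step along the row for the next face. */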
+ quad[2] = quad[1];
+ quad[1]++;
+ quad[3] = quad[0];
+ quad[0]++;
+ }
+ }
+ }
+ else {
+ BLI_assert(dl->type == DL_INDEX4);
+ uv[0][0] = uv[0][1] = uv[1][0] = uv[3][1] = 0.0f;
+ uv[1][1] = uv[2][0] = uv[2][1] = uv[3][0] = 1.0f;
+
+ const int i_end = dl->parts;
+ for (int i = 0; i < i_end; i++, idx += 4) {
+ const bool is_tri = idx[2] != idx[3];
+
+ GPUPackedNormal pnors_idx[4];
+ if (is_smooth) {
+ int idx_len = is_tri ? 3 : 4;
+ for (int j = 0; j < idx_len; j++) {
+ pnors_idx[j] = GPU_normal_convert_i10_v3(nors[idx[j]]);
+ }
+ }
+ else {
+ float nor_flat[3];
+ if (is_tri) {
+ normal_tri_v3(nor_flat, verts[idx[0]], verts[idx[1]], verts[idx[2]]);
+ }
+ else {
+ normal_quad_v3(nor_flat, verts[idx[0]], verts[idx[1]], verts[idx[2]], verts[idx[3]]);
+ }
+ pnors_idx[0] = GPU_normal_convert_i10_v3(nor_flat);
+ pnors_idx[1] = pnors_idx[0];
+ pnors_idx[2] = pnors_idx[0];
+ pnors_idx[3] = pnors_idx[0];
+ }
+
+ displist_vertbuf_attr_set_tri_pos_nor_uv(&pos_step,
+ &nor_step,
+ &uv_step,
+ verts[idx[0]],
+ verts[idx[2]],
+ verts[idx[1]],
+ &pnors_idx[0],
+ &pnors_idx[2],
+ &pnors_idx[1],
+ uv[0],
+ uv[2],
+ uv[1]);
+
+ if (idx[2] != idx[3]) {
+ displist_vertbuf_attr_set_tri_pos_nor_uv(&pos_step,
+ &nor_step,
+ &uv_step,
+ verts[idx[2]],
+ verts[idx[0]],
+ verts[idx[3]],
+ &pnors_idx[2],
+ &pnors_idx[0],
+ &pnors_idx[3],
+ uv[2],
+ uv[0],
+ uv[3]);
+ }
+ }
+ }
+ }
+ }
+ /* Resize and finish. */
+ if (pos_step.size != 0) {
+ int vbo_len_used = GPU_vertbuf_raw_used(&pos_step);
+ if (vbo_len_used < vbo_len_capacity) {
+ GPU_vertbuf_data_resize(vbo_pos_nor, vbo_len_used);
+ }
+ }
+ if (uv_step.size != 0) {
+ int vbo_len_used = GPU_vertbuf_raw_used(&uv_step);
+ if (vbo_len_used < vbo_len_capacity) {
+ GPU_vertbuf_data_resize(vbo_uv, vbo_len_used);
+ }
+ }
}
/* Edge detection/adjacency */
#define NO_EDGE INT_MAX
-static void set_edge_adjacency_lines_indices(EdgeHash *eh, GPUIndexBufBuilder *elb, bool *r_is_manifold, uint v1, uint v2, uint v3)
+static void set_edge_adjacency_lines_indices(
+ EdgeHash *eh, GPUIndexBufBuilder *elb, bool *r_is_manifold, uint v1, uint v2, uint v3)
{
- bool inv_indices = (v2 > v3);
- void **pval;
- bool value_is_init = BLI_edgehash_ensure_p(eh, v2, v3, &pval);
- int v_data = POINTER_AS_INT(*pval);
- if (!value_is_init || v_data == NO_EDGE) {
-	/* Save the winding order inside the sign bit, because the
-	 * edgehash sorts the keys and we need to compare winding later. */
-	int value = (int)v1 + 1; /* Int 0 cannot be signed. */
- *pval = POINTER_FROM_INT((inv_indices) ? -value : value);
- }
- else {
- /* HACK Tag as not used. Prevent overhead of BLI_edgehash_remove. */
- *pval = POINTER_FROM_INT(NO_EDGE);
- bool inv_opposite = (v_data < 0);
- uint v_opposite = (uint)abs(v_data) - 1;
-
- if (inv_opposite == inv_indices) {
-			/* Don't share the edge if the triangles have non-matching winding. */
- GPU_indexbuf_add_line_adj_verts(elb, v1, v2, v3, v1);
- GPU_indexbuf_add_line_adj_verts(elb, v_opposite, v2, v3, v_opposite);
- *r_is_manifold = false;
- }
- else {
- GPU_indexbuf_add_line_adj_verts(elb, v1, v2, v3, v_opposite);
- }
- }
+ bool inv_indices = (v2 > v3);
+ void **pval;
+ bool value_is_init = BLI_edgehash_ensure_p(eh, v2, v3, &pval);
+ int v_data = POINTER_AS_INT(*pval);
+ if (!value_is_init || v_data == NO_EDGE) {
+    /* Save the winding order inside the sign bit, because the
+     * edgehash sorts the keys and we need to compare winding later. */
+    int value = (int)v1 + 1; /* Int 0 cannot be signed. */
+ *pval = POINTER_FROM_INT((inv_indices) ? -value : value);
+ }
+ else {
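+    /* A previous triangle already stored this edge: pair it with the current one. */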
+ /* HACK Tag as not used. Prevent overhead of BLI_edgehash_remove. */
+ *pval = POINTER_FROM_INT(NO_EDGE);
+ bool inv_opposite = (v_data < 0);
+ uint v_opposite = (uint)abs(v_data) - 1;
+
+ if (inv_opposite == inv_indices) {
+      /* Don't share the edge if the triangles have non-matching winding. */
+ GPU_indexbuf_add_line_adj_verts(elb, v1, v2, v3, v1);
+ GPU_indexbuf_add_line_adj_verts(elb, v_opposite, v2, v3, v_opposite);
+ *r_is_manifold = false;
+ }
+ else {
+ GPU_indexbuf_add_line_adj_verts(elb, v1, v2, v3, v_opposite);
+ }
+ }
}
static void set_edges_adjacency_lines_indices(void *thunk, uint v1, uint v2, uint v3)
{
- void **packed = (void **)thunk;
- GPUIndexBufBuilder *elb = (GPUIndexBufBuilder *)packed[0];
- EdgeHash *eh = (EdgeHash *)packed[1];
- bool *r_is_manifold = (bool *)packed[2];
-
- set_edge_adjacency_lines_indices(eh, elb, r_is_manifold, v1, v2, v3);
- set_edge_adjacency_lines_indices(eh, elb, r_is_manifold, v2, v3, v1);
- set_edge_adjacency_lines_indices(eh, elb, r_is_manifold, v3, v1, v2);
+ void **packed = (void **)thunk;
+ GPUIndexBufBuilder *elb = (GPUIndexBufBuilder *)packed[0];
+ EdgeHash *eh = (EdgeHash *)packed[1];
+ bool *r_is_manifold = (bool *)packed[2];
+
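+  /* Process the three edges of the triangle, passing the opposite vertex first. */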
+ set_edge_adjacency_lines_indices(eh, elb, r_is_manifold, v1, v2, v3);
+ set_edge_adjacency_lines_indices(eh, elb, r_is_manifold, v2, v3, v1);
+ set_edge_adjacency_lines_indices(eh, elb, r_is_manifold, v3, v1, v2);
}
-void DRW_displist_indexbuf_create_edges_adjacency_lines(struct ListBase *lb, struct GPUIndexBuf *ibo, bool *r_is_manifold)
+void DRW_displist_indexbuf_create_edges_adjacency_lines(struct ListBase *lb,
+ struct GPUIndexBuf *ibo,
+ bool *r_is_manifold)
{
- const int tri_len = curve_render_surface_tri_len_get(lb);
- const int vert_len = curve_render_surface_vert_len_get(lb);
-
- *r_is_manifold = true;
-
-	/* Allocate the maximum, but only the used indices are sent to the GPU. */
- GPUIndexBufBuilder elb;
- GPU_indexbuf_init(&elb, GPU_PRIM_LINES_ADJ, tri_len * 3, vert_len);
-
- EdgeHash *eh = BLI_edgehash_new_ex(__func__, tri_len * 3);
-
-	/* Pack values to pass to the `set_edges_adjacency_lines_indices` function. */
- void *thunk[3] = {&elb, eh, r_is_manifold};
- int v_idx = 0;
- for (const DispList *dl = lb->first; dl; dl = dl->next) {
- displist_indexbufbuilder_set(
- (SetTriIndicesFn *)set_edges_adjacency_lines_indices,
- (SetTriIndicesFn *)set_edges_adjacency_lines_indices,
- thunk, dl, v_idx);
- v_idx += dl_vert_len(dl);
- }
-
-	/* Create edges for the remaining non-manifold edges. */
- EdgeHashIterator *ehi;
- for (ehi = BLI_edgehashIterator_new(eh);
- BLI_edgehashIterator_isDone(ehi) == false;
- BLI_edgehashIterator_step(ehi))
- {
- uint v1, v2;
- int v_data = POINTER_AS_INT(BLI_edgehashIterator_getValue(ehi));
- if (v_data == NO_EDGE) {
- continue;
- }
- BLI_edgehashIterator_getKey(ehi, &v1, &v2);
- uint v0 = (uint)abs(v_data) - 1;
- if (v_data < 0) { /* inv_opposite */
- SWAP(uint, v1, v2);
- }
- GPU_indexbuf_add_line_adj_verts(&elb, v0, v1, v2, v0);
- *r_is_manifold = false;
- }
- BLI_edgehashIterator_free(ehi);
- BLI_edgehash_free(eh, NULL);
-
- GPU_indexbuf_build_in_place(&elb, ibo);
+ const int tri_len = curve_render_surface_tri_len_get(lb);
+ const int vert_len = curve_render_surface_vert_len_get(lb);
+
+ *r_is_manifold = true;
+
+  /* Allocate the maximum, but only the used indices are sent to the GPU. */
+ GPUIndexBufBuilder elb;
+ GPU_indexbuf_init(&elb, GPU_PRIM_LINES_ADJ, tri_len * 3, vert_len);
+
+ EdgeHash *eh = BLI_edgehash_new_ex(__func__, tri_len * 3);
+
+  /* Pack values to pass to the `set_edges_adjacency_lines_indices` function. */
+ void *thunk[3] = {&elb, eh, r_is_manifold};
+ int v_idx = 0;
+ for (const DispList *dl = lb->first; dl; dl = dl->next) {
+ displist_indexbufbuilder_set((SetTriIndicesFn *)set_edges_adjacency_lines_indices,
+ (SetTriIndicesFn *)set_edges_adjacency_lines_indices,
+ thunk,
+ dl,
+ v_idx);
+ v_idx += dl_vert_len(dl);
+ }
+
+  /* Create edges for the remaining non-manifold edges. */
+ EdgeHashIterator *ehi;
+ for (ehi = BLI_edgehashIterator_new(eh); BLI_edgehashIterator_isDone(ehi) == false;
+ BLI_edgehashIterator_step(ehi)) {
+ uint v1, v2;
+ int v_data = POINTER_AS_INT(BLI_edgehashIterator_getValue(ehi));
+ if (v_data == NO_EDGE) {
+ continue;
+ }
+ BLI_edgehashIterator_getKey(ehi, &v1, &v2);
+ uint v0 = (uint)abs(v_data) - 1;
+ if (v_data < 0) { /* inv_opposite */
+ SWAP(uint, v1, v2);
+ }
+ GPU_indexbuf_add_line_adj_verts(&elb, v0, v1, v2, v0);
+ *r_is_manifold = false;
+ }
+ BLI_edgehashIterator_free(ehi);
+ BLI_edgehash_free(eh, NULL);
+
+ GPU_indexbuf_build_in_place(&elb, ibo);
}
#undef NO_EDGE
diff --git a/source/blender/draw/intern/draw_cache_impl_lattice.c b/source/blender/draw/intern/draw_cache_impl_lattice.c
index 12763467f8c..b5223bc047c 100644
--- a/source/blender/draw/intern/draw_cache_impl_lattice.c
+++ b/source/blender/draw/intern/draw_cache_impl_lattice.c
@@ -39,9 +39,9 @@
#include "GPU_batch.h"
-#include "draw_cache_impl.h" /* own include */
+#include "draw_cache_impl.h" /* own include */
-#define SELECT 1
+#define SELECT 1
/**
* TODO
@@ -56,519 +56,527 @@ static void lattice_batch_cache_clear(Lattice *lt);
static int vert_len_calc(int u, int v, int w)
{
- if (u <= 0 || v <= 0 || w <= 0) {
- return 0;
- }
- return u * v * w;
+ if (u <= 0 || v <= 0 || w <= 0) {
+ return 0;
+ }
+ return u * v * w;
}
static int edge_len_calc(int u, int v, int w)
{
- if (u <= 0 || v <= 0 || w <= 0) {
- return 0;
- }
- return (((((u - 1) * v) +
- ((v - 1) * u)) * w) +
- ((w - 1) * (u * v)));
+ if (u <= 0 || v <= 0 || w <= 0) {
+ return 0;
+ }
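+  /* U and V direction edges in each of the w slices, plus the (w - 1) * u * v edges linking slices. */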
+ return (((((u - 1) * v) + ((v - 1) * u)) * w) + ((w - 1) * (u * v)));
}
static int lattice_render_verts_len_get(Lattice *lt)
{
- if (lt->editlatt) {
- lt = lt->editlatt->latt;
- }
-
- const int u = lt->pntsu;
- const int v = lt->pntsv;
- const int w = lt->pntsw;
-
- if ((lt->flag & LT_OUTSIDE) == 0) {
- return vert_len_calc(u, v, w);
- }
- else {
- /* TODO remove internal coords */
- return vert_len_calc(u, v, w);
- }
+ if (lt->editlatt) {
+ lt = lt->editlatt->latt;
+ }
+
+ const int u = lt->pntsu;
+ const int v = lt->pntsv;
+ const int w = lt->pntsw;
+
+ if ((lt->flag & LT_OUTSIDE) == 0) {
+ return vert_len_calc(u, v, w);
+ }
+ else {
+ /* TODO remove internal coords */
+ return vert_len_calc(u, v, w);
+ }
}
static int lattice_render_edges_len_get(Lattice *lt)
{
- if (lt->editlatt) {
- lt = lt->editlatt->latt;
- }
-
- const int u = lt->pntsu;
- const int v = lt->pntsv;
- const int w = lt->pntsw;
-
- if ((lt->flag & LT_OUTSIDE) == 0) {
- return edge_len_calc(u, v, w);
- }
- else {
- /* TODO remove internal coords */
- return edge_len_calc(u, v, w);
- }
+ if (lt->editlatt) {
+ lt = lt->editlatt->latt;
+ }
+
+ const int u = lt->pntsu;
+ const int v = lt->pntsv;
+ const int w = lt->pntsw;
+
+ if ((lt->flag & LT_OUTSIDE) == 0) {
+ return edge_len_calc(u, v, w);
+ }
+ else {
+ /* TODO remove internal coords */
+ return edge_len_calc(u, v, w);
+ }
}
/* ---------------------------------------------------------------------- */
/* Lattice Interface, indirect, partially cached access to complex data. */
typedef struct LatticeRenderData {
- int types;
+ int types;
- int vert_len;
- int edge_len;
+ int vert_len;
+ int edge_len;
- struct {
- int u_len, v_len, w_len;
- } dims;
- bool show_only_outside;
+ struct {
+ int u_len, v_len, w_len;
+ } dims;
+ bool show_only_outside;
- struct EditLatt *edit_latt;
- BPoint *bp;
+ struct EditLatt *edit_latt;
+ BPoint *bp;
- int actbp;
+ int actbp;
- struct MDeformVert *dvert;
+ struct MDeformVert *dvert;
} LatticeRenderData;
enum {
- LR_DATATYPE_VERT = 1 << 0,
- LR_DATATYPE_EDGE = 1 << 1,
- LR_DATATYPE_OVERLAY = 1 << 2,
+ LR_DATATYPE_VERT = 1 << 0,
+ LR_DATATYPE_EDGE = 1 << 1,
+ LR_DATATYPE_OVERLAY = 1 << 2,
};
static LatticeRenderData *lattice_render_data_create(Lattice *lt, const int types)
{
- LatticeRenderData *rdata = MEM_callocN(sizeof(*rdata), __func__);
- rdata->types = types;
-
- if (lt->editlatt) {
- EditLatt *editlatt = lt->editlatt;
- lt = editlatt->latt;
-
- rdata->edit_latt = editlatt;
-
- rdata->dvert = lt->dvert;
-
- if (types & (LR_DATATYPE_VERT)) {
- rdata->vert_len = lattice_render_verts_len_get(lt);
- }
- if (types & (LR_DATATYPE_EDGE)) {
- rdata->edge_len = lattice_render_edges_len_get(lt);
- }
- if (types & LR_DATATYPE_OVERLAY) {
- rdata->actbp = lt->actbp;
- }
- }
- else {
- rdata->dvert = NULL;
-
- if (types & (LR_DATATYPE_VERT)) {
- rdata->vert_len = lattice_render_verts_len_get(lt);
- }
- if (types & (LR_DATATYPE_EDGE)) {
- rdata->edge_len = lattice_render_edges_len_get(lt);
-			/* no edge data */
- }
- }
-
- rdata->bp = lt->def;
-
- rdata->dims.u_len = lt->pntsu;
- rdata->dims.v_len = lt->pntsv;
- rdata->dims.w_len = lt->pntsw;
-
- rdata->show_only_outside = (lt->flag & LT_OUTSIDE) != 0;
- rdata->actbp = lt->actbp;
-
- return rdata;
+ LatticeRenderData *rdata = MEM_callocN(sizeof(*rdata), __func__);
+ rdata->types = types;
+
+ if (lt->editlatt) {
+ EditLatt *editlatt = lt->editlatt;
+ lt = editlatt->latt;
+
+ rdata->edit_latt = editlatt;
+
+ rdata->dvert = lt->dvert;
+
+ if (types & (LR_DATATYPE_VERT)) {
+ rdata->vert_len = lattice_render_verts_len_get(lt);
+ }
+ if (types & (LR_DATATYPE_EDGE)) {
+ rdata->edge_len = lattice_render_edges_len_get(lt);
+ }
+ if (types & LR_DATATYPE_OVERLAY) {
+ rdata->actbp = lt->actbp;
+ }
+ }
+ else {
+ rdata->dvert = NULL;
+
+ if (types & (LR_DATATYPE_VERT)) {
+ rdata->vert_len = lattice_render_verts_len_get(lt);
+ }
+ if (types & (LR_DATATYPE_EDGE)) {
+ rdata->edge_len = lattice_render_edges_len_get(lt);
+      /* no edge data */
+ }
+ }
+
+ rdata->bp = lt->def;
+
+ rdata->dims.u_len = lt->pntsu;
+ rdata->dims.v_len = lt->pntsv;
+ rdata->dims.w_len = lt->pntsw;
+
+ rdata->show_only_outside = (lt->flag & LT_OUTSIDE) != 0;
+ rdata->actbp = lt->actbp;
+
+ return rdata;
}
static void lattice_render_data_free(LatticeRenderData *rdata)
{
#if 0
- if (rdata->loose_verts) {
- MEM_freeN(rdata->loose_verts);
- }
+ if (rdata->loose_verts) {
+ MEM_freeN(rdata->loose_verts);
+ }
#endif
- MEM_freeN(rdata);
+ MEM_freeN(rdata);
}
static int lattice_render_data_verts_len_get(const LatticeRenderData *rdata)
{
- BLI_assert(rdata->types & LR_DATATYPE_VERT);
- return rdata->vert_len;
+ BLI_assert(rdata->types & LR_DATATYPE_VERT);
+ return rdata->vert_len;
}
static int lattice_render_data_edges_len_get(const LatticeRenderData *rdata)
{
- BLI_assert(rdata->types & LR_DATATYPE_EDGE);
- return rdata->edge_len;
+ BLI_assert(rdata->types & LR_DATATYPE_EDGE);
+ return rdata->edge_len;
}
-static const BPoint *lattice_render_data_vert_bpoint(const LatticeRenderData *rdata, const int vert_idx)
+static const BPoint *lattice_render_data_vert_bpoint(const LatticeRenderData *rdata,
+ const int vert_idx)
{
- BLI_assert(rdata->types & LR_DATATYPE_VERT);
- return &rdata->bp[vert_idx];
+ BLI_assert(rdata->types & LR_DATATYPE_VERT);
+ return &rdata->bp[vert_idx];
}
/* TODO, move into shader? */
static void rgb_from_weight(float r_rgb[3], const float weight)
{
- const float blend = ((weight / 2.0f) + 0.5f);
-
- if (weight <= 0.25f) { /* blue->cyan */
- r_rgb[0] = 0.0f;
- r_rgb[1] = blend * weight * 4.0f;
- r_rgb[2] = blend;
- }
- else if (weight <= 0.50f) { /* cyan->green */
- r_rgb[0] = 0.0f;
- r_rgb[1] = blend;
- r_rgb[2] = blend * (1.0f - ((weight - 0.25f) * 4.0f));
- }
- else if (weight <= 0.75f) { /* green->yellow */
- r_rgb[0] = blend * ((weight - 0.50f) * 4.0f);
- r_rgb[1] = blend;
- r_rgb[2] = 0.0f;
- }
- else if (weight <= 1.0f) { /* yellow->red */
- r_rgb[0] = blend;
- r_rgb[1] = blend * (1.0f - ((weight - 0.75f) * 4.0f));
- r_rgb[2] = 0.0f;
- }
- else {
- /* exceptional value, unclamped or nan,
- * avoid uninitialized memory use */
- r_rgb[0] = 1.0f;
- r_rgb[1] = 0.0f;
- r_rgb[2] = 1.0f;
- }
+ const float blend = ((weight / 2.0f) + 0.5f);
+
+ if (weight <= 0.25f) { /* blue->cyan */
+ r_rgb[0] = 0.0f;
+ r_rgb[1] = blend * weight * 4.0f;
+ r_rgb[2] = blend;
+ }
+ else if (weight <= 0.50f) { /* cyan->green */
+ r_rgb[0] = 0.0f;
+ r_rgb[1] = blend;
+ r_rgb[2] = blend * (1.0f - ((weight - 0.25f) * 4.0f));
+ }
+ else if (weight <= 0.75f) { /* green->yellow */
+ r_rgb[0] = blend * ((weight - 0.50f) * 4.0f);
+ r_rgb[1] = blend;
+ r_rgb[2] = 0.0f;
+ }
+ else if (weight <= 1.0f) { /* yellow->red */
+ r_rgb[0] = blend;
+ r_rgb[1] = blend * (1.0f - ((weight - 0.75f) * 4.0f));
+ r_rgb[2] = 0.0f;
+ }
+ else {
+ /* exceptional value, unclamped or nan,
+ * avoid uninitialized memory use */
+ r_rgb[0] = 1.0f;
+ r_rgb[1] = 0.0f;
+ r_rgb[2] = 1.0f;
+ }
}
-static void lattice_render_data_weight_col_get(const LatticeRenderData *rdata, const int vert_idx,
- const int actdef, float r_col[4])
+static void lattice_render_data_weight_col_get(const LatticeRenderData *rdata,
+ const int vert_idx,
+ const int actdef,
+ float r_col[4])
{
- if (actdef > -1) {
- float weight = defvert_find_weight(rdata->dvert + vert_idx, actdef);
-
- if (U.flag & USER_CUSTOM_RANGE) {
- BKE_colorband_evaluate(&U.coba_weight, weight, r_col);
- }
- else {
- rgb_from_weight(r_col, weight);
- }
-
- r_col[3] = 1.0f;
- }
- else {
- zero_v4(r_col);
- }
+ if (actdef > -1) {
+ float weight = defvert_find_weight(rdata->dvert + vert_idx, actdef);
+
+ if (U.flag & USER_CUSTOM_RANGE) {
+ BKE_colorband_evaluate(&U.coba_weight, weight, r_col);
+ }
+ else {
+ rgb_from_weight(r_col, weight);
+ }
+
+ r_col[3] = 1.0f;
+ }
+ else {
+ zero_v4(r_col);
+ }
}
/* ---------------------------------------------------------------------- */
/* Lattice GPUBatch Cache */
typedef struct LatticeBatchCache {
- GPUVertBuf *pos;
- GPUIndexBuf *edges;
+ GPUVertBuf *pos;
+ GPUIndexBuf *edges;
- GPUBatch *all_verts;
- GPUBatch *all_edges;
+ GPUBatch *all_verts;
+ GPUBatch *all_edges;
- GPUBatch *overlay_verts;
+ GPUBatch *overlay_verts;
- /* settings to determine if cache is invalid */
- bool is_dirty;
+ /* settings to determine if cache is invalid */
+ bool is_dirty;
- struct {
- int u_len, v_len, w_len;
- } dims;
- bool show_only_outside;
+ struct {
+ int u_len, v_len, w_len;
+ } dims;
+ bool show_only_outside;
- bool is_editmode;
+ bool is_editmode;
} LatticeBatchCache;
/* GPUBatch cache management. */
static bool lattice_batch_cache_valid(Lattice *lt)
{
- LatticeBatchCache *cache = lt->batch_cache;
-
- if (cache == NULL) {
- return false;
- }
-
- if (cache->is_editmode != (lt->editlatt != NULL)) {
- return false;
- }
-
- if (cache->is_dirty) {
- return false;
- }
- else {
- if ((cache->dims.u_len != lt->pntsu) ||
- (cache->dims.v_len != lt->pntsv) ||
- (cache->dims.w_len != lt->pntsw) ||
- ((cache->show_only_outside != ((lt->flag & LT_OUTSIDE) != 0))))
- {
- return false;
- }
- }
-
- return true;
+ LatticeBatchCache *cache = lt->batch_cache;
+
+ if (cache == NULL) {
+ return false;
+ }
+
+ if (cache->is_editmode != (lt->editlatt != NULL)) {
+ return false;
+ }
+
+ if (cache->is_dirty) {
+ return false;
+ }
+ else {
+ if ((cache->dims.u_len != lt->pntsu) || (cache->dims.v_len != lt->pntsv) ||
+ (cache->dims.w_len != lt->pntsw) ||
+ ((cache->show_only_outside != ((lt->flag & LT_OUTSIDE) != 0)))) {
+ return false;
+ }
+ }
+
+ return true;
}
static void lattice_batch_cache_init(Lattice *lt)
{
- LatticeBatchCache *cache = lt->batch_cache;
+ LatticeBatchCache *cache = lt->batch_cache;
- if (!cache) {
- cache = lt->batch_cache = MEM_callocN(sizeof(*cache), __func__);
- }
- else {
- memset(cache, 0, sizeof(*cache));
- }
+ if (!cache) {
+ cache = lt->batch_cache = MEM_callocN(sizeof(*cache), __func__);
+ }
+ else {
+ memset(cache, 0, sizeof(*cache));
+ }
- cache->dims.u_len = lt->pntsu;
- cache->dims.v_len = lt->pntsv;
- cache->dims.w_len = lt->pntsw;
- cache->show_only_outside = (lt->flag & LT_OUTSIDE) != 0;
+ cache->dims.u_len = lt->pntsu;
+ cache->dims.v_len = lt->pntsv;
+ cache->dims.w_len = lt->pntsw;
+ cache->show_only_outside = (lt->flag & LT_OUTSIDE) != 0;
- cache->is_editmode = lt->editlatt != NULL;
+ cache->is_editmode = lt->editlatt != NULL;
- cache->is_dirty = false;
+ cache->is_dirty = false;
}
static LatticeBatchCache *lattice_batch_cache_get(Lattice *lt)
{
- if (!lattice_batch_cache_valid(lt)) {
- lattice_batch_cache_clear(lt);
- lattice_batch_cache_init(lt);
- }
- return lt->batch_cache;
+ if (!lattice_batch_cache_valid(lt)) {
+ lattice_batch_cache_clear(lt);
+ lattice_batch_cache_init(lt);
+ }
+ return lt->batch_cache;
}
void DRW_lattice_batch_cache_dirty_tag(Lattice *lt, int mode)
{
- LatticeBatchCache *cache = lt->batch_cache;
- if (cache == NULL) {
- return;
- }
- switch (mode) {
- case BKE_LATTICE_BATCH_DIRTY_ALL:
- cache->is_dirty = true;
- break;
- case BKE_LATTICE_BATCH_DIRTY_SELECT:
- /* TODO Separate Flag vbo */
- GPU_BATCH_DISCARD_SAFE(cache->overlay_verts);
- break;
- default:
- BLI_assert(0);
- }
+ LatticeBatchCache *cache = lt->batch_cache;
+ if (cache == NULL) {
+ return;
+ }
+ switch (mode) {
+ case BKE_LATTICE_BATCH_DIRTY_ALL:
+ cache->is_dirty = true;
+ break;
+ case BKE_LATTICE_BATCH_DIRTY_SELECT:
+ /* TODO Separate Flag vbo */
+ GPU_BATCH_DISCARD_SAFE(cache->overlay_verts);
+ break;
+ default:
+ BLI_assert(0);
+ }
}
static void lattice_batch_cache_clear(Lattice *lt)
{
- LatticeBatchCache *cache = lt->batch_cache;
- if (!cache) {
- return;
- }
+ LatticeBatchCache *cache = lt->batch_cache;
+ if (!cache) {
+ return;
+ }
- GPU_BATCH_DISCARD_SAFE(cache->all_verts);
- GPU_BATCH_DISCARD_SAFE(cache->all_edges);
- GPU_BATCH_DISCARD_SAFE(cache->overlay_verts);
+ GPU_BATCH_DISCARD_SAFE(cache->all_verts);
+ GPU_BATCH_DISCARD_SAFE(cache->all_edges);
+ GPU_BATCH_DISCARD_SAFE(cache->overlay_verts);
- GPU_VERTBUF_DISCARD_SAFE(cache->pos);
- GPU_INDEXBUF_DISCARD_SAFE(cache->edges);
+ GPU_VERTBUF_DISCARD_SAFE(cache->pos);
+ GPU_INDEXBUF_DISCARD_SAFE(cache->edges);
}
void DRW_lattice_batch_cache_free(Lattice *lt)
{
- lattice_batch_cache_clear(lt);
- MEM_SAFE_FREE(lt->batch_cache);
+ lattice_batch_cache_clear(lt);
+ MEM_SAFE_FREE(lt->batch_cache);
}
/* GPUBatch cache usage. */
-static GPUVertBuf *lattice_batch_cache_get_pos(LatticeRenderData *rdata, LatticeBatchCache *cache,
- bool use_weight, const int actdef)
+static GPUVertBuf *lattice_batch_cache_get_pos(LatticeRenderData *rdata,
+ LatticeBatchCache *cache,
+ bool use_weight,
+ const int actdef)
{
- BLI_assert(rdata->types & LR_DATATYPE_VERT);
+ BLI_assert(rdata->types & LR_DATATYPE_VERT);
- if (cache->pos == NULL) {
- static GPUVertFormat format = { 0 };
- static struct { uint pos, col; } attr_id;
+ if (cache->pos == NULL) {
+ static GPUVertFormat format = {0};
+ static struct {
+ uint pos, col;
+ } attr_id;
- GPU_vertformat_clear(&format);
+ GPU_vertformat_clear(&format);
- /* initialize vertex format */
- attr_id.pos = GPU_vertformat_attr_add(&format, "pos", GPU_COMP_F32, 3, GPU_FETCH_FLOAT);
+ /* initialize vertex format */
+ attr_id.pos = GPU_vertformat_attr_add(&format, "pos", GPU_COMP_F32, 3, GPU_FETCH_FLOAT);
- if (use_weight) {
- attr_id.col = GPU_vertformat_attr_add(&format, "color", GPU_COMP_F32, 4, GPU_FETCH_FLOAT);
- }
+ if (use_weight) {
+ attr_id.col = GPU_vertformat_attr_add(&format, "color", GPU_COMP_F32, 4, GPU_FETCH_FLOAT);
+ }
- const int vert_len = lattice_render_data_verts_len_get(rdata);
+ const int vert_len = lattice_render_data_verts_len_get(rdata);
- cache->pos = GPU_vertbuf_create_with_format(&format);
- GPU_vertbuf_data_alloc(cache->pos, vert_len);
- for (int i = 0; i < vert_len; ++i) {
- const BPoint *bp = lattice_render_data_vert_bpoint(rdata, i);
- GPU_vertbuf_attr_set(cache->pos, attr_id.pos, i, bp->vec);
+ cache->pos = GPU_vertbuf_create_with_format(&format);
+ GPU_vertbuf_data_alloc(cache->pos, vert_len);
+ for (int i = 0; i < vert_len; ++i) {
+ const BPoint *bp = lattice_render_data_vert_bpoint(rdata, i);
+ GPU_vertbuf_attr_set(cache->pos, attr_id.pos, i, bp->vec);
- if (use_weight) {
- float w_col[4];
- lattice_render_data_weight_col_get(rdata, i, actdef, w_col);
- w_col[3] = 1.0f;
+ if (use_weight) {
+ float w_col[4];
+ lattice_render_data_weight_col_get(rdata, i, actdef, w_col);
+ w_col[3] = 1.0f;
- GPU_vertbuf_attr_set(cache->pos, attr_id.col, i, w_col);
- }
- }
- }
+ GPU_vertbuf_attr_set(cache->pos, attr_id.col, i, w_col);
+ }
+ }
+ }
- return cache->pos;
+ return cache->pos;
}
-static GPUIndexBuf *lattice_batch_cache_get_edges(LatticeRenderData *rdata, LatticeBatchCache *cache)
+static GPUIndexBuf *lattice_batch_cache_get_edges(LatticeRenderData *rdata,
+ LatticeBatchCache *cache)
{
- BLI_assert(rdata->types & (LR_DATATYPE_VERT | LR_DATATYPE_EDGE));
-
- if (cache->edges == NULL) {
- const int vert_len = lattice_render_data_verts_len_get(rdata);
- const int edge_len = lattice_render_data_edges_len_get(rdata);
- int edge_len_real = 0;
-
- GPUIndexBufBuilder elb;
- GPU_indexbuf_init(&elb, GPU_PRIM_LINES, edge_len, vert_len);
-
-#define LATT_INDEX(u, v, w) \
- ((((w) * rdata->dims.v_len + (v)) * rdata->dims.u_len) + (u))
-
- for (int w = 0; w < rdata->dims.w_len; w++) {
- int wxt = (w == 0 || w == rdata->dims.w_len - 1);
- for (int v = 0; v < rdata->dims.v_len; v++) {
- int vxt = (v == 0 || v == rdata->dims.v_len - 1);
- for (int u = 0; u < rdata->dims.u_len; u++) {
- int uxt = (u == 0 || u == rdata->dims.u_len - 1);
-
- if (w && ((uxt || vxt) || !rdata->show_only_outside)) {
- GPU_indexbuf_add_line_verts(&elb, LATT_INDEX(u, v, w - 1), LATT_INDEX(u, v, w));
- BLI_assert(edge_len_real <= edge_len);
- edge_len_real++;
- }
- if (v && ((uxt || wxt) || !rdata->show_only_outside)) {
- GPU_indexbuf_add_line_verts(&elb, LATT_INDEX(u, v - 1, w), LATT_INDEX(u, v, w));
- BLI_assert(edge_len_real <= edge_len);
- edge_len_real++;
- }
- if (u && ((vxt || wxt) || !rdata->show_only_outside)) {
- GPU_indexbuf_add_line_verts(&elb, LATT_INDEX(u - 1, v, w), LATT_INDEX(u, v, w));
- BLI_assert(edge_len_real <= edge_len);
- edge_len_real++;
- }
- }
- }
- }
+ BLI_assert(rdata->types & (LR_DATATYPE_VERT | LR_DATATYPE_EDGE));
+
+ if (cache->edges == NULL) {
+ const int vert_len = lattice_render_data_verts_len_get(rdata);
+ const int edge_len = lattice_render_data_edges_len_get(rdata);
+ int edge_len_real = 0;
+
+ GPUIndexBufBuilder elb;
+ GPU_indexbuf_init(&elb, GPU_PRIM_LINES, edge_len, vert_len);
+
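+/* Flatten a (u, v, w) lattice coordinate into a linear point index: u varies fastest. */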
+#define LATT_INDEX(u, v, w) ((((w)*rdata->dims.v_len + (v)) * rdata->dims.u_len) + (u))
+
+ for (int w = 0; w < rdata->dims.w_len; w++) {
+ int wxt = (w == 0 || w == rdata->dims.w_len - 1);
+ for (int v = 0; v < rdata->dims.v_len; v++) {
+ int vxt = (v == 0 || v == rdata->dims.v_len - 1);
+ for (int u = 0; u < rdata->dims.u_len; u++) {
+ int uxt = (u == 0 || u == rdata->dims.u_len - 1);
+
+ if (w && ((uxt || vxt) || !rdata->show_only_outside)) {
+ GPU_indexbuf_add_line_verts(&elb, LATT_INDEX(u, v, w - 1), LATT_INDEX(u, v, w));
+ BLI_assert(edge_len_real <= edge_len);
+ edge_len_real++;
+ }
+ if (v && ((uxt || wxt) || !rdata->show_only_outside)) {
+ GPU_indexbuf_add_line_verts(&elb, LATT_INDEX(u, v - 1, w), LATT_INDEX(u, v, w));
+ BLI_assert(edge_len_real <= edge_len);
+ edge_len_real++;
+ }
+ if (u && ((vxt || wxt) || !rdata->show_only_outside)) {
+ GPU_indexbuf_add_line_verts(&elb, LATT_INDEX(u - 1, v, w), LATT_INDEX(u, v, w));
+ BLI_assert(edge_len_real <= edge_len);
+ edge_len_real++;
+ }
+ }
+ }
+ }
#undef LATT_INDEX
- if (rdata->show_only_outside) {
- BLI_assert(edge_len_real <= edge_len);
- }
- else {
- BLI_assert(edge_len_real == edge_len);
- }
+ if (rdata->show_only_outside) {
+ BLI_assert(edge_len_real <= edge_len);
+ }
+ else {
+ BLI_assert(edge_len_real == edge_len);
+ }
- cache->edges = GPU_indexbuf_build(&elb);
- }
+ cache->edges = GPU_indexbuf_build(&elb);
+ }
- return cache->edges;
+ return cache->edges;
}
static void lattice_batch_cache_create_overlay_batches(Lattice *lt)
{
-	/* Since LR_DATATYPE_OVERLAY is slow to generate, generate all overlay batches at once. */
- int options = LR_DATATYPE_VERT | LR_DATATYPE_OVERLAY;
-
- LatticeBatchCache *cache = lattice_batch_cache_get(lt);
- LatticeRenderData *rdata = lattice_render_data_create(lt, options);
-
- if (cache->overlay_verts == NULL) {
- static GPUVertFormat format = { 0 };
- static struct { uint pos, data; } attr_id;
- if (format.attr_len == 0) {
- /* initialize vertex format */
- attr_id.pos = GPU_vertformat_attr_add(&format, "pos", GPU_COMP_F32, 3, GPU_FETCH_FLOAT);
- attr_id.data = GPU_vertformat_attr_add(&format, "data", GPU_COMP_U8, 1, GPU_FETCH_INT);
- }
-
- const int vert_len = lattice_render_data_verts_len_get(rdata);
-
- GPUVertBuf *vbo = GPU_vertbuf_create_with_format(&format);
- GPU_vertbuf_data_alloc(vbo, vert_len);
- for (int i = 0; i < vert_len; ++i) {
- const BPoint *bp = lattice_render_data_vert_bpoint(rdata, i);
-
- char vflag = 0;
- if (bp->f1 & SELECT) {
- if (i == rdata->actbp) {
- vflag |= VFLAG_VERT_ACTIVE;
- }
- else {
- vflag |= VFLAG_VERT_SELECTED;
- }
- }
-
- GPU_vertbuf_attr_set(vbo, attr_id.pos, i, bp->vec);
- GPU_vertbuf_attr_set(vbo, attr_id.data, i, &vflag);
- }
-
- cache->overlay_verts = GPU_batch_create_ex(GPU_PRIM_POINTS, vbo, NULL, GPU_BATCH_OWNS_VBO);
- }
-
- lattice_render_data_free(rdata);
+  /* Since LR_DATATYPE_OVERLAY is slow to generate, generate all overlay batches at once. */
+ int options = LR_DATATYPE_VERT | LR_DATATYPE_OVERLAY;
+
+ LatticeBatchCache *cache = lattice_batch_cache_get(lt);
+ LatticeRenderData *rdata = lattice_render_data_create(lt, options);
+
+ if (cache->overlay_verts == NULL) {
+ static GPUVertFormat format = {0};
+ static struct {
+ uint pos, data;
+ } attr_id;
+ if (format.attr_len == 0) {
+ /* initialize vertex format */
+ attr_id.pos = GPU_vertformat_attr_add(&format, "pos", GPU_COMP_F32, 3, GPU_FETCH_FLOAT);
+ attr_id.data = GPU_vertformat_attr_add(&format, "data", GPU_COMP_U8, 1, GPU_FETCH_INT);
+ }
+
+ const int vert_len = lattice_render_data_verts_len_get(rdata);
+
+ GPUVertBuf *vbo = GPU_vertbuf_create_with_format(&format);
+ GPU_vertbuf_data_alloc(vbo, vert_len);
+ for (int i = 0; i < vert_len; ++i) {
+ const BPoint *bp = lattice_render_data_vert_bpoint(rdata, i);
+
+ char vflag = 0;
+ if (bp->f1 & SELECT) {
+ if (i == rdata->actbp) {
+ vflag |= VFLAG_VERT_ACTIVE;
+ }
+ else {
+ vflag |= VFLAG_VERT_SELECTED;
+ }
+ }
+
+ GPU_vertbuf_attr_set(vbo, attr_id.pos, i, bp->vec);
+ GPU_vertbuf_attr_set(vbo, attr_id.data, i, &vflag);
+ }
+
+ cache->overlay_verts = GPU_batch_create_ex(GPU_PRIM_POINTS, vbo, NULL, GPU_BATCH_OWNS_VBO);
+ }
+
+ lattice_render_data_free(rdata);
}
GPUBatch *DRW_lattice_batch_cache_get_all_edges(Lattice *lt, bool use_weight, const int actdef)
{
- LatticeBatchCache *cache = lattice_batch_cache_get(lt);
+ LatticeBatchCache *cache = lattice_batch_cache_get(lt);
- if (cache->all_edges == NULL) {
- /* create batch from Lattice */
- LatticeRenderData *rdata = lattice_render_data_create(lt, LR_DATATYPE_VERT | LR_DATATYPE_EDGE);
+ if (cache->all_edges == NULL) {
+ /* create batch from Lattice */
+ LatticeRenderData *rdata = lattice_render_data_create(lt, LR_DATATYPE_VERT | LR_DATATYPE_EDGE);
- cache->all_edges = GPU_batch_create(GPU_PRIM_LINES, lattice_batch_cache_get_pos(rdata, cache, use_weight, actdef),
- lattice_batch_cache_get_edges(rdata, cache));
+ cache->all_edges = GPU_batch_create(
+ GPU_PRIM_LINES,
+ lattice_batch_cache_get_pos(rdata, cache, use_weight, actdef),
+ lattice_batch_cache_get_edges(rdata, cache));
- lattice_render_data_free(rdata);
- }
+ lattice_render_data_free(rdata);
+ }
- return cache->all_edges;
+ return cache->all_edges;
}
GPUBatch *DRW_lattice_batch_cache_get_all_verts(Lattice *lt)
{
- LatticeBatchCache *cache = lattice_batch_cache_get(lt);
+ LatticeBatchCache *cache = lattice_batch_cache_get(lt);
- if (cache->all_verts == NULL) {
- LatticeRenderData *rdata = lattice_render_data_create(lt, LR_DATATYPE_VERT);
+ if (cache->all_verts == NULL) {
+ LatticeRenderData *rdata = lattice_render_data_create(lt, LR_DATATYPE_VERT);
- cache->all_verts = GPU_batch_create(GPU_PRIM_POINTS, lattice_batch_cache_get_pos(rdata, cache, false, -1), NULL);
+ cache->all_verts = GPU_batch_create(
+ GPU_PRIM_POINTS, lattice_batch_cache_get_pos(rdata, cache, false, -1), NULL);
- lattice_render_data_free(rdata);
- }
+ lattice_render_data_free(rdata);
+ }
- return cache->all_verts;
+ return cache->all_verts;
}
GPUBatch *DRW_lattice_batch_cache_get_edit_verts(Lattice *lt)
{
- LatticeBatchCache *cache = lattice_batch_cache_get(lt);
+ LatticeBatchCache *cache = lattice_batch_cache_get(lt);
- if (cache->overlay_verts == NULL) {
- lattice_batch_cache_create_overlay_batches(lt);
- }
+ if (cache->overlay_verts == NULL) {
+ lattice_batch_cache_create_overlay_batches(lt);
+ }
- return cache->overlay_verts;
+ return cache->overlay_verts;
}
diff --git a/source/blender/draw/intern/draw_cache_impl_mesh.c b/source/blender/draw/intern/draw_cache_impl_mesh.c
index e35a191cad5..ded9d0963b7 100644
--- a/source/blender/draw/intern/draw_cache_impl_mesh.c
+++ b/source/blender/draw/intern/draw_cache_impl_mesh.c
@@ -61,36 +61,35 @@
#include "ED_mesh.h"
#include "ED_uvedit.h"
-#include "draw_cache_impl.h" /* own include */
-
+#include "draw_cache_impl.h" /* own include */
static void mesh_batch_cache_clear(Mesh *me);
/* Vertex Group Selection and display options */
typedef struct DRW_MeshWeightState {
- int defgroup_active;
- int defgroup_len;
+ int defgroup_active;
+ int defgroup_len;
- short flags;
- char alert_mode;
+ short flags;
+ char alert_mode;
- /* Set of all selected bones for Multipaint. */
- bool *defgroup_sel; /* [defgroup_len] */
- int defgroup_sel_count;
+ /* Set of all selected bones for Multipaint. */
+ bool *defgroup_sel; /* [defgroup_len] */
+ int defgroup_sel_count;
} DRW_MeshWeightState;
typedef struct DRW_MeshCDMask {
- uint32_t uv : 8;
- uint32_t tan : 8;
- uint32_t vcol : 8;
- uint32_t orco : 1;
- uint32_t tan_orco : 1;
+ uint32_t uv : 8;
+ uint32_t tan : 8;
+ uint32_t vcol : 8;
+ uint32_t orco : 1;
+ uint32_t tan_orco : 1;
} DRW_MeshCDMask;
/* DRW_MeshWeightState.flags */
enum {
- DRW_MESH_WEIGHT_STATE_MULTIPAINT = (1 << 0),
- DRW_MESH_WEIGHT_STATE_AUTO_NORMALIZE = (1 << 1),
+ DRW_MESH_WEIGHT_STATE_MULTIPAINT = (1 << 0),
+ DRW_MESH_WEIGHT_STATE_AUTO_NORMALIZE = (1 << 1),
};
/* ---------------------------------------------------------------------- */
@@ -103,26 +102,26 @@ enum {
*/
BLI_INLINE BMLoop *bm_vert_find_first_loop_visible_inline(BMVert *v)
{
- if (v->e) {
- BMLoop *l = v->e->l;
- if (l && !BM_elem_flag_test(l->f, BM_ELEM_HIDDEN)) {
- return l->v == v ? l : l->next;
- }
- return BM_vert_find_first_loop_visible(v);
- }
- return NULL;
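+  /* Fast path: test the first loop of the vertex's first edge before a full visibility search. */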
+ if (v->e) {
+ BMLoop *l = v->e->l;
+ if (l && !BM_elem_flag_test(l->f, BM_ELEM_HIDDEN)) {
+ return l->v == v ? l : l->next;
+ }
+ return BM_vert_find_first_loop_visible(v);
+ }
+ return NULL;
}
BLI_INLINE BMLoop *bm_edge_find_first_loop_visible_inline(BMEdge *e)
{
- if (e->l) {
- BMLoop *l = e->l;
- if (!BM_elem_flag_test(l->f, BM_ELEM_HIDDEN)) {
- return l;
- }
- return BM_edge_find_first_loop_visible(e);
- }
- return NULL;
+ if (e->l) {
+ BMLoop *l = e->l;
+ if (!BM_elem_flag_test(l->f, BM_ELEM_HIDDEN)) {
+ return l;
+ }
+ return BM_edge_find_first_loop_visible(e);
+ }
+ return NULL;
}
/** \} */
@@ -133,192 +132,191 @@ BLI_INLINE BMLoop *bm_edge_find_first_loop_visible_inline(BMEdge *e)
static int mesh_render_verts_len_get(Mesh *me)
{
- return me->edit_mesh ? me->edit_mesh->bm->totvert : me->totvert;
+ return me->edit_mesh ? me->edit_mesh->bm->totvert : me->totvert;
}
static int mesh_render_edges_len_get(Mesh *me)
{
- return me->edit_mesh ? me->edit_mesh->bm->totedge : me->totedge;
+ return me->edit_mesh ? me->edit_mesh->bm->totedge : me->totedge;
}
static int mesh_render_looptri_len_get(Mesh *me)
{
- return me->edit_mesh ? me->edit_mesh->tottri : poly_to_tri_count(me->totpoly, me->totloop);
+ return me->edit_mesh ? me->edit_mesh->tottri : poly_to_tri_count(me->totpoly, me->totloop);
}
static int mesh_render_polys_len_get(Mesh *me)
{
- return me->edit_mesh ? me->edit_mesh->bm->totface : me->totpoly;
+ return me->edit_mesh ? me->edit_mesh->bm->totface : me->totpoly;
}
static int mesh_render_mat_len_get(Mesh *me)
{
- return MAX2(1, me->totcol);
+ return MAX2(1, me->totcol);
}
static int UNUSED_FUNCTION(mesh_render_loops_len_get)(Mesh *me)
{
- return me->edit_mesh ? me->edit_mesh->bm->totloop : me->totloop;
+ return me->edit_mesh ? me->edit_mesh->bm->totloop : me->totloop;
}
/** \} */
-
/* ---------------------------------------------------------------------- */
/** \name Mesh/BMesh Interface (indirect, partially cached access to complex data).
* \{ */
typedef struct EdgeAdjacentPolys {
- int count;
- int face_index[2];
+ int count;
+ int face_index[2];
} EdgeAdjacentPolys;
typedef struct EdgeAdjacentVerts {
- int vert_index[2]; /* -1 if none */
+ int vert_index[2]; /* -1 if none */
} EdgeAdjacentVerts;
typedef struct EdgeDrawAttr {
- uchar v_flag;
- uchar e_flag;
- uchar crease;
- uchar bweight;
+ uchar v_flag;
+ uchar e_flag;
+ uchar crease;
+ uchar bweight;
} EdgeDrawAttr;
typedef struct MeshRenderData {
- int types;
-
- int vert_len;
- int edge_len;
- int tri_len;
- int loop_len;
- int poly_len;
- int mat_len;
- int loose_vert_len;
- int loose_edge_len;
-
- /* Support for mapped mesh data. */
- struct {
- /* Must be set if we want to get mapped data. */
- bool use;
- bool supported;
-
- Mesh *me_cage;
-
- int vert_len;
- int edge_len;
- int tri_len;
- int loop_len;
- int poly_len;
-
- int *loose_verts;
- int loose_vert_len;
-
- int *loose_edges;
- int loose_edge_len;
-
- /* origindex layers */
- int *v_origindex;
- int *e_origindex;
- int *l_origindex;
- int *p_origindex;
- } mapped;
-
- BMEditMesh *edit_bmesh;
- struct EditMeshData *edit_data;
- const ToolSettings *toolsettings;
-
- Mesh *me;
-
- MVert *mvert;
- const MEdge *medge;
- const MLoop *mloop;
- const MPoly *mpoly;
- float (*orco)[3]; /* vertex coordinates normalized to bounding box */
- bool is_orco_allocated;
- MDeformVert *dvert;
- MLoopUV *mloopuv;
- MLoopCol *mloopcol;
- float (*loop_normals)[3];
-
- /* CustomData 'cd' cache for efficient access. */
- struct {
- struct {
- MLoopUV **uv;
- int uv_len;
- int uv_active;
- int uv_mask_active;
-
- MLoopCol **vcol;
- int vcol_len;
- int vcol_active;
-
- float (**tangent)[4];
- int tangent_len;
- int tangent_active;
-
- bool *auto_vcol;
- } layers;
-
- /* Custom-data offsets (only needed for BMesh access) */
- struct {
- int crease;
- int bweight;
- int *uv;
- int *vcol;
+ int types;
+
+ int vert_len;
+ int edge_len;
+ int tri_len;
+ int loop_len;
+ int poly_len;
+ int mat_len;
+ int loose_vert_len;
+ int loose_edge_len;
+
+ /* Support for mapped mesh data. */
+ struct {
+ /* Must be set if we want to get mapped data. */
+ bool use;
+ bool supported;
+
+ Mesh *me_cage;
+
+ int vert_len;
+ int edge_len;
+ int tri_len;
+ int loop_len;
+ int poly_len;
+
+ int *loose_verts;
+ int loose_vert_len;
+
+ int *loose_edges;
+ int loose_edge_len;
+
+ /* origindex layers */
+ int *v_origindex;
+ int *e_origindex;
+ int *l_origindex;
+ int *p_origindex;
+ } mapped;
+
+ BMEditMesh *edit_bmesh;
+ struct EditMeshData *edit_data;
+ const ToolSettings *toolsettings;
+
+ Mesh *me;
+
+ MVert *mvert;
+ const MEdge *medge;
+ const MLoop *mloop;
+ const MPoly *mpoly;
+ float (*orco)[3]; /* vertex coordinates normalized to bounding box */
+ bool is_orco_allocated;
+ MDeformVert *dvert;
+ MLoopUV *mloopuv;
+ MLoopCol *mloopcol;
+ float (*loop_normals)[3];
+
+ /* CustomData 'cd' cache for efficient access. */
+ struct {
+ struct {
+ MLoopUV **uv;
+ int uv_len;
+ int uv_active;
+ int uv_mask_active;
+
+ MLoopCol **vcol;
+ int vcol_len;
+ int vcol_active;
+
+ float (**tangent)[4];
+ int tangent_len;
+ int tangent_active;
+
+ bool *auto_vcol;
+ } layers;
+
+ /* Custom-data offsets (only needed for BMesh access) */
+ struct {
+ int crease;
+ int bweight;
+ int *uv;
+ int *vcol;
#ifdef WITH_FREESTYLE
- int freestyle_edge;
- int freestyle_face;
+ int freestyle_edge;
+ int freestyle_face;
#endif
- } offset;
-
- struct {
- char (*auto_mix)[32];
- char (*uv)[32];
- char (*vcol)[32];
- char (*tangent)[32];
- } uuid;
-
- /* for certain cases we need an output loop-data storage (bmesh tangents) */
- struct {
- CustomData ldata;
- /* grr, special case variable (use in place of 'dm->tangent_mask') */
- short tangent_mask;
- } output;
- } cd;
-
- BMVert *eve_act;
- BMEdge *eed_act;
- BMFace *efa_act;
- BMFace *efa_act_uv;
-
- /* Data created on-demand (usually not for bmesh-based data). */
- EdgeAdjacentPolys *edges_adjacent_polys;
- MLoopTri *mlooptri;
- int *loose_edges;
- int *loose_verts;
-
- float (*poly_normals)[3];
- float *vert_weight;
- char (*vert_color)[3];
- GPUPackedNormal *poly_normals_pack;
- GPUPackedNormal *vert_normals_pack;
- bool *edge_select_bool;
- bool *edge_visible_bool;
+ } offset;
+
+ struct {
+ char (*auto_mix)[32];
+ char (*uv)[32];
+ char (*vcol)[32];
+ char (*tangent)[32];
+ } uuid;
+
+ /* for certain cases we need an output loop-data storage (bmesh tangents) */
+ struct {
+ CustomData ldata;
+ /* grr, special case variable (use in place of 'dm->tangent_mask') */
+ short tangent_mask;
+ } output;
+ } cd;
+
+ BMVert *eve_act;
+ BMEdge *eed_act;
+ BMFace *efa_act;
+ BMFace *efa_act_uv;
+
+ /* Data created on-demand (usually not for bmesh-based data). */
+ EdgeAdjacentPolys *edges_adjacent_polys;
+ MLoopTri *mlooptri;
+ int *loose_edges;
+ int *loose_verts;
+
+ float (*poly_normals)[3];
+ float *vert_weight;
+ char (*vert_color)[3];
+ GPUPackedNormal *poly_normals_pack;
+ GPUPackedNormal *vert_normals_pack;
+ bool *edge_select_bool;
+ bool *edge_visible_bool;
} MeshRenderData;
enum {
- MR_DATATYPE_VERT = 1 << 0,
- MR_DATATYPE_EDGE = 1 << 1,
- MR_DATATYPE_LOOPTRI = 1 << 2,
- MR_DATATYPE_LOOP = 1 << 3,
- MR_DATATYPE_POLY = 1 << 4,
- MR_DATATYPE_OVERLAY = 1 << 5,
- MR_DATATYPE_SHADING = 1 << 6,
- MR_DATATYPE_DVERT = 1 << 7,
- MR_DATATYPE_LOOPCOL = 1 << 8,
- MR_DATATYPE_LOOPUV = 1 << 9,
- MR_DATATYPE_LOOSE_VERT = 1 << 10,
- MR_DATATYPE_LOOSE_EDGE = 1 << 11,
+ MR_DATATYPE_VERT = 1 << 0,
+ MR_DATATYPE_EDGE = 1 << 1,
+ MR_DATATYPE_LOOPTRI = 1 << 2,
+ MR_DATATYPE_LOOP = 1 << 3,
+ MR_DATATYPE_POLY = 1 << 4,
+ MR_DATATYPE_OVERLAY = 1 << 5,
+ MR_DATATYPE_SHADING = 1 << 6,
+ MR_DATATYPE_DVERT = 1 << 7,
+ MR_DATATYPE_LOOPCOL = 1 << 8,
+ MR_DATATYPE_LOOPUV = 1 << 9,
+ MR_DATATYPE_LOOSE_VERT = 1 << 10,
+ MR_DATATYPE_LOOSE_EDGE = 1 << 11,
};
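/* A hedged sketch (not from this commit) of how callers combine these flags:
 * OR together only the data a given batch needs, build, then free. The flag
 * choice below is illustrative. */
const int types = MR_DATATYPE_VERT | MR_DATATYPE_EDGE | MR_DATATYPE_LOOSE_EDGE;
MeshRenderData *rdata = mesh_render_data_create_ex(me, types, NULL, ts);
/* ... fill vertex/index buffers for a loose-wire batch from rdata ... */
mesh_render_data_free(rdata);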
/**
@@ -327,274 +325,278 @@ enum {
*/
static bool bm_vert_has_visible_edge(const BMVert *v)
{
- const BMEdge *e_iter, *e_first;
-
- e_iter = e_first = v->e;
- do {
- if (!BM_elem_flag_test(e_iter, BM_ELEM_HIDDEN)) {
- return true;
- }
- } while ((e_iter = BM_DISK_EDGE_NEXT(e_iter, v)) != e_first);
- return false;
+ const BMEdge *e_iter, *e_first;
+
+ e_iter = e_first = v->e;
+ do {
+ if (!BM_elem_flag_test(e_iter, BM_ELEM_HIDDEN)) {
+ return true;
+ }
+ } while ((e_iter = BM_DISK_EDGE_NEXT(e_iter, v)) != e_first);
+ return false;
}
static bool bm_edge_has_visible_face(const BMEdge *e)
{
- const BMLoop *l_iter, *l_first;
- l_iter = l_first = e->l;
- do {
- if (!BM_elem_flag_test(l_iter->f, BM_ELEM_HIDDEN)) {
- return true;
- }
- } while ((l_iter = l_iter->radial_next) != l_first);
- return false;
+ const BMLoop *l_iter, *l_first;
+ l_iter = l_first = e->l;
+ do {
+ if (!BM_elem_flag_test(l_iter->f, BM_ELEM_HIDDEN)) {
+ return true;
+ }
+ } while ((l_iter = l_iter->radial_next) != l_first);
+ return false;
}
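/* Both helpers above walk one of BMesh's circular lists (the disk cycle of
 * edges around a vertex, the radial cycle of faces around an edge) with the
 * same do/while idiom: start anywhere, advance until back at the start. A
 * variant sketch (not from this commit) that counts rather than
 * short-circuits, under the same 'e->l != NULL' precondition: */
static int bm_edge_visible_face_count(const BMEdge *e)
{
  int count = 0;
  const BMLoop *l_iter, *l_first;
  l_iter = l_first = e->l;
  do {
    if (!BM_elem_flag_test(l_iter->f, BM_ELEM_HIDDEN)) {
      count++;
    }
  } while ((l_iter = l_iter->radial_next) != l_first);
  return count;
}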
BLI_INLINE bool bm_vert_is_loose_and_visible(const BMVert *v)
{
- return (!BM_elem_flag_test(v, BM_ELEM_HIDDEN) &&
- (v->e == NULL || !bm_vert_has_visible_edge(v)));
+ return (!BM_elem_flag_test(v, BM_ELEM_HIDDEN) && (v->e == NULL || !bm_vert_has_visible_edge(v)));
}
BLI_INLINE bool bm_edge_is_loose_and_visible(const BMEdge *e)
{
- return (!BM_elem_flag_test(e, BM_ELEM_HIDDEN) &&
- (e->l == NULL || !bm_edge_has_visible_face(e)));
+ return (!BM_elem_flag_test(e, BM_ELEM_HIDDEN) && (e->l == NULL || !bm_edge_has_visible_face(e)));
}
/* Return true if all layers in _b_ are inside _a_. */
BLI_INLINE bool mesh_cd_layers_type_overlap(DRW_MeshCDMask a, DRW_MeshCDMask b)
{
- return (*((uint32_t *)&a) & *((uint32_t *)&b)) == *((uint32_t *)&b);
+ return (*((uint32_t *)&a) & *((uint32_t *)&b)) == *((uint32_t *)&b);
}
BLI_INLINE void mesh_cd_layers_type_merge(DRW_MeshCDMask *a, DRW_MeshCDMask b)
{
- atomic_fetch_and_or_uint32((uint32_t *)a, *(uint32_t *)&b);
+ atomic_fetch_and_or_uint32((uint32_t *)a, *(uint32_t *)&b);
}
BLI_INLINE void mesh_cd_layers_type_clear(DRW_MeshCDMask *a)
{
- *((uint32_t *)a) = 0;
+ *((uint32_t *)a) = 0;
}
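/* These three helpers rely on DRW_MeshCDMask packing into exactly 32 bits
 * (8 + 8 + 8 + 1 + 1 bits used), so a whole layer set is tested or merged in
 * a single integer operation. A small sketch (not from this commit) of the
 * overlap test deciding whether a cache can be reused: */
DRW_MeshCDMask cached = {0}, needed = {0};
cached.uv = (1 << 0) | (1 << 2); /* UV layers 0 and 2 already built. */
needed.uv = (1 << 2);            /* This draw only needs layer 2. */
if (mesh_cd_layers_type_overlap(cached, needed)) {
  /* Every layer in 'needed' is covered by 'cached': no rebuild required. */
}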
-static void mesh_cd_calc_active_uv_layer(
- const Mesh *me, DRW_MeshCDMask *cd_used)
+static void mesh_cd_calc_active_uv_layer(const Mesh *me, DRW_MeshCDMask *cd_used)
{
- const CustomData *cd_ldata = (me->edit_mesh) ? &me->edit_mesh->bm->ldata : &me->ldata;
+ const CustomData *cd_ldata = (me->edit_mesh) ? &me->edit_mesh->bm->ldata : &me->ldata;
- int layer = CustomData_get_active_layer(cd_ldata, CD_MLOOPUV);
- if (layer != -1) {
- cd_used->uv |= (1 << layer);
- }
+ int layer = CustomData_get_active_layer(cd_ldata, CD_MLOOPUV);
+ if (layer != -1) {
+ cd_used->uv |= (1 << layer);
+ }
}
-static void mesh_cd_calc_active_mask_uv_layer(
- const Mesh *me, DRW_MeshCDMask *cd_used)
+static void mesh_cd_calc_active_mask_uv_layer(const Mesh *me, DRW_MeshCDMask *cd_used)
{
- const CustomData *cd_ldata = (me->edit_mesh) ? &me->edit_mesh->bm->ldata : &me->ldata;
+ const CustomData *cd_ldata = (me->edit_mesh) ? &me->edit_mesh->bm->ldata : &me->ldata;
- int layer = CustomData_get_stencil_layer(cd_ldata, CD_MLOOPUV);
- if (layer != -1) {
- cd_used->uv |= (1 << layer);
- }
+ int layer = CustomData_get_stencil_layer(cd_ldata, CD_MLOOPUV);
+ if (layer != -1) {
+ cd_used->uv |= (1 << layer);
+ }
}
-static void mesh_cd_calc_active_vcol_layer(
- const Mesh *me, DRW_MeshCDMask *cd_used)
+static void mesh_cd_calc_active_vcol_layer(const Mesh *me, DRW_MeshCDMask *cd_used)
{
- const CustomData *cd_ldata = (me->edit_mesh) ? &me->edit_mesh->bm->ldata : &me->ldata;
+ const CustomData *cd_ldata = (me->edit_mesh) ? &me->edit_mesh->bm->ldata : &me->ldata;
- int layer = CustomData_get_active_layer(cd_ldata, CD_MLOOPCOL);
- if (layer != -1) {
- cd_used->vcol |= (1 << layer);
- }
+ int layer = CustomData_get_active_layer(cd_ldata, CD_MLOOPCOL);
+ if (layer != -1) {
+ cd_used->vcol |= (1 << layer);
+ }
}
-static DRW_MeshCDMask mesh_cd_calc_used_gpu_layers(
- const Mesh *me, struct GPUMaterial **gpumat_array, int gpumat_array_len)
+static DRW_MeshCDMask mesh_cd_calc_used_gpu_layers(const Mesh *me,
+ struct GPUMaterial **gpumat_array,
+ int gpumat_array_len)
{
- const CustomData *cd_ldata = (me->edit_mesh) ? &me->edit_mesh->bm->ldata : &me->ldata;
-
- /* See: DM_vertex_attributes_from_gpu for similar logic */
- GPUVertAttrLayers gpu_attrs = {{{0}}};
- DRW_MeshCDMask cd_used;
- mesh_cd_layers_type_clear(&cd_used);
-
- for (int i = 0; i < gpumat_array_len; i++) {
- GPUMaterial *gpumat = gpumat_array[i];
- if (gpumat) {
- GPU_material_vertex_attrs(gpumat, &gpu_attrs);
- for (int j = 0; j < gpu_attrs.totlayer; j++) {
- const char *name = gpu_attrs.layer[j].name;
- int type = gpu_attrs.layer[j].type;
- int layer = -1;
-
- if (type == CD_AUTO_FROM_NAME) {
-            /* We need to deduce which exact layer is used.
- *
- * We do it based on the specified name.
- */
- if (name[0] != '\0') {
- layer = CustomData_get_named_layer(cd_ldata, CD_MLOOPUV, name);
- type = CD_MTFACE;
-
- if (layer == -1) {
- layer = CustomData_get_named_layer(cd_ldata, CD_MLOOPCOL, name);
- type = CD_MCOL;
- }
-#if 0 /* Tangents are always from UV's - this will never happen. */
- if (layer == -1) {
- layer = CustomData_get_named_layer(cd_ldata, CD_TANGENT, name);
- type = CD_TANGENT;
- }
+ const CustomData *cd_ldata = (me->edit_mesh) ? &me->edit_mesh->bm->ldata : &me->ldata;
+
+ /* See: DM_vertex_attributes_from_gpu for similar logic */
+ GPUVertAttrLayers gpu_attrs = {{{0}}};
+ DRW_MeshCDMask cd_used;
+ mesh_cd_layers_type_clear(&cd_used);
+
+ for (int i = 0; i < gpumat_array_len; i++) {
+ GPUMaterial *gpumat = gpumat_array[i];
+ if (gpumat) {
+ GPU_material_vertex_attrs(gpumat, &gpu_attrs);
+ for (int j = 0; j < gpu_attrs.totlayer; j++) {
+ const char *name = gpu_attrs.layer[j].name;
+ int type = gpu_attrs.layer[j].type;
+ int layer = -1;
+
+ if (type == CD_AUTO_FROM_NAME) {
+          /* We need to deduce which exact layer is used.
+ *
+ * We do it based on the specified name.
+ */
+ if (name[0] != '\0') {
+ layer = CustomData_get_named_layer(cd_ldata, CD_MLOOPUV, name);
+ type = CD_MTFACE;
+
+ if (layer == -1) {
+ layer = CustomData_get_named_layer(cd_ldata, CD_MLOOPCOL, name);
+ type = CD_MCOL;
+ }
+#if 0 /* Tangents are always from UV's - this will never happen. */
+ if (layer == -1) {
+ layer = CustomData_get_named_layer(cd_ldata, CD_TANGENT, name);
+ type = CD_TANGENT;
+ }
#endif
- if (layer == -1) {
- continue;
- }
- }
- else {
- /* Fall back to the UV layer, which matches old behavior. */
- type = CD_MTFACE;
- }
- }
-
- switch (type) {
- case CD_MTFACE:
- {
- if (layer == -1) {
- layer = (name[0] != '\0') ?
- CustomData_get_named_layer(cd_ldata, CD_MLOOPUV, name) :
- CustomData_get_active_layer(cd_ldata, CD_MLOOPUV);
- }
- if (layer != -1) {
- cd_used.uv |= (1 << layer);
- }
- break;
- }
- case CD_TANGENT:
- {
- if (layer == -1) {
- layer = (name[0] != '\0') ?
- CustomData_get_named_layer(cd_ldata, CD_MLOOPUV, name) :
- CustomData_get_active_layer(cd_ldata, CD_MLOOPUV);
-
-            /* Only fall back to orco (below) when we have no UV layers, see: T56545 */
- if (layer == -1 && name[0] != '\0') {
- layer = CustomData_get_active_layer(cd_ldata, CD_MLOOPUV);
- }
- }
- if (layer != -1) {
- cd_used.tan |= (1 << layer);
- }
- else {
- /* no UV layers at all => requesting orco */
- cd_used.tan_orco = 1;
- cd_used.orco = 1;
- }
- break;
- }
- case CD_MCOL:
- {
- if (layer == -1) {
- layer = (name[0] != '\0') ?
- CustomData_get_named_layer(cd_ldata, CD_MLOOPCOL, name) :
- CustomData_get_active_layer(cd_ldata, CD_MLOOPCOL);
- }
- if (layer != -1) {
- cd_used.vcol |= (1 << layer);
- }
- break;
- }
- case CD_ORCO:
- {
- cd_used.orco = 1;
- break;
- }
- }
- }
- }
- }
- return cd_used;
+ if (layer == -1) {
+ continue;
+ }
+ }
+ else {
+ /* Fall back to the UV layer, which matches old behavior. */
+ type = CD_MTFACE;
+ }
+ }
+
+ switch (type) {
+ case CD_MTFACE: {
+ if (layer == -1) {
+ layer = (name[0] != '\0') ? CustomData_get_named_layer(cd_ldata, CD_MLOOPUV, name) :
+ CustomData_get_active_layer(cd_ldata, CD_MLOOPUV);
+ }
+ if (layer != -1) {
+ cd_used.uv |= (1 << layer);
+ }
+ break;
+ }
+ case CD_TANGENT: {
+ if (layer == -1) {
+ layer = (name[0] != '\0') ? CustomData_get_named_layer(cd_ldata, CD_MLOOPUV, name) :
+ CustomData_get_active_layer(cd_ldata, CD_MLOOPUV);
+
+            /* Only fall back to orco (below) when we have no UV layers, see: T56545 */
+ if (layer == -1 && name[0] != '\0') {
+ layer = CustomData_get_active_layer(cd_ldata, CD_MLOOPUV);
+ }
+ }
+ if (layer != -1) {
+ cd_used.tan |= (1 << layer);
+ }
+ else {
+ /* no UV layers at all => requesting orco */
+ cd_used.tan_orco = 1;
+ cd_used.orco = 1;
+ }
+ break;
+ }
+ case CD_MCOL: {
+ if (layer == -1) {
+ layer = (name[0] != '\0') ? CustomData_get_named_layer(cd_ldata, CD_MLOOPCOL, name) :
+ CustomData_get_active_layer(cd_ldata, CD_MLOOPCOL);
+ }
+ if (layer != -1) {
+ cd_used.vcol |= (1 << layer);
+ }
+ break;
+ }
+ case CD_ORCO: {
+ cd_used.orco = 1;
+ break;
+ }
+ }
+ }
+ }
+ }
+ return cd_used;
}
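/* A hedged usage sketch: a caller folds the per-material layer request into
 * an accumulated mask with the merge helper above. 'cache->cd_needed' is a
 * hypothetical accumulator field, named here for illustration only. */
DRW_MeshCDMask cd_needed = mesh_cd_calc_used_gpu_layers(me, gpumat_array, gpumat_array_len);
mesh_cd_layers_type_merge(&cache->cd_needed, cd_needed); /* hypothetical field */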
-
-static void mesh_render_calc_normals_loop_and_poly(const Mesh *me, const float split_angle, MeshRenderData *rdata)
+static void mesh_render_calc_normals_loop_and_poly(const Mesh *me,
+ const float split_angle,
+ MeshRenderData *rdata)
{
- BLI_assert((me->flag & ME_AUTOSMOOTH) != 0);
-
- int totloop = me->totloop;
- int totpoly = me->totpoly;
- float (*loop_normals)[3] = MEM_mallocN(sizeof(*loop_normals) * totloop, __func__);
- float (*poly_normals)[3] = MEM_mallocN(sizeof(*poly_normals) * totpoly, __func__);
- short (*clnors)[2] = CustomData_get_layer(&me->ldata, CD_CUSTOMLOOPNORMAL);
-
- BKE_mesh_calc_normals_poly(
- me->mvert, NULL, me->totvert,
- me->mloop, me->mpoly, totloop, totpoly, poly_normals, false);
-
- BKE_mesh_normals_loop_split(
- me->mvert, me->totvert, me->medge, me->totedge,
- me->mloop, loop_normals, totloop, me->mpoly, poly_normals, totpoly,
- true, split_angle, NULL, clnors, NULL);
-
- rdata->loop_len = totloop;
- rdata->poly_len = totpoly;
- rdata->loop_normals = loop_normals;
- rdata->poly_normals = poly_normals;
+ BLI_assert((me->flag & ME_AUTOSMOOTH) != 0);
+
+ int totloop = me->totloop;
+ int totpoly = me->totpoly;
+ float(*loop_normals)[3] = MEM_mallocN(sizeof(*loop_normals) * totloop, __func__);
+ float(*poly_normals)[3] = MEM_mallocN(sizeof(*poly_normals) * totpoly, __func__);
+ short(*clnors)[2] = CustomData_get_layer(&me->ldata, CD_CUSTOMLOOPNORMAL);
+
+ BKE_mesh_calc_normals_poly(
+ me->mvert, NULL, me->totvert, me->mloop, me->mpoly, totloop, totpoly, poly_normals, false);
+
+ BKE_mesh_normals_loop_split(me->mvert,
+ me->totvert,
+ me->medge,
+ me->totedge,
+ me->mloop,
+ loop_normals,
+ totloop,
+ me->mpoly,
+ poly_normals,
+ totpoly,
+ true,
+ split_angle,
+ NULL,
+ clnors,
+ NULL);
+
+ rdata->loop_len = totloop;
+ rdata->poly_len = totpoly;
+ rdata->loop_normals = loop_normals;
+ rdata->poly_normals = poly_normals;
}
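/* The split angle is the auto-smooth threshold in radians; passing M_PI
 * effectively disables angle-based splitting. The calling pattern, as it
 * appears later in this file: */
const bool is_auto_smooth = (me->flag & ME_AUTOSMOOTH) != 0;
const float split_angle = is_auto_smooth ? me->smoothresh : (float)M_PI;
if (is_auto_smooth) {
  mesh_render_calc_normals_loop_and_poly(me, split_angle, rdata);
}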
-static void mesh_cd_extract_auto_layers_names_and_srgb(
- Mesh *me, DRW_MeshCDMask cd_used,
- char **r_auto_layers_names, int **r_auto_layers_srgb, int *r_auto_layers_len)
+static void mesh_cd_extract_auto_layers_names_and_srgb(Mesh *me,
+ DRW_MeshCDMask cd_used,
+ char **r_auto_layers_names,
+ int **r_auto_layers_srgb,
+ int *r_auto_layers_len)
{
- const CustomData *cd_ldata = (me->edit_mesh) ? &me->edit_mesh->bm->ldata : &me->ldata;
-
- int uv_len_used = count_bits_i(cd_used.uv);
- int vcol_len_used = count_bits_i(cd_used.vcol);
- int uv_len = CustomData_number_of_layers(cd_ldata, CD_MLOOPUV);
- int vcol_len = CustomData_number_of_layers(cd_ldata, CD_MLOOPCOL);
-
- uint auto_names_len = 32 * (uv_len_used + vcol_len_used);
- uint auto_ofs = 0;
- /* Allocate max, resize later. */
- char *auto_names = MEM_callocN(sizeof(char) * auto_names_len, __func__);
- int *auto_is_srgb = MEM_callocN(sizeof(int) * (uv_len_used + vcol_len_used), __func__);
-
- for (int i = 0; i < uv_len; i++) {
- if ((cd_used.uv & (1 << i)) != 0) {
- const char *name = CustomData_get_layer_name(cd_ldata, CD_MLOOPUV, i);
- uint hash = BLI_ghashutil_strhash_p(name);
- /* +1 to include '\0' terminator. */
- auto_ofs += 1 + BLI_snprintf_rlen(auto_names + auto_ofs, auto_names_len - auto_ofs, "ba%u", hash);
- }
- }
-
- uint auto_is_srgb_ofs = uv_len_used;
- for (int i = 0; i < vcol_len; i++) {
- if ((cd_used.vcol & (1 << i)) != 0) {
- const char *name = CustomData_get_layer_name(cd_ldata, CD_MLOOPCOL, i);
-            /* We only do vcols that are not overridden by a UV layer with the same name. */
- if (CustomData_get_named_layer_index(cd_ldata, CD_MLOOPUV, name) == -1) {
- uint hash = BLI_ghashutil_strhash_p(name);
- /* +1 to include '\0' terminator. */
- auto_ofs += 1 + BLI_snprintf_rlen(auto_names + auto_ofs, auto_names_len - auto_ofs, "ba%u", hash);
- auto_is_srgb[auto_is_srgb_ofs] = true;
- auto_is_srgb_ofs++;
- }
- }
- }
-
- auto_names = MEM_reallocN(auto_names, sizeof(char) * auto_ofs);
- auto_is_srgb = MEM_reallocN(auto_is_srgb, sizeof(int) * auto_is_srgb_ofs);
-
- /* WATCH: May have been referenced somewhere before freeing. */
- MEM_SAFE_FREE(*r_auto_layers_names);
- MEM_SAFE_FREE(*r_auto_layers_srgb);
-
- *r_auto_layers_names = auto_names;
- *r_auto_layers_srgb = auto_is_srgb;
- *r_auto_layers_len = auto_is_srgb_ofs;
+ const CustomData *cd_ldata = (me->edit_mesh) ? &me->edit_mesh->bm->ldata : &me->ldata;
+
+ int uv_len_used = count_bits_i(cd_used.uv);
+ int vcol_len_used = count_bits_i(cd_used.vcol);
+ int uv_len = CustomData_number_of_layers(cd_ldata, CD_MLOOPUV);
+ int vcol_len = CustomData_number_of_layers(cd_ldata, CD_MLOOPCOL);
+
+ uint auto_names_len = 32 * (uv_len_used + vcol_len_used);
+ uint auto_ofs = 0;
+ /* Allocate max, resize later. */
+ char *auto_names = MEM_callocN(sizeof(char) * auto_names_len, __func__);
+ int *auto_is_srgb = MEM_callocN(sizeof(int) * (uv_len_used + vcol_len_used), __func__);
+
+ for (int i = 0; i < uv_len; i++) {
+ if ((cd_used.uv & (1 << i)) != 0) {
+ const char *name = CustomData_get_layer_name(cd_ldata, CD_MLOOPUV, i);
+ uint hash = BLI_ghashutil_strhash_p(name);
+ /* +1 to include '\0' terminator. */
+ auto_ofs += 1 + BLI_snprintf_rlen(
+ auto_names + auto_ofs, auto_names_len - auto_ofs, "ba%u", hash);
+ }
+ }
+
+ uint auto_is_srgb_ofs = uv_len_used;
+ for (int i = 0; i < vcol_len; i++) {
+ if ((cd_used.vcol & (1 << i)) != 0) {
+ const char *name = CustomData_get_layer_name(cd_ldata, CD_MLOOPCOL, i);
+      /* We only do vcols that are not overridden by a UV layer with the same name. */
+ if (CustomData_get_named_layer_index(cd_ldata, CD_MLOOPUV, name) == -1) {
+ uint hash = BLI_ghashutil_strhash_p(name);
+ /* +1 to include '\0' terminator. */
+ auto_ofs += 1 + BLI_snprintf_rlen(
+ auto_names + auto_ofs, auto_names_len - auto_ofs, "ba%u", hash);
+ auto_is_srgb[auto_is_srgb_ofs] = true;
+ auto_is_srgb_ofs++;
+ }
+ }
+ }
+
+ auto_names = MEM_reallocN(auto_names, sizeof(char) * auto_ofs);
+ auto_is_srgb = MEM_reallocN(auto_is_srgb, sizeof(int) * auto_is_srgb_ofs);
+
+ /* WATCH: May have been referenced somewhere before freeing. */
+ MEM_SAFE_FREE(*r_auto_layers_names);
+ MEM_SAFE_FREE(*r_auto_layers_srgb);
+
+ *r_auto_layers_names = auto_names;
+ *r_auto_layers_srgb = auto_is_srgb;
+ *r_auto_layers_len = auto_is_srgb_ofs;
}
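/* The returned names are hashed layer names ("ba%u"), packed back to back
 * with their '\0' terminators, so a consumer advances by strlen() + 1 per
 * entry. A minimal sketch (not from this commit), using only the three
 * outputs of the function above and the C standard library: */
const char *name = auto_layers_names;
for (int i = 0; i < auto_layers_len; i++) {
  printf("auto layer %d: %s (srgb: %d)\n", i, name, auto_layers_srgb[i]);
  name += strlen(name) + 1; /* hop over the terminator to the next name */
}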
/**
@@ -602,586 +604,619 @@ static void mesh_cd_extract_auto_layers_names_and_srgb(
* While not default, object materials should be supported.
* Although this only impacts the data that's generated, not the materials that display.
*/
-static MeshRenderData *mesh_render_data_create_ex(
- Mesh *me, const int types, const DRW_MeshCDMask *cd_used,
- const ToolSettings *ts)
+static MeshRenderData *mesh_render_data_create_ex(Mesh *me,
+ const int types,
+ const DRW_MeshCDMask *cd_used,
+ const ToolSettings *ts)
{
- MeshRenderData *rdata = MEM_callocN(sizeof(*rdata), __func__);
- rdata->types = types;
- rdata->toolsettings = ts;
- rdata->mat_len = mesh_render_mat_len_get(me);
-
- CustomData_reset(&rdata->cd.output.ldata);
-
- const bool is_auto_smooth = (me->flag & ME_AUTOSMOOTH) != 0;
- const float split_angle = is_auto_smooth ? me->smoothresh : (float)M_PI;
-
- if (me->edit_mesh) {
- BMEditMesh *embm = me->edit_mesh;
- BMesh *bm = embm->bm;
-
- rdata->edit_bmesh = embm;
- rdata->edit_data = me->runtime.edit_data;
-
- if (embm->mesh_eval_cage && (embm->mesh_eval_cage->runtime.is_original == false)) {
- Mesh *me_cage = embm->mesh_eval_cage;
-
- rdata->mapped.me_cage = me_cage;
- if (types & MR_DATATYPE_VERT) {
- rdata->mapped.vert_len = me_cage->totvert;
- }
- if (types & MR_DATATYPE_EDGE) {
- rdata->mapped.edge_len = me_cage->totedge;
- }
- if (types & MR_DATATYPE_LOOP) {
- rdata->mapped.loop_len = me_cage->totloop;
- }
- if (types & MR_DATATYPE_POLY) {
- rdata->mapped.poly_len = me_cage->totpoly;
- }
- if (types & MR_DATATYPE_LOOPTRI) {
- rdata->mapped.tri_len = poly_to_tri_count(me_cage->totpoly, me_cage->totloop);
- }
- if (types & MR_DATATYPE_LOOPUV) {
- rdata->mloopuv = CustomData_get_layer(&me_cage->ldata, CD_MLOOPUV);
- }
-
- rdata->mapped.v_origindex = CustomData_get_layer(&me_cage->vdata, CD_ORIGINDEX);
- rdata->mapped.e_origindex = CustomData_get_layer(&me_cage->edata, CD_ORIGINDEX);
- rdata->mapped.l_origindex = CustomData_get_layer(&me_cage->ldata, CD_ORIGINDEX);
- rdata->mapped.p_origindex = CustomData_get_layer(&me_cage->pdata, CD_ORIGINDEX);
- rdata->mapped.supported = (
- rdata->mapped.v_origindex ||
- rdata->mapped.e_origindex ||
- rdata->mapped.p_origindex);
- }
-
- int bm_ensure_types = 0;
- if (types & MR_DATATYPE_VERT) {
- rdata->vert_len = bm->totvert;
- bm_ensure_types |= BM_VERT;
- }
- if (types & MR_DATATYPE_EDGE) {
- rdata->edge_len = bm->totedge;
- bm_ensure_types |= BM_EDGE;
- }
- if (types & MR_DATATYPE_LOOPTRI) {
- bm_ensure_types |= BM_LOOP;
- }
- if (types & MR_DATATYPE_LOOP) {
- int totloop = bm->totloop;
- if (is_auto_smooth) {
- rdata->loop_normals = MEM_mallocN(sizeof(*rdata->loop_normals) * totloop, __func__);
- int cd_loop_clnors_offset = CustomData_get_offset(&bm->ldata, CD_CUSTOMLOOPNORMAL);
- BM_loops_calc_normal_vcos(
- bm, NULL, NULL, NULL, true, split_angle, rdata->loop_normals, NULL, NULL,
- cd_loop_clnors_offset, false);
- }
- rdata->loop_len = totloop;
- bm_ensure_types |= BM_LOOP;
- }
- if (types & MR_DATATYPE_POLY) {
- rdata->poly_len = bm->totface;
- bm_ensure_types |= BM_FACE;
- }
- if (types & MR_DATATYPE_OVERLAY) {
- rdata->efa_act_uv = EDBM_uv_active_face_get(embm, false, false);
- rdata->efa_act = BM_mesh_active_face_get(bm, false, true);
- rdata->eed_act = BM_mesh_active_edge_get(bm);
- rdata->eve_act = BM_mesh_active_vert_get(bm);
- rdata->cd.offset.crease = CustomData_get_offset(&bm->edata, CD_CREASE);
- rdata->cd.offset.bweight = CustomData_get_offset(&bm->edata, CD_BWEIGHT);
+ MeshRenderData *rdata = MEM_callocN(sizeof(*rdata), __func__);
+ rdata->types = types;
+ rdata->toolsettings = ts;
+ rdata->mat_len = mesh_render_mat_len_get(me);
+
+ CustomData_reset(&rdata->cd.output.ldata);
+
+ const bool is_auto_smooth = (me->flag & ME_AUTOSMOOTH) != 0;
+ const float split_angle = is_auto_smooth ? me->smoothresh : (float)M_PI;
+
+ if (me->edit_mesh) {
+ BMEditMesh *embm = me->edit_mesh;
+ BMesh *bm = embm->bm;
+
+ rdata->edit_bmesh = embm;
+ rdata->edit_data = me->runtime.edit_data;
+
+ if (embm->mesh_eval_cage && (embm->mesh_eval_cage->runtime.is_original == false)) {
+ Mesh *me_cage = embm->mesh_eval_cage;
+
+ rdata->mapped.me_cage = me_cage;
+ if (types & MR_DATATYPE_VERT) {
+ rdata->mapped.vert_len = me_cage->totvert;
+ }
+ if (types & MR_DATATYPE_EDGE) {
+ rdata->mapped.edge_len = me_cage->totedge;
+ }
+ if (types & MR_DATATYPE_LOOP) {
+ rdata->mapped.loop_len = me_cage->totloop;
+ }
+ if (types & MR_DATATYPE_POLY) {
+ rdata->mapped.poly_len = me_cage->totpoly;
+ }
+ if (types & MR_DATATYPE_LOOPTRI) {
+ rdata->mapped.tri_len = poly_to_tri_count(me_cage->totpoly, me_cage->totloop);
+ }
+ if (types & MR_DATATYPE_LOOPUV) {
+ rdata->mloopuv = CustomData_get_layer(&me_cage->ldata, CD_MLOOPUV);
+ }
+
+ rdata->mapped.v_origindex = CustomData_get_layer(&me_cage->vdata, CD_ORIGINDEX);
+ rdata->mapped.e_origindex = CustomData_get_layer(&me_cage->edata, CD_ORIGINDEX);
+ rdata->mapped.l_origindex = CustomData_get_layer(&me_cage->ldata, CD_ORIGINDEX);
+ rdata->mapped.p_origindex = CustomData_get_layer(&me_cage->pdata, CD_ORIGINDEX);
+ rdata->mapped.supported = (rdata->mapped.v_origindex || rdata->mapped.e_origindex ||
+ rdata->mapped.p_origindex);
+ }
+
+ int bm_ensure_types = 0;
+ if (types & MR_DATATYPE_VERT) {
+ rdata->vert_len = bm->totvert;
+ bm_ensure_types |= BM_VERT;
+ }
+ if (types & MR_DATATYPE_EDGE) {
+ rdata->edge_len = bm->totedge;
+ bm_ensure_types |= BM_EDGE;
+ }
+ if (types & MR_DATATYPE_LOOPTRI) {
+ bm_ensure_types |= BM_LOOP;
+ }
+ if (types & MR_DATATYPE_LOOP) {
+ int totloop = bm->totloop;
+ if (is_auto_smooth) {
+ rdata->loop_normals = MEM_mallocN(sizeof(*rdata->loop_normals) * totloop, __func__);
+ int cd_loop_clnors_offset = CustomData_get_offset(&bm->ldata, CD_CUSTOMLOOPNORMAL);
+ BM_loops_calc_normal_vcos(bm,
+ NULL,
+ NULL,
+ NULL,
+ true,
+ split_angle,
+ rdata->loop_normals,
+ NULL,
+ NULL,
+ cd_loop_clnors_offset,
+ false);
+ }
+ rdata->loop_len = totloop;
+ bm_ensure_types |= BM_LOOP;
+ }
+ if (types & MR_DATATYPE_POLY) {
+ rdata->poly_len = bm->totface;
+ bm_ensure_types |= BM_FACE;
+ }
+ if (types & MR_DATATYPE_OVERLAY) {
+ rdata->efa_act_uv = EDBM_uv_active_face_get(embm, false, false);
+ rdata->efa_act = BM_mesh_active_face_get(bm, false, true);
+ rdata->eed_act = BM_mesh_active_edge_get(bm);
+ rdata->eve_act = BM_mesh_active_vert_get(bm);
+ rdata->cd.offset.crease = CustomData_get_offset(&bm->edata, CD_CREASE);
+ rdata->cd.offset.bweight = CustomData_get_offset(&bm->edata, CD_BWEIGHT);
#ifdef WITH_FREESTYLE
- rdata->cd.offset.freestyle_edge = CustomData_get_offset(&bm->edata, CD_FREESTYLE_EDGE);
- rdata->cd.offset.freestyle_face = CustomData_get_offset(&bm->pdata, CD_FREESTYLE_FACE);
+ rdata->cd.offset.freestyle_edge = CustomData_get_offset(&bm->edata, CD_FREESTYLE_EDGE);
+ rdata->cd.offset.freestyle_face = CustomData_get_offset(&bm->pdata, CD_FREESTYLE_FACE);
#endif
- }
- if (types & (MR_DATATYPE_DVERT)) {
- bm_ensure_types |= BM_VERT;
- }
- if (rdata->edit_data != NULL) {
- bm_ensure_types |= BM_VERT;
- }
-
- BM_mesh_elem_index_ensure(bm, bm_ensure_types);
- BM_mesh_elem_table_ensure(bm, bm_ensure_types & ~BM_LOOP);
-
- if (types & MR_DATATYPE_LOOPTRI) {
- /* Edit mode ensures this is valid, no need to calculate. */
- BLI_assert((bm->totloop == 0) || (embm->looptris != NULL));
- int tottri = embm->tottri;
- MLoopTri *mlooptri = MEM_mallocN(sizeof(*rdata->mlooptri) * embm->tottri, __func__);
- for (int index = 0; index < tottri ; index ++ ) {
- BMLoop **bmtri = embm->looptris[index];
- MLoopTri *mtri = &mlooptri[index];
- mtri->tri[0] = BM_elem_index_get(bmtri[0]);
- mtri->tri[1] = BM_elem_index_get(bmtri[1]);
- mtri->tri[2] = BM_elem_index_get(bmtri[2]);
- }
- rdata->mlooptri = mlooptri;
- rdata->tri_len = tottri;
- }
-
- if (types & MR_DATATYPE_LOOSE_VERT) {
- BLI_assert(types & MR_DATATYPE_VERT);
- rdata->loose_vert_len = 0;
-
- {
- int *lverts = MEM_mallocN(rdata->vert_len * sizeof(int), __func__);
- BLI_assert((bm->elem_table_dirty & BM_VERT) == 0);
- for (int i = 0; i < bm->totvert; i++) {
- const BMVert *eve = BM_vert_at_index(bm, i);
- if (bm_vert_is_loose_and_visible(eve)) {
- lverts[rdata->loose_vert_len++] = i;
- }
- }
- rdata->loose_verts = MEM_reallocN(lverts, rdata->loose_vert_len * sizeof(int));
- }
-
- if (rdata->mapped.supported) {
- Mesh *me_cage = embm->mesh_eval_cage;
- rdata->mapped.loose_vert_len = 0;
-
- if (rdata->loose_vert_len) {
- int *lverts = MEM_mallocN(me_cage->totvert * sizeof(int), __func__);
- const int *v_origindex = rdata->mapped.v_origindex;
- for (int i = 0; i < me_cage->totvert; i++) {
- const int v_orig = v_origindex[i];
- if (v_orig != ORIGINDEX_NONE) {
- BMVert *eve = BM_vert_at_index(bm, v_orig);
- if (bm_vert_is_loose_and_visible(eve)) {
- lverts[rdata->mapped.loose_vert_len++] = i;
- }
- }
- }
- rdata->mapped.loose_verts = MEM_reallocN(lverts, rdata->mapped.loose_vert_len * sizeof(int));
- }
- }
- }
-
- if (types & MR_DATATYPE_LOOSE_EDGE) {
- BLI_assert(types & MR_DATATYPE_EDGE);
- rdata->loose_edge_len = 0;
-
- {
- int *ledges = MEM_mallocN(rdata->edge_len * sizeof(int), __func__);
- BLI_assert((bm->elem_table_dirty & BM_EDGE) == 0);
- for (int i = 0; i < bm->totedge; i++) {
- const BMEdge *eed = BM_edge_at_index(bm, i);
- if (bm_edge_is_loose_and_visible(eed)) {
- ledges[rdata->loose_edge_len++] = i;
- }
- }
- rdata->loose_edges = MEM_reallocN(ledges, rdata->loose_edge_len * sizeof(int));
- }
-
- if (rdata->mapped.supported) {
- Mesh *me_cage = embm->mesh_eval_cage;
- rdata->mapped.loose_edge_len = 0;
-
- if (rdata->loose_edge_len) {
- int *ledges = MEM_mallocN(me_cage->totedge * sizeof(int), __func__);
- const int *e_origindex = rdata->mapped.e_origindex;
- for (int i = 0; i < me_cage->totedge; i++) {
- const int e_orig = e_origindex[i];
- if (e_orig != ORIGINDEX_NONE) {
- BMEdge *eed = BM_edge_at_index(bm, e_orig);
- if (bm_edge_is_loose_and_visible(eed)) {
- ledges[rdata->mapped.loose_edge_len++] = i;
- }
- }
- }
- rdata->mapped.loose_edges = MEM_reallocN(ledges, rdata->mapped.loose_edge_len * sizeof(int));
- }
- }
- }
- }
- else {
- rdata->me = me;
-
- if (types & (MR_DATATYPE_VERT)) {
- rdata->vert_len = me->totvert;
- rdata->mvert = CustomData_get_layer(&me->vdata, CD_MVERT);
- }
- if (types & (MR_DATATYPE_EDGE)) {
- rdata->edge_len = me->totedge;
- rdata->medge = CustomData_get_layer(&me->edata, CD_MEDGE);
- }
- if (types & MR_DATATYPE_LOOPTRI) {
- const int tri_len = rdata->tri_len = poly_to_tri_count(me->totpoly, me->totloop);
- MLoopTri *mlooptri = MEM_mallocN(sizeof(*mlooptri) * tri_len, __func__);
- BKE_mesh_recalc_looptri(me->mloop, me->mpoly, me->mvert, me->totloop, me->totpoly, mlooptri);
- rdata->mlooptri = mlooptri;
- }
- if (types & MR_DATATYPE_LOOP) {
- rdata->loop_len = me->totloop;
- rdata->mloop = CustomData_get_layer(&me->ldata, CD_MLOOP);
-
- if (is_auto_smooth) {
- mesh_render_calc_normals_loop_and_poly(me, split_angle, rdata);
- }
- }
- if (types & MR_DATATYPE_POLY) {
- rdata->poly_len = me->totpoly;
- rdata->mpoly = CustomData_get_layer(&me->pdata, CD_MPOLY);
- }
- if (types & MR_DATATYPE_DVERT) {
- rdata->vert_len = me->totvert;
- rdata->dvert = CustomData_get_layer(&me->vdata, CD_MDEFORMVERT);
- }
- if (types & MR_DATATYPE_LOOPCOL) {
- rdata->loop_len = me->totloop;
- rdata->mloopcol = CustomData_get_layer(&me->ldata, CD_MLOOPCOL);
- }
- if (types & MR_DATATYPE_LOOPUV) {
- rdata->loop_len = me->totloop;
- rdata->mloopuv = CustomData_get_layer(&me->ldata, CD_MLOOPUV);
- }
- }
-
- if (types & MR_DATATYPE_SHADING) {
- CustomData *cd_vdata, *cd_ldata;
-
- BLI_assert(cd_used != NULL);
-
- if (me->edit_mesh) {
- BMesh *bm = me->edit_mesh->bm;
- cd_vdata = &bm->vdata;
- cd_ldata = &bm->ldata;
- }
- else {
- cd_vdata = &me->vdata;
- cd_ldata = &me->ldata;
- }
-
- rdata->cd.layers.uv_active = CustomData_get_active_layer(cd_ldata, CD_MLOOPUV);
- rdata->cd.layers.uv_mask_active = CustomData_get_stencil_layer(cd_ldata, CD_MLOOPUV);
- rdata->cd.layers.vcol_active = CustomData_get_active_layer(cd_ldata, CD_MLOOPCOL);
- rdata->cd.layers.tangent_active = rdata->cd.layers.uv_active;
+ }
+ if (types & (MR_DATATYPE_DVERT)) {
+ bm_ensure_types |= BM_VERT;
+ }
+ if (rdata->edit_data != NULL) {
+ bm_ensure_types |= BM_VERT;
+ }
+
+ BM_mesh_elem_index_ensure(bm, bm_ensure_types);
+ BM_mesh_elem_table_ensure(bm, bm_ensure_types & ~BM_LOOP);
+
+ if (types & MR_DATATYPE_LOOPTRI) {
+ /* Edit mode ensures this is valid, no need to calculate. */
+ BLI_assert((bm->totloop == 0) || (embm->looptris != NULL));
+ int tottri = embm->tottri;
+ MLoopTri *mlooptri = MEM_mallocN(sizeof(*rdata->mlooptri) * embm->tottri, __func__);
+ for (int index = 0; index < tottri; index++) {
+ BMLoop **bmtri = embm->looptris[index];
+ MLoopTri *mtri = &mlooptri[index];
+ mtri->tri[0] = BM_elem_index_get(bmtri[0]);
+ mtri->tri[1] = BM_elem_index_get(bmtri[1]);
+ mtri->tri[2] = BM_elem_index_get(bmtri[2]);
+ }
+ rdata->mlooptri = mlooptri;
+ rdata->tri_len = tottri;
+ }
+
+ if (types & MR_DATATYPE_LOOSE_VERT) {
+ BLI_assert(types & MR_DATATYPE_VERT);
+ rdata->loose_vert_len = 0;
+
+ {
+ int *lverts = MEM_mallocN(rdata->vert_len * sizeof(int), __func__);
+ BLI_assert((bm->elem_table_dirty & BM_VERT) == 0);
+ for (int i = 0; i < bm->totvert; i++) {
+ const BMVert *eve = BM_vert_at_index(bm, i);
+ if (bm_vert_is_loose_and_visible(eve)) {
+ lverts[rdata->loose_vert_len++] = i;
+ }
+ }
+ rdata->loose_verts = MEM_reallocN(lverts, rdata->loose_vert_len * sizeof(int));
+ }
+
+ if (rdata->mapped.supported) {
+ Mesh *me_cage = embm->mesh_eval_cage;
+ rdata->mapped.loose_vert_len = 0;
+
+ if (rdata->loose_vert_len) {
+ int *lverts = MEM_mallocN(me_cage->totvert * sizeof(int), __func__);
+ const int *v_origindex = rdata->mapped.v_origindex;
+ for (int i = 0; i < me_cage->totvert; i++) {
+ const int v_orig = v_origindex[i];
+ if (v_orig != ORIGINDEX_NONE) {
+ BMVert *eve = BM_vert_at_index(bm, v_orig);
+ if (bm_vert_is_loose_and_visible(eve)) {
+ lverts[rdata->mapped.loose_vert_len++] = i;
+ }
+ }
+ }
+ rdata->mapped.loose_verts = MEM_reallocN(lverts,
+ rdata->mapped.loose_vert_len * sizeof(int));
+ }
+ }
+ }
+
+ if (types & MR_DATATYPE_LOOSE_EDGE) {
+ BLI_assert(types & MR_DATATYPE_EDGE);
+ rdata->loose_edge_len = 0;
+
+ {
+ int *ledges = MEM_mallocN(rdata->edge_len * sizeof(int), __func__);
+ BLI_assert((bm->elem_table_dirty & BM_EDGE) == 0);
+ for (int i = 0; i < bm->totedge; i++) {
+ const BMEdge *eed = BM_edge_at_index(bm, i);
+ if (bm_edge_is_loose_and_visible(eed)) {
+ ledges[rdata->loose_edge_len++] = i;
+ }
+ }
+ rdata->loose_edges = MEM_reallocN(ledges, rdata->loose_edge_len * sizeof(int));
+ }
+
+ if (rdata->mapped.supported) {
+ Mesh *me_cage = embm->mesh_eval_cage;
+ rdata->mapped.loose_edge_len = 0;
+
+ if (rdata->loose_edge_len) {
+ int *ledges = MEM_mallocN(me_cage->totedge * sizeof(int), __func__);
+ const int *e_origindex = rdata->mapped.e_origindex;
+ for (int i = 0; i < me_cage->totedge; i++) {
+ const int e_orig = e_origindex[i];
+ if (e_orig != ORIGINDEX_NONE) {
+ BMEdge *eed = BM_edge_at_index(bm, e_orig);
+ if (bm_edge_is_loose_and_visible(eed)) {
+ ledges[rdata->mapped.loose_edge_len++] = i;
+ }
+ }
+ }
+ rdata->mapped.loose_edges = MEM_reallocN(ledges,
+ rdata->mapped.loose_edge_len * sizeof(int));
+ }
+ }
+ }
+ }
+ else {
+ rdata->me = me;
+
+ if (types & (MR_DATATYPE_VERT)) {
+ rdata->vert_len = me->totvert;
+ rdata->mvert = CustomData_get_layer(&me->vdata, CD_MVERT);
+ }
+ if (types & (MR_DATATYPE_EDGE)) {
+ rdata->edge_len = me->totedge;
+ rdata->medge = CustomData_get_layer(&me->edata, CD_MEDGE);
+ }
+ if (types & MR_DATATYPE_LOOPTRI) {
+ const int tri_len = rdata->tri_len = poly_to_tri_count(me->totpoly, me->totloop);
+ MLoopTri *mlooptri = MEM_mallocN(sizeof(*mlooptri) * tri_len, __func__);
+ BKE_mesh_recalc_looptri(me->mloop, me->mpoly, me->mvert, me->totloop, me->totpoly, mlooptri);
+ rdata->mlooptri = mlooptri;
+ }
+ if (types & MR_DATATYPE_LOOP) {
+ rdata->loop_len = me->totloop;
+ rdata->mloop = CustomData_get_layer(&me->ldata, CD_MLOOP);
+
+ if (is_auto_smooth) {
+ mesh_render_calc_normals_loop_and_poly(me, split_angle, rdata);
+ }
+ }
+ if (types & MR_DATATYPE_POLY) {
+ rdata->poly_len = me->totpoly;
+ rdata->mpoly = CustomData_get_layer(&me->pdata, CD_MPOLY);
+ }
+ if (types & MR_DATATYPE_DVERT) {
+ rdata->vert_len = me->totvert;
+ rdata->dvert = CustomData_get_layer(&me->vdata, CD_MDEFORMVERT);
+ }
+ if (types & MR_DATATYPE_LOOPCOL) {
+ rdata->loop_len = me->totloop;
+ rdata->mloopcol = CustomData_get_layer(&me->ldata, CD_MLOOPCOL);
+ }
+ if (types & MR_DATATYPE_LOOPUV) {
+ rdata->loop_len = me->totloop;
+ rdata->mloopuv = CustomData_get_layer(&me->ldata, CD_MLOOPUV);
+ }
+ }
+
+ if (types & MR_DATATYPE_SHADING) {
+ CustomData *cd_vdata, *cd_ldata;
+
+ BLI_assert(cd_used != NULL);
+
+ if (me->edit_mesh) {
+ BMesh *bm = me->edit_mesh->bm;
+ cd_vdata = &bm->vdata;
+ cd_ldata = &bm->ldata;
+ }
+ else {
+ cd_vdata = &me->vdata;
+ cd_ldata = &me->ldata;
+ }
+
+ rdata->cd.layers.uv_active = CustomData_get_active_layer(cd_ldata, CD_MLOOPUV);
+ rdata->cd.layers.uv_mask_active = CustomData_get_stencil_layer(cd_ldata, CD_MLOOPUV);
+ rdata->cd.layers.vcol_active = CustomData_get_active_layer(cd_ldata, CD_MLOOPCOL);
+ rdata->cd.layers.tangent_active = rdata->cd.layers.uv_active;
#define CD_VALIDATE_ACTIVE_LAYER(active_index, used) \
- if ((active_index != -1) && (used & (1 << active_index)) == 0) { \
- active_index = -1; \
- } ((void)0)
+ if ((active_index != -1) && (used & (1 << active_index)) == 0) { \
+ active_index = -1; \
+ } \
+ ((void)0)
- CD_VALIDATE_ACTIVE_LAYER(rdata->cd.layers.uv_active, cd_used->uv);
- CD_VALIDATE_ACTIVE_LAYER(rdata->cd.layers.uv_mask_active, cd_used->uv);
- CD_VALIDATE_ACTIVE_LAYER(rdata->cd.layers.tangent_active, cd_used->tan);
- CD_VALIDATE_ACTIVE_LAYER(rdata->cd.layers.vcol_active, cd_used->vcol);
+ CD_VALIDATE_ACTIVE_LAYER(rdata->cd.layers.uv_active, cd_used->uv);
+ CD_VALIDATE_ACTIVE_LAYER(rdata->cd.layers.uv_mask_active, cd_used->uv);
+ CD_VALIDATE_ACTIVE_LAYER(rdata->cd.layers.tangent_active, cd_used->tan);
+ CD_VALIDATE_ACTIVE_LAYER(rdata->cd.layers.vcol_active, cd_used->vcol);
#undef CD_VALIDATE_ACTIVE_LAYER
- rdata->is_orco_allocated = false;
- if (cd_used->orco != 0) {
- rdata->orco = CustomData_get_layer(cd_vdata, CD_ORCO);
- /* If orco is not available compute it ourselves */
- if (!rdata->orco) {
- rdata->is_orco_allocated = true;
- if (me->edit_mesh) {
- BMesh *bm = me->edit_mesh->bm;
- rdata->orco = MEM_mallocN(sizeof(*rdata->orco) * rdata->vert_len, "orco mesh");
- BLI_assert((bm->elem_table_dirty & BM_VERT) == 0);
- for (int i = 0; i < bm->totvert; i++) {
- copy_v3_v3(rdata->orco[i], BM_vert_at_index(bm, i)->co);
- }
- BKE_mesh_orco_verts_transform(me, rdata->orco, rdata->vert_len, 0);
- }
- else {
- rdata->orco = MEM_mallocN(sizeof(*rdata->orco) * rdata->vert_len, "orco mesh");
- MVert *mvert = rdata->mvert;
- for (int a = 0; a < rdata->vert_len; a++, mvert++) {
- copy_v3_v3(rdata->orco[a], mvert->co);
- }
- BKE_mesh_orco_verts_transform(me, rdata->orco, rdata->vert_len, 0);
- }
- }
- }
- else {
- rdata->orco = NULL;
- }
-
- /* don't access mesh directly, instead use vars taken from BMesh or Mesh */
+ rdata->is_orco_allocated = false;
+ if (cd_used->orco != 0) {
+ rdata->orco = CustomData_get_layer(cd_vdata, CD_ORCO);
+ /* If orco is not available compute it ourselves */
+ if (!rdata->orco) {
+ rdata->is_orco_allocated = true;
+ if (me->edit_mesh) {
+ BMesh *bm = me->edit_mesh->bm;
+ rdata->orco = MEM_mallocN(sizeof(*rdata->orco) * rdata->vert_len, "orco mesh");
+ BLI_assert((bm->elem_table_dirty & BM_VERT) == 0);
+ for (int i = 0; i < bm->totvert; i++) {
+ copy_v3_v3(rdata->orco[i], BM_vert_at_index(bm, i)->co);
+ }
+ BKE_mesh_orco_verts_transform(me, rdata->orco, rdata->vert_len, 0);
+ }
+ else {
+ rdata->orco = MEM_mallocN(sizeof(*rdata->orco) * rdata->vert_len, "orco mesh");
+ MVert *mvert = rdata->mvert;
+ for (int a = 0; a < rdata->vert_len; a++, mvert++) {
+ copy_v3_v3(rdata->orco[a], mvert->co);
+ }
+ BKE_mesh_orco_verts_transform(me, rdata->orco, rdata->vert_len, 0);
+ }
+ }
+ }
+ else {
+ rdata->orco = NULL;
+ }
+
+ /* don't access mesh directly, instead use vars taken from BMesh or Mesh */
#define me DONT_USE_THIS
-#ifdef me /* quiet warning */
+#ifdef me /* quiet warning */
#endif
- struct {
- uint uv_len;
- uint vcol_len;
- } cd_layers_src = {
- .uv_len = CustomData_number_of_layers(cd_ldata, CD_MLOOPUV),
- .vcol_len = CustomData_number_of_layers(cd_ldata, CD_MLOOPCOL),
- };
-
- rdata->cd.layers.uv_len = min_ii(cd_layers_src.uv_len, count_bits_i(cd_used->uv));
- rdata->cd.layers.tangent_len = count_bits_i(cd_used->tan) + cd_used->tan_orco;
- rdata->cd.layers.vcol_len = min_ii(cd_layers_src.vcol_len, count_bits_i(cd_used->vcol));
-
- rdata->cd.layers.uv = MEM_mallocN(sizeof(*rdata->cd.layers.uv) * rdata->cd.layers.uv_len, __func__);
- rdata->cd.layers.vcol = MEM_mallocN(sizeof(*rdata->cd.layers.vcol) * rdata->cd.layers.vcol_len, __func__);
- rdata->cd.layers.tangent = MEM_mallocN(sizeof(*rdata->cd.layers.tangent) * rdata->cd.layers.tangent_len, __func__);
-
- rdata->cd.uuid.uv = MEM_mallocN(sizeof(*rdata->cd.uuid.uv) * rdata->cd.layers.uv_len, __func__);
- rdata->cd.uuid.vcol = MEM_mallocN(sizeof(*rdata->cd.uuid.vcol) * rdata->cd.layers.vcol_len, __func__);
- rdata->cd.uuid.tangent = MEM_mallocN(sizeof(*rdata->cd.uuid.tangent) * rdata->cd.layers.tangent_len, __func__);
-
- rdata->cd.offset.uv = MEM_mallocN(sizeof(*rdata->cd.offset.uv) * rdata->cd.layers.uv_len, __func__);
- rdata->cd.offset.vcol = MEM_mallocN(sizeof(*rdata->cd.offset.vcol) * rdata->cd.layers.vcol_len, __func__);
-
- /* Allocate max */
- rdata->cd.layers.auto_vcol = MEM_callocN(
- sizeof(*rdata->cd.layers.auto_vcol) * rdata->cd.layers.vcol_len, __func__);
- rdata->cd.uuid.auto_mix = MEM_mallocN(
- sizeof(*rdata->cd.uuid.auto_mix) * (rdata->cd.layers.vcol_len + rdata->cd.layers.uv_len), __func__);
-
- /* XXX FIXME XXX */
-  /* We use a hash to identify each data layer based on its name.
-   * Gawain then searches for this name in the current shader and binds it if it exists.
-   * NOTE: This is prone to hash collisions.
-   * One solution to hash collisions would be to format the cd layer name
-   * to a safe GLSL var name, without name clashes.
-   * NOTE 2: Replicate changes to code_generate_vertex_new() in gpu_codegen.c */
- if (rdata->cd.layers.vcol_len != 0) {
- int act_vcol = rdata->cd.layers.vcol_active;
- for (int i_src = 0, i_dst = 0; i_src < cd_layers_src.vcol_len; i_src++, i_dst++) {
- if ((cd_used->vcol & (1 << i_src)) == 0) {
- /* This is a non-used VCol slot. Skip. */
- i_dst--;
- if (rdata->cd.layers.vcol_active >= i_src) {
- act_vcol--;
- }
- }
- else {
- const char *name = CustomData_get_layer_name(cd_ldata, CD_MLOOPCOL, i_src);
- uint hash = BLI_ghashutil_strhash_p(name);
- BLI_snprintf(rdata->cd.uuid.vcol[i_dst], sizeof(*rdata->cd.uuid.vcol), "c%u", hash);
- rdata->cd.layers.vcol[i_dst] = CustomData_get_layer_n(cd_ldata, CD_MLOOPCOL, i_src);
- if (rdata->edit_bmesh) {
- rdata->cd.offset.vcol[i_dst] = CustomData_get_n_offset(
- &rdata->edit_bmesh->bm->ldata, CD_MLOOPCOL, i_src);
- }
-
- /* Gather number of auto layers. */
- /* We only do vcols that are not overridden by uvs */
- if (CustomData_get_named_layer_index(cd_ldata, CD_MLOOPUV, name) == -1) {
- BLI_snprintf(
- rdata->cd.uuid.auto_mix[rdata->cd.layers.uv_len + i_dst],
- sizeof(*rdata->cd.uuid.auto_mix), "a%u", hash);
- rdata->cd.layers.auto_vcol[i_dst] = true;
- }
- }
- }
- if (rdata->cd.layers.vcol_active != -1) {
- /* Actual active Vcol slot inside vcol layers used for shading. */
- rdata->cd.layers.vcol_active = act_vcol;
- }
- }
-
- /* Start Fresh */
- CustomData_free_layers(cd_ldata, CD_TANGENT, rdata->loop_len);
- CustomData_free_layers(cd_ldata, CD_MLOOPTANGENT, rdata->loop_len);
-
- if (rdata->cd.layers.uv_len != 0) {
- int act_uv = rdata->cd.layers.uv_active;
- for (int i_src = 0, i_dst = 0; i_src < cd_layers_src.uv_len; i_src++, i_dst++) {
- if ((cd_used->uv & (1 << i_src)) == 0) {
- /* This is a non-used UV slot. Skip. */
- i_dst--;
- if (rdata->cd.layers.uv_active >= i_src) {
- act_uv--;
- }
- }
- else {
- const char *name = CustomData_get_layer_name(cd_ldata, CD_MLOOPUV, i_src);
- uint hash = BLI_ghashutil_strhash_p(name);
-
- BLI_snprintf(rdata->cd.uuid.uv[i_dst], sizeof(*rdata->cd.uuid.uv), "u%u", hash);
- rdata->cd.layers.uv[i_dst] = CustomData_get_layer_n(cd_ldata, CD_MLOOPUV, i_src);
- if (rdata->edit_bmesh) {
- rdata->cd.offset.uv[i_dst] = CustomData_get_n_offset(
- &rdata->edit_bmesh->bm->ldata, CD_MLOOPUV, i_src);
- }
- BLI_snprintf(rdata->cd.uuid.auto_mix[i_dst], sizeof(*rdata->cd.uuid.auto_mix), "a%u", hash);
- }
- }
- if (rdata->cd.layers.uv_active != -1) {
- /* Actual active UV slot inside uv layers used for shading. */
- rdata->cd.layers.uv_active = act_uv;
- }
- }
-
- if (rdata->cd.layers.tangent_len != 0) {
-
- /* -------------------------------------------------------------------- */
- /* Pre-calculate tangents into 'rdata->cd.output.ldata' */
-
- BLI_assert(!CustomData_has_layer(&rdata->cd.output.ldata, CD_TANGENT));
-
- /* Tangent Names */
- char tangent_names[MAX_MTFACE][MAX_NAME];
- for (int i_src = 0, i_dst = 0; i_src < cd_layers_src.uv_len; i_src++, i_dst++) {
- if ((cd_used->tan & (1 << i_src)) == 0) {
- i_dst--;
- }
- else {
- BLI_strncpy(
- tangent_names[i_dst],
- CustomData_get_layer_name(cd_ldata, CD_MLOOPUV, i_src), MAX_NAME);
- }
- }
-
- /* If tangent from orco is requested, decrement tangent_len */
- int actual_tangent_len = (cd_used->tan_orco != 0) ?
- rdata->cd.layers.tangent_len - 1 : rdata->cd.layers.tangent_len;
- if (rdata->edit_bmesh) {
- BMEditMesh *em = rdata->edit_bmesh;
- BMesh *bm = em->bm;
-
- if (is_auto_smooth && rdata->loop_normals == NULL) {
-        /* Should we store the previous array of `loop_normals` somewhere? */
- rdata->loop_len = bm->totloop;
- rdata->loop_normals = MEM_mallocN(sizeof(*rdata->loop_normals) * rdata->loop_len, __func__);
- BM_loops_calc_normal_vcos(bm, NULL, NULL, NULL, true, split_angle, rdata->loop_normals, NULL, NULL, -1, false);
- }
-
- bool calc_active_tangent = false;
-
- BKE_editmesh_loop_tangent_calc(
- em, calc_active_tangent,
- tangent_names, actual_tangent_len,
- rdata->poly_normals, rdata->loop_normals,
- rdata->orco,
- &rdata->cd.output.ldata, bm->totloop,
- &rdata->cd.output.tangent_mask);
- }
- else {
+ struct {
+ uint uv_len;
+ uint vcol_len;
+ } cd_layers_src = {
+ .uv_len = CustomData_number_of_layers(cd_ldata, CD_MLOOPUV),
+ .vcol_len = CustomData_number_of_layers(cd_ldata, CD_MLOOPCOL),
+ };
+
+ rdata->cd.layers.uv_len = min_ii(cd_layers_src.uv_len, count_bits_i(cd_used->uv));
+ rdata->cd.layers.tangent_len = count_bits_i(cd_used->tan) + cd_used->tan_orco;
+ rdata->cd.layers.vcol_len = min_ii(cd_layers_src.vcol_len, count_bits_i(cd_used->vcol));
+
+ rdata->cd.layers.uv = MEM_mallocN(sizeof(*rdata->cd.layers.uv) * rdata->cd.layers.uv_len,
+ __func__);
+ rdata->cd.layers.vcol = MEM_mallocN(sizeof(*rdata->cd.layers.vcol) * rdata->cd.layers.vcol_len,
+ __func__);
+ rdata->cd.layers.tangent = MEM_mallocN(
+ sizeof(*rdata->cd.layers.tangent) * rdata->cd.layers.tangent_len, __func__);
+
+ rdata->cd.uuid.uv = MEM_mallocN(sizeof(*rdata->cd.uuid.uv) * rdata->cd.layers.uv_len,
+ __func__);
+ rdata->cd.uuid.vcol = MEM_mallocN(sizeof(*rdata->cd.uuid.vcol) * rdata->cd.layers.vcol_len,
+ __func__);
+ rdata->cd.uuid.tangent = MEM_mallocN(
+ sizeof(*rdata->cd.uuid.tangent) * rdata->cd.layers.tangent_len, __func__);
+
+ rdata->cd.offset.uv = MEM_mallocN(sizeof(*rdata->cd.offset.uv) * rdata->cd.layers.uv_len,
+ __func__);
+ rdata->cd.offset.vcol = MEM_mallocN(sizeof(*rdata->cd.offset.vcol) * rdata->cd.layers.vcol_len,
+ __func__);
+
+ /* Allocate max */
+ rdata->cd.layers.auto_vcol = MEM_callocN(
+ sizeof(*rdata->cd.layers.auto_vcol) * rdata->cd.layers.vcol_len, __func__);
+ rdata->cd.uuid.auto_mix = MEM_mallocN(
+ sizeof(*rdata->cd.uuid.auto_mix) * (rdata->cd.layers.vcol_len + rdata->cd.layers.uv_len),
+ __func__);
+
+ /* XXX FIXME XXX */
+    /* We use a hash to identify each data layer based on its name.
+     * Gawain then searches for this name in the current shader and binds it if it exists.
+     * NOTE: This is prone to hash collisions.
+     * One solution to hash collisions would be to format the cd layer name
+     * to a safe GLSL var name, without name clashes.
+     * NOTE 2: Replicate changes to code_generate_vertex_new() in gpu_codegen.c */
+ if (rdata->cd.layers.vcol_len != 0) {
+ int act_vcol = rdata->cd.layers.vcol_active;
+ for (int i_src = 0, i_dst = 0; i_src < cd_layers_src.vcol_len; i_src++, i_dst++) {
+ if ((cd_used->vcol & (1 << i_src)) == 0) {
+ /* This is a non-used VCol slot. Skip. */
+ i_dst--;
+ if (rdata->cd.layers.vcol_active >= i_src) {
+ act_vcol--;
+ }
+ }
+ else {
+ const char *name = CustomData_get_layer_name(cd_ldata, CD_MLOOPCOL, i_src);
+ uint hash = BLI_ghashutil_strhash_p(name);
+ BLI_snprintf(rdata->cd.uuid.vcol[i_dst], sizeof(*rdata->cd.uuid.vcol), "c%u", hash);
+ rdata->cd.layers.vcol[i_dst] = CustomData_get_layer_n(cd_ldata, CD_MLOOPCOL, i_src);
+ if (rdata->edit_bmesh) {
+ rdata->cd.offset.vcol[i_dst] = CustomData_get_n_offset(
+ &rdata->edit_bmesh->bm->ldata, CD_MLOOPCOL, i_src);
+ }
+
+ /* Gather number of auto layers. */
+ /* We only do vcols that are not overridden by uvs */
+ if (CustomData_get_named_layer_index(cd_ldata, CD_MLOOPUV, name) == -1) {
+ BLI_snprintf(rdata->cd.uuid.auto_mix[rdata->cd.layers.uv_len + i_dst],
+ sizeof(*rdata->cd.uuid.auto_mix),
+ "a%u",
+ hash);
+ rdata->cd.layers.auto_vcol[i_dst] = true;
+ }
+ }
+ }
+ if (rdata->cd.layers.vcol_active != -1) {
+ /* Actual active Vcol slot inside vcol layers used for shading. */
+ rdata->cd.layers.vcol_active = act_vcol;
+ }
+ }
+
+ /* Start Fresh */
+ CustomData_free_layers(cd_ldata, CD_TANGENT, rdata->loop_len);
+ CustomData_free_layers(cd_ldata, CD_MLOOPTANGENT, rdata->loop_len);
+
+ if (rdata->cd.layers.uv_len != 0) {
+ int act_uv = rdata->cd.layers.uv_active;
+ for (int i_src = 0, i_dst = 0; i_src < cd_layers_src.uv_len; i_src++, i_dst++) {
+ if ((cd_used->uv & (1 << i_src)) == 0) {
+ /* This is a non-used UV slot. Skip. */
+ i_dst--;
+ if (rdata->cd.layers.uv_active >= i_src) {
+ act_uv--;
+ }
+ }
+ else {
+ const char *name = CustomData_get_layer_name(cd_ldata, CD_MLOOPUV, i_src);
+ uint hash = BLI_ghashutil_strhash_p(name);
+
+ BLI_snprintf(rdata->cd.uuid.uv[i_dst], sizeof(*rdata->cd.uuid.uv), "u%u", hash);
+ rdata->cd.layers.uv[i_dst] = CustomData_get_layer_n(cd_ldata, CD_MLOOPUV, i_src);
+ if (rdata->edit_bmesh) {
+ rdata->cd.offset.uv[i_dst] = CustomData_get_n_offset(
+ &rdata->edit_bmesh->bm->ldata, CD_MLOOPUV, i_src);
+ }
+ BLI_snprintf(
+ rdata->cd.uuid.auto_mix[i_dst], sizeof(*rdata->cd.uuid.auto_mix), "a%u", hash);
+ }
+ }
+ if (rdata->cd.layers.uv_active != -1) {
+ /* Actual active UV slot inside uv layers used for shading. */
+ rdata->cd.layers.uv_active = act_uv;
+ }
+ }
+
+ if (rdata->cd.layers.tangent_len != 0) {
+
+ /* -------------------------------------------------------------------- */
+ /* Pre-calculate tangents into 'rdata->cd.output.ldata' */
+
+ BLI_assert(!CustomData_has_layer(&rdata->cd.output.ldata, CD_TANGENT));
+
+ /* Tangent Names */
+ char tangent_names[MAX_MTFACE][MAX_NAME];
+ for (int i_src = 0, i_dst = 0; i_src < cd_layers_src.uv_len; i_src++, i_dst++) {
+ if ((cd_used->tan & (1 << i_src)) == 0) {
+ i_dst--;
+ }
+ else {
+ BLI_strncpy(tangent_names[i_dst],
+ CustomData_get_layer_name(cd_ldata, CD_MLOOPUV, i_src),
+ MAX_NAME);
+ }
+ }
+
+ /* If tangent from orco is requested, decrement tangent_len */
+ int actual_tangent_len = (cd_used->tan_orco != 0) ? rdata->cd.layers.tangent_len - 1 :
+ rdata->cd.layers.tangent_len;
+ if (rdata->edit_bmesh) {
+ BMEditMesh *em = rdata->edit_bmesh;
+ BMesh *bm = em->bm;
+
+ if (is_auto_smooth && rdata->loop_normals == NULL) {
+        /* Should we store the previous array of `loop_normals` somewhere? */
+ rdata->loop_len = bm->totloop;
+ rdata->loop_normals = MEM_mallocN(sizeof(*rdata->loop_normals) * rdata->loop_len,
+ __func__);
+ BM_loops_calc_normal_vcos(
+ bm, NULL, NULL, NULL, true, split_angle, rdata->loop_normals, NULL, NULL, -1, false);
+ }
+
+ bool calc_active_tangent = false;
+
+ BKE_editmesh_loop_tangent_calc(em,
+ calc_active_tangent,
+ tangent_names,
+ actual_tangent_len,
+ rdata->poly_normals,
+ rdata->loop_normals,
+ rdata->orco,
+ &rdata->cd.output.ldata,
+ bm->totloop,
+ &rdata->cd.output.tangent_mask);
+ }
+ else {
#undef me
- if (is_auto_smooth && rdata->loop_normals == NULL) {
- /* Should we store the previous array of `loop_normals` in CustomData? */
- mesh_render_calc_normals_loop_and_poly(me, split_angle, rdata);
- }
-
- bool calc_active_tangent = false;
-
- BKE_mesh_calc_loop_tangent_ex(
- me->mvert,
- me->mpoly, me->totpoly,
- me->mloop,
- rdata->mlooptri, rdata->tri_len,
- cd_ldata,
- calc_active_tangent,
- tangent_names, actual_tangent_len,
- rdata->poly_normals, rdata->loop_normals,
- rdata->orco,
- &rdata->cd.output.ldata, me->totloop,
- &rdata->cd.output.tangent_mask);
-
- /* If we store tangents in the mesh, set temporary. */
+ if (is_auto_smooth && rdata->loop_normals == NULL) {
+ /* Should we store the previous array of `loop_normals` in CustomData? */
+ mesh_render_calc_normals_loop_and_poly(me, split_angle, rdata);
+ }
+
+ bool calc_active_tangent = false;
+
+ BKE_mesh_calc_loop_tangent_ex(me->mvert,
+ me->mpoly,
+ me->totpoly,
+ me->mloop,
+ rdata->mlooptri,
+ rdata->tri_len,
+ cd_ldata,
+ calc_active_tangent,
+ tangent_names,
+ actual_tangent_len,
+ rdata->poly_normals,
+ rdata->loop_normals,
+ rdata->orco,
+ &rdata->cd.output.ldata,
+ me->totloop,
+ &rdata->cd.output.tangent_mask);
+
+ /* If we store tangents in the mesh, set temporary. */
#if 0
- CustomData_set_layer_flag(cd_ldata, CD_TANGENT, CD_FLAG_TEMPORARY);
+ CustomData_set_layer_flag(cd_ldata, CD_TANGENT, CD_FLAG_TEMPORARY);
#endif
#define me DONT_USE_THIS
-#ifdef me /* quiet warning */
+#ifdef me /* quiet warning */
#endif
- }
-
- /* End tangent calculation */
- /* -------------------------------------------------------------------- */
-
- BLI_assert(CustomData_number_of_layers(&rdata->cd.output.ldata, CD_TANGENT) == rdata->cd.layers.tangent_len);
-
- int i_dst = 0;
- for (int i_src = 0; i_src < cd_layers_src.uv_len; i_src++, i_dst++) {
- if ((cd_used->tan & (1 << i_src)) == 0) {
- i_dst--;
- if (rdata->cd.layers.tangent_active >= i_src) {
- rdata->cd.layers.tangent_active--;
- }
- }
- else {
- const char *name = CustomData_get_layer_name(cd_ldata, CD_MLOOPUV, i_src);
- uint hash = BLI_ghashutil_strhash_p(name);
-
- BLI_snprintf(rdata->cd.uuid.tangent[i_dst], sizeof(*rdata->cd.uuid.tangent), "t%u", hash);
-
- /* Done adding tangents. */
-
- /* note: BKE_editmesh_loop_tangent_calc calculates 'CD_TANGENT',
- * not 'CD_MLOOPTANGENT' (as done below). It's OK, they're compatible. */
-
- /* note: normally we'd use 'i_src' here, but 'i_dst' is in sync with 'rdata->cd.output' */
- rdata->cd.layers.tangent[i_dst] = CustomData_get_layer_n(&rdata->cd.output.ldata, CD_TANGENT, i_dst);
- if (rdata->tri_len != 0) {
- BLI_assert(rdata->cd.layers.tangent[i_dst] != NULL);
- }
- }
- }
- if (cd_used->tan_orco != 0) {
- const char *name = CustomData_get_layer_name(&rdata->cd.output.ldata, CD_TANGENT, i_dst);
- uint hash = BLI_ghashutil_strhash_p(name);
- BLI_snprintf(rdata->cd.uuid.tangent[i_dst], sizeof(*rdata->cd.uuid.tangent), "t%u", hash);
-
- rdata->cd.layers.tangent[i_dst] = CustomData_get_layer_n(&rdata->cd.output.ldata, CD_TANGENT, i_dst);
- }
- }
+ }
+
+ /* End tangent calculation */
+ /* -------------------------------------------------------------------- */
+
+ BLI_assert(CustomData_number_of_layers(&rdata->cd.output.ldata, CD_TANGENT) ==
+ rdata->cd.layers.tangent_len);
+
+ int i_dst = 0;
+ for (int i_src = 0; i_src < cd_layers_src.uv_len; i_src++, i_dst++) {
+ if ((cd_used->tan & (1 << i_src)) == 0) {
+ i_dst--;
+ if (rdata->cd.layers.tangent_active >= i_src) {
+ rdata->cd.layers.tangent_active--;
+ }
+ }
+ else {
+ const char *name = CustomData_get_layer_name(cd_ldata, CD_MLOOPUV, i_src);
+ uint hash = BLI_ghashutil_strhash_p(name);
+
+ BLI_snprintf(
+ rdata->cd.uuid.tangent[i_dst], sizeof(*rdata->cd.uuid.tangent), "t%u", hash);
+
+ /* Done adding tangents. */
+
+ /* note: BKE_editmesh_loop_tangent_calc calculates 'CD_TANGENT',
+ * not 'CD_MLOOPTANGENT' (as done below). It's OK, they're compatible. */
+
+ /* note: normally we'd use 'i_src' here, but 'i_dst' is in sync with 'rdata->cd.output' */
+ rdata->cd.layers.tangent[i_dst] = CustomData_get_layer_n(
+ &rdata->cd.output.ldata, CD_TANGENT, i_dst);
+ if (rdata->tri_len != 0) {
+ BLI_assert(rdata->cd.layers.tangent[i_dst] != NULL);
+ }
+ }
+ }
+ if (cd_used->tan_orco != 0) {
+ const char *name = CustomData_get_layer_name(&rdata->cd.output.ldata, CD_TANGENT, i_dst);
+ uint hash = BLI_ghashutil_strhash_p(name);
+ BLI_snprintf(rdata->cd.uuid.tangent[i_dst], sizeof(*rdata->cd.uuid.tangent), "t%u", hash);
+
+ rdata->cd.layers.tangent[i_dst] = CustomData_get_layer_n(
+ &rdata->cd.output.ldata, CD_TANGENT, i_dst);
+ }
+ }
#undef me
- }
+ }
- return rdata;
+ return rdata;
}
/* Warning: replaces the mesh pointer. */
#define MBC_GET_FINAL_MESH(me) \
- /* Hack to show the final result. */ \
- const bool _use_em_final = ( \
- (me)->edit_mesh && \
- (me)->edit_mesh->mesh_eval_final && \
- ((me)->edit_mesh->mesh_eval_final->runtime.is_original == false)); \
- Mesh _me_fake; \
- if (_use_em_final) { \
- _me_fake = *(me)->edit_mesh->mesh_eval_final; \
- _me_fake.mat = (me)->mat; \
- _me_fake.totcol = (me)->totcol; \
- (me) = &_me_fake; \
- } ((void)0)
+ /* Hack to show the final result. */ \
+ const bool _use_em_final = ((me)->edit_mesh && (me)->edit_mesh->mesh_eval_final && \
+ ((me)->edit_mesh->mesh_eval_final->runtime.is_original == false)); \
+ Mesh _me_fake; \
+ if (_use_em_final) { \
+ _me_fake = *(me)->edit_mesh->mesh_eval_final; \
+ _me_fake.mat = (me)->mat; \
+ _me_fake.totcol = (me)->totcol; \
+ (me) = &_me_fake; \
+ } \
+ ((void)0)
static void mesh_render_data_free(MeshRenderData *rdata)
{
- if (rdata->is_orco_allocated) {
- MEM_SAFE_FREE(rdata->orco);
- }
- MEM_SAFE_FREE(rdata->cd.offset.uv);
- MEM_SAFE_FREE(rdata->cd.offset.vcol);
- MEM_SAFE_FREE(rdata->cd.uuid.auto_mix);
- MEM_SAFE_FREE(rdata->cd.uuid.uv);
- MEM_SAFE_FREE(rdata->cd.uuid.vcol);
- MEM_SAFE_FREE(rdata->cd.uuid.tangent);
- MEM_SAFE_FREE(rdata->cd.layers.uv);
- MEM_SAFE_FREE(rdata->cd.layers.vcol);
- MEM_SAFE_FREE(rdata->cd.layers.tangent);
- MEM_SAFE_FREE(rdata->cd.layers.auto_vcol);
- MEM_SAFE_FREE(rdata->loose_verts);
- MEM_SAFE_FREE(rdata->loose_edges);
- MEM_SAFE_FREE(rdata->edges_adjacent_polys);
- MEM_SAFE_FREE(rdata->mlooptri);
- MEM_SAFE_FREE(rdata->loop_normals);
- MEM_SAFE_FREE(rdata->poly_normals);
- MEM_SAFE_FREE(rdata->poly_normals_pack);
- MEM_SAFE_FREE(rdata->vert_normals_pack);
- MEM_SAFE_FREE(rdata->vert_weight);
- MEM_SAFE_FREE(rdata->edge_select_bool);
- MEM_SAFE_FREE(rdata->edge_visible_bool);
- MEM_SAFE_FREE(rdata->vert_color);
-
- MEM_SAFE_FREE(rdata->mapped.loose_verts);
- MEM_SAFE_FREE(rdata->mapped.loose_edges);
-
- CustomData_free(&rdata->cd.output.ldata, rdata->loop_len);
-
- MEM_freeN(rdata);
+ if (rdata->is_orco_allocated) {
+ MEM_SAFE_FREE(rdata->orco);
+ }
+ MEM_SAFE_FREE(rdata->cd.offset.uv);
+ MEM_SAFE_FREE(rdata->cd.offset.vcol);
+ MEM_SAFE_FREE(rdata->cd.uuid.auto_mix);
+ MEM_SAFE_FREE(rdata->cd.uuid.uv);
+ MEM_SAFE_FREE(rdata->cd.uuid.vcol);
+ MEM_SAFE_FREE(rdata->cd.uuid.tangent);
+ MEM_SAFE_FREE(rdata->cd.layers.uv);
+ MEM_SAFE_FREE(rdata->cd.layers.vcol);
+ MEM_SAFE_FREE(rdata->cd.layers.tangent);
+ MEM_SAFE_FREE(rdata->cd.layers.auto_vcol);
+ MEM_SAFE_FREE(rdata->loose_verts);
+ MEM_SAFE_FREE(rdata->loose_edges);
+ MEM_SAFE_FREE(rdata->edges_adjacent_polys);
+ MEM_SAFE_FREE(rdata->mlooptri);
+ MEM_SAFE_FREE(rdata->loop_normals);
+ MEM_SAFE_FREE(rdata->poly_normals);
+ MEM_SAFE_FREE(rdata->poly_normals_pack);
+ MEM_SAFE_FREE(rdata->vert_normals_pack);
+ MEM_SAFE_FREE(rdata->vert_weight);
+ MEM_SAFE_FREE(rdata->edge_select_bool);
+ MEM_SAFE_FREE(rdata->edge_visible_bool);
+ MEM_SAFE_FREE(rdata->vert_color);
+
+ MEM_SAFE_FREE(rdata->mapped.loose_verts);
+ MEM_SAFE_FREE(rdata->mapped.loose_edges);
+
+ CustomData_free(&rdata->cd.output.ldata, rdata->loop_len);
+
+ MEM_freeN(rdata);
}
/** \} */
@@ -1192,125 +1227,126 @@ static void mesh_render_data_free(MeshRenderData *rdata)
static const char *mesh_render_data_uv_auto_layer_uuid_get(const MeshRenderData *rdata, int layer)
{
- BLI_assert(rdata->types & MR_DATATYPE_SHADING);
- return rdata->cd.uuid.auto_mix[layer];
+ BLI_assert(rdata->types & MR_DATATYPE_SHADING);
+ return rdata->cd.uuid.auto_mix[layer];
}
-static const char *mesh_render_data_vcol_auto_layer_uuid_get(const MeshRenderData *rdata, int layer)
+static const char *mesh_render_data_vcol_auto_layer_uuid_get(const MeshRenderData *rdata,
+ int layer)
{
- BLI_assert(rdata->types & MR_DATATYPE_SHADING);
- return rdata->cd.uuid.auto_mix[rdata->cd.layers.uv_len + layer];
+ BLI_assert(rdata->types & MR_DATATYPE_SHADING);
+ return rdata->cd.uuid.auto_mix[rdata->cd.layers.uv_len + layer];
}
static const char *mesh_render_data_uv_layer_uuid_get(const MeshRenderData *rdata, int layer)
{
- BLI_assert(rdata->types & MR_DATATYPE_SHADING);
- return rdata->cd.uuid.uv[layer];
+ BLI_assert(rdata->types & MR_DATATYPE_SHADING);
+ return rdata->cd.uuid.uv[layer];
}
static const char *mesh_render_data_vcol_layer_uuid_get(const MeshRenderData *rdata, int layer)
{
- BLI_assert(rdata->types & MR_DATATYPE_SHADING);
- return rdata->cd.uuid.vcol[layer];
+ BLI_assert(rdata->types & MR_DATATYPE_SHADING);
+ return rdata->cd.uuid.vcol[layer];
}
static const char *mesh_render_data_tangent_layer_uuid_get(const MeshRenderData *rdata, int layer)
{
- BLI_assert(rdata->types & MR_DATATYPE_SHADING);
- return rdata->cd.uuid.tangent[layer];
+ BLI_assert(rdata->types & MR_DATATYPE_SHADING);
+ return rdata->cd.uuid.tangent[layer];
}
static int UNUSED_FUNCTION(mesh_render_data_verts_len_get)(const MeshRenderData *rdata)
{
- BLI_assert(rdata->types & MR_DATATYPE_VERT);
- return rdata->vert_len;
+ BLI_assert(rdata->types & MR_DATATYPE_VERT);
+ return rdata->vert_len;
}
static int mesh_render_data_verts_len_get_maybe_mapped(const MeshRenderData *rdata)
{
- BLI_assert(rdata->types & MR_DATATYPE_VERT);
- return ((rdata->mapped.use == false) ? rdata->vert_len : rdata->mapped.vert_len);
+ BLI_assert(rdata->types & MR_DATATYPE_VERT);
+ return ((rdata->mapped.use == false) ? rdata->vert_len : rdata->mapped.vert_len);
}
static int UNUSED_FUNCTION(mesh_render_data_loose_verts_len_get)(const MeshRenderData *rdata)
{
- BLI_assert(rdata->types & MR_DATATYPE_LOOSE_VERT);
- return rdata->loose_vert_len;
+ BLI_assert(rdata->types & MR_DATATYPE_LOOSE_VERT);
+ return rdata->loose_vert_len;
}
static int mesh_render_data_loose_verts_len_get_maybe_mapped(const MeshRenderData *rdata)
{
- BLI_assert(rdata->types & MR_DATATYPE_LOOSE_VERT);
- return ((rdata->mapped.use == false) ? rdata->loose_vert_len : rdata->mapped.loose_vert_len);
+ BLI_assert(rdata->types & MR_DATATYPE_LOOSE_VERT);
+ return ((rdata->mapped.use == false) ? rdata->loose_vert_len : rdata->mapped.loose_vert_len);
}
static int mesh_render_data_edges_len_get(const MeshRenderData *rdata)
{
- BLI_assert(rdata->types & MR_DATATYPE_EDGE);
- return rdata->edge_len;
+ BLI_assert(rdata->types & MR_DATATYPE_EDGE);
+ return rdata->edge_len;
}
static int mesh_render_data_edges_len_get_maybe_mapped(const MeshRenderData *rdata)
{
- BLI_assert(rdata->types & MR_DATATYPE_EDGE);
- return ((rdata->mapped.use == false) ? rdata->edge_len : rdata->mapped.edge_len);
+ BLI_assert(rdata->types & MR_DATATYPE_EDGE);
+ return ((rdata->mapped.use == false) ? rdata->edge_len : rdata->mapped.edge_len);
}
static int UNUSED_FUNCTION(mesh_render_data_loose_edges_len_get)(const MeshRenderData *rdata)
{
- BLI_assert(rdata->types & MR_DATATYPE_LOOSE_EDGE);
- return rdata->loose_edge_len;
+ BLI_assert(rdata->types & MR_DATATYPE_LOOSE_EDGE);
+ return rdata->loose_edge_len;
}
static int mesh_render_data_loose_edges_len_get_maybe_mapped(const MeshRenderData *rdata)
{
- BLI_assert(rdata->types & MR_DATATYPE_LOOSE_EDGE);
- return ((rdata->mapped.use == false) ? rdata->loose_edge_len : rdata->mapped.loose_edge_len);
+ BLI_assert(rdata->types & MR_DATATYPE_LOOSE_EDGE);
+ return ((rdata->mapped.use == false) ? rdata->loose_edge_len : rdata->mapped.loose_edge_len);
}
static int mesh_render_data_looptri_len_get(const MeshRenderData *rdata)
{
- BLI_assert(rdata->types & MR_DATATYPE_LOOPTRI);
- return rdata->tri_len;
+ BLI_assert(rdata->types & MR_DATATYPE_LOOPTRI);
+ return rdata->tri_len;
}
static int mesh_render_data_looptri_len_get_maybe_mapped(const MeshRenderData *rdata)
{
- BLI_assert(rdata->types & MR_DATATYPE_LOOPTRI);
- return ((rdata->mapped.use == false) ? rdata->tri_len : rdata->mapped.tri_len);
+ BLI_assert(rdata->types & MR_DATATYPE_LOOPTRI);
+ return ((rdata->mapped.use == false) ? rdata->tri_len : rdata->mapped.tri_len);
}
static int UNUSED_FUNCTION(mesh_render_data_mat_len_get)(const MeshRenderData *rdata)
{
- BLI_assert(rdata->types & MR_DATATYPE_POLY);
- return rdata->mat_len;
+ BLI_assert(rdata->types & MR_DATATYPE_POLY);
+ return rdata->mat_len;
}
static int mesh_render_data_loops_len_get(const MeshRenderData *rdata)
{
- BLI_assert(rdata->types & MR_DATATYPE_LOOP);
- return rdata->loop_len;
+ BLI_assert(rdata->types & MR_DATATYPE_LOOP);
+ return rdata->loop_len;
}
static int mesh_render_data_loops_len_get_maybe_mapped(const MeshRenderData *rdata)
{
- BLI_assert(rdata->types & MR_DATATYPE_LOOP);
- return ((rdata->mapped.use == false) ? rdata->loop_len : rdata->mapped.loop_len);
+ BLI_assert(rdata->types & MR_DATATYPE_LOOP);
+ return ((rdata->mapped.use == false) ? rdata->loop_len : rdata->mapped.loop_len);
}
static int mesh_render_data_polys_len_get(const MeshRenderData *rdata)
{
- BLI_assert(rdata->types & MR_DATATYPE_POLY);
- return rdata->poly_len;
+ BLI_assert(rdata->types & MR_DATATYPE_POLY);
+ return rdata->poly_len;
}
static int mesh_render_data_polys_len_get_maybe_mapped(const MeshRenderData *rdata)
{
- BLI_assert(rdata->types & MR_DATATYPE_POLY);
- return ((rdata->mapped.use == false) ? rdata->poly_len : rdata->mapped.poly_len);
+ BLI_assert(rdata->types & MR_DATATYPE_POLY);
+ return ((rdata->mapped.use == false) ? rdata->poly_len : rdata->mapped.poly_len);
}
/** \} */
-
/* ---------------------------------------------------------------------- */
/* TODO remove prototype. */
-static void mesh_create_edit_facedots(MeshRenderData *rdata, GPUVertBuf *vbo_facedots_pos_nor_data);
+static void mesh_create_edit_facedots(MeshRenderData *rdata,
+ GPUVertBuf *vbo_facedots_pos_nor_data);
/** \name Internal Cache (Lazy Initialization)
* \{ */
@@ -1318,217 +1354,229 @@ static void mesh_create_edit_facedots(MeshRenderData *rdata, GPUVertBuf *vbo_fac
/** Ensure #MeshRenderData.poly_normals_pack */
static void mesh_render_data_ensure_poly_normals_pack(MeshRenderData *rdata)
{
- GPUPackedNormal *pnors_pack = rdata->poly_normals_pack;
- if (pnors_pack == NULL) {
- if (rdata->edit_bmesh) {
- BMesh *bm = rdata->edit_bmesh->bm;
- BMIter fiter;
- BMFace *efa;
- int i;
-
- pnors_pack = rdata->poly_normals_pack = MEM_mallocN(sizeof(*pnors_pack) * rdata->poly_len, __func__);
- if (rdata->edit_data && rdata->edit_data->vertexCos != NULL) {
- BKE_editmesh_cache_ensure_poly_normals(rdata->edit_bmesh, rdata->edit_data);
- const float (*pnors)[3] = rdata->edit_data->polyNos;
- for (i = 0; i < bm->totface; i++) {
- pnors_pack[i] = GPU_normal_convert_i10_v3(pnors[i]);
- }
- }
- else {
- BM_ITER_MESH_INDEX(efa, &fiter, bm, BM_FACES_OF_MESH, i) {
- pnors_pack[i] = GPU_normal_convert_i10_v3(efa->no);
- }
- }
- }
- else {
- float (*pnors)[3] = rdata->poly_normals;
-
- if (!pnors) {
- pnors = rdata->poly_normals = MEM_mallocN(sizeof(*pnors) * rdata->poly_len, __func__);
- BKE_mesh_calc_normals_poly(
- rdata->mvert, NULL, rdata->vert_len,
- rdata->mloop, rdata->mpoly, rdata->loop_len, rdata->poly_len, pnors, true);
- }
-
- pnors_pack = rdata->poly_normals_pack = MEM_mallocN(sizeof(*pnors_pack) * rdata->poly_len, __func__);
- for (int i = 0; i < rdata->poly_len; i++) {
- pnors_pack[i] = GPU_normal_convert_i10_v3(pnors[i]);
- }
- }
- }
+ GPUPackedNormal *pnors_pack = rdata->poly_normals_pack;
+ if (pnors_pack == NULL) {
+ if (rdata->edit_bmesh) {
+ BMesh *bm = rdata->edit_bmesh->bm;
+ BMIter fiter;
+ BMFace *efa;
+ int i;
+
+ pnors_pack = rdata->poly_normals_pack = MEM_mallocN(sizeof(*pnors_pack) * rdata->poly_len,
+ __func__);
+ if (rdata->edit_data && rdata->edit_data->vertexCos != NULL) {
+ BKE_editmesh_cache_ensure_poly_normals(rdata->edit_bmesh, rdata->edit_data);
+ const float(*pnors)[3] = rdata->edit_data->polyNos;
+ for (i = 0; i < bm->totface; i++) {
+ pnors_pack[i] = GPU_normal_convert_i10_v3(pnors[i]);
+ }
+ }
+ else {
+ BM_ITER_MESH_INDEX (efa, &fiter, bm, BM_FACES_OF_MESH, i) {
+ pnors_pack[i] = GPU_normal_convert_i10_v3(efa->no);
+ }
+ }
+ }
+ else {
+ float(*pnors)[3] = rdata->poly_normals;
+
+ if (!pnors) {
+ pnors = rdata->poly_normals = MEM_mallocN(sizeof(*pnors) * rdata->poly_len, __func__);
+ BKE_mesh_calc_normals_poly(rdata->mvert,
+ NULL,
+ rdata->vert_len,
+ rdata->mloop,
+ rdata->mpoly,
+ rdata->loop_len,
+ rdata->poly_len,
+ pnors,
+ true);
+ }
+
+ pnors_pack = rdata->poly_normals_pack = MEM_mallocN(sizeof(*pnors_pack) * rdata->poly_len,
+ __func__);
+ for (int i = 0; i < rdata->poly_len; i++) {
+ pnors_pack[i] = GPU_normal_convert_i10_v3(pnors[i]);
+ }
+ }
+ }
}
/** Ensure #MeshRenderData.vert_normals_pack */
static void mesh_render_data_ensure_vert_normals_pack(MeshRenderData *rdata)
{
- GPUPackedNormal *vnors_pack = rdata->vert_normals_pack;
- if (vnors_pack == NULL) {
- if (rdata->edit_bmesh) {
- BMesh *bm = rdata->edit_bmesh->bm;
- BMIter viter;
- BMVert *eve;
- int i;
-
- vnors_pack = rdata->vert_normals_pack = MEM_mallocN(sizeof(*vnors_pack) * rdata->vert_len, __func__);
- BM_ITER_MESH_INDEX(eve, &viter, bm, BM_VERT, i) {
- vnors_pack[i] = GPU_normal_convert_i10_v3(eve->no);
- }
- }
- else {
- /* data from mesh used directly */
- BLI_assert(0);
- }
- }
+ GPUPackedNormal *vnors_pack = rdata->vert_normals_pack;
+ if (vnors_pack == NULL) {
+ if (rdata->edit_bmesh) {
+ BMesh *bm = rdata->edit_bmesh->bm;
+ BMIter viter;
+ BMVert *eve;
+ int i;
+
+ vnors_pack = rdata->vert_normals_pack = MEM_mallocN(sizeof(*vnors_pack) * rdata->vert_len,
+ __func__);
+ BM_ITER_MESH_INDEX (eve, &viter, bm, BM_VERT, i) {
+ vnors_pack[i] = GPU_normal_convert_i10_v3(eve->no);
+ }
+ }
+ else {
+ /* data from mesh used directly */
+ BLI_assert(0);
+ }
+ }
}
-
/** Ensure #MeshRenderData.vert_color */
static void UNUSED_FUNCTION(mesh_render_data_ensure_vert_color)(MeshRenderData *rdata)
{
- char (*vcol)[3] = rdata->vert_color;
- if (vcol == NULL) {
- if (rdata->edit_bmesh) {
- BMesh *bm = rdata->edit_bmesh->bm;
- const int cd_loop_color_offset = CustomData_get_offset(&bm->ldata, CD_MLOOPCOL);
- if (cd_loop_color_offset == -1) {
- goto fallback;
- }
-
- vcol = rdata->vert_color = MEM_mallocN(sizeof(*vcol) * rdata->loop_len, __func__);
-
- BMIter fiter;
- BMFace *efa;
- int i = 0;
-
- BM_ITER_MESH(efa, &fiter, bm, BM_FACES_OF_MESH) {
- BMLoop *l_iter, *l_first;
- l_iter = l_first = BM_FACE_FIRST_LOOP(efa);
- do {
- const MLoopCol *lcol = BM_ELEM_CD_GET_VOID_P(l_iter, cd_loop_color_offset);
- vcol[i][0] = lcol->r;
- vcol[i][1] = lcol->g;
- vcol[i][2] = lcol->b;
- i += 1;
- } while ((l_iter = l_iter->next) != l_first);
- }
- BLI_assert(i == rdata->loop_len);
- }
- else {
- if (rdata->mloopcol == NULL) {
- goto fallback;
- }
-
- vcol = rdata->vert_color = MEM_mallocN(sizeof(*vcol) * rdata->loop_len, __func__);
-
- for (int i = 0; i < rdata->loop_len; i++) {
- vcol[i][0] = rdata->mloopcol[i].r;
- vcol[i][1] = rdata->mloopcol[i].g;
- vcol[i][2] = rdata->mloopcol[i].b;
- }
- }
- }
- return;
+ char(*vcol)[3] = rdata->vert_color;
+ if (vcol == NULL) {
+ if (rdata->edit_bmesh) {
+ BMesh *bm = rdata->edit_bmesh->bm;
+ const int cd_loop_color_offset = CustomData_get_offset(&bm->ldata, CD_MLOOPCOL);
+ if (cd_loop_color_offset == -1) {
+ goto fallback;
+ }
+
+ vcol = rdata->vert_color = MEM_mallocN(sizeof(*vcol) * rdata->loop_len, __func__);
+
+ BMIter fiter;
+ BMFace *efa;
+ int i = 0;
+
+ BM_ITER_MESH (efa, &fiter, bm, BM_FACES_OF_MESH) {
+ BMLoop *l_iter, *l_first;
+ l_iter = l_first = BM_FACE_FIRST_LOOP(efa);
+ do {
+ const MLoopCol *lcol = BM_ELEM_CD_GET_VOID_P(l_iter, cd_loop_color_offset);
+ vcol[i][0] = lcol->r;
+ vcol[i][1] = lcol->g;
+ vcol[i][2] = lcol->b;
+ i += 1;
+ } while ((l_iter = l_iter->next) != l_first);
+ }
+ BLI_assert(i == rdata->loop_len);
+ }
+ else {
+ if (rdata->mloopcol == NULL) {
+ goto fallback;
+ }
+
+ vcol = rdata->vert_color = MEM_mallocN(sizeof(*vcol) * rdata->loop_len, __func__);
+
+ for (int i = 0; i < rdata->loop_len; i++) {
+ vcol[i][0] = rdata->mloopcol[i].r;
+ vcol[i][1] = rdata->mloopcol[i].g;
+ vcol[i][2] = rdata->mloopcol[i].b;
+ }
+ }
+ }
+ return;
fallback:
- vcol = rdata->vert_color = MEM_mallocN(sizeof(*vcol) * rdata->loop_len, __func__);
+ vcol = rdata->vert_color = MEM_mallocN(sizeof(*vcol) * rdata->loop_len, __func__);
- for (int i = 0; i < rdata->loop_len; i++) {
- vcol[i][0] = 255;
- vcol[i][1] = 255;
- vcol[i][2] = 255;
- }
+ for (int i = 0; i < rdata->loop_len; i++) {
+ vcol[i][0] = 255;
+ vcol[i][1] = 255;
+ vcol[i][2] = 255;
+ }
}
static float evaluate_vertex_weight(const MDeformVert *dvert, const DRW_MeshWeightState *wstate)
{
- float input = 0.0f;
- bool show_alert_color = false;
-
- if (wstate->flags & DRW_MESH_WEIGHT_STATE_MULTIPAINT) {
- /* Multi-Paint feature */
- input = BKE_defvert_multipaint_collective_weight(
- dvert, wstate->defgroup_len, wstate->defgroup_sel, wstate->defgroup_sel_count,
- (wstate->flags & DRW_MESH_WEIGHT_STATE_AUTO_NORMALIZE) != 0);
-
- /* make it black if the selected groups have no weight on a vertex */
- if (input == 0.0f) {
- show_alert_color = true;
- }
- }
- else {
- /* default, non-tricky behavior */
- input = defvert_find_weight(dvert, wstate->defgroup_active);
-
- if (input == 0.0f) {
- switch (wstate->alert_mode) {
- case OB_DRAW_GROUPUSER_ACTIVE:
- show_alert_color = true;
- break;
-
- case OB_DRAW_GROUPUSER_ALL:
- show_alert_color = defvert_is_weight_zero(dvert, wstate->defgroup_len);
- break;
- }
- }
- }
-
- if (show_alert_color) {
- return -1.0f;
- }
- else {
- CLAMP(input, 0.0f, 1.0f);
- return input;
- }
+ float input = 0.0f;
+ bool show_alert_color = false;
+
+ if (wstate->flags & DRW_MESH_WEIGHT_STATE_MULTIPAINT) {
+ /* Multi-Paint feature */
+ input = BKE_defvert_multipaint_collective_weight(
+ dvert,
+ wstate->defgroup_len,
+ wstate->defgroup_sel,
+ wstate->defgroup_sel_count,
+ (wstate->flags & DRW_MESH_WEIGHT_STATE_AUTO_NORMALIZE) != 0);
+
+ /* make it black if the selected groups have no weight on a vertex */
+ if (input == 0.0f) {
+ show_alert_color = true;
+ }
+ }
+ else {
+ /* default, non-tricky behavior */
+ input = defvert_find_weight(dvert, wstate->defgroup_active);
+
+ if (input == 0.0f) {
+ switch (wstate->alert_mode) {
+ case OB_DRAW_GROUPUSER_ACTIVE:
+ show_alert_color = true;
+ break;
+
+ case OB_DRAW_GROUPUSER_ALL:
+ show_alert_color = defvert_is_weight_zero(dvert, wstate->defgroup_len);
+ break;
+ }
+ }
+ }
+
+ if (show_alert_color) {
+ return -1.0f;
+ }
+ else {
+ CLAMP(input, 0.0f, 1.0f);
+ return input;
+ }
}
/** Ensure #MeshRenderData.vert_weight */
-static void mesh_render_data_ensure_vert_weight(MeshRenderData *rdata, const struct DRW_MeshWeightState *wstate)
+static void mesh_render_data_ensure_vert_weight(MeshRenderData *rdata,
+ const struct DRW_MeshWeightState *wstate)
{
- float *vweight = rdata->vert_weight;
- if (vweight == NULL) {
- if (wstate->defgroup_active == -1) {
- goto fallback;
- }
-
- if (rdata->edit_bmesh) {
- BMesh *bm = rdata->edit_bmesh->bm;
- const int cd_dvert_offset = CustomData_get_offset(&bm->vdata, CD_MDEFORMVERT);
- if (cd_dvert_offset == -1) {
- goto fallback;
- }
-
- BMIter viter;
- BMVert *eve;
- int i;
-
- vweight = rdata->vert_weight = MEM_mallocN(sizeof(*vweight) * rdata->vert_len, __func__);
- BM_ITER_MESH_INDEX(eve, &viter, bm, BM_VERT, i) {
- const MDeformVert *dvert = BM_ELEM_CD_GET_VOID_P(eve, cd_dvert_offset);
- vweight[i] = evaluate_vertex_weight(dvert, wstate);
- }
- }
- else {
- if (rdata->dvert == NULL) {
- goto fallback;
- }
-
- vweight = rdata->vert_weight = MEM_mallocN(sizeof(*vweight) * rdata->vert_len, __func__);
- for (int i = 0; i < rdata->vert_len; i++) {
- vweight[i] = evaluate_vertex_weight(&rdata->dvert[i], wstate);
- }
- }
- }
- return;
+ float *vweight = rdata->vert_weight;
+ if (vweight == NULL) {
+ if (wstate->defgroup_active == -1) {
+ goto fallback;
+ }
+
+ if (rdata->edit_bmesh) {
+ BMesh *bm = rdata->edit_bmesh->bm;
+ const int cd_dvert_offset = CustomData_get_offset(&bm->vdata, CD_MDEFORMVERT);
+ if (cd_dvert_offset == -1) {
+ goto fallback;
+ }
+
+ BMIter viter;
+ BMVert *eve;
+ int i;
+
+ vweight = rdata->vert_weight = MEM_mallocN(sizeof(*vweight) * rdata->vert_len, __func__);
+ BM_ITER_MESH_INDEX (eve, &viter, bm, BM_VERT, i) {
+ const MDeformVert *dvert = BM_ELEM_CD_GET_VOID_P(eve, cd_dvert_offset);
+ vweight[i] = evaluate_vertex_weight(dvert, wstate);
+ }
+ }
+ else {
+ if (rdata->dvert == NULL) {
+ goto fallback;
+ }
+
+ vweight = rdata->vert_weight = MEM_mallocN(sizeof(*vweight) * rdata->vert_len, __func__);
+ for (int i = 0; i < rdata->vert_len; i++) {
+ vweight[i] = evaluate_vertex_weight(&rdata->dvert[i], wstate);
+ }
+ }
+ }
+ return;
fallback:
- vweight = rdata->vert_weight = MEM_callocN(sizeof(*vweight) * rdata->vert_len, __func__);
-
- if ((wstate->defgroup_active < 0) && (wstate->defgroup_len > 0)) {
- copy_vn_fl(vweight, rdata->vert_len, -2.0f);
- }
- else if (wstate->alert_mode != OB_DRAW_GROUPUSER_NONE) {
- copy_vn_fl(vweight, rdata->vert_len, -1.0f);
- }
+ vweight = rdata->vert_weight = MEM_callocN(sizeof(*vweight) * rdata->vert_len, __func__);
+
+ if ((wstate->defgroup_active < 0) && (wstate->defgroup_len > 0)) {
+ copy_vn_fl(vweight, rdata->vert_len, -2.0f);
+ }
+ else if (wstate->alert_mode != OB_DRAW_GROUPUSER_NONE) {
+ copy_vn_fl(vweight, rdata->vert_len, -1.0f);
+ }
}
/** \} */
@@ -1539,205 +1587,211 @@ fallback:
static uchar mesh_render_data_face_flag(MeshRenderData *rdata, const BMFace *efa, const int cd_ofs)
{
- uchar fflag = 0;
-
- if (efa == rdata->efa_act) {
- fflag |= VFLAG_FACE_ACTIVE;
- }
- if (BM_elem_flag_test(efa, BM_ELEM_SELECT)) {
- fflag |= VFLAG_FACE_SELECTED;
- }
-
- if (efa == rdata->efa_act_uv) {
- fflag |= VFLAG_FACE_UV_ACTIVE;
- }
- if ((cd_ofs != -1) && uvedit_face_select_test_ex(rdata->toolsettings, (BMFace *)efa, cd_ofs)) {
- fflag |= VFLAG_FACE_UV_SELECT;
- }
+ uchar fflag = 0;
+
+ if (efa == rdata->efa_act) {
+ fflag |= VFLAG_FACE_ACTIVE;
+ }
+ if (BM_elem_flag_test(efa, BM_ELEM_SELECT)) {
+ fflag |= VFLAG_FACE_SELECTED;
+ }
+
+ if (efa == rdata->efa_act_uv) {
+ fflag |= VFLAG_FACE_UV_ACTIVE;
+ }
+ if ((cd_ofs != -1) && uvedit_face_select_test_ex(rdata->toolsettings, (BMFace *)efa, cd_ofs)) {
+ fflag |= VFLAG_FACE_UV_SELECT;
+ }
#ifdef WITH_FREESTYLE
- if (rdata->cd.offset.freestyle_face != -1) {
- const FreestyleFace *ffa = BM_ELEM_CD_GET_VOID_P(efa, rdata->cd.offset.freestyle_face);
- if (ffa->flag & FREESTYLE_FACE_MARK) {
- fflag |= VFLAG_FACE_FREESTYLE;
- }
- }
+ if (rdata->cd.offset.freestyle_face != -1) {
+ const FreestyleFace *ffa = BM_ELEM_CD_GET_VOID_P(efa, rdata->cd.offset.freestyle_face);
+ if (ffa->flag & FREESTYLE_FACE_MARK) {
+ fflag |= VFLAG_FACE_FREESTYLE;
+ }
+ }
#endif
- return fflag;
+ return fflag;
}
-static void mesh_render_data_edge_flag(
- const MeshRenderData *rdata, const BMEdge *eed,
- EdgeDrawAttr *eattr)
+static void mesh_render_data_edge_flag(const MeshRenderData *rdata,
+ const BMEdge *eed,
+ EdgeDrawAttr *eattr)
{
- const ToolSettings *ts = rdata->toolsettings;
- const bool is_vertex_select_mode = (ts != NULL) && (ts->selectmode & SCE_SELECT_VERTEX) != 0;
- const bool is_face_only_select_mode = (ts != NULL) && (ts->selectmode == SCE_SELECT_FACE);
-
- if (eed == rdata->eed_act) {
- eattr->e_flag |= VFLAG_EDGE_ACTIVE;
- }
- if (!is_vertex_select_mode &&
- BM_elem_flag_test(eed, BM_ELEM_SELECT))
- {
- eattr->e_flag |= VFLAG_EDGE_SELECTED;
- }
- if (is_vertex_select_mode &&
- BM_elem_flag_test(eed->v1, BM_ELEM_SELECT) &&
- BM_elem_flag_test(eed->v2, BM_ELEM_SELECT))
- {
- eattr->e_flag |= VFLAG_EDGE_SELECTED;
- eattr->e_flag |= VFLAG_VERT_SELECTED;
- }
- if (BM_elem_flag_test(eed, BM_ELEM_SEAM)) {
- eattr->e_flag |= VFLAG_EDGE_SEAM;
- }
- if (!BM_elem_flag_test(eed, BM_ELEM_SMOOTH)) {
- eattr->e_flag |= VFLAG_EDGE_SHARP;
- }
-
- /* Use active edge color for active face edges because
- * specular highlights make it hard to see T55456#510873.
- *
- * This isn't ideal since it can't be used when mixing edge/face modes
+ const ToolSettings *ts = rdata->toolsettings;
+ const bool is_vertex_select_mode = (ts != NULL) && (ts->selectmode & SCE_SELECT_VERTEX) != 0;
+ const bool is_face_only_select_mode = (ts != NULL) && (ts->selectmode == SCE_SELECT_FACE);
+
+ if (eed == rdata->eed_act) {
+ eattr->e_flag |= VFLAG_EDGE_ACTIVE;
+ }
+ if (!is_vertex_select_mode && BM_elem_flag_test(eed, BM_ELEM_SELECT)) {
+ eattr->e_flag |= VFLAG_EDGE_SELECTED;
+ }
+ if (is_vertex_select_mode && BM_elem_flag_test(eed->v1, BM_ELEM_SELECT) &&
+ BM_elem_flag_test(eed->v2, BM_ELEM_SELECT)) {
+ eattr->e_flag |= VFLAG_EDGE_SELECTED;
+ eattr->e_flag |= VFLAG_VERT_SELECTED;
+ }
+ if (BM_elem_flag_test(eed, BM_ELEM_SEAM)) {
+ eattr->e_flag |= VFLAG_EDGE_SEAM;
+ }
+ if (!BM_elem_flag_test(eed, BM_ELEM_SMOOTH)) {
+ eattr->e_flag |= VFLAG_EDGE_SHARP;
+ }
+
+ /* Use active edge color for active face edges because
+ * specular highlights make it hard to see T55456#510873.
+ *
+ * This isn't ideal since it can't be used when mixing edge/face modes
 * but it's still better than not being able to see the active face. */
- if (is_face_only_select_mode) {
- if (rdata->efa_act != NULL) {
- if (BM_edge_in_face(eed, rdata->efa_act)) {
- eattr->e_flag |= VFLAG_EDGE_ACTIVE;
- }
- }
- }
-
- /* Use a byte for value range */
- if (rdata->cd.offset.crease != -1) {
- float crease = BM_ELEM_CD_GET_FLOAT(eed, rdata->cd.offset.crease);
- if (crease > 0) {
- eattr->crease = (uchar)(crease * 255.0f);
- }
- }
- /* Use a byte for value range */
- if (rdata->cd.offset.bweight != -1) {
- float bweight = BM_ELEM_CD_GET_FLOAT(eed, rdata->cd.offset.bweight);
- if (bweight > 0) {
- eattr->bweight = (uchar)(bweight * 255.0f);
- }
- }
+ if (is_face_only_select_mode) {
+ if (rdata->efa_act != NULL) {
+ if (BM_edge_in_face(eed, rdata->efa_act)) {
+ eattr->e_flag |= VFLAG_EDGE_ACTIVE;
+ }
+ }
+ }
+
+ /* Use a byte for value range */
+ if (rdata->cd.offset.crease != -1) {
+ float crease = BM_ELEM_CD_GET_FLOAT(eed, rdata->cd.offset.crease);
+ if (crease > 0) {
+ eattr->crease = (uchar)(crease * 255.0f);
+ }
+ }
+ /* Use a byte for value range */
+ if (rdata->cd.offset.bweight != -1) {
+ float bweight = BM_ELEM_CD_GET_FLOAT(eed, rdata->cd.offset.bweight);
+ if (bweight > 0) {
+ eattr->bweight = (uchar)(bweight * 255.0f);
+ }
+ }
#ifdef WITH_FREESTYLE
- if (rdata->cd.offset.freestyle_edge != -1) {
- const FreestyleEdge *fed = BM_ELEM_CD_GET_VOID_P(eed, rdata->cd.offset.freestyle_edge);
- if (fed->flag & FREESTYLE_EDGE_MARK) {
- eattr->e_flag |= VFLAG_EDGE_FREESTYLE;
- }
- }
+ if (rdata->cd.offset.freestyle_edge != -1) {
+ const FreestyleEdge *fed = BM_ELEM_CD_GET_VOID_P(eed, rdata->cd.offset.freestyle_edge);
+ if (fed->flag & FREESTYLE_EDGE_MARK) {
+ eattr->e_flag |= VFLAG_EDGE_FREESTYLE;
+ }
+ }
#endif
}
-static void mesh_render_data_loop_flag(MeshRenderData *rdata, BMLoop *loop, const int cd_ofs, EdgeDrawAttr *eattr)
+static void mesh_render_data_loop_flag(MeshRenderData *rdata,
+ BMLoop *loop,
+ const int cd_ofs,
+ EdgeDrawAttr *eattr)
{
- if (cd_ofs == -1) {
- return;
- }
- MLoopUV *luv = BM_ELEM_CD_GET_VOID_P(loop, cd_ofs);
- if (luv != NULL && (luv->flag & MLOOPUV_PINNED)) {
- eattr->v_flag |= VFLAG_VERT_UV_PINNED;
- }
- if (uvedit_uv_select_test_ex(rdata->toolsettings, loop, cd_ofs)) {
- eattr->v_flag |= VFLAG_VERT_UV_SELECT;
- }
- if (uvedit_edge_select_test_ex(rdata->toolsettings, loop, cd_ofs)) {
- eattr->v_flag |= VFLAG_EDGE_UV_SELECT;
- }
+ if (cd_ofs == -1) {
+ return;
+ }
+ MLoopUV *luv = BM_ELEM_CD_GET_VOID_P(loop, cd_ofs);
+ if (luv != NULL && (luv->flag & MLOOPUV_PINNED)) {
+ eattr->v_flag |= VFLAG_VERT_UV_PINNED;
+ }
+ if (uvedit_uv_select_test_ex(rdata->toolsettings, loop, cd_ofs)) {
+ eattr->v_flag |= VFLAG_VERT_UV_SELECT;
+ }
+ if (uvedit_edge_select_test_ex(rdata->toolsettings, loop, cd_ofs)) {
+ eattr->v_flag |= VFLAG_EDGE_UV_SELECT;
+ }
}
-static void mesh_render_data_vert_flag(MeshRenderData *rdata, const BMVert *eve, EdgeDrawAttr *eattr)
+static void mesh_render_data_vert_flag(MeshRenderData *rdata,
+ const BMVert *eve,
+ EdgeDrawAttr *eattr)
{
- if (eve == rdata->eve_act) {
- eattr->e_flag |= VFLAG_VERT_ACTIVE;
- }
- if (BM_elem_flag_test(eve, BM_ELEM_SELECT)) {
- eattr->e_flag |= VFLAG_VERT_SELECTED;
- }
+ if (eve == rdata->eve_act) {
+ eattr->e_flag |= VFLAG_VERT_ACTIVE;
+ }
+ if (BM_elem_flag_test(eve, BM_ELEM_SELECT)) {
+ eattr->e_flag |= VFLAG_VERT_SELECTED;
+ }
}
-static bool add_edit_facedot(
- MeshRenderData *rdata, GPUVertBuf *vbo,
- const uint fdot_pos_id, const uint fdot_nor_flag_id,
- const int poly, const int base_vert_idx)
+static bool add_edit_facedot(MeshRenderData *rdata,
+ GPUVertBuf *vbo,
+ const uint fdot_pos_id,
+ const uint fdot_nor_flag_id,
+ const int poly,
+ const int base_vert_idx)
{
- BLI_assert(rdata->types & (MR_DATATYPE_VERT | MR_DATATYPE_LOOP | MR_DATATYPE_POLY));
- float pnor[3], center[3];
- int facedot_flag;
- if (rdata->edit_bmesh) {
- BMEditMesh *em = rdata->edit_bmesh;
- const BMFace *efa = BM_face_at_index(em->bm, poly);
- if (BM_elem_flag_test(efa, BM_ELEM_HIDDEN)) {
- return false;
- }
- if (rdata->edit_data && rdata->edit_data->vertexCos) {
- copy_v3_v3(center, rdata->edit_data->polyCos[poly]);
- copy_v3_v3(pnor, rdata->edit_data->polyNos[poly]);
- }
- else {
- BM_face_calc_center_median(efa, center);
- copy_v3_v3(pnor, efa->no);
- }
- facedot_flag = BM_elem_flag_test(efa, BM_ELEM_SELECT) ? ((efa == em->bm->act_face) ? -1 : 1) : 0;
- }
- else {
- MVert *mvert = rdata->mvert;
- const MPoly *mpoly = rdata->mpoly + poly;
- const MLoop *mloop = rdata->mloop + mpoly->loopstart;
-
- BKE_mesh_calc_poly_center(mpoly, mloop, mvert, center);
- BKE_mesh_calc_poly_normal(mpoly, mloop, mvert, pnor);
- /* No selection if not in edit mode. */
- facedot_flag = 0;
- }
-
- GPUPackedNormal nor = GPU_normal_convert_i10_v3(pnor);
- nor.w = facedot_flag;
- GPU_vertbuf_attr_set(vbo, fdot_nor_flag_id, base_vert_idx, &nor);
- GPU_vertbuf_attr_set(vbo, fdot_pos_id, base_vert_idx, center);
-
- return true;
+ BLI_assert(rdata->types & (MR_DATATYPE_VERT | MR_DATATYPE_LOOP | MR_DATATYPE_POLY));
+ float pnor[3], center[3];
+ int facedot_flag;
+ if (rdata->edit_bmesh) {
+ BMEditMesh *em = rdata->edit_bmesh;
+ const BMFace *efa = BM_face_at_index(em->bm, poly);
+ if (BM_elem_flag_test(efa, BM_ELEM_HIDDEN)) {
+ return false;
+ }
+ if (rdata->edit_data && rdata->edit_data->vertexCos) {
+ copy_v3_v3(center, rdata->edit_data->polyCos[poly]);
+ copy_v3_v3(pnor, rdata->edit_data->polyNos[poly]);
+ }
+ else {
+ BM_face_calc_center_median(efa, center);
+ copy_v3_v3(pnor, efa->no);
+ }
+ facedot_flag = BM_elem_flag_test(efa, BM_ELEM_SELECT) ? ((efa == em->bm->act_face) ? -1 : 1) :
+ 0;
+ }
+ else {
+ MVert *mvert = rdata->mvert;
+ const MPoly *mpoly = rdata->mpoly + poly;
+ const MLoop *mloop = rdata->mloop + mpoly->loopstart;
+
+ BKE_mesh_calc_poly_center(mpoly, mloop, mvert, center);
+ BKE_mesh_calc_poly_normal(mpoly, mloop, mvert, pnor);
+ /* No selection if not in edit mode. */
+ facedot_flag = 0;
+ }
+
+ GPUPackedNormal nor = GPU_normal_convert_i10_v3(pnor);
+ nor.w = facedot_flag;
+ GPU_vertbuf_attr_set(vbo, fdot_nor_flag_id, base_vert_idx, &nor);
+ GPU_vertbuf_attr_set(vbo, fdot_pos_id, base_vert_idx, center);
+
+ return true;
}
-static bool add_edit_facedot_mapped(
- MeshRenderData *rdata, GPUVertBuf *vbo,
- const uint fdot_pos_id, const uint fdot_nor_flag_id,
- const int poly, const int base_vert_idx)
+static bool add_edit_facedot_mapped(MeshRenderData *rdata,
+ GPUVertBuf *vbo,
+ const uint fdot_pos_id,
+ const uint fdot_nor_flag_id,
+ const int poly,
+ const int base_vert_idx)
{
- BLI_assert(rdata->types & (MR_DATATYPE_VERT | MR_DATATYPE_LOOP | MR_DATATYPE_POLY));
- float pnor[3], center[3];
- const int *p_origindex = rdata->mapped.p_origindex;
- const int p_orig = p_origindex[poly];
- if (p_orig == ORIGINDEX_NONE) {
- return false;
- }
- BMEditMesh *em = rdata->edit_bmesh;
- const BMFace *efa = BM_face_at_index(em->bm, p_orig);
- if (BM_elem_flag_test(efa, BM_ELEM_HIDDEN)) {
- return false;
- }
-
- Mesh *me_cage = em->mesh_eval_cage;
- const MVert *mvert = me_cage->mvert;
- const MLoop *mloop = me_cage->mloop;
- const MPoly *mpoly = me_cage->mpoly;
-
- const MPoly *mp = mpoly + poly;
- const MLoop *ml = mloop + mp->loopstart;
-
- BKE_mesh_calc_poly_center(mp, ml, mvert, center);
- BKE_mesh_calc_poly_normal(mp, ml, mvert, pnor);
-
- GPUPackedNormal nor = GPU_normal_convert_i10_v3(pnor);
- nor.w = BM_elem_flag_test(efa, BM_ELEM_SELECT) ? ((efa == em->bm->act_face) ? -1 : 1) : 0;
- GPU_vertbuf_attr_set(vbo, fdot_nor_flag_id, base_vert_idx, &nor);
- GPU_vertbuf_attr_set(vbo, fdot_pos_id, base_vert_idx, center);
-
- return true;
+ BLI_assert(rdata->types & (MR_DATATYPE_VERT | MR_DATATYPE_LOOP | MR_DATATYPE_POLY));
+ float pnor[3], center[3];
+ const int *p_origindex = rdata->mapped.p_origindex;
+ const int p_orig = p_origindex[poly];
+ if (p_orig == ORIGINDEX_NONE) {
+ return false;
+ }
+ BMEditMesh *em = rdata->edit_bmesh;
+ const BMFace *efa = BM_face_at_index(em->bm, p_orig);
+ if (BM_elem_flag_test(efa, BM_ELEM_HIDDEN)) {
+ return false;
+ }
+
+ Mesh *me_cage = em->mesh_eval_cage;
+ const MVert *mvert = me_cage->mvert;
+ const MLoop *mloop = me_cage->mloop;
+ const MPoly *mpoly = me_cage->mpoly;
+
+ const MPoly *mp = mpoly + poly;
+ const MLoop *ml = mloop + mp->loopstart;
+
+ BKE_mesh_calc_poly_center(mp, ml, mvert, center);
+ BKE_mesh_calc_poly_normal(mp, ml, mvert, pnor);
+
+ GPUPackedNormal nor = GPU_normal_convert_i10_v3(pnor);
+ nor.w = BM_elem_flag_test(efa, BM_ELEM_SELECT) ? ((efa == em->bm->act_face) ? -1 : 1) : 0;
+ GPU_vertbuf_attr_set(vbo, fdot_nor_flag_id, base_vert_idx, &nor);
+ GPU_vertbuf_attr_set(vbo, fdot_pos_id, base_vert_idx, center);
+
+ return true;
}
/** \} */
@@ -1749,70 +1803,76 @@ static bool add_edit_facedot_mapped(
/** Reset the selection structure, deallocating heap memory as appropriate. */
static void drw_mesh_weight_state_clear(struct DRW_MeshWeightState *wstate)
{
- MEM_SAFE_FREE(wstate->defgroup_sel);
+ MEM_SAFE_FREE(wstate->defgroup_sel);
- memset(wstate, 0, sizeof(*wstate));
+ memset(wstate, 0, sizeof(*wstate));
- wstate->defgroup_active = -1;
+ wstate->defgroup_active = -1;
}
/** Copy selection data from one structure to another, including heap memory. */
-static void drw_mesh_weight_state_copy(
- struct DRW_MeshWeightState *wstate_dst, const struct DRW_MeshWeightState *wstate_src)
+static void drw_mesh_weight_state_copy(struct DRW_MeshWeightState *wstate_dst,
+ const struct DRW_MeshWeightState *wstate_src)
{
- MEM_SAFE_FREE(wstate_dst->defgroup_sel);
+ MEM_SAFE_FREE(wstate_dst->defgroup_sel);
- memcpy(wstate_dst, wstate_src, sizeof(*wstate_dst));
+ memcpy(wstate_dst, wstate_src, sizeof(*wstate_dst));
- if (wstate_src->defgroup_sel) {
- wstate_dst->defgroup_sel = MEM_dupallocN(wstate_src->defgroup_sel);
- }
+ if (wstate_src->defgroup_sel) {
+ wstate_dst->defgroup_sel = MEM_dupallocN(wstate_src->defgroup_sel);
+ }
}
/** Compare two selection structures. */
-static bool drw_mesh_weight_state_compare(const struct DRW_MeshWeightState *a, const struct DRW_MeshWeightState *b)
+static bool drw_mesh_weight_state_compare(const struct DRW_MeshWeightState *a,
+ const struct DRW_MeshWeightState *b)
{
- return a->defgroup_active == b->defgroup_active &&
- a->defgroup_len == b->defgroup_len &&
- a->flags == b->flags &&
- a->alert_mode == b->alert_mode &&
- a->defgroup_sel_count == b->defgroup_sel_count &&
- ((!a->defgroup_sel && !b->defgroup_sel) ||
- (a->defgroup_sel && b->defgroup_sel &&
- memcmp(a->defgroup_sel, b->defgroup_sel, a->defgroup_len * sizeof(bool)) == 0));
+ return a->defgroup_active == b->defgroup_active && a->defgroup_len == b->defgroup_len &&
+ a->flags == b->flags && a->alert_mode == b->alert_mode &&
+ a->defgroup_sel_count == b->defgroup_sel_count &&
+ ((!a->defgroup_sel && !b->defgroup_sel) ||
+ (a->defgroup_sel && b->defgroup_sel &&
+ memcmp(a->defgroup_sel, b->defgroup_sel, a->defgroup_len * sizeof(bool)) == 0));
}
-static void drw_mesh_weight_state_extract(
- Object *ob, Mesh *me, const ToolSettings *ts, bool paint_mode,
- struct DRW_MeshWeightState *wstate)
+static void drw_mesh_weight_state_extract(Object *ob,
+ Mesh *me,
+ const ToolSettings *ts,
+ bool paint_mode,
+ struct DRW_MeshWeightState *wstate)
{
- /* Extract complete vertex weight group selection state and mode flags. */
- memset(wstate, 0, sizeof(*wstate));
-
- wstate->defgroup_active = ob->actdef - 1;
- wstate->defgroup_len = BLI_listbase_count(&ob->defbase);
-
- wstate->alert_mode = ts->weightuser;
-
- if (paint_mode && ts->multipaint) {
- /* Multipaint needs to know all selected bones, not just the active group.
- * This is actually a relatively expensive operation, but caching would be difficult. */
- wstate->defgroup_sel = BKE_object_defgroup_selected_get(ob, wstate->defgroup_len, &wstate->defgroup_sel_count);
-
- if (wstate->defgroup_sel_count > 1) {
- wstate->flags |= DRW_MESH_WEIGHT_STATE_MULTIPAINT | (ts->auto_normalize ? DRW_MESH_WEIGHT_STATE_AUTO_NORMALIZE : 0);
-
- if (me->editflag & ME_EDIT_MIRROR_X) {
- BKE_object_defgroup_mirror_selection(
- ob, wstate->defgroup_len, wstate->defgroup_sel, wstate->defgroup_sel, &wstate->defgroup_sel_count);
- }
- }
- /* With only one selected bone, Multipaint reverts to regular mode. */
- else {
- wstate->defgroup_sel_count = 0;
- MEM_SAFE_FREE(wstate->defgroup_sel);
- }
- }
+ /* Extract complete vertex weight group selection state and mode flags. */
+ memset(wstate, 0, sizeof(*wstate));
+
+ wstate->defgroup_active = ob->actdef - 1;
+ wstate->defgroup_len = BLI_listbase_count(&ob->defbase);
+
+ wstate->alert_mode = ts->weightuser;
+
+ if (paint_mode && ts->multipaint) {
+ /* Multipaint needs to know all selected bones, not just the active group.
+ * This is actually a relatively expensive operation, but caching would be difficult. */
+ wstate->defgroup_sel = BKE_object_defgroup_selected_get(
+ ob, wstate->defgroup_len, &wstate->defgroup_sel_count);
+
+ if (wstate->defgroup_sel_count > 1) {
+ wstate->flags |= DRW_MESH_WEIGHT_STATE_MULTIPAINT |
+ (ts->auto_normalize ? DRW_MESH_WEIGHT_STATE_AUTO_NORMALIZE : 0);
+
+ if (me->editflag & ME_EDIT_MIRROR_X) {
+ BKE_object_defgroup_mirror_selection(ob,
+ wstate->defgroup_len,
+ wstate->defgroup_sel,
+ wstate->defgroup_sel,
+ &wstate->defgroup_sel_count);
+ }
+ }
+ /* With only one selected bone, Multipaint reverts to regular mode. */
+ else {
+ wstate->defgroup_sel_count = 0;
+ MEM_SAFE_FREE(wstate->defgroup_sel);
+ }
+ }
}
/** \} */
@@ -1822,1318 +1882,1352 @@ static void drw_mesh_weight_state_extract(
* \{ */
typedef struct MeshBatchCache {
- /* In-order buffers: all verts are specified only once,
- * or once per loop. To be used with a GPUIndexBuf. */
- struct {
- /* Vertex data. */
- GPUVertBuf *pos_nor;
- GPUVertBuf *weights;
- /* Loop data. */
- GPUVertBuf *loop_pos_nor;
- GPUVertBuf *loop_uv_tan;
- GPUVertBuf *loop_vcol;
- GPUVertBuf *loop_edge_fac;
- GPUVertBuf *loop_orco;
- } ordered;
-
- /* Edit Mesh Data:
- * Edit cage can be different from final mesh so vertex count
- * might differ. */
- struct {
- /* TODO(fclem): Reuse ordered.loop_pos_nor and maybe even
- * ordered.loop_uv_tan when cage match final mesh. */
- GPUVertBuf *loop_pos_nor;
- GPUVertBuf *loop_data;
- GPUVertBuf *loop_lnor;
- GPUVertBuf *facedots_pos_nor_data;
- /* UV data without modifiers applied.
- * Vertex count always matches the cage. */
- GPUVertBuf *loop_uv;
- GPUVertBuf *loop_uv_data;
- GPUVertBuf *loop_stretch_angle;
- GPUVertBuf *loop_stretch_area;
- GPUVertBuf *facedots_uv;
- GPUVertBuf *facedots_uv_data;
- /* Selection */
- GPUVertBuf *loop_vert_idx;
- GPUVertBuf *loop_edge_idx;
- GPUVertBuf *loop_face_idx;
- GPUVertBuf *facedots_idx;
- } edit;
-
- /* Index Buffers:
- * Only need to be updated when topology changes. */
- struct {
- /* Indices to verts. */
- GPUIndexBuf *surf_tris;
- GPUIndexBuf *edges_lines;
- GPUIndexBuf *edges_adj_lines;
- GPUIndexBuf *loose_edges_lines;
- /* Indices to vloops. */
- GPUIndexBuf *loops_tris;
- GPUIndexBuf *loops_lines;
- GPUIndexBuf *loops_line_strips;
- /* Edit mode. */
- GPUIndexBuf *edit_loops_points; /* verts */
- GPUIndexBuf *edit_loops_lines; /* edges */
- GPUIndexBuf *edit_loops_tris; /* faces */
- /* Edit UVs */
- GPUIndexBuf *edituv_loops_points; /* verts */
- GPUIndexBuf *edituv_loops_line_strips; /* edges */
- GPUIndexBuf *edituv_loops_tri_fans; /* faces */
- } ibo;
-
- struct {
- /* Surfaces / Render */
- GPUBatch *surface;
- GPUBatch *surface_weights;
- /* Edit mode */
- GPUBatch *edit_triangles;
- GPUBatch *edit_vertices;
- GPUBatch *edit_edges;
- GPUBatch *edit_lnor;
- GPUBatch *edit_facedots;
- /* Edit UVs */
- GPUBatch *edituv_faces_strech_area;
- GPUBatch *edituv_faces_strech_angle;
- GPUBatch *edituv_faces;
- GPUBatch *edituv_edges;
- GPUBatch *edituv_verts;
- GPUBatch *edituv_facedots;
- /* Edit selection */
- GPUBatch *edit_selection_verts;
- GPUBatch *edit_selection_edges;
- GPUBatch *edit_selection_faces;
- GPUBatch *edit_selection_facedots;
- /* Common display / Other */
- GPUBatch *all_verts;
- GPUBatch *all_edges;
- GPUBatch *loose_edges;
- GPUBatch *edge_detection;
- GPUBatch *wire_edges; /* Individual edges with face normals. */
- GPUBatch *wire_loops; /* Loops around faces. */
- GPUBatch *wire_loops_uvs; /* Same as wire_loops but only has uvs. */
- } batch;
-
- GPUIndexBuf **surf_per_mat_tris;
- GPUBatch **surf_per_mat;
-
- /* Arrays of bool uniform names (and values) that will be used to
- * set sRGB conversion for auto attributes. */
- char *auto_layer_names;
- int *auto_layer_is_srgb;
- int auto_layer_len;
-
- /* settings to determine if cache is invalid */
- bool is_maybe_dirty;
- bool is_dirty; /* Instantly invalidates cache, skipping mesh check */
- int edge_len;
- int tri_len;
- int poly_len;
- int vert_len;
- int mat_len;
- bool is_editmode;
- bool is_uvsyncsel;
-
- struct DRW_MeshWeightState weight_state;
-
- DRW_MeshCDMask cd_used, cd_needed;
-
- /* XXX, only keep for as long as sculpt mode uses shaded drawing. */
- bool is_sculpt_points_tag;
-
- /* Valid only if edge_detection is up to date. */
- bool is_manifold;
+ /* In-order buffers: all verts are specified only once,
+ * or once per loop. To be used with a GPUIndexBuf. */
+ struct {
+ /* Vertex data. */
+ GPUVertBuf *pos_nor;
+ GPUVertBuf *weights;
+ /* Loop data. */
+ GPUVertBuf *loop_pos_nor;
+ GPUVertBuf *loop_uv_tan;
+ GPUVertBuf *loop_vcol;
+ GPUVertBuf *loop_edge_fac;
+ GPUVertBuf *loop_orco;
+ } ordered;
+
+ /* Edit Mesh Data:
+ * Edit cage can be different from final mesh so vertex count
+ * might differ. */
+ struct {
+ /* TODO(fclem): Reuse ordered.loop_pos_nor and maybe even
+ * ordered.loop_uv_tan when cage match final mesh. */
+ GPUVertBuf *loop_pos_nor;
+ GPUVertBuf *loop_data;
+ GPUVertBuf *loop_lnor;
+ GPUVertBuf *facedots_pos_nor_data;
+ /* UV data without modifiers applied.
+ * Vertex count always matches the cage. */
+ GPUVertBuf *loop_uv;
+ GPUVertBuf *loop_uv_data;
+ GPUVertBuf *loop_stretch_angle;
+ GPUVertBuf *loop_stretch_area;
+ GPUVertBuf *facedots_uv;
+ GPUVertBuf *facedots_uv_data;
+ /* Selection */
+ GPUVertBuf *loop_vert_idx;
+ GPUVertBuf *loop_edge_idx;
+ GPUVertBuf *loop_face_idx;
+ GPUVertBuf *facedots_idx;
+ } edit;
+
+ /* Index Buffers:
+ * Only need to be updated when topology changes. */
+ struct {
+ /* Indices to verts. */
+ GPUIndexBuf *surf_tris;
+ GPUIndexBuf *edges_lines;
+ GPUIndexBuf *edges_adj_lines;
+ GPUIndexBuf *loose_edges_lines;
+ /* Indices to vloops. */
+ GPUIndexBuf *loops_tris;
+ GPUIndexBuf *loops_lines;
+ GPUIndexBuf *loops_line_strips;
+ /* Edit mode. */
+ GPUIndexBuf *edit_loops_points; /* verts */
+ GPUIndexBuf *edit_loops_lines; /* edges */
+ GPUIndexBuf *edit_loops_tris; /* faces */
+ /* Edit UVs */
+ GPUIndexBuf *edituv_loops_points; /* verts */
+ GPUIndexBuf *edituv_loops_line_strips; /* edges */
+ GPUIndexBuf *edituv_loops_tri_fans; /* faces */
+ } ibo;
+
+ struct {
+ /* Surfaces / Render */
+ GPUBatch *surface;
+ GPUBatch *surface_weights;
+ /* Edit mode */
+ GPUBatch *edit_triangles;
+ GPUBatch *edit_vertices;
+ GPUBatch *edit_edges;
+ GPUBatch *edit_lnor;
+ GPUBatch *edit_facedots;
+ /* Edit UVs */
+ GPUBatch *edituv_faces_strech_area;
+ GPUBatch *edituv_faces_strech_angle;
+ GPUBatch *edituv_faces;
+ GPUBatch *edituv_edges;
+ GPUBatch *edituv_verts;
+ GPUBatch *edituv_facedots;
+ /* Edit selection */
+ GPUBatch *edit_selection_verts;
+ GPUBatch *edit_selection_edges;
+ GPUBatch *edit_selection_faces;
+ GPUBatch *edit_selection_facedots;
+ /* Common display / Other */
+ GPUBatch *all_verts;
+ GPUBatch *all_edges;
+ GPUBatch *loose_edges;
+ GPUBatch *edge_detection;
+ GPUBatch *wire_edges; /* Individual edges with face normals. */
+ GPUBatch *wire_loops; /* Loops around faces. */
+ GPUBatch *wire_loops_uvs; /* Same as wire_loops but only has uvs. */
+ } batch;
+
+ GPUIndexBuf **surf_per_mat_tris;
+ GPUBatch **surf_per_mat;
+
+ /* Arrays of bool uniform names (and values) that will be used to
+ * set sRGB conversion for auto attributes. */
+ char *auto_layer_names;
+ int *auto_layer_is_srgb;
+ int auto_layer_len;
+
+ /* settings to determine if cache is invalid */
+ bool is_maybe_dirty;
+ bool is_dirty; /* Instantly invalidates cache, skipping mesh check */
+ int edge_len;
+ int tri_len;
+ int poly_len;
+ int vert_len;
+ int mat_len;
+ bool is_editmode;
+ bool is_uvsyncsel;
+
+ struct DRW_MeshWeightState weight_state;
+
+ DRW_MeshCDMask cd_used, cd_needed;
+
+ /* XXX, only keep for as long as sculpt mode uses shaded drawing. */
+ bool is_sculpt_points_tag;
+
+ /* Valid only if edge_detection is up to date. */
+ bool is_manifold;
} MeshBatchCache;
/* GPUBatch cache management. */
static bool mesh_batch_cache_valid(Mesh *me)
{
- MeshBatchCache *cache = me->runtime.batch_cache;
-
- if (cache == NULL) {
- return false;
- }
-
- if (cache->mat_len != mesh_render_mat_len_get(me)) {
- return false;
- }
-
- if (cache->is_editmode != (me->edit_mesh != NULL)) {
- return false;
- }
-
- if (cache->is_dirty) {
- return false;
- }
-
- if (cache->is_maybe_dirty == false) {
- return true;
- }
- else {
- if (cache->is_editmode) {
- return false;
- }
- else if ((cache->vert_len != mesh_render_verts_len_get(me)) ||
- (cache->edge_len != mesh_render_edges_len_get(me)) ||
- (cache->tri_len != mesh_render_looptri_len_get(me)) ||
- (cache->poly_len != mesh_render_polys_len_get(me)) ||
- (cache->mat_len != mesh_render_mat_len_get(me)))
- {
- return false;
- }
- }
-
- return true;
+ MeshBatchCache *cache = me->runtime.batch_cache;
+
+ if (cache == NULL) {
+ return false;
+ }
+
+ if (cache->mat_len != mesh_render_mat_len_get(me)) {
+ return false;
+ }
+
+ if (cache->is_editmode != (me->edit_mesh != NULL)) {
+ return false;
+ }
+
+ if (cache->is_dirty) {
+ return false;
+ }
+
+ if (cache->is_maybe_dirty == false) {
+ return true;
+ }
+ else {
+ if (cache->is_editmode) {
+ return false;
+ }
+ else if ((cache->vert_len != mesh_render_verts_len_get(me)) ||
+ (cache->edge_len != mesh_render_edges_len_get(me)) ||
+ (cache->tri_len != mesh_render_looptri_len_get(me)) ||
+ (cache->poly_len != mesh_render_polys_len_get(me)) ||
+ (cache->mat_len != mesh_render_mat_len_get(me))) {
+ return false;
+ }
+ }
+
+ return true;
}
static void mesh_batch_cache_init(Mesh *me)
{
- MeshBatchCache *cache = me->runtime.batch_cache;
+ MeshBatchCache *cache = me->runtime.batch_cache;
- if (!cache) {
- cache = me->runtime.batch_cache = MEM_callocN(sizeof(*cache), __func__);
- }
- else {
- memset(cache, 0, sizeof(*cache));
- }
+ if (!cache) {
+ cache = me->runtime.batch_cache = MEM_callocN(sizeof(*cache), __func__);
+ }
+ else {
+ memset(cache, 0, sizeof(*cache));
+ }
- cache->is_editmode = me->edit_mesh != NULL;
+ cache->is_editmode = me->edit_mesh != NULL;
- if (cache->is_editmode == false) {
- cache->edge_len = mesh_render_edges_len_get(me);
- cache->tri_len = mesh_render_looptri_len_get(me);
- cache->poly_len = mesh_render_polys_len_get(me);
- cache->vert_len = mesh_render_verts_len_get(me);
- }
+ if (cache->is_editmode == false) {
+ cache->edge_len = mesh_render_edges_len_get(me);
+ cache->tri_len = mesh_render_looptri_len_get(me);
+ cache->poly_len = mesh_render_polys_len_get(me);
+ cache->vert_len = mesh_render_verts_len_get(me);
+ }
- cache->mat_len = mesh_render_mat_len_get(me);
- cache->surf_per_mat_tris = MEM_callocN(sizeof(*cache->surf_per_mat_tris) * cache->mat_len, __func__);
- cache->surf_per_mat = MEM_callocN(sizeof(*cache->surf_per_mat) * cache->mat_len, __func__);
+ cache->mat_len = mesh_render_mat_len_get(me);
+ cache->surf_per_mat_tris = MEM_callocN(sizeof(*cache->surf_per_mat_tris) * cache->mat_len,
+ __func__);
+ cache->surf_per_mat = MEM_callocN(sizeof(*cache->surf_per_mat) * cache->mat_len, __func__);
- cache->is_maybe_dirty = false;
- cache->is_dirty = false;
+ cache->is_maybe_dirty = false;
+ cache->is_dirty = false;
- drw_mesh_weight_state_clear(&cache->weight_state);
+ drw_mesh_weight_state_clear(&cache->weight_state);
}
static MeshBatchCache *mesh_batch_cache_get(Mesh *me)
{
- if (!mesh_batch_cache_valid(me)) {
- mesh_batch_cache_clear(me);
- mesh_batch_cache_init(me);
- }
- return me->runtime.batch_cache;
+ if (!mesh_batch_cache_valid(me)) {
+ mesh_batch_cache_clear(me);
+ mesh_batch_cache_init(me);
+ }
+ return me->runtime.batch_cache;
}
-static void mesh_batch_cache_check_vertex_group(MeshBatchCache *cache, const struct DRW_MeshWeightState *wstate)
+static void mesh_batch_cache_check_vertex_group(MeshBatchCache *cache,
+ const struct DRW_MeshWeightState *wstate)
{
- if (!drw_mesh_weight_state_compare(&cache->weight_state, wstate)) {
- GPU_BATCH_CLEAR_SAFE(cache->batch.surface_weights);
- GPU_VERTBUF_DISCARD_SAFE(cache->ordered.weights);
+ if (!drw_mesh_weight_state_compare(&cache->weight_state, wstate)) {
+ GPU_BATCH_CLEAR_SAFE(cache->batch.surface_weights);
+ GPU_VERTBUF_DISCARD_SAFE(cache->ordered.weights);
- drw_mesh_weight_state_clear(&cache->weight_state);
- }
+ drw_mesh_weight_state_clear(&cache->weight_state);
+ }
}
static void mesh_batch_cache_discard_shaded_tri(MeshBatchCache *cache)
{
- GPU_VERTBUF_DISCARD_SAFE(cache->ordered.loop_pos_nor);
- GPU_VERTBUF_DISCARD_SAFE(cache->ordered.loop_uv_tan);
- GPU_VERTBUF_DISCARD_SAFE(cache->ordered.loop_vcol);
- GPU_VERTBUF_DISCARD_SAFE(cache->ordered.loop_orco);
-
- if (cache->surf_per_mat_tris) {
- for (int i = 0; i < cache->mat_len; i++) {
- GPU_INDEXBUF_DISCARD_SAFE(cache->surf_per_mat_tris[i]);
- }
- }
- MEM_SAFE_FREE(cache->surf_per_mat_tris);
- if (cache->surf_per_mat) {
- for (int i = 0; i < cache->mat_len; i++) {
- GPU_BATCH_DISCARD_SAFE(cache->surf_per_mat[i]);
- }
- }
- MEM_SAFE_FREE(cache->surf_per_mat);
-
- MEM_SAFE_FREE(cache->auto_layer_names);
- MEM_SAFE_FREE(cache->auto_layer_is_srgb);
-
- cache->mat_len = 0;
+ GPU_VERTBUF_DISCARD_SAFE(cache->ordered.loop_pos_nor);
+ GPU_VERTBUF_DISCARD_SAFE(cache->ordered.loop_uv_tan);
+ GPU_VERTBUF_DISCARD_SAFE(cache->ordered.loop_vcol);
+ GPU_VERTBUF_DISCARD_SAFE(cache->ordered.loop_orco);
+
+ if (cache->surf_per_mat_tris) {
+ for (int i = 0; i < cache->mat_len; i++) {
+ GPU_INDEXBUF_DISCARD_SAFE(cache->surf_per_mat_tris[i]);
+ }
+ }
+ MEM_SAFE_FREE(cache->surf_per_mat_tris);
+ if (cache->surf_per_mat) {
+ for (int i = 0; i < cache->mat_len; i++) {
+ GPU_BATCH_DISCARD_SAFE(cache->surf_per_mat[i]);
+ }
+ }
+ MEM_SAFE_FREE(cache->surf_per_mat);
+
+ MEM_SAFE_FREE(cache->auto_layer_names);
+ MEM_SAFE_FREE(cache->auto_layer_is_srgb);
+
+ cache->mat_len = 0;
}
static void mesh_batch_cache_discard_uvedit(MeshBatchCache *cache)
{
- GPU_VERTBUF_DISCARD_SAFE(cache->edit.loop_stretch_angle);
- GPU_VERTBUF_DISCARD_SAFE(cache->edit.loop_stretch_area);
- GPU_VERTBUF_DISCARD_SAFE(cache->edit.loop_uv);
- GPU_VERTBUF_DISCARD_SAFE(cache->edit.loop_uv_data);
- GPU_VERTBUF_DISCARD_SAFE(cache->edit.facedots_uv);
- GPU_VERTBUF_DISCARD_SAFE(cache->edit.facedots_uv_data);
- GPU_INDEXBUF_DISCARD_SAFE(cache->ibo.edituv_loops_tri_fans);
- GPU_INDEXBUF_DISCARD_SAFE(cache->ibo.edituv_loops_line_strips);
- GPU_INDEXBUF_DISCARD_SAFE(cache->ibo.edituv_loops_points);
- GPU_BATCH_DISCARD_SAFE(cache->batch.edituv_faces_strech_area);
- GPU_BATCH_DISCARD_SAFE(cache->batch.edituv_faces_strech_angle);
- GPU_BATCH_DISCARD_SAFE(cache->batch.edituv_faces);
- GPU_BATCH_DISCARD_SAFE(cache->batch.edituv_edges);
- GPU_BATCH_DISCARD_SAFE(cache->batch.edituv_verts);
- GPU_BATCH_DISCARD_SAFE(cache->batch.edituv_facedots);
+ GPU_VERTBUF_DISCARD_SAFE(cache->edit.loop_stretch_angle);
+ GPU_VERTBUF_DISCARD_SAFE(cache->edit.loop_stretch_area);
+ GPU_VERTBUF_DISCARD_SAFE(cache->edit.loop_uv);
+ GPU_VERTBUF_DISCARD_SAFE(cache->edit.loop_uv_data);
+ GPU_VERTBUF_DISCARD_SAFE(cache->edit.facedots_uv);
+ GPU_VERTBUF_DISCARD_SAFE(cache->edit.facedots_uv_data);
+ GPU_INDEXBUF_DISCARD_SAFE(cache->ibo.edituv_loops_tri_fans);
+ GPU_INDEXBUF_DISCARD_SAFE(cache->ibo.edituv_loops_line_strips);
+ GPU_INDEXBUF_DISCARD_SAFE(cache->ibo.edituv_loops_points);
+ GPU_BATCH_DISCARD_SAFE(cache->batch.edituv_faces_strech_area);
+ GPU_BATCH_DISCARD_SAFE(cache->batch.edituv_faces_strech_angle);
+ GPU_BATCH_DISCARD_SAFE(cache->batch.edituv_faces);
+ GPU_BATCH_DISCARD_SAFE(cache->batch.edituv_edges);
+ GPU_BATCH_DISCARD_SAFE(cache->batch.edituv_verts);
+ GPU_BATCH_DISCARD_SAFE(cache->batch.edituv_facedots);
}
void DRW_mesh_batch_cache_dirty_tag(Mesh *me, int mode)
{
- MeshBatchCache *cache = me->runtime.batch_cache;
- if (cache == NULL) {
- return;
- }
- switch (mode) {
- case BKE_MESH_BATCH_DIRTY_MAYBE_ALL:
- cache->is_maybe_dirty = true;
- break;
- case BKE_MESH_BATCH_DIRTY_SELECT:
- GPU_VERTBUF_DISCARD_SAFE(cache->edit.loop_data);
- GPU_VERTBUF_DISCARD_SAFE(cache->edit.facedots_pos_nor_data);
- GPU_BATCH_DISCARD_SAFE(cache->batch.edit_triangles);
- GPU_BATCH_DISCARD_SAFE(cache->batch.edit_vertices);
- GPU_BATCH_DISCARD_SAFE(cache->batch.edit_edges);
- GPU_BATCH_DISCARD_SAFE(cache->batch.edit_facedots);
- /* Paint mode selection */
- /* TODO only do that in paint mode. */
- GPU_VERTBUF_DISCARD_SAFE(cache->ordered.loop_pos_nor);
- GPU_BATCH_DISCARD_SAFE(cache->batch.surface);
- GPU_BATCH_DISCARD_SAFE(cache->batch.wire_loops);
- if (cache->surf_per_mat) {
- for (int i = 0; i < cache->mat_len; i++) {
- GPU_BATCH_DISCARD_SAFE(cache->surf_per_mat[i]);
- }
- }
- /* Because visible UVs depend on edit mode selection, discard everything. */
- mesh_batch_cache_discard_uvedit(cache);
- break;
- case BKE_MESH_BATCH_DIRTY_ALL:
- cache->is_dirty = true;
- break;
- case BKE_MESH_BATCH_DIRTY_SHADING:
- mesh_batch_cache_discard_shaded_tri(cache);
- mesh_batch_cache_discard_uvedit(cache);
- break;
- case BKE_MESH_BATCH_DIRTY_SCULPT_COORDS:
- cache->is_sculpt_points_tag = true;
- break;
- case BKE_MESH_BATCH_DIRTY_UVEDIT_ALL:
- mesh_batch_cache_discard_uvedit(cache);
- break;
- case BKE_MESH_BATCH_DIRTY_UVEDIT_SELECT:
- GPU_VERTBUF_DISCARD_SAFE(cache->edit.loop_uv_data);
- GPU_VERTBUF_DISCARD_SAFE(cache->edit.facedots_uv_data);
- GPU_BATCH_DISCARD_SAFE(cache->batch.edituv_faces_strech_area);
- GPU_BATCH_DISCARD_SAFE(cache->batch.edituv_faces_strech_angle);
- GPU_BATCH_DISCARD_SAFE(cache->batch.edituv_faces);
- GPU_BATCH_DISCARD_SAFE(cache->batch.edituv_edges);
- GPU_BATCH_DISCARD_SAFE(cache->batch.edituv_verts);
- GPU_BATCH_DISCARD_SAFE(cache->batch.edituv_facedots);
- break;
- default:
- BLI_assert(0);
- }
+ MeshBatchCache *cache = me->runtime.batch_cache;
+ if (cache == NULL) {
+ return;
+ }
+ switch (mode) {
+ case BKE_MESH_BATCH_DIRTY_MAYBE_ALL:
+ cache->is_maybe_dirty = true;
+ break;
+ case BKE_MESH_BATCH_DIRTY_SELECT:
+ GPU_VERTBUF_DISCARD_SAFE(cache->edit.loop_data);
+ GPU_VERTBUF_DISCARD_SAFE(cache->edit.facedots_pos_nor_data);
+ GPU_BATCH_DISCARD_SAFE(cache->batch.edit_triangles);
+ GPU_BATCH_DISCARD_SAFE(cache->batch.edit_vertices);
+ GPU_BATCH_DISCARD_SAFE(cache->batch.edit_edges);
+ GPU_BATCH_DISCARD_SAFE(cache->batch.edit_facedots);
+ /* Paint mode selection */
+ /* TODO only do that in paint mode. */
+ GPU_VERTBUF_DISCARD_SAFE(cache->ordered.loop_pos_nor);
+ GPU_BATCH_DISCARD_SAFE(cache->batch.surface);
+ GPU_BATCH_DISCARD_SAFE(cache->batch.wire_loops);
+ if (cache->surf_per_mat) {
+ for (int i = 0; i < cache->mat_len; i++) {
+ GPU_BATCH_DISCARD_SAFE(cache->surf_per_mat[i]);
+ }
+ }
+ /* Because visible UVs depends on edit mode selection, discard everything. */
+ mesh_batch_cache_discard_uvedit(cache);
+ break;
+ case BKE_MESH_BATCH_DIRTY_ALL:
+ cache->is_dirty = true;
+ break;
+ case BKE_MESH_BATCH_DIRTY_SHADING:
+ mesh_batch_cache_discard_shaded_tri(cache);
+ mesh_batch_cache_discard_uvedit(cache);
+ break;
+ case BKE_MESH_BATCH_DIRTY_SCULPT_COORDS:
+ cache->is_sculpt_points_tag = true;
+ break;
+ case BKE_MESH_BATCH_DIRTY_UVEDIT_ALL:
+ mesh_batch_cache_discard_uvedit(cache);
+ break;
+ case BKE_MESH_BATCH_DIRTY_UVEDIT_SELECT:
+ GPU_VERTBUF_DISCARD_SAFE(cache->edit.loop_uv_data);
+ GPU_VERTBUF_DISCARD_SAFE(cache->edit.facedots_uv_data);
+ GPU_BATCH_DISCARD_SAFE(cache->batch.edituv_faces_strech_area);
+ GPU_BATCH_DISCARD_SAFE(cache->batch.edituv_faces_strech_angle);
+ GPU_BATCH_DISCARD_SAFE(cache->batch.edituv_faces);
+ GPU_BATCH_DISCARD_SAFE(cache->batch.edituv_edges);
+ GPU_BATCH_DISCARD_SAFE(cache->batch.edituv_verts);
+ GPU_BATCH_DISCARD_SAFE(cache->batch.edituv_facedots);
+ break;
+ default:
+ BLI_assert(0);
+ }
}
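
For context, this is the entry point editors use to invalidate cached GPU data; the switch above maps each dirty reason to the narrowest possible discard. A minimal, hypothetical caller sketch (only the function and the enum value come from the code above; the surrounding wiring is an assumption):

/* After a selection change in edit mode, drop only the selection-dependent
 * buffers; position/normal VBOs survive and are reused on the next redraw. */
static void example_on_select_changed(Mesh *me)
{
  DRW_mesh_batch_cache_dirty_tag(me, BKE_MESH_BATCH_DIRTY_SELECT);
}
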
static void mesh_batch_cache_clear(Mesh *me)
{
- MeshBatchCache *cache = me->runtime.batch_cache;
- if (!cache) {
- return;
- }
-
- for (int i = 0; i < sizeof(cache->ordered) / sizeof(void *); ++i) {
- GPUVertBuf **vbo = (GPUVertBuf **)&cache->ordered;
- GPU_VERTBUF_DISCARD_SAFE(vbo[i]);
- }
- for (int i = 0; i < sizeof(cache->edit) / sizeof(void *); ++i) {
- GPUVertBuf **vbo = (GPUVertBuf **)&cache->edit;
- GPU_VERTBUF_DISCARD_SAFE(vbo[i]);
- }
- for (int i = 0; i < sizeof(cache->ibo) / sizeof(void *); ++i) {
- GPUIndexBuf **ibo = (GPUIndexBuf **)&cache->ibo;
- GPU_INDEXBUF_DISCARD_SAFE(ibo[i]);
- }
- for (int i = 0; i < sizeof(cache->batch) / sizeof(void *); ++i) {
- GPUBatch **batch = (GPUBatch **)&cache->batch;
- GPU_BATCH_DISCARD_SAFE(batch[i]);
- }
-
- mesh_batch_cache_discard_shaded_tri(cache);
-
- mesh_batch_cache_discard_uvedit(cache);
-
- drw_mesh_weight_state_clear(&cache->weight_state);
+ MeshBatchCache *cache = me->runtime.batch_cache;
+ if (!cache) {
+ return;
+ }
+
+ for (int i = 0; i < sizeof(cache->ordered) / sizeof(void *); ++i) {
+ GPUVertBuf **vbo = (GPUVertBuf **)&cache->ordered;
+ GPU_VERTBUF_DISCARD_SAFE(vbo[i]);
+ }
+ for (int i = 0; i < sizeof(cache->edit) / sizeof(void *); ++i) {
+ GPUVertBuf **vbo = (GPUVertBuf **)&cache->edit;
+ GPU_VERTBUF_DISCARD_SAFE(vbo[i]);
+ }
+ for (int i = 0; i < sizeof(cache->ibo) / sizeof(void *); ++i) {
+ GPUIndexBuf **ibo = (GPUIndexBuf **)&cache->ibo;
+ GPU_INDEXBUF_DISCARD_SAFE(ibo[i]);
+ }
+ for (int i = 0; i < sizeof(cache->batch) / sizeof(void *); ++i) {
+ GPUBatch **batch = (GPUBatch **)&cache->batch;
+ GPU_BATCH_DISCARD_SAFE(batch[i]);
+ }
+
+ mesh_batch_cache_discard_shaded_tri(cache);
+
+ mesh_batch_cache_discard_uvedit(cache);
+
+ drw_mesh_weight_state_clear(&cache->weight_state);
}
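
mesh_batch_cache_clear() above walks each sub-struct as a flat array of pointers, which is only valid because those structs contain pointers exclusively. A self-contained sketch of the same pattern with illustrative types (not Blender API):

#include <stddef.h>
#include <stdlib.h>

typedef struct Buf Buf; /* stand-in for GPUVertBuf / GPUIndexBuf / GPUBatch */

struct PtrGroup {
  Buf *a, *b, *c; /* pointers only: the cast below depends on it */
};

static void ptr_group_clear(struct PtrGroup *group)
{
  Buf **bufs = (Buf **)group;
  for (size_t i = 0; i < sizeof(*group) / sizeof(void *); i++) {
    free(bufs[i]); /* GPU_*_DISCARD_SAFE() in the real code */
    bufs[i] = NULL;
  }
}

Adding any non-pointer member to such a group would silently corrupt the iteration, which is why the cache keeps these groups pointer-only.
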
void DRW_mesh_batch_cache_free(Mesh *me)
{
- mesh_batch_cache_clear(me);
- MEM_SAFE_FREE(me->runtime.batch_cache);
+ mesh_batch_cache_clear(me);
+ MEM_SAFE_FREE(me->runtime.batch_cache);
}
/* GPUBatch cache usage. */
-static void mesh_create_edit_vertex_loops(
- MeshRenderData *rdata,
- GPUVertBuf *vbo_pos_nor,
- GPUVertBuf *vbo_lnor,
- GPUVertBuf *vbo_uv,
- GPUVertBuf *vbo_data,
- GPUVertBuf *vbo_verts,
- GPUVertBuf *vbo_edges,
- GPUVertBuf *vbo_faces)
+static void mesh_create_edit_vertex_loops(MeshRenderData *rdata,
+ GPUVertBuf *vbo_pos_nor,
+ GPUVertBuf *vbo_lnor,
+ GPUVertBuf *vbo_uv,
+ GPUVertBuf *vbo_data,
+ GPUVertBuf *vbo_verts,
+ GPUVertBuf *vbo_edges,
+ GPUVertBuf *vbo_faces)
{
#if 0
- const int vert_len = mesh_render_data_verts_len_get_maybe_mapped(rdata);
- const int edge_len = mesh_render_data_edges_len_get_maybe_mapped(rdata);
+ const int vert_len = mesh_render_data_verts_len_get_maybe_mapped(rdata);
+ const int edge_len = mesh_render_data_edges_len_get_maybe_mapped(rdata);
#endif
- const int poly_len = mesh_render_data_polys_len_get_maybe_mapped(rdata);
- const int lvert_len = mesh_render_data_loose_verts_len_get_maybe_mapped(rdata);
- const int ledge_len = mesh_render_data_loose_edges_len_get_maybe_mapped(rdata);
- const int loop_len = mesh_render_data_loops_len_get_maybe_mapped(rdata);
- const int tot_loop_len = loop_len + ledge_len * 2 + lvert_len;
- float (*lnors)[3] = rdata->loop_normals;
- uchar fflag;
-
- /* Static formats */
- static struct { GPUVertFormat sel_id, pos_nor, lnor, flag, uv; } format = {{ 0 }};
- static struct { uint sel_id, pos, nor, lnor, data, uvs; } attr_id;
- if (format.sel_id.attr_len == 0) {
- attr_id.sel_id = GPU_vertformat_attr_add(&format.sel_id, "color", GPU_COMP_U32, 1, GPU_FETCH_INT);
- attr_id.pos = GPU_vertformat_attr_add(&format.pos_nor, "pos", GPU_COMP_F32, 3, GPU_FETCH_FLOAT);
- attr_id.nor = GPU_vertformat_attr_add(&format.pos_nor, "vnor", GPU_COMP_I10, 3, GPU_FETCH_INT_TO_FLOAT_UNIT);
- attr_id.lnor = GPU_vertformat_attr_add(&format.lnor, "lnor", GPU_COMP_I10, 3, GPU_FETCH_INT_TO_FLOAT_UNIT);
- attr_id.data = GPU_vertformat_attr_add(&format.flag, "data", GPU_COMP_U8, 4, GPU_FETCH_INT);
- attr_id.uvs = GPU_vertformat_attr_add(&format.uv, "u", GPU_COMP_F32, 2, GPU_FETCH_FLOAT);
- GPU_vertformat_alias_add(&format.uv, "pos");
- GPU_vertformat_alias_add(&format.flag, "flag");
- }
-
- GPUVertBufRaw raw_verts, raw_edges, raw_faces, raw_pos, raw_nor, raw_lnor, raw_uv, raw_data;
- if (DRW_TEST_ASSIGN_VBO(vbo_pos_nor)) {
- GPU_vertbuf_init_with_format(vbo_pos_nor, &format.pos_nor);
- GPU_vertbuf_data_alloc(vbo_pos_nor, tot_loop_len);
- GPU_vertbuf_attr_get_raw_data(vbo_pos_nor, attr_id.pos, &raw_pos);
- GPU_vertbuf_attr_get_raw_data(vbo_pos_nor, attr_id.nor, &raw_nor);
- }
- if (DRW_TEST_ASSIGN_VBO(vbo_lnor)) {
- GPU_vertbuf_init_with_format(vbo_lnor, &format.lnor);
- GPU_vertbuf_data_alloc(vbo_lnor, tot_loop_len);
- GPU_vertbuf_attr_get_raw_data(vbo_lnor, attr_id.lnor, &raw_lnor);
- }
- if (DRW_TEST_ASSIGN_VBO(vbo_data)) {
- GPU_vertbuf_init_with_format(vbo_data, &format.flag);
- GPU_vertbuf_data_alloc(vbo_data, tot_loop_len);
- GPU_vertbuf_attr_get_raw_data(vbo_data, attr_id.data, &raw_data);
- }
- if (DRW_TEST_ASSIGN_VBO(vbo_uv)) {
- GPU_vertbuf_init_with_format(vbo_uv, &format.uv);
- GPU_vertbuf_data_alloc(vbo_uv, tot_loop_len);
- GPU_vertbuf_attr_get_raw_data(vbo_uv, attr_id.uvs, &raw_uv);
- }
- /* Select Idx */
- if (DRW_TEST_ASSIGN_VBO(vbo_verts)) {
- GPU_vertbuf_init_with_format(vbo_verts, &format.sel_id);
- GPU_vertbuf_data_alloc(vbo_verts, tot_loop_len);
- GPU_vertbuf_attr_get_raw_data(vbo_verts, attr_id.sel_id, &raw_verts);
- }
- if (DRW_TEST_ASSIGN_VBO(vbo_edges)) {
- GPU_vertbuf_init_with_format(vbo_edges, &format.sel_id);
- GPU_vertbuf_data_alloc(vbo_edges, tot_loop_len);
- GPU_vertbuf_attr_get_raw_data(vbo_edges, attr_id.sel_id, &raw_edges);
- }
- if (DRW_TEST_ASSIGN_VBO(vbo_faces)) {
- GPU_vertbuf_init_with_format(vbo_faces, &format.sel_id);
- GPU_vertbuf_data_alloc(vbo_faces, tot_loop_len);
- GPU_vertbuf_attr_get_raw_data(vbo_faces, attr_id.sel_id, &raw_faces);
- }
-
- if (rdata->edit_bmesh && rdata->mapped.use == false) {
- BMesh *bm = rdata->edit_bmesh->bm;
- BMIter iter_efa, iter_loop, iter_vert;
- BMFace *efa;
- BMEdge *eed;
- BMVert *eve;
- BMLoop *loop;
- const int cd_loop_uv_offset = CustomData_get_offset(&bm->ldata, CD_MLOOPUV);
-
- /* Face Loops */
- BM_ITER_MESH (efa, &iter_efa, bm, BM_FACES_OF_MESH) {
- int fidx = BM_elem_index_get(efa);
- if (vbo_data) {
- fflag = mesh_render_data_face_flag(rdata, efa, cd_loop_uv_offset);
- }
- BM_ITER_ELEM (loop, &iter_loop, efa, BM_LOOPS_OF_FACE) {
- if (vbo_pos_nor) {
- GPUPackedNormal *vnor = (GPUPackedNormal *)GPU_vertbuf_raw_step(&raw_nor);
- *vnor = GPU_normal_convert_i10_v3(loop->v->no);
- copy_v3_v3(GPU_vertbuf_raw_step(&raw_pos), loop->v->co);
- }
- if (vbo_lnor) {
- const float *nor = (lnors) ? lnors[BM_elem_index_get(loop)] : efa->no;
- GPUPackedNormal *lnor = (GPUPackedNormal *)GPU_vertbuf_raw_step(&raw_lnor);
- *lnor = GPU_normal_convert_i10_v3(nor);
- }
- if (vbo_data) {
- EdgeDrawAttr eattr = { .v_flag = fflag };
- mesh_render_data_edge_flag(rdata, loop->e, &eattr);
- mesh_render_data_vert_flag(rdata, loop->v, &eattr);
- mesh_render_data_loop_flag(rdata, loop, cd_loop_uv_offset, &eattr);
- memcpy(GPU_vertbuf_raw_step(&raw_data), &eattr, sizeof(EdgeDrawAttr));
- }
- if (vbo_uv) {
- MLoopUV *luv = BM_ELEM_CD_GET_VOID_P(loop, cd_loop_uv_offset);
- copy_v2_v2(GPU_vertbuf_raw_step(&raw_uv), luv->uv);
- }
- /* Select Idx */
- if (vbo_verts) {
- int vidx = BM_elem_index_get(loop->v);
- *((uint *)GPU_vertbuf_raw_step(&raw_verts)) = vidx;
- }
- if (vbo_edges) {
- int eidx = BM_elem_index_get(loop->e);
- *((uint *)GPU_vertbuf_raw_step(&raw_edges)) = eidx;
- }
- if (vbo_faces) {
- *((uint *)GPU_vertbuf_raw_step(&raw_faces)) = fidx;
- }
- }
- }
- /* Loose edges */
- for (int e = 0; e < ledge_len; e++) {
- eed = BM_edge_at_index(bm, rdata->loose_edges[e]);
- BM_ITER_ELEM (eve, &iter_vert, eed, BM_VERTS_OF_EDGE) {
- if (vbo_pos_nor) {
- GPUPackedNormal *vnor = (GPUPackedNormal *)GPU_vertbuf_raw_step(&raw_nor);
- *vnor = GPU_normal_convert_i10_v3(eve->no);
- copy_v3_v3(GPU_vertbuf_raw_step(&raw_pos), eve->co);
- }
- if (vbo_data) {
- EdgeDrawAttr eattr = { 0 };
- mesh_render_data_edge_flag(rdata, eed, &eattr);
- mesh_render_data_vert_flag(rdata, eve, &eattr);
- memcpy(GPU_vertbuf_raw_step(&raw_data), &eattr, sizeof(EdgeDrawAttr));
- }
- if (vbo_lnor) {
- memset(GPU_vertbuf_raw_step(&raw_lnor), 0, sizeof(GPUPackedNormal));
- }
- /* Select Idx */
- if (vbo_verts) {
- int vidx = BM_elem_index_get(eve);
- *((uint *)GPU_vertbuf_raw_step(&raw_verts)) = vidx;
- }
- if (vbo_edges) {
- int eidx = BM_elem_index_get(eed);
- *((uint *)GPU_vertbuf_raw_step(&raw_edges)) = eidx;
- }
- }
- }
- /* Loose verts */
- for (int e = 0; e < lvert_len; e++) {
- eve = BM_vert_at_index(bm, rdata->loose_verts[e]);
- if (vbo_pos_nor) {
- GPUPackedNormal *vnor = (GPUPackedNormal *)GPU_vertbuf_raw_step(&raw_nor);
- *vnor = GPU_normal_convert_i10_v3(eve->no);
- copy_v3_v3(GPU_vertbuf_raw_step(&raw_pos), eve->co);
- }
- if (vbo_lnor) {
- memset(GPU_vertbuf_raw_step(&raw_lnor), 0, sizeof(GPUPackedNormal));
- }
- if (vbo_data) {
- EdgeDrawAttr eattr = { 0 };
- mesh_render_data_vert_flag(rdata, eve, &eattr);
- memcpy(GPU_vertbuf_raw_step(&raw_data), &eattr, sizeof(EdgeDrawAttr));
- }
- /* Select Idx */
- if (vbo_verts) {
- int vidx = BM_elem_index_get(eve);
- *((uint *)GPU_vertbuf_raw_step(&raw_verts)) = vidx;
- }
- }
- }
- else if (rdata->mapped.use == true) {
- BMesh *bm = rdata->edit_bmesh->bm;
- const int cd_loop_uv_offset = CustomData_get_offset(&bm->ldata, CD_MLOOPUV);
-
- const MPoly *mpoly = rdata->mapped.me_cage->mpoly;
- const MEdge *medge = rdata->mapped.me_cage->medge;
- const MVert *mvert = rdata->mapped.me_cage->mvert;
- const MLoop *mloop = rdata->mapped.me_cage->mloop;
-
- const int *v_origindex = rdata->mapped.v_origindex;
- const int *e_origindex = rdata->mapped.e_origindex;
- const int *p_origindex = rdata->mapped.p_origindex;
-
- /* Face Loops */
- for (int poly = 0; poly < poly_len; poly++, mpoly++) {
- const MLoop *l = &mloop[mpoly->loopstart];
- int fidx = p_origindex[poly];
- BMFace *efa = NULL;
- if (vbo_data) {
- fflag = 0;
- if (fidx != ORIGINDEX_NONE) {
- efa = BM_face_at_index(bm, fidx);
- fflag = mesh_render_data_face_flag(rdata, efa, cd_loop_uv_offset);
- }
- }
- for (int i = 0; i < mpoly->totloop; i++, l++) {
- if (vbo_pos_nor) {
- copy_v3_v3(GPU_vertbuf_raw_step(&raw_pos), mvert[l->v].co);
- }
- if (vbo_lnor || vbo_pos_nor) {
- GPUPackedNormal vnor = GPU_normal_convert_i10_s3(mvert[l->v].no);
- if (vbo_pos_nor) {
- *(GPUPackedNormal *)GPU_vertbuf_raw_step(&raw_nor) = vnor;
- }
- if (vbo_lnor) {
- /* Mapped does not support lnors yet. */
- *(GPUPackedNormal *)GPU_vertbuf_raw_step(&raw_lnor) = vnor;
- }
- }
- if (vbo_data) {
- EdgeDrawAttr eattr = { .v_flag = fflag };
- int vidx = v_origindex[l->v];
- int eidx = e_origindex[l->e];
- if (vidx != ORIGINDEX_NONE) {
- BMVert *eve = BM_vert_at_index(bm, vidx);
- mesh_render_data_vert_flag(rdata, eve, &eattr);
- }
- if (eidx != ORIGINDEX_NONE) {
- BMEdge *eed = BM_edge_at_index(bm, eidx);
- mesh_render_data_edge_flag(rdata, eed, &eattr);
- if (efa) {
- BMLoop *loop = BM_face_edge_share_loop(efa, eed);
- if (loop) {
- mesh_render_data_loop_flag(rdata, loop, cd_loop_uv_offset, &eattr);
- }
- }
- }
- memcpy(GPU_vertbuf_raw_step(&raw_data), &eattr, sizeof(EdgeDrawAttr));
- }
- if (vbo_uv) {
- MLoopUV *luv = &rdata->mloopuv[mpoly->loopstart + i];
- copy_v2_v2(GPU_vertbuf_raw_step(&raw_uv), luv->uv);
- }
- /* Select Idx */
- if (vbo_verts) {
- int vidx = v_origindex[l->v];
- *((uint *)GPU_vertbuf_raw_step(&raw_verts)) = vidx;
- }
- if (vbo_edges) {
- int eidx = e_origindex[l->e];
- *((uint *)GPU_vertbuf_raw_step(&raw_edges)) = eidx;
- }
- if (vbo_faces) {
- *((uint *)GPU_vertbuf_raw_step(&raw_faces)) = fidx;
- }
- }
- }
- /* Loose edges */
- for (int j = 0; j < ledge_len; j++) {
- const int e = rdata->mapped.loose_edges[j];
- for (int i = 0; i < 2; ++i) {
- int v = (i == 0) ? medge[e].v1 : medge[e].v2;
- if (vbo_pos_nor) {
- GPUPackedNormal vnor = GPU_normal_convert_i10_s3(mvert[v].no);
- *(GPUPackedNormal *)GPU_vertbuf_raw_step(&raw_nor) = vnor;
- copy_v3_v3(GPU_vertbuf_raw_step(&raw_pos), mvert[v].co);
- }
- if (vbo_lnor) {
- memset(GPU_vertbuf_raw_step(&raw_lnor), 0, sizeof(GPUPackedNormal));
- }
- if (vbo_data) {
- EdgeDrawAttr eattr = { 0 };
- int vidx = v_origindex[v];
- int eidx = e_origindex[e];
- if (vidx != ORIGINDEX_NONE) {
- BMVert *eve = BM_vert_at_index(bm, vidx);
- mesh_render_data_vert_flag(rdata, eve, &eattr);
- }
- if (eidx != ORIGINDEX_NONE) {
- BMEdge *eed = BM_edge_at_index(bm, eidx);
- mesh_render_data_edge_flag(rdata, eed, &eattr);
- }
- memcpy(GPU_vertbuf_raw_step(&raw_data), &eattr, sizeof(EdgeDrawAttr));
- }
- /* Select Idx */
- if (vbo_verts) {
- int vidx = v_origindex[v];
- *((uint *)GPU_vertbuf_raw_step(&raw_verts)) = vidx;
- }
- if (vbo_edges) {
- int eidx = e_origindex[e];
- *((uint *)GPU_vertbuf_raw_step(&raw_edges)) = eidx;
- }
- }
- }
- /* Loose verts */
- for (int i = 0; i < lvert_len; i++) {
- const int v = rdata->mapped.loose_verts[i];
- if (vbo_pos_nor) {
- GPUPackedNormal vnor = GPU_normal_convert_i10_s3(mvert[v].no);
- *(GPUPackedNormal *)GPU_vertbuf_raw_step(&raw_nor) = vnor;
- copy_v3_v3(GPU_vertbuf_raw_step(&raw_pos), mvert[v].co);
- }
- if (vbo_lnor) {
- memset(GPU_vertbuf_raw_step(&raw_lnor), 0, sizeof(GPUPackedNormal));
- }
- if (vbo_data) {
- EdgeDrawAttr eattr = { 0 };
- int vidx = v_origindex[v];
- if (vidx != ORIGINDEX_NONE) {
- BMVert *eve = BM_vert_at_index(bm, vidx);
- mesh_render_data_vert_flag(rdata, eve, &eattr);
- }
- memcpy(GPU_vertbuf_raw_step(&raw_data), &eattr, sizeof(EdgeDrawAttr));
- }
- /* Select Idx */
- if (vbo_verts) {
- int vidx = v_origindex[v];
- *((uint *)GPU_vertbuf_raw_step(&raw_verts)) = vidx;
- }
- }
- }
- else {
- const MPoly *mpoly = rdata->mpoly;
- const MVert *mvert = rdata->mvert;
- const MLoop *mloop = rdata->mloop;
-
- const int *v_origindex = CustomData_get_layer(&rdata->me->vdata, CD_ORIGINDEX);
- const int *e_origindex = CustomData_get_layer(&rdata->me->edata, CD_ORIGINDEX);
- const int *p_origindex = CustomData_get_layer(&rdata->me->pdata, CD_ORIGINDEX);
-
- /* Face Loops */
- for (int poly = 0; poly < poly_len; poly++, mpoly++) {
- const MLoop *l = &mloop[mpoly->loopstart];
- int fidx = p_origindex ? p_origindex[poly] : poly;
- for (int i = 0; i < mpoly->totloop; i++, l++) {
- if (vbo_pos_nor) {
- copy_v3_v3(GPU_vertbuf_raw_step(&raw_pos), mvert[l->v].co);
- }
- if (vbo_lnor || vbo_pos_nor) {
- GPUPackedNormal vnor = GPU_normal_convert_i10_s3(mvert[l->v].no);
- if (vbo_pos_nor) {
- *(GPUPackedNormal *)GPU_vertbuf_raw_step(&raw_nor) = vnor;
- }
- if (vbo_lnor) {
- /* Mapped does not support lnors yet. */
- *(GPUPackedNormal *)GPU_vertbuf_raw_step(&raw_lnor) = vnor;
- }
- }
- if (vbo_uv) {
- MLoopUV *luv = &rdata->mloopuv[mpoly->loopstart + i];
- copy_v2_v2(GPU_vertbuf_raw_step(&raw_uv), luv->uv);
- }
- /* Select Idx */
- if (vbo_verts) {
- int vidx = v_origindex ? v_origindex[l->v] : l->v;
- *((uint *)GPU_vertbuf_raw_step(&raw_verts)) = vidx;
- }
- if (vbo_edges) {
- int eidx = e_origindex ? e_origindex[l->e] : l->e;
- *((uint *)GPU_vertbuf_raw_step(&raw_edges)) = eidx;
- }
- if (vbo_faces) {
- *((uint *)GPU_vertbuf_raw_step(&raw_faces)) = fidx;
- }
- }
- }
- /* TODO(fclem): Until we find a way to detect
- * loose verts easily outside of edit mode, this
- * will remain disabled. */
+ const int poly_len = mesh_render_data_polys_len_get_maybe_mapped(rdata);
+ const int lvert_len = mesh_render_data_loose_verts_len_get_maybe_mapped(rdata);
+ const int ledge_len = mesh_render_data_loose_edges_len_get_maybe_mapped(rdata);
+ const int loop_len = mesh_render_data_loops_len_get_maybe_mapped(rdata);
+ const int tot_loop_len = loop_len + ledge_len * 2 + lvert_len;
+ float(*lnors)[3] = rdata->loop_normals;
+ uchar fflag;
+
+ /* Static formats */
+ static struct {
+ GPUVertFormat sel_id, pos_nor, lnor, flag, uv;
+ } format = {{0}};
+ static struct {
+ uint sel_id, pos, nor, lnor, data, uvs;
+ } attr_id;
+ if (format.sel_id.attr_len == 0) {
+ attr_id.sel_id = GPU_vertformat_attr_add(
+ &format.sel_id, "color", GPU_COMP_U32, 1, GPU_FETCH_INT);
+ attr_id.pos = GPU_vertformat_attr_add(
+ &format.pos_nor, "pos", GPU_COMP_F32, 3, GPU_FETCH_FLOAT);
+ attr_id.nor = GPU_vertformat_attr_add(
+ &format.pos_nor, "vnor", GPU_COMP_I10, 3, GPU_FETCH_INT_TO_FLOAT_UNIT);
+ attr_id.lnor = GPU_vertformat_attr_add(
+ &format.lnor, "lnor", GPU_COMP_I10, 3, GPU_FETCH_INT_TO_FLOAT_UNIT);
+ attr_id.data = GPU_vertformat_attr_add(&format.flag, "data", GPU_COMP_U8, 4, GPU_FETCH_INT);
+ attr_id.uvs = GPU_vertformat_attr_add(&format.uv, "u", GPU_COMP_F32, 2, GPU_FETCH_FLOAT);
+ GPU_vertformat_alias_add(&format.uv, "pos");
+ GPU_vertformat_alias_add(&format.flag, "flag");
+ }
+
+ GPUVertBufRaw raw_verts, raw_edges, raw_faces, raw_pos, raw_nor, raw_lnor, raw_uv, raw_data;
+ if (DRW_TEST_ASSIGN_VBO(vbo_pos_nor)) {
+ GPU_vertbuf_init_with_format(vbo_pos_nor, &format.pos_nor);
+ GPU_vertbuf_data_alloc(vbo_pos_nor, tot_loop_len);
+ GPU_vertbuf_attr_get_raw_data(vbo_pos_nor, attr_id.pos, &raw_pos);
+ GPU_vertbuf_attr_get_raw_data(vbo_pos_nor, attr_id.nor, &raw_nor);
+ }
+ if (DRW_TEST_ASSIGN_VBO(vbo_lnor)) {
+ GPU_vertbuf_init_with_format(vbo_lnor, &format.lnor);
+ GPU_vertbuf_data_alloc(vbo_lnor, tot_loop_len);
+ GPU_vertbuf_attr_get_raw_data(vbo_lnor, attr_id.lnor, &raw_lnor);
+ }
+ if (DRW_TEST_ASSIGN_VBO(vbo_data)) {
+ GPU_vertbuf_init_with_format(vbo_data, &format.flag);
+ GPU_vertbuf_data_alloc(vbo_data, tot_loop_len);
+ GPU_vertbuf_attr_get_raw_data(vbo_data, attr_id.data, &raw_data);
+ }
+ if (DRW_TEST_ASSIGN_VBO(vbo_uv)) {
+ GPU_vertbuf_init_with_format(vbo_uv, &format.uv);
+ GPU_vertbuf_data_alloc(vbo_uv, tot_loop_len);
+ GPU_vertbuf_attr_get_raw_data(vbo_uv, attr_id.uvs, &raw_uv);
+ }
+ /* Select Idx */
+ if (DRW_TEST_ASSIGN_VBO(vbo_verts)) {
+ GPU_vertbuf_init_with_format(vbo_verts, &format.sel_id);
+ GPU_vertbuf_data_alloc(vbo_verts, tot_loop_len);
+ GPU_vertbuf_attr_get_raw_data(vbo_verts, attr_id.sel_id, &raw_verts);
+ }
+ if (DRW_TEST_ASSIGN_VBO(vbo_edges)) {
+ GPU_vertbuf_init_with_format(vbo_edges, &format.sel_id);
+ GPU_vertbuf_data_alloc(vbo_edges, tot_loop_len);
+ GPU_vertbuf_attr_get_raw_data(vbo_edges, attr_id.sel_id, &raw_edges);
+ }
+ if (DRW_TEST_ASSIGN_VBO(vbo_faces)) {
+ GPU_vertbuf_init_with_format(vbo_faces, &format.sel_id);
+ GPU_vertbuf_data_alloc(vbo_faces, tot_loop_len);
+ GPU_vertbuf_attr_get_raw_data(vbo_faces, attr_id.sel_id, &raw_faces);
+ }
+
+ if (rdata->edit_bmesh && rdata->mapped.use == false) {
+ BMesh *bm = rdata->edit_bmesh->bm;
+ BMIter iter_efa, iter_loop, iter_vert;
+ BMFace *efa;
+ BMEdge *eed;
+ BMVert *eve;
+ BMLoop *loop;
+ const int cd_loop_uv_offset = CustomData_get_offset(&bm->ldata, CD_MLOOPUV);
+
+ /* Face Loops */
+ BM_ITER_MESH (efa, &iter_efa, bm, BM_FACES_OF_MESH) {
+ int fidx = BM_elem_index_get(efa);
+ if (vbo_data) {
+ fflag = mesh_render_data_face_flag(rdata, efa, cd_loop_uv_offset);
+ }
+ BM_ITER_ELEM (loop, &iter_loop, efa, BM_LOOPS_OF_FACE) {
+ if (vbo_pos_nor) {
+ GPUPackedNormal *vnor = (GPUPackedNormal *)GPU_vertbuf_raw_step(&raw_nor);
+ *vnor = GPU_normal_convert_i10_v3(loop->v->no);
+ copy_v3_v3(GPU_vertbuf_raw_step(&raw_pos), loop->v->co);
+ }
+ if (vbo_lnor) {
+ const float *nor = (lnors) ? lnors[BM_elem_index_get(loop)] : efa->no;
+ GPUPackedNormal *lnor = (GPUPackedNormal *)GPU_vertbuf_raw_step(&raw_lnor);
+ *lnor = GPU_normal_convert_i10_v3(nor);
+ }
+ if (vbo_data) {
+ EdgeDrawAttr eattr = {.v_flag = fflag};
+ mesh_render_data_edge_flag(rdata, loop->e, &eattr);
+ mesh_render_data_vert_flag(rdata, loop->v, &eattr);
+ mesh_render_data_loop_flag(rdata, loop, cd_loop_uv_offset, &eattr);
+ memcpy(GPU_vertbuf_raw_step(&raw_data), &eattr, sizeof(EdgeDrawAttr));
+ }
+ if (vbo_uv) {
+ MLoopUV *luv = BM_ELEM_CD_GET_VOID_P(loop, cd_loop_uv_offset);
+ copy_v2_v2(GPU_vertbuf_raw_step(&raw_uv), luv->uv);
+ }
+ /* Select Idx */
+ if (vbo_verts) {
+ int vidx = BM_elem_index_get(loop->v);
+ *((uint *)GPU_vertbuf_raw_step(&raw_verts)) = vidx;
+ }
+ if (vbo_edges) {
+ int eidx = BM_elem_index_get(loop->e);
+ *((uint *)GPU_vertbuf_raw_step(&raw_edges)) = eidx;
+ }
+ if (vbo_faces) {
+ *((uint *)GPU_vertbuf_raw_step(&raw_faces)) = fidx;
+ }
+ }
+ }
+ /* Loose edges */
+ for (int e = 0; e < ledge_len; e++) {
+ eed = BM_edge_at_index(bm, rdata->loose_edges[e]);
+ BM_ITER_ELEM (eve, &iter_vert, eed, BM_VERTS_OF_EDGE) {
+ if (vbo_pos_nor) {
+ GPUPackedNormal *vnor = (GPUPackedNormal *)GPU_vertbuf_raw_step(&raw_nor);
+ *vnor = GPU_normal_convert_i10_v3(eve->no);
+ copy_v3_v3(GPU_vertbuf_raw_step(&raw_pos), eve->co);
+ }
+ if (vbo_data) {
+ EdgeDrawAttr eattr = {0};
+ mesh_render_data_edge_flag(rdata, eed, &eattr);
+ mesh_render_data_vert_flag(rdata, eve, &eattr);
+ memcpy(GPU_vertbuf_raw_step(&raw_data), &eattr, sizeof(EdgeDrawAttr));
+ }
+ if (vbo_lnor) {
+ memset(GPU_vertbuf_raw_step(&raw_lnor), 0, sizeof(GPUPackedNormal));
+ }
+ /* Select Idx */
+ if (vbo_verts) {
+ int vidx = BM_elem_index_get(eve);
+ *((uint *)GPU_vertbuf_raw_step(&raw_verts)) = vidx;
+ }
+ if (vbo_edges) {
+ int eidx = BM_elem_index_get(eed);
+ *((uint *)GPU_vertbuf_raw_step(&raw_edges)) = eidx;
+ }
+ }
+ }
+ /* Loose verts */
+ for (int e = 0; e < lvert_len; e++) {
+ eve = BM_vert_at_index(bm, rdata->loose_verts[e]);
+ if (vbo_pos_nor) {
+ GPUPackedNormal *vnor = (GPUPackedNormal *)GPU_vertbuf_raw_step(&raw_nor);
+ *vnor = GPU_normal_convert_i10_v3(eve->no);
+ copy_v3_v3(GPU_vertbuf_raw_step(&raw_pos), eve->co);
+ }
+ if (vbo_lnor) {
+ memset(GPU_vertbuf_raw_step(&raw_lnor), 0, sizeof(GPUPackedNormal));
+ }
+ if (vbo_data) {
+ EdgeDrawAttr eattr = {0};
+ mesh_render_data_vert_flag(rdata, eve, &eattr);
+ memcpy(GPU_vertbuf_raw_step(&raw_data), &eattr, sizeof(EdgeDrawAttr));
+ }
+ /* Select Idx */
+ if (vbo_verts) {
+ int vidx = BM_elem_index_get(eve);
+ *((uint *)GPU_vertbuf_raw_step(&raw_verts)) = vidx;
+ }
+ }
+ }
+ else if (rdata->mapped.use == true) {
+ BMesh *bm = rdata->edit_bmesh->bm;
+ const int cd_loop_uv_offset = CustomData_get_offset(&bm->ldata, CD_MLOOPUV);
+
+ const MPoly *mpoly = rdata->mapped.me_cage->mpoly;
+ const MEdge *medge = rdata->mapped.me_cage->medge;
+ const MVert *mvert = rdata->mapped.me_cage->mvert;
+ const MLoop *mloop = rdata->mapped.me_cage->mloop;
+
+ const int *v_origindex = rdata->mapped.v_origindex;
+ const int *e_origindex = rdata->mapped.e_origindex;
+ const int *p_origindex = rdata->mapped.p_origindex;
+
+ /* Face Loops */
+ for (int poly = 0; poly < poly_len; poly++, mpoly++) {
+ const MLoop *l = &mloop[mpoly->loopstart];
+ int fidx = p_origindex[poly];
+ BMFace *efa = NULL;
+ if (vbo_data) {
+ fflag = 0;
+ if (fidx != ORIGINDEX_NONE) {
+ efa = BM_face_at_index(bm, fidx);
+ fflag = mesh_render_data_face_flag(rdata, efa, cd_loop_uv_offset);
+ }
+ }
+ for (int i = 0; i < mpoly->totloop; i++, l++) {
+ if (vbo_pos_nor) {
+ copy_v3_v3(GPU_vertbuf_raw_step(&raw_pos), mvert[l->v].co);
+ }
+ if (vbo_lnor || vbo_pos_nor) {
+ GPUPackedNormal vnor = GPU_normal_convert_i10_s3(mvert[l->v].no);
+ if (vbo_pos_nor) {
+ *(GPUPackedNormal *)GPU_vertbuf_raw_step(&raw_nor) = vnor;
+ }
+ if (vbo_lnor) {
+ /* Mapped does not support lnors yet. */
+ *(GPUPackedNormal *)GPU_vertbuf_raw_step(&raw_lnor) = vnor;
+ }
+ }
+ if (vbo_data) {
+ EdgeDrawAttr eattr = {.v_flag = fflag};
+ int vidx = v_origindex[l->v];
+ int eidx = e_origindex[l->e];
+ if (vidx != ORIGINDEX_NONE) {
+ BMVert *eve = BM_vert_at_index(bm, vidx);
+ mesh_render_data_vert_flag(rdata, eve, &eattr);
+ }
+ if (eidx != ORIGINDEX_NONE) {
+ BMEdge *eed = BM_edge_at_index(bm, eidx);
+ mesh_render_data_edge_flag(rdata, eed, &eattr);
+ if (efa) {
+ BMLoop *loop = BM_face_edge_share_loop(efa, eed);
+ if (loop) {
+ mesh_render_data_loop_flag(rdata, loop, cd_loop_uv_offset, &eattr);
+ }
+ }
+ }
+ memcpy(GPU_vertbuf_raw_step(&raw_data), &eattr, sizeof(EdgeDrawAttr));
+ }
+ if (vbo_uv) {
+ MLoopUV *luv = &rdata->mloopuv[mpoly->loopstart + i];
+ copy_v2_v2(GPU_vertbuf_raw_step(&raw_uv), luv->uv);
+ }
+ /* Select Idx */
+ if (vbo_verts) {
+ int vidx = v_origindex[l->v];
+ *((uint *)GPU_vertbuf_raw_step(&raw_verts)) = vidx;
+ }
+ if (vbo_edges) {
+ int eidx = e_origindex[l->e];
+ *((uint *)GPU_vertbuf_raw_step(&raw_edges)) = eidx;
+ }
+ if (vbo_faces) {
+ *((uint *)GPU_vertbuf_raw_step(&raw_faces)) = fidx;
+ }
+ }
+ }
+ /* Loose edges */
+ for (int j = 0; j < ledge_len; j++) {
+ const int e = rdata->mapped.loose_edges[j];
+ for (int i = 0; i < 2; ++i) {
+ int v = (i == 0) ? medge[e].v1 : medge[e].v2;
+ if (vbo_pos_nor) {
+ GPUPackedNormal vnor = GPU_normal_convert_i10_s3(mvert[v].no);
+ *(GPUPackedNormal *)GPU_vertbuf_raw_step(&raw_nor) = vnor;
+ copy_v3_v3(GPU_vertbuf_raw_step(&raw_pos), mvert[v].co);
+ }
+ if (vbo_lnor) {
+ memset(GPU_vertbuf_raw_step(&raw_lnor), 0, sizeof(GPUPackedNormal));
+ }
+ if (vbo_data) {
+ EdgeDrawAttr eattr = {0};
+ int vidx = v_origindex[v];
+ int eidx = e_origindex[e];
+ if (vidx != ORIGINDEX_NONE) {
+ BMVert *eve = BM_vert_at_index(bm, vidx);
+ mesh_render_data_vert_flag(rdata, eve, &eattr);
+ }
+ if (eidx != ORIGINDEX_NONE) {
+ BMEdge *eed = BM_edge_at_index(bm, eidx);
+ mesh_render_data_edge_flag(rdata, eed, &eattr);
+ }
+ memcpy(GPU_vertbuf_raw_step(&raw_data), &eattr, sizeof(EdgeDrawAttr));
+ }
+ /* Select Idx */
+ if (vbo_verts) {
+ int vidx = v_origindex[v];
+ *((uint *)GPU_vertbuf_raw_step(&raw_verts)) = vidx;
+ }
+ if (vbo_edges) {
+ int eidx = e_origindex[e];
+ *((uint *)GPU_vertbuf_raw_step(&raw_edges)) = eidx;
+ }
+ }
+ }
+ /* Loose verts */
+ for (int i = 0; i < lvert_len; i++) {
+ const int v = rdata->mapped.loose_verts[i];
+ if (vbo_pos_nor) {
+ GPUPackedNormal vnor = GPU_normal_convert_i10_s3(mvert[v].no);
+ *(GPUPackedNormal *)GPU_vertbuf_raw_step(&raw_nor) = vnor;
+ copy_v3_v3(GPU_vertbuf_raw_step(&raw_pos), mvert[v].co);
+ }
+ if (vbo_lnor) {
+ memset(GPU_vertbuf_raw_step(&raw_lnor), 0, sizeof(GPUPackedNormal));
+ }
+ if (vbo_data) {
+ EdgeDrawAttr eattr = {0};
+ int vidx = v_origindex[v];
+ if (vidx != ORIGINDEX_NONE) {
+ BMVert *eve = BM_vert_at_index(bm, vidx);
+ mesh_render_data_vert_flag(rdata, eve, &eattr);
+ }
+ memcpy(GPU_vertbuf_raw_step(&raw_data), &eattr, sizeof(EdgeDrawAttr));
+ }
+ /* Select Idx */
+ if (vbo_verts) {
+ int vidx = v_origindex[v];
+ *((uint *)GPU_vertbuf_raw_step(&raw_verts)) = vidx;
+ }
+ }
+ }
+ else {
+ const MPoly *mpoly = rdata->mpoly;
+ const MVert *mvert = rdata->mvert;
+ const MLoop *mloop = rdata->mloop;
+
+ const int *v_origindex = CustomData_get_layer(&rdata->me->vdata, CD_ORIGINDEX);
+ const int *e_origindex = CustomData_get_layer(&rdata->me->edata, CD_ORIGINDEX);
+ const int *p_origindex = CustomData_get_layer(&rdata->me->pdata, CD_ORIGINDEX);
+
+ /* Face Loops */
+ for (int poly = 0; poly < poly_len; poly++, mpoly++) {
+ const MLoop *l = &mloop[mpoly->loopstart];
+ int fidx = p_origindex ? p_origindex[poly] : poly;
+ for (int i = 0; i < mpoly->totloop; i++, l++) {
+ if (vbo_pos_nor) {
+ copy_v3_v3(GPU_vertbuf_raw_step(&raw_pos), mvert[l->v].co);
+ }
+ if (vbo_lnor || vbo_pos_nor) {
+ GPUPackedNormal vnor = GPU_normal_convert_i10_s3(mvert[l->v].no);
+ if (vbo_pos_nor) {
+ *(GPUPackedNormal *)GPU_vertbuf_raw_step(&raw_nor) = vnor;
+ }
+ if (vbo_lnor) {
+ /* Mapped does not support lnors yet. */
+ *(GPUPackedNormal *)GPU_vertbuf_raw_step(&raw_lnor) = vnor;
+ }
+ }
+ if (vbo_uv) {
+ MLoopUV *luv = &rdata->mloopuv[mpoly->loopstart + i];
+ copy_v2_v2(GPU_vertbuf_raw_step(&raw_uv), luv->uv);
+ }
+ /* Select Idx */
+ if (vbo_verts) {
+ int vidx = v_origindex ? v_origindex[l->v] : l->v;
+ *((uint *)GPU_vertbuf_raw_step(&raw_verts)) = vidx;
+ }
+ if (vbo_edges) {
+ int eidx = e_origindex ? e_origindex[l->e] : l->e;
+ *((uint *)GPU_vertbuf_raw_step(&raw_edges)) = eidx;
+ }
+ if (vbo_faces) {
+ *((uint *)GPU_vertbuf_raw_step(&raw_faces)) = fidx;
+ }
+ }
+ }
+ /* TODO(fclem): Until we find a way to detect
+ * loose verts easily outside of edit mode, this
+ * will remain disabled. */
#if 0
- /* Loose edges */
- for (int e = 0; e < edge_len; e++, medge++) {
- int eidx = e_origindex[e];
- if (eidx != ORIGINDEX_NONE && (medge->flag & ME_LOOSEEDGE)) {
- for (int i = 0; i < 2; ++i) {
- int vidx = (i == 0) ? medge->v1 : medge->v2;
- if (vbo_pos) {
- copy_v3_v3(GPU_vertbuf_raw_step(&raw_pos), mvert[vidx].co);
- }
- if (vbo_verts) {
- *((uint *)GPU_vertbuf_raw_step(&raw_verts)) = vidx;
- }
- if (vbo_edges) {
- *((uint *)GPU_vertbuf_raw_step(&raw_edges)) = eidx;
- }
- }
- }
- }
- /* Loose verts */
- for (int v = 0; v < vert_len; v++, mvert++) {
- int vidx = v_origindex[v];
- if (vidx != ORIGINDEX_NONE) {
- MVert *eve = BM_vert_at_index(bm, vidx);
- if (eve->e == NULL) {
- if (vbo_pos) {
- copy_v3_v3(GPU_vertbuf_raw_step(&raw_pos), mvert->co);
- }
- if (vbo_verts) {
- *((uint *)GPU_vertbuf_raw_step(&raw_verts)) = vidx;
- }
- }
- }
- }
+ /* Loose edges */
+ for (int e = 0; e < edge_len; e++, medge++) {
+ int eidx = e_origindex[e];
+ if (eidx != ORIGINDEX_NONE && (medge->flag & ME_LOOSEEDGE)) {
+ for (int i = 0; i < 2; ++i) {
+ int vidx = (i == 0) ? medge->v1 : medge->v2;
+ if (vbo_pos) {
+ copy_v3_v3(GPU_vertbuf_raw_step(&raw_pos), mvert[vidx].co);
+ }
+ if (vbo_verts) {
+ *((uint *)GPU_vertbuf_raw_step(&raw_verts)) = vidx;
+ }
+ if (vbo_edges) {
+ *((uint *)GPU_vertbuf_raw_step(&raw_edges)) = eidx;
+ }
+ }
+ }
+ }
+ /* Loose verts */
+ for (int v = 0; v < vert_len; v++, mvert++) {
+ int vidx = v_origindex[v];
+ if (vidx != ORIGINDEX_NONE) {
+ MVert *eve = BM_vert_at_index(bm, vidx);
+ if (eve->e == NULL) {
+ if (vbo_pos) {
+ copy_v3_v3(GPU_vertbuf_raw_step(&raw_pos), mvert->co);
+ }
+ if (vbo_verts) {
+ *((uint *)GPU_vertbuf_raw_step(&raw_verts)) = vidx;
+ }
+ }
+ }
+ }
#endif
- }
- /* Don't resize */
+ }
+ /* Don't resize */
}
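
The function above follows the file's standard fill pattern: allocate the VBO for the worst case, write attribute values through GPUVertBufRaw cursors, then, where elements can be skipped, shrink to the used size (this particular function deliberately does not resize, but the facedot builder below does). A reduced, hedged sketch using only calls that appear in this diff (the wrapper itself is hypothetical):

static void example_fill_positions(GPUVertBuf *vbo, const float (*coords)[3], int len_max)
{
  static GPUVertFormat format = {0};
  static uint attr_pos;
  if (format.attr_len == 0) {
    attr_pos = GPU_vertformat_attr_add(&format, "pos", GPU_COMP_F32, 3, GPU_FETCH_FLOAT);
  }
  GPU_vertbuf_init_with_format(vbo, &format);
  GPU_vertbuf_data_alloc(vbo, len_max); /* worst-case size */

  GPUVertBufRaw raw;
  GPU_vertbuf_attr_get_raw_data(vbo, attr_pos, &raw);
  for (int i = 0; i < len_max; i++) {
    copy_v3_v3(GPU_vertbuf_raw_step(&raw), coords[i]); /* advances the cursor */
  }

  /* Shrink to what was actually written (a no-op here, but the pattern
   * matters when elements are skipped, e.g. hidden face dots). */
  int len_used = GPU_vertbuf_raw_used(&raw);
  if (len_used < len_max) {
    GPU_vertbuf_data_resize(vbo, len_used);
  }
}
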
/* TODO: We could use gl_PrimitiveID as index instead of using another VBO. */
-static void mesh_create_edit_facedots_select_id(
- MeshRenderData *rdata,
- GPUVertBuf *vbo)
+static void mesh_create_edit_facedots_select_id(MeshRenderData *rdata, GPUVertBuf *vbo)
{
- const int poly_len = mesh_render_data_polys_len_get_maybe_mapped(rdata);
-
- static GPUVertFormat format = { 0 };
- static struct { uint idx; } attr_id;
- if (format.attr_len == 0) {
- attr_id.idx = GPU_vertformat_attr_add(&format, "color", GPU_COMP_U32, 1, GPU_FETCH_INT);
- }
-
- GPUVertBufRaw idx_step;
- GPU_vertbuf_init_with_format(vbo, &format);
- GPU_vertbuf_data_alloc(vbo, poly_len);
- GPU_vertbuf_attr_get_raw_data(vbo, attr_id.idx, &idx_step);
-
- /* Keep in sync with mesh_create_edit_facedots(). */
- if (rdata->mapped.use == false) {
- if (rdata->edit_bmesh) {
- for (int poly = 0; poly < poly_len; poly++) {
- const BMFace *efa = BM_face_at_index(rdata->edit_bmesh->bm, poly);
- if (!BM_elem_flag_test(efa, BM_ELEM_HIDDEN)) {
- *((uint *)GPU_vertbuf_raw_step(&idx_step)) = poly;
- }
- }
- }
- else {
- for (int poly = 0; poly < poly_len; poly++) {
- *((uint *)GPU_vertbuf_raw_step(&idx_step)) = poly;
- }
- }
- }
- else {
- const int *p_origindex = rdata->mapped.p_origindex;
- for (int poly = 0; poly < poly_len; poly++) {
- const int p_orig = p_origindex[poly];
- if (p_orig != ORIGINDEX_NONE) {
- const BMFace *efa = BM_face_at_index(rdata->edit_bmesh->bm, p_orig);
- if (!BM_elem_flag_test(efa, BM_ELEM_HIDDEN)) {
- *((uint *)GPU_vertbuf_raw_step(&idx_step)) = poly;
- }
- }
- }
- }
-
- /* Resize & Finish */
- int facedot_len_used = GPU_vertbuf_raw_used(&idx_step);
- if (facedot_len_used != poly_len) {
- GPU_vertbuf_data_resize(vbo, facedot_len_used);
- }
+ const int poly_len = mesh_render_data_polys_len_get_maybe_mapped(rdata);
+
+ static GPUVertFormat format = {0};
+ static struct {
+ uint idx;
+ } attr_id;
+ if (format.attr_len == 0) {
+ attr_id.idx = GPU_vertformat_attr_add(&format, "color", GPU_COMP_U32, 1, GPU_FETCH_INT);
+ }
+
+ GPUVertBufRaw idx_step;
+ GPU_vertbuf_init_with_format(vbo, &format);
+ GPU_vertbuf_data_alloc(vbo, poly_len);
+ GPU_vertbuf_attr_get_raw_data(vbo, attr_id.idx, &idx_step);
+
+ /* Keep in sync with mesh_create_edit_facedots(). */
+ if (rdata->mapped.use == false) {
+ if (rdata->edit_bmesh) {
+ for (int poly = 0; poly < poly_len; poly++) {
+ const BMFace *efa = BM_face_at_index(rdata->edit_bmesh->bm, poly);
+ if (!BM_elem_flag_test(efa, BM_ELEM_HIDDEN)) {
+ *((uint *)GPU_vertbuf_raw_step(&idx_step)) = poly;
+ }
+ }
+ }
+ else {
+ for (int poly = 0; poly < poly_len; poly++) {
+ *((uint *)GPU_vertbuf_raw_step(&idx_step)) = poly;
+ }
+ }
+ }
+ else {
+ const int *p_origindex = rdata->mapped.p_origindex;
+ for (int poly = 0; poly < poly_len; poly++) {
+ const int p_orig = p_origindex[poly];
+ if (p_orig != ORIGINDEX_NONE) {
+ const BMFace *efa = BM_face_at_index(rdata->edit_bmesh->bm, p_orig);
+ if (!BM_elem_flag_test(efa, BM_ELEM_HIDDEN)) {
+ *((uint *)GPU_vertbuf_raw_step(&idx_step)) = poly;
+ }
+ }
+ }
+ }
+
+ /* Resize & Finish */
+ int facedot_len_used = GPU_vertbuf_raw_used(&idx_step);
+ if (facedot_len_used != poly_len) {
+ GPU_vertbuf_data_resize(vbo, facedot_len_used);
+ }
}
static void mesh_create_pos_and_nor(MeshRenderData *rdata, GPUVertBuf *vbo)
{
- static GPUVertFormat format = { 0 };
- static struct { uint pos, nor; } attr_id;
- if (format.attr_len == 0) {
- attr_id.pos = GPU_vertformat_attr_add(&format, "pos", GPU_COMP_F32, 3, GPU_FETCH_FLOAT);
- attr_id.nor = GPU_vertformat_attr_add(&format, "nor", GPU_COMP_I10, 4, GPU_FETCH_INT_TO_FLOAT_UNIT);
- }
-
- GPU_vertbuf_init_with_format(vbo, &format);
- const int vbo_len_capacity = mesh_render_data_verts_len_get_maybe_mapped(rdata);
- GPU_vertbuf_data_alloc(vbo, vbo_len_capacity);
-
- if (rdata->mapped.use == false) {
- if (rdata->edit_bmesh) {
- BMesh *bm = rdata->edit_bmesh->bm;
- BMIter iter;
- BMVert *eve;
- uint i;
-
- mesh_render_data_ensure_vert_normals_pack(rdata);
- GPUPackedNormal *vnor = rdata->vert_normals_pack;
-
- BM_ITER_MESH_INDEX (eve, &iter, bm, BM_VERTS_OF_MESH, i) {
- GPU_vertbuf_attr_set(vbo, attr_id.pos, i, eve->co);
- GPU_vertbuf_attr_set(vbo, attr_id.nor, i, &vnor[i]);
- }
- BLI_assert(i == vbo_len_capacity);
- }
- else {
- for (int i = 0; i < vbo_len_capacity; i++) {
- const MVert *mv = &rdata->mvert[i];
- GPUPackedNormal vnor_pack = GPU_normal_convert_i10_s3(mv->no);
- vnor_pack.w = (mv->flag & ME_HIDE) ? -1 : ((mv->flag & SELECT) ? 1 : 0);
- GPU_vertbuf_attr_set(vbo, attr_id.pos, i, rdata->mvert[i].co);
- GPU_vertbuf_attr_set(vbo, attr_id.nor, i, &vnor_pack);
- }
- }
- }
- else {
- const MVert *mvert = rdata->mapped.me_cage->mvert;
- const int *v_origindex = rdata->mapped.v_origindex;
- for (int i = 0; i < vbo_len_capacity; i++) {
- const int v_orig = v_origindex[i];
- if (v_orig != ORIGINDEX_NONE) {
- const MVert *mv = &mvert[i];
- GPUPackedNormal vnor_pack = GPU_normal_convert_i10_s3(mv->no);
- vnor_pack.w = (mv->flag & ME_HIDE) ? -1 : ((mv->flag & SELECT) ? 1 : 0);
- GPU_vertbuf_attr_set(vbo, attr_id.pos, i, mv->co);
- GPU_vertbuf_attr_set(vbo, attr_id.nor, i, &vnor_pack);
- }
- }
- }
+ static GPUVertFormat format = {0};
+ static struct {
+ uint pos, nor;
+ } attr_id;
+ if (format.attr_len == 0) {
+ attr_id.pos = GPU_vertformat_attr_add(&format, "pos", GPU_COMP_F32, 3, GPU_FETCH_FLOAT);
+ attr_id.nor = GPU_vertformat_attr_add(
+ &format, "nor", GPU_COMP_I10, 4, GPU_FETCH_INT_TO_FLOAT_UNIT);
+ }
+
+ GPU_vertbuf_init_with_format(vbo, &format);
+ const int vbo_len_capacity = mesh_render_data_verts_len_get_maybe_mapped(rdata);
+ GPU_vertbuf_data_alloc(vbo, vbo_len_capacity);
+
+ if (rdata->mapped.use == false) {
+ if (rdata->edit_bmesh) {
+ BMesh *bm = rdata->edit_bmesh->bm;
+ BMIter iter;
+ BMVert *eve;
+ uint i;
+
+ mesh_render_data_ensure_vert_normals_pack(rdata);
+ GPUPackedNormal *vnor = rdata->vert_normals_pack;
+
+ BM_ITER_MESH_INDEX (eve, &iter, bm, BM_VERTS_OF_MESH, i) {
+ GPU_vertbuf_attr_set(vbo, attr_id.pos, i, eve->co);
+ GPU_vertbuf_attr_set(vbo, attr_id.nor, i, &vnor[i]);
+ }
+ BLI_assert(i == vbo_len_capacity);
+ }
+ else {
+ for (int i = 0; i < vbo_len_capacity; i++) {
+ const MVert *mv = &rdata->mvert[i];
+ GPUPackedNormal vnor_pack = GPU_normal_convert_i10_s3(mv->no);
+ vnor_pack.w = (mv->flag & ME_HIDE) ? -1 : ((mv->flag & SELECT) ? 1 : 0);
+ GPU_vertbuf_attr_set(vbo, attr_id.pos, i, rdata->mvert[i].co);
+ GPU_vertbuf_attr_set(vbo, attr_id.nor, i, &vnor_pack);
+ }
+ }
+ }
+ else {
+ const MVert *mvert = rdata->mapped.me_cage->mvert;
+ const int *v_origindex = rdata->mapped.v_origindex;
+ for (int i = 0; i < vbo_len_capacity; i++) {
+ const int v_orig = v_origindex[i];
+ if (v_orig != ORIGINDEX_NONE) {
+ const MVert *mv = &mvert[i];
+ GPUPackedNormal vnor_pack = GPU_normal_convert_i10_s3(mv->no);
+ vnor_pack.w = (mv->flag & ME_HIDE) ? -1 : ((mv->flag & SELECT) ? 1 : 0);
+ GPU_vertbuf_attr_set(vbo, attr_id.pos, i, mv->co);
+ GPU_vertbuf_attr_set(vbo, attr_id.nor, i, &vnor_pack);
+ }
+ }
+ }
}
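
mesh_create_pos_and_nor() packs the hide/select state into the otherwise unused w component of the 10_10_10_2 packed normal, so one VBO serves both shading and edit-overlay tinting. A small hedged helper repeating that encoding (the helper itself is hypothetical; the calls and flags come from the code above):

/* -1 = hidden, 1 = selected, 0 = neither, exactly as set above. */
static GPUPackedNormal example_pack_vert_nor(const short no[3], const char flag)
{
  GPUPackedNormal n = GPU_normal_convert_i10_s3(no);
  n.w = (flag & ME_HIDE) ? -1 : ((flag & SELECT) ? 1 : 0);
  return n;
}
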
-static void mesh_create_weights(MeshRenderData *rdata, GPUVertBuf *vbo, DRW_MeshWeightState *wstate)
+static void mesh_create_weights(MeshRenderData *rdata,
+ GPUVertBuf *vbo,
+ DRW_MeshWeightState *wstate)
{
- static GPUVertFormat format = { 0 };
- static struct { uint weight; } attr_id;
- if (format.attr_len == 0) {
- attr_id.weight = GPU_vertformat_attr_add(&format, "weight", GPU_COMP_F32, 1, GPU_FETCH_FLOAT);
- }
-
- const int vbo_len_capacity = mesh_render_data_verts_len_get_maybe_mapped(rdata);
-
- mesh_render_data_ensure_vert_weight(rdata, wstate);
- const float *vert_weight = rdata->vert_weight;
-
- GPU_vertbuf_init_with_format(vbo, &format);
-	/* Meh, another allocation / copy for no benefit.
-	 * Needed because rdata->vert_weight is freed afterwards and
-	 * the GPU module doesn't have a GPU_vertbuf_data_from_memory or similar. */
- /* TODO get rid of the extra allocation/copy. */
- GPU_vertbuf_data_alloc(vbo, vbo_len_capacity);
- GPU_vertbuf_attr_fill(vbo, attr_id.weight, vert_weight);
+ static GPUVertFormat format = {0};
+ static struct {
+ uint weight;
+ } attr_id;
+ if (format.attr_len == 0) {
+ attr_id.weight = GPU_vertformat_attr_add(&format, "weight", GPU_COMP_F32, 1, GPU_FETCH_FLOAT);
+ }
+
+ const int vbo_len_capacity = mesh_render_data_verts_len_get_maybe_mapped(rdata);
+
+ mesh_render_data_ensure_vert_weight(rdata, wstate);
+ const float *vert_weight = rdata->vert_weight;
+
+ GPU_vertbuf_init_with_format(vbo, &format);
+  /* Meh, another allocation / copy for no benefit.
+   * Needed because rdata->vert_weight is freed afterwards and
+   * the GPU module doesn't have a GPU_vertbuf_data_from_memory or similar. */
+ /* TODO get rid of the extra allocation/copy. */
+ GPU_vertbuf_data_alloc(vbo, vbo_len_capacity);
+ GPU_vertbuf_attr_fill(vbo, attr_id.weight, vert_weight);
}
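
Unlike the raw-step fills used elsewhere in this file, the weight VBO above is uploaded in one call with GPU_vertbuf_attr_fill() from an already-built CPU array. A minimal hedged sketch of that bulk path (the wrapper is hypothetical; the calls appear in this diff):

static void example_fill_scalar_attr(GPUVertBuf *vbo, const float *values, int len)
{
  static GPUVertFormat format = {0};
  static uint attr_weight;
  if (format.attr_len == 0) {
    attr_weight = GPU_vertformat_attr_add(&format, "weight", GPU_COMP_F32, 1, GPU_FETCH_FLOAT);
  }
  GPU_vertbuf_init_with_format(vbo, &format);
  GPU_vertbuf_data_alloc(vbo, len);
  /* One bulk copy of the whole attribute; no per-element cursor stepping. */
  GPU_vertbuf_attr_fill(vbo, attr_weight, values);
}
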
-static float mesh_loop_edge_factor_get(
- const float f_no[3], const float v_co[3], const float v_no[3], const float v_next_co[3])
+static float mesh_loop_edge_factor_get(const float f_no[3],
+ const float v_co[3],
+ const float v_no[3],
+ const float v_next_co[3])
{
- float enor[3], evec[3];
- sub_v3_v3v3(evec, v_next_co, v_co);
- cross_v3_v3v3(enor, v_no, evec);
- normalize_v3(enor);
- float d = fabsf(dot_v3v3(enor, f_no));
- /* Rescale to the slider range. */
- d *= (1.0f / 0.065f);
- CLAMP(d, 0.0f, 1.0f);
- return d;
+ float enor[3], evec[3];
+ sub_v3_v3v3(evec, v_next_co, v_co);
+ cross_v3_v3v3(enor, v_no, evec);
+ normalize_v3(enor);
+ float d = fabsf(dot_v3v3(enor, f_no));
+ /* Rescale to the slider range. */
+ d *= (1.0f / 0.065f);
+ CLAMP(d, 0.0f, 1.0f);
+ return d;
}
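
mesh_loop_edge_factor_get() measures how far the vertex normal tilts away from the face normal across the edge: enor = normalize(v_no x (v_next_co - v_co)) lies in the vertex's tangent plane, so |enor . f_no| is the sine of the tilt angle in the plane perpendicular to the edge. The 1/0.065 rescale means the factor saturates at 1.0 once the tilt exceeds roughly asin(0.065), about 3.7 degrees. A hedged numeric check with assumed inputs:

/* Edge along +X on a face with normal +Z. */
const float f_no[3] = {0.0f, 0.0f, 1.0f};
const float v_co[3] = {0.0f, 0.0f, 0.0f};
const float v_next[3] = {1.0f, 0.0f, 0.0f};
const float v_no_flat[3] = {0.0f, 0.0f, 1.0f};       /* no tilt */
const float v_no_bent[3] = {0.0f, 0.0872f, 0.9962f}; /* ~5 deg toward +Y */

/* flat: cross gives (0, 1, 0), dot with f_no = 0          -> factor 0.0 */
/* bent: |dot| = sin(5 deg) = 0.0872; / 0.065 clamps to 1  -> factor 1.0 */
mesh_loop_edge_factor_get(f_no, v_co, v_no_flat, v_next);
mesh_loop_edge_factor_get(f_no, v_co, v_no_bent, v_next);
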
static void vertbuf_raw_step_u8(GPUVertBufRaw *wd_step, const uchar wiredata)
{
- *((uchar *)GPU_vertbuf_raw_step(wd_step)) = wiredata;
+ *((uchar *)GPU_vertbuf_raw_step(wd_step)) = wiredata;
}
static void vertbuf_raw_step_u8_to_f32(GPUVertBufRaw *wd_step, const uchar wiredata)
{
- *((float *)GPU_vertbuf_raw_step(wd_step)) = wiredata / 255.0f;
+ *((float *)GPU_vertbuf_raw_step(wd_step)) = wiredata / 255.0f;
}
static void mesh_create_loop_edge_fac(MeshRenderData *rdata, GPUVertBuf *vbo)
{
- static GPUVertFormat format = { 0 };
- static struct { uint wd; } attr_id;
- static union { float f; uchar u; } data;
- static void (*vertbuf_raw_step)(GPUVertBufRaw *, const uchar);
- if (format.attr_len == 0) {
- if (!GPU_crappy_amd_driver()) {
-			/* Some AMD drivers strangely crash with a VBO using this format. */
- attr_id.wd = GPU_vertformat_attr_add(&format, "wd", GPU_COMP_U8, 1, GPU_FETCH_INT_TO_FLOAT_UNIT);
- vertbuf_raw_step = vertbuf_raw_step_u8;
- data.u = UCHAR_MAX;
- }
- else {
- attr_id.wd = GPU_vertformat_attr_add(&format, "wd", GPU_COMP_F32, 1, GPU_FETCH_FLOAT);
- vertbuf_raw_step = vertbuf_raw_step_u8_to_f32;
- data.f = 1.0f;
- }
- }
- const int poly_len = mesh_render_data_polys_len_get(rdata);
- const int loop_len = mesh_render_data_loops_len_get(rdata);
- const int edge_len = mesh_render_data_edges_len_get(rdata);
-
- GPU_vertbuf_init_with_format(vbo, &format);
- GPU_vertbuf_data_alloc(vbo, loop_len);
-
- GPUVertBufRaw wd_step;
- GPU_vertbuf_attr_get_raw_data(vbo, attr_id.wd, &wd_step);
-
- if (rdata->mapped.use == false) {
- if (rdata->edit_bmesh) {
- BMesh *bm = rdata->edit_bmesh->bm;
- BMIter iter_efa, iter_loop;
- BMFace *efa;
- BMLoop *loop;
- uint f;
-
- BM_ITER_MESH_INDEX (efa, &iter_efa, bm, BM_FACES_OF_MESH, f) {
- BM_ITER_ELEM (loop, &iter_loop, efa, BM_LOOPS_OF_FACE) {
- float ratio = mesh_loop_edge_factor_get(efa->no, loop->v->co, loop->v->no, loop->next->v->co);
- vertbuf_raw_step(&wd_step, ratio * 255);
- }
- }
- BLI_assert(GPU_vertbuf_raw_used(&wd_step) == loop_len);
- }
- else {
- const MVert *mvert = rdata->mvert;
- const MPoly *mpoly = rdata->mpoly;
- const MLoop *mloop = rdata->mloop;
- MEdge *medge = (MEdge *)rdata->medge;
- bool use_edge_render = false;
-
- /* TODO(fclem) We don't need them to be packed. But we need rdata->poly_normals */
- mesh_render_data_ensure_poly_normals_pack(rdata);
-
- /* Reset flag */
- for (int edge = 0; edge < edge_len; ++edge) {
- /* NOTE: not thread safe. */
- medge[edge].flag &= ~ME_EDGE_TMP_TAG;
-
- /* HACK(fclem) Feels like a hack. Detecting the need for edge render. */
- if ((medge[edge].flag & ME_EDGERENDER) == 0) {
- use_edge_render = true;
- }
- }
-
- for (int a = 0; a < poly_len; a++, mpoly++) {
- const float *fnor = rdata->poly_normals[a];
- for (int b = 0; b < mpoly->totloop; b++) {
- const MLoop *ml1 = &mloop[mpoly->loopstart + b];
- const MLoop *ml2 = &mloop[mpoly->loopstart + (b + 1) % mpoly->totloop];
-
- /* Will only work for edges that have an odd number of faces connected. */
- MEdge *ed = (MEdge *)rdata->medge + ml1->e;
- ed->flag ^= ME_EDGE_TMP_TAG;
-
- if (use_edge_render) {
- vertbuf_raw_step(&wd_step, (ed->flag & ME_EDGERENDER) ? 255 : 0);
- }
- else {
- float vnor_f[3];
- normal_short_to_float_v3(vnor_f, mvert[ml1->v].no);
- float ratio = mesh_loop_edge_factor_get(fnor,
- mvert[ml1->v].co,
- vnor_f,
- mvert[ml2->v].co);
- vertbuf_raw_step(&wd_step, ratio * 253 + 1);
- }
- }
- }
- /* Gather non-manifold edges. */
- for (int l = 0; l < loop_len; l++, mloop++) {
- MEdge *ed = (MEdge *)rdata->medge + mloop->e;
- if (ed->flag & ME_EDGE_TMP_TAG) {
- GPU_vertbuf_attr_set(vbo, attr_id.wd, l, &data);
- }
- }
-
- BLI_assert(loop_len == GPU_vertbuf_raw_used(&wd_step));
- }
- }
- else {
- BLI_assert(0);
- }
+ static GPUVertFormat format = {0};
+ static struct {
+ uint wd;
+ } attr_id;
+ static union {
+ float f;
+ uchar u;
+ } data;
+ static void (*vertbuf_raw_step)(GPUVertBufRaw *, const uchar);
+ if (format.attr_len == 0) {
+ if (!GPU_crappy_amd_driver()) {
+      /* Some AMD drivers strangely crash with a VBO using this format. */
+ attr_id.wd = GPU_vertformat_attr_add(
+ &format, "wd", GPU_COMP_U8, 1, GPU_FETCH_INT_TO_FLOAT_UNIT);
+ vertbuf_raw_step = vertbuf_raw_step_u8;
+ data.u = UCHAR_MAX;
+ }
+ else {
+ attr_id.wd = GPU_vertformat_attr_add(&format, "wd", GPU_COMP_F32, 1, GPU_FETCH_FLOAT);
+ vertbuf_raw_step = vertbuf_raw_step_u8_to_f32;
+ data.f = 1.0f;
+ }
+ }
+ const int poly_len = mesh_render_data_polys_len_get(rdata);
+ const int loop_len = mesh_render_data_loops_len_get(rdata);
+ const int edge_len = mesh_render_data_edges_len_get(rdata);
+
+ GPU_vertbuf_init_with_format(vbo, &format);
+ GPU_vertbuf_data_alloc(vbo, loop_len);
+
+ GPUVertBufRaw wd_step;
+ GPU_vertbuf_attr_get_raw_data(vbo, attr_id.wd, &wd_step);
+
+ if (rdata->mapped.use == false) {
+ if (rdata->edit_bmesh) {
+ BMesh *bm = rdata->edit_bmesh->bm;
+ BMIter iter_efa, iter_loop;
+ BMFace *efa;
+ BMLoop *loop;
+ uint f;
+
+ BM_ITER_MESH_INDEX (efa, &iter_efa, bm, BM_FACES_OF_MESH, f) {
+ BM_ITER_ELEM (loop, &iter_loop, efa, BM_LOOPS_OF_FACE) {
+ float ratio = mesh_loop_edge_factor_get(
+ efa->no, loop->v->co, loop->v->no, loop->next->v->co);
+ vertbuf_raw_step(&wd_step, ratio * 255);
+ }
+ }
+ BLI_assert(GPU_vertbuf_raw_used(&wd_step) == loop_len);
+ }
+ else {
+ const MVert *mvert = rdata->mvert;
+ const MPoly *mpoly = rdata->mpoly;
+ const MLoop *mloop = rdata->mloop;
+ MEdge *medge = (MEdge *)rdata->medge;
+ bool use_edge_render = false;
+
+ /* TODO(fclem) We don't need them to be packed. But we need rdata->poly_normals */
+ mesh_render_data_ensure_poly_normals_pack(rdata);
+
+ /* Reset flag */
+ for (int edge = 0; edge < edge_len; ++edge) {
+ /* NOTE: not thread safe. */
+ medge[edge].flag &= ~ME_EDGE_TMP_TAG;
+
+ /* HACK(fclem) Feels like a hack. Detecting the need for edge render. */
+ if ((medge[edge].flag & ME_EDGERENDER) == 0) {
+ use_edge_render = true;
+ }
+ }
+
+ for (int a = 0; a < poly_len; a++, mpoly++) {
+ const float *fnor = rdata->poly_normals[a];
+ for (int b = 0; b < mpoly->totloop; b++) {
+ const MLoop *ml1 = &mloop[mpoly->loopstart + b];
+ const MLoop *ml2 = &mloop[mpoly->loopstart + (b + 1) % mpoly->totloop];
+
+ /* Will only work for edges that have an odd number of faces connected. */
+ MEdge *ed = (MEdge *)rdata->medge + ml1->e;
+ ed->flag ^= ME_EDGE_TMP_TAG;
+
+ if (use_edge_render) {
+ vertbuf_raw_step(&wd_step, (ed->flag & ME_EDGERENDER) ? 255 : 0);
+ }
+ else {
+ float vnor_f[3];
+ normal_short_to_float_v3(vnor_f, mvert[ml1->v].no);
+ float ratio = mesh_loop_edge_factor_get(
+ fnor, mvert[ml1->v].co, vnor_f, mvert[ml2->v].co);
+ vertbuf_raw_step(&wd_step, ratio * 253 + 1);
+ }
+ }
+ }
+ /* Gather non-manifold edges. */
+ for (int l = 0; l < loop_len; l++, mloop++) {
+ MEdge *ed = (MEdge *)rdata->medge + mloop->e;
+ if (ed->flag & ME_EDGE_TMP_TAG) {
+ GPU_vertbuf_attr_set(vbo, attr_id.wd, l, &data);
+ }
+ }
+
+ BLI_assert(loop_len == GPU_vertbuf_raw_used(&wd_step));
+ }
+ }
+ else {
+ BLI_assert(0);
+ }
}
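
The ME_EDGE_TMP_TAG pass above is an XOR-parity trick: every face corner toggles its edge's tag, so after the polygon loop the tag survives exactly on edges used by an odd number of faces (boundary and non-manifold edges), which the gather step then forces to full wire strength. A stripped-down hedged sketch with illustrative names (not Blender API):

#include <string.h>

static void tag_odd_valence_edges(const int *corner_edge, int corner_count,
                                  unsigned char *tag, int edge_count)
{
  memset(tag, 0, (size_t)edge_count);
  for (int c = 0; c < corner_count; c++) {
    tag[corner_edge[c]] ^= 1; /* plays the role of ME_EDGE_TMP_TAG */
  }
  /* tag[e] == 1 iff edge e has odd face valence. */
}
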
static void mesh_create_loop_pos_and_nor(MeshRenderData *rdata, GPUVertBuf *vbo)
{
-	/* TODO deduplicate format creation. */
- static GPUVertFormat format = { 0 };
- static struct { uint pos, nor; } attr_id;
- if (format.attr_len == 0) {
- attr_id.pos = GPU_vertformat_attr_add(&format, "pos", GPU_COMP_F32, 3, GPU_FETCH_FLOAT);
- attr_id.nor = GPU_vertformat_attr_add(&format, "nor", GPU_COMP_I10, 4, GPU_FETCH_INT_TO_FLOAT_UNIT);
- }
- const int poly_len = mesh_render_data_polys_len_get(rdata);
- const int loop_len = mesh_render_data_loops_len_get(rdata);
-
- GPU_vertbuf_init_with_format(vbo, &format);
- GPU_vertbuf_data_alloc(vbo, loop_len);
-
- GPUVertBufRaw pos_step, nor_step;
- GPU_vertbuf_attr_get_raw_data(vbo, attr_id.pos, &pos_step);
- GPU_vertbuf_attr_get_raw_data(vbo, attr_id.nor, &nor_step);
-
- if (rdata->mapped.use == false) {
- if (rdata->edit_bmesh) {
- const GPUPackedNormal *vnor, *pnor;
- const float (*lnors)[3] = rdata->loop_normals;
- BMesh *bm = rdata->edit_bmesh->bm;
- BMIter iter_efa, iter_loop;
- BMFace *efa;
- BMLoop *loop;
- uint f;
-
- if (rdata->loop_normals == NULL) {
- mesh_render_data_ensure_poly_normals_pack(rdata);
- mesh_render_data_ensure_vert_normals_pack(rdata);
- vnor = rdata->vert_normals_pack;
- pnor = rdata->poly_normals_pack;
- }
-
- BM_ITER_MESH_INDEX (efa, &iter_efa, bm, BM_FACES_OF_MESH, f) {
- const bool face_smooth = BM_elem_flag_test(efa, BM_ELEM_SMOOTH);
-
- BM_ITER_ELEM (loop, &iter_loop, efa, BM_LOOPS_OF_FACE) {
- BLI_assert(GPU_vertbuf_raw_used(&pos_step) == BM_elem_index_get(loop));
- copy_v3_v3(GPU_vertbuf_raw_step(&pos_step), loop->v->co);
-
- if (lnors) {
- GPUPackedNormal plnor = GPU_normal_convert_i10_v3(lnors[BM_elem_index_get(loop)]);
- *((GPUPackedNormal *)GPU_vertbuf_raw_step(&nor_step)) = plnor;
- }
- else if (!face_smooth) {
- *((GPUPackedNormal *)GPU_vertbuf_raw_step(&nor_step)) = pnor[f];
- }
- else {
- *((GPUPackedNormal *)GPU_vertbuf_raw_step(&nor_step)) = vnor[BM_elem_index_get(loop->v)];
- }
- }
- }
- BLI_assert(GPU_vertbuf_raw_used(&pos_step) == loop_len);
- }
- else {
- const MVert *mvert = rdata->mvert;
- const MPoly *mpoly = rdata->mpoly;
-
- if (rdata->loop_normals == NULL) {
- mesh_render_data_ensure_poly_normals_pack(rdata);
- }
-
- for (int a = 0; a < poly_len; a++, mpoly++) {
- const MLoop *mloop = rdata->mloop + mpoly->loopstart;
- const float (*lnors)[3] = (rdata->loop_normals) ? &rdata->loop_normals[mpoly->loopstart] : NULL;
- const GPUPackedNormal *fnor = (mpoly->flag & ME_SMOOTH) ? NULL : &rdata->poly_normals_pack[a];
- const int hide_select_flag = (mpoly->flag & ME_HIDE) ? -1 : ((mpoly->flag & ME_FACE_SEL) ? 1 : 0);
- for (int b = 0; b < mpoly->totloop; b++, mloop++) {
- copy_v3_v3(GPU_vertbuf_raw_step(&pos_step), mvert[mloop->v].co);
- GPUPackedNormal *pnor = (GPUPackedNormal *)GPU_vertbuf_raw_step(&nor_step);
- if (lnors) {
- *pnor = GPU_normal_convert_i10_v3(lnors[b]);
- }
- else if (fnor) {
- *pnor = *fnor;
- }
- else {
- *pnor = GPU_normal_convert_i10_s3(mvert[mloop->v].no);
- }
- pnor->w = hide_select_flag;
- }
- }
-
- BLI_assert(loop_len == GPU_vertbuf_raw_used(&pos_step));
- }
- }
- else {
- const int *p_origindex = rdata->mapped.p_origindex;
- const MVert *mvert = rdata->mvert;
- const MPoly *mpoly = rdata->mpoly;
-
- if (rdata->loop_normals == NULL) {
- mesh_render_data_ensure_poly_normals_pack(rdata);
- }
-
- for (int a = 0; a < poly_len; a++, mpoly++) {
- const MLoop *mloop = rdata->mloop + mpoly->loopstart;
- const float (*lnors)[3] = (rdata->loop_normals) ? &rdata->loop_normals[mpoly->loopstart] : NULL;
- const GPUPackedNormal *fnor = (mpoly->flag & ME_SMOOTH) ? NULL : &rdata->poly_normals_pack[a];
- if (p_origindex[a] == ORIGINDEX_NONE) {
- continue;
- }
- for (int b = 0; b < mpoly->totloop; b++, mloop++) {
- copy_v3_v3(GPU_vertbuf_raw_step(&pos_step), mvert[mloop->v].co);
- GPUPackedNormal *pnor = (GPUPackedNormal *)GPU_vertbuf_raw_step(&nor_step);
- if (lnors) {
- *pnor = GPU_normal_convert_i10_v3(lnors[b]);
- }
- else if (fnor) {
- *pnor = *fnor;
- }
- else {
- *pnor = GPU_normal_convert_i10_s3(mvert[mloop->v].no);
- }
- }
- }
- }
-
- int vbo_len_used = GPU_vertbuf_raw_used(&pos_step);
- if (vbo_len_used < loop_len) {
- GPU_vertbuf_data_resize(vbo, vbo_len_used);
- }
+  /* TODO deduplicate format creation. */
+ static GPUVertFormat format = {0};
+ static struct {
+ uint pos, nor;
+ } attr_id;
+ if (format.attr_len == 0) {
+ attr_id.pos = GPU_vertformat_attr_add(&format, "pos", GPU_COMP_F32, 3, GPU_FETCH_FLOAT);
+ attr_id.nor = GPU_vertformat_attr_add(
+ &format, "nor", GPU_COMP_I10, 4, GPU_FETCH_INT_TO_FLOAT_UNIT);
+ }
+ const int poly_len = mesh_render_data_polys_len_get(rdata);
+ const int loop_len = mesh_render_data_loops_len_get(rdata);
+
+ GPU_vertbuf_init_with_format(vbo, &format);
+ GPU_vertbuf_data_alloc(vbo, loop_len);
+
+ GPUVertBufRaw pos_step, nor_step;
+ GPU_vertbuf_attr_get_raw_data(vbo, attr_id.pos, &pos_step);
+ GPU_vertbuf_attr_get_raw_data(vbo, attr_id.nor, &nor_step);
+
+ if (rdata->mapped.use == false) {
+ if (rdata->edit_bmesh) {
+ const GPUPackedNormal *vnor, *pnor;
+ const float(*lnors)[3] = rdata->loop_normals;
+ BMesh *bm = rdata->edit_bmesh->bm;
+ BMIter iter_efa, iter_loop;
+ BMFace *efa;
+ BMLoop *loop;
+ uint f;
+
+ if (rdata->loop_normals == NULL) {
+ mesh_render_data_ensure_poly_normals_pack(rdata);
+ mesh_render_data_ensure_vert_normals_pack(rdata);
+ vnor = rdata->vert_normals_pack;
+ pnor = rdata->poly_normals_pack;
+ }
+
+ BM_ITER_MESH_INDEX (efa, &iter_efa, bm, BM_FACES_OF_MESH, f) {
+ const bool face_smooth = BM_elem_flag_test(efa, BM_ELEM_SMOOTH);
+
+ BM_ITER_ELEM (loop, &iter_loop, efa, BM_LOOPS_OF_FACE) {
+ BLI_assert(GPU_vertbuf_raw_used(&pos_step) == BM_elem_index_get(loop));
+ copy_v3_v3(GPU_vertbuf_raw_step(&pos_step), loop->v->co);
+
+ if (lnors) {
+ GPUPackedNormal plnor = GPU_normal_convert_i10_v3(lnors[BM_elem_index_get(loop)]);
+ *((GPUPackedNormal *)GPU_vertbuf_raw_step(&nor_step)) = plnor;
+ }
+ else if (!face_smooth) {
+ *((GPUPackedNormal *)GPU_vertbuf_raw_step(&nor_step)) = pnor[f];
+ }
+ else {
+ *((GPUPackedNormal *)GPU_vertbuf_raw_step(
+ &nor_step)) = vnor[BM_elem_index_get(loop->v)];
+ }
+ }
+ }
+ BLI_assert(GPU_vertbuf_raw_used(&pos_step) == loop_len);
+ }
+ else {
+ const MVert *mvert = rdata->mvert;
+ const MPoly *mpoly = rdata->mpoly;
+
+ if (rdata->loop_normals == NULL) {
+ mesh_render_data_ensure_poly_normals_pack(rdata);
+ }
+
+ for (int a = 0; a < poly_len; a++, mpoly++) {
+ const MLoop *mloop = rdata->mloop + mpoly->loopstart;
+ const float(*lnors)[3] = (rdata->loop_normals) ? &rdata->loop_normals[mpoly->loopstart] :
+ NULL;
+ const GPUPackedNormal *fnor = (mpoly->flag & ME_SMOOTH) ? NULL :
+ &rdata->poly_normals_pack[a];
+ const int hide_select_flag = (mpoly->flag & ME_HIDE) ?
+ -1 :
+ ((mpoly->flag & ME_FACE_SEL) ? 1 : 0);
+ for (int b = 0; b < mpoly->totloop; b++, mloop++) {
+ copy_v3_v3(GPU_vertbuf_raw_step(&pos_step), mvert[mloop->v].co);
+ GPUPackedNormal *pnor = (GPUPackedNormal *)GPU_vertbuf_raw_step(&nor_step);
+ if (lnors) {
+ *pnor = GPU_normal_convert_i10_v3(lnors[b]);
+ }
+ else if (fnor) {
+ *pnor = *fnor;
+ }
+ else {
+ *pnor = GPU_normal_convert_i10_s3(mvert[mloop->v].no);
+ }
+ pnor->w = hide_select_flag;
+ }
+ }
+
+ BLI_assert(loop_len == GPU_vertbuf_raw_used(&pos_step));
+ }
+ }
+ else {
+ const int *p_origindex = rdata->mapped.p_origindex;
+ const MVert *mvert = rdata->mvert;
+ const MPoly *mpoly = rdata->mpoly;
+
+ if (rdata->loop_normals == NULL) {
+ mesh_render_data_ensure_poly_normals_pack(rdata);
+ }
+
+ for (int a = 0; a < poly_len; a++, mpoly++) {
+ const MLoop *mloop = rdata->mloop + mpoly->loopstart;
+ const float(*lnors)[3] = (rdata->loop_normals) ? &rdata->loop_normals[mpoly->loopstart] :
+ NULL;
+ const GPUPackedNormal *fnor = (mpoly->flag & ME_SMOOTH) ? NULL :
+ &rdata->poly_normals_pack[a];
+ if (p_origindex[a] == ORIGINDEX_NONE) {
+ continue;
+ }
+ for (int b = 0; b < mpoly->totloop; b++, mloop++) {
+ copy_v3_v3(GPU_vertbuf_raw_step(&pos_step), mvert[mloop->v].co);
+ GPUPackedNormal *pnor = (GPUPackedNormal *)GPU_vertbuf_raw_step(&nor_step);
+ if (lnors) {
+ *pnor = GPU_normal_convert_i10_v3(lnors[b]);
+ }
+ else if (fnor) {
+ *pnor = *fnor;
+ }
+ else {
+ *pnor = GPU_normal_convert_i10_s3(mvert[mloop->v].no);
+ }
+ }
+ }
+ }
+
+ int vbo_len_used = GPU_vertbuf_raw_used(&pos_step);
+ if (vbo_len_used < loop_len) {
+ GPU_vertbuf_data_resize(vbo, vbo_len_used);
+ }
}
static void mesh_create_loop_orco(MeshRenderData *rdata, GPUVertBuf *vbo)
{
- const uint loops_len = mesh_render_data_loops_len_get(rdata);
-
- /* initialize vertex format */
- GPUVertFormat format = { 0 };
- GPUVertBufRaw vbo_step;
-
-	/* FIXME(fclem): We use the last component as a way to differentiate from generic vertex attribs.
-	 * This is a substantial waste of VRAM and should be done another way. Unfortunately,
-	 * at the time of writing, I did not find any other "non-disruptive" alternative. */
- uint attr_id = GPU_vertformat_attr_add(&format, "orco", GPU_COMP_F32, 4, GPU_FETCH_FLOAT);
-
- GPU_vertbuf_init_with_format(vbo, &format);
- GPU_vertbuf_data_alloc(vbo, loops_len);
- GPU_vertbuf_attr_get_raw_data(vbo, attr_id, &vbo_step);
-
- if (rdata->edit_bmesh) {
- BMesh *bm = rdata->edit_bmesh->bm;
- BMIter iter_efa, iter_loop;
- BMFace *efa;
- BMLoop *loop;
-
- BM_ITER_MESH (efa, &iter_efa, bm, BM_FACES_OF_MESH) {
- BM_ITER_ELEM (loop, &iter_loop, efa, BM_LOOPS_OF_FACE) {
- float *data = (float *)GPU_vertbuf_raw_step(&vbo_step);
- copy_v3_v3(data, rdata->orco[BM_elem_index_get(loop->v)]);
- data[3] = 0.0; /* Tag as not a generic attrib */
- }
- }
- }
- else {
- for (uint l = 0; l < loops_len; l++) {
- float *data = (float *)GPU_vertbuf_raw_step(&vbo_step);
- copy_v3_v3(data, rdata->orco[rdata->mloop[l].v]);
- data[3] = 0.0; /* Tag as not a generic attrib */
- }
- }
+ const uint loops_len = mesh_render_data_loops_len_get(rdata);
+
+ /* initialize vertex format */
+ GPUVertFormat format = {0};
+ GPUVertBufRaw vbo_step;
+
+ /* FIXME(fclem): We use the last component as a way to differentiate from generic vertex attribs.
+ * This is a substantial waste of VRAM and should be done another way. Unfortunately,
+ * at the time of writing, I did not find any other "non-disruptive" alternative. */
+ uint attr_id = GPU_vertformat_attr_add(&format, "orco", GPU_COMP_F32, 4, GPU_FETCH_FLOAT);
+
+ GPU_vertbuf_init_with_format(vbo, &format);
+ GPU_vertbuf_data_alloc(vbo, loops_len);
+ GPU_vertbuf_attr_get_raw_data(vbo, attr_id, &vbo_step);
+
+ if (rdata->edit_bmesh) {
+ BMesh *bm = rdata->edit_bmesh->bm;
+ BMIter iter_efa, iter_loop;
+ BMFace *efa;
+ BMLoop *loop;
+
+ BM_ITER_MESH (efa, &iter_efa, bm, BM_FACES_OF_MESH) {
+ BM_ITER_ELEM (loop, &iter_loop, efa, BM_LOOPS_OF_FACE) {
+ float *data = (float *)GPU_vertbuf_raw_step(&vbo_step);
+ copy_v3_v3(data, rdata->orco[BM_elem_index_get(loop->v)]);
+ data[3] = 0.0; /* Tag as not a generic attrib */
+ }
+ }
+ }
+ else {
+ for (uint l = 0; l < loops_len; l++) {
+ float *data = (float *)GPU_vertbuf_raw_step(&vbo_step);
+ copy_v3_v3(data, rdata->orco[rdata->mloop[l].v]);
+ data[3] = 0.0; /* Tag as not a generic attrib */
+ }
+ }
}
static void mesh_create_loop_uv_and_tan(MeshRenderData *rdata, GPUVertBuf *vbo)
{
- const uint loops_len = mesh_render_data_loops_len_get(rdata);
- const uint uv_len = rdata->cd.layers.uv_len;
- const uint tangent_len = rdata->cd.layers.tangent_len;
- const uint layers_combined_len = uv_len + tangent_len;
+ const uint loops_len = mesh_render_data_loops_len_get(rdata);
+ const uint uv_len = rdata->cd.layers.uv_len;
+ const uint tangent_len = rdata->cd.layers.tangent_len;
+ const uint layers_combined_len = uv_len + tangent_len;
- GPUVertBufRaw *layers_combined_step = BLI_array_alloca(layers_combined_step, layers_combined_len);
- GPUVertBufRaw *uv_step = layers_combined_step;
- GPUVertBufRaw *tangent_step = uv_step + uv_len;
+ GPUVertBufRaw *layers_combined_step = BLI_array_alloca(layers_combined_step,
+ layers_combined_len);
+ GPUVertBufRaw *uv_step = layers_combined_step;
+ GPUVertBufRaw *tangent_step = uv_step + uv_len;
- uint *layers_combined_id = BLI_array_alloca(layers_combined_id, layers_combined_len);
- uint *uv_id = layers_combined_id;
- uint *tangent_id = uv_id + uv_len;
+ uint *layers_combined_id = BLI_array_alloca(layers_combined_id, layers_combined_len);
+ uint *uv_id = layers_combined_id;
+ uint *tangent_id = uv_id + uv_len;
- /* initialize vertex format */
- GPUVertFormat format = { 0 };
+ /* initialize vertex format */
+ GPUVertFormat format = {0};
- for (uint i = 0; i < uv_len; i++) {
- const char *attr_name = mesh_render_data_uv_layer_uuid_get(rdata, i);
+ for (uint i = 0; i < uv_len; i++) {
+ const char *attr_name = mesh_render_data_uv_layer_uuid_get(rdata, i);
#if 0 /* these are clamped. Maybe use them as an option in the future */
- uv_id[i] = GPU_vertformat_attr_add(&format, attr_name, GPU_COMP_I16, 2, GPU_FETCH_INT_TO_FLOAT_UNIT);
+ uv_id[i] = GPU_vertformat_attr_add(&format, attr_name, GPU_COMP_I16, 2, GPU_FETCH_INT_TO_FLOAT_UNIT);
#else
- uv_id[i] = GPU_vertformat_attr_add(&format, attr_name, GPU_COMP_F32, 2, GPU_FETCH_FLOAT);
+ uv_id[i] = GPU_vertformat_attr_add(&format, attr_name, GPU_COMP_F32, 2, GPU_FETCH_FLOAT);
#endif
- /* Auto Name */
- attr_name = mesh_render_data_uv_auto_layer_uuid_get(rdata, i);
- GPU_vertformat_alias_add(&format, attr_name);
-
- if (i == rdata->cd.layers.uv_active) {
- GPU_vertformat_alias_add(&format, "u");
- }
- if (i == rdata->cd.layers.uv_mask_active) {
- GPU_vertformat_alias_add(&format, "mu");
- }
- }
-
- for (uint i = 0; i < tangent_len; i++) {
- const char *attr_name = mesh_render_data_tangent_layer_uuid_get(rdata, i);
+ /* Auto Name */
+ attr_name = mesh_render_data_uv_auto_layer_uuid_get(rdata, i);
+ GPU_vertformat_alias_add(&format, attr_name);
+
+ if (i == rdata->cd.layers.uv_active) {
+ GPU_vertformat_alias_add(&format, "u");
+ }
+ if (i == rdata->cd.layers.uv_mask_active) {
+ GPU_vertformat_alias_add(&format, "mu");
+ }
+ }
+
+ for (uint i = 0; i < tangent_len; i++) {
+ const char *attr_name = mesh_render_data_tangent_layer_uuid_get(rdata, i);
#ifdef USE_COMP_MESH_DATA
- tangent_id[i] = GPU_vertformat_attr_add(&format, attr_name, GPU_COMP_I16, 4, GPU_FETCH_INT_TO_FLOAT_UNIT);
+ tangent_id[i] = GPU_vertformat_attr_add(
+ &format, attr_name, GPU_COMP_I16, 4, GPU_FETCH_INT_TO_FLOAT_UNIT);
#else
- tangent_id[i] = GPU_vertformat_attr_add(&format, attr_name, GPU_COMP_F32, 4, GPU_FETCH_FLOAT);
+ tangent_id[i] = GPU_vertformat_attr_add(&format, attr_name, GPU_COMP_F32, 4, GPU_FETCH_FLOAT);
#endif
- if (i == rdata->cd.layers.tangent_active) {
- GPU_vertformat_alias_add(&format, "t");
- }
- }
-
- /* HACK: Create a dummy attribute in case there is no valid UV/tangent layer. */
- if (layers_combined_len == 0) {
- GPU_vertformat_attr_add(&format, "dummy", GPU_COMP_U8, 1, GPU_FETCH_INT_TO_FLOAT_UNIT);
- }
-
- GPU_vertbuf_init_with_format(vbo, &format);
- GPU_vertbuf_data_alloc(vbo, loops_len);
-
- for (uint i = 0; i < uv_len; i++) {
- GPU_vertbuf_attr_get_raw_data(vbo, uv_id[i], &uv_step[i]);
- }
- for (uint i = 0; i < tangent_len; i++) {
- GPU_vertbuf_attr_get_raw_data(vbo, tangent_id[i], &tangent_step[i]);
- }
-
- if (rdata->edit_bmesh) {
- BMesh *bm = rdata->edit_bmesh->bm;
- BMIter iter_efa, iter_loop;
- BMFace *efa;
- BMLoop *loop;
-
- BM_ITER_MESH (efa, &iter_efa, bm, BM_FACES_OF_MESH) {
- BM_ITER_ELEM (loop, &iter_loop, efa, BM_LOOPS_OF_FACE) {
- /* UVs */
- for (uint j = 0; j < uv_len; j++) {
- const uint layer_offset = rdata->cd.offset.uv[j];
- const float *elem = ((MLoopUV *)BM_ELEM_CD_GET_VOID_P(loop, layer_offset))->uv;
- copy_v2_v2(GPU_vertbuf_raw_step(&uv_step[j]), elem);
- }
- /* TANGENTs */
- for (uint j = 0; j < tangent_len; j++) {
- float (*layer_data)[4] = rdata->cd.layers.tangent[j];
- const float *elem = layer_data[BM_elem_index_get(loop)];
+ if (i == rdata->cd.layers.tangent_active) {
+ GPU_vertformat_alias_add(&format, "t");
+ }
+ }
+
+ /* HACK: Create a dummy attribute in case there is no valid UV/tangent layer. */
+ if (layers_combined_len == 0) {
+ GPU_vertformat_attr_add(&format, "dummy", GPU_COMP_U8, 1, GPU_FETCH_INT_TO_FLOAT_UNIT);
+ }
+
+ GPU_vertbuf_init_with_format(vbo, &format);
+ GPU_vertbuf_data_alloc(vbo, loops_len);
+
+ for (uint i = 0; i < uv_len; i++) {
+ GPU_vertbuf_attr_get_raw_data(vbo, uv_id[i], &uv_step[i]);
+ }
+ for (uint i = 0; i < tangent_len; i++) {
+ GPU_vertbuf_attr_get_raw_data(vbo, tangent_id[i], &tangent_step[i]);
+ }
+
+ if (rdata->edit_bmesh) {
+ BMesh *bm = rdata->edit_bmesh->bm;
+ BMIter iter_efa, iter_loop;
+ BMFace *efa;
+ BMLoop *loop;
+
+ BM_ITER_MESH (efa, &iter_efa, bm, BM_FACES_OF_MESH) {
+ BM_ITER_ELEM (loop, &iter_loop, efa, BM_LOOPS_OF_FACE) {
+ /* UVs */
+ for (uint j = 0; j < uv_len; j++) {
+ const uint layer_offset = rdata->cd.offset.uv[j];
+ const float *elem = ((MLoopUV *)BM_ELEM_CD_GET_VOID_P(loop, layer_offset))->uv;
+ copy_v2_v2(GPU_vertbuf_raw_step(&uv_step[j]), elem);
+ }
+ /* TANGENTs */
+ for (uint j = 0; j < tangent_len; j++) {
+ float(*layer_data)[4] = rdata->cd.layers.tangent[j];
+ const float *elem = layer_data[BM_elem_index_get(loop)];
#ifdef USE_COMP_MESH_DATA
- normal_float_to_short_v4(GPU_vertbuf_raw_step(&tangent_step[j]), elem);
+ normal_float_to_short_v4(GPU_vertbuf_raw_step(&tangent_step[j]), elem);
#else
- copy_v4_v4(GPU_vertbuf_raw_step(&tangent_step[j]), elem);
+ copy_v4_v4(GPU_vertbuf_raw_step(&tangent_step[j]), elem);
#endif
- }
- }
- }
- }
- else {
- for (uint loop = 0; loop < loops_len; loop++) {
- /* UVs */
- for (uint j = 0; j < uv_len; j++) {
- const MLoopUV *layer_data = rdata->cd.layers.uv[j];
- const float *elem = layer_data[loop].uv;
- copy_v2_v2(GPU_vertbuf_raw_step(&uv_step[j]), elem);
- }
- /* TANGENTs */
- for (uint j = 0; j < tangent_len; j++) {
- float (*layer_data)[4] = rdata->cd.layers.tangent[j];
- const float *elem = layer_data[loop];
+ }
+ }
+ }
+ }
+ else {
+ for (uint loop = 0; loop < loops_len; loop++) {
+ /* UVs */
+ for (uint j = 0; j < uv_len; j++) {
+ const MLoopUV *layer_data = rdata->cd.layers.uv[j];
+ const float *elem = layer_data[loop].uv;
+ copy_v2_v2(GPU_vertbuf_raw_step(&uv_step[j]), elem);
+ }
+ /* TANGENTs */
+ for (uint j = 0; j < tangent_len; j++) {
+ float(*layer_data)[4] = rdata->cd.layers.tangent[j];
+ const float *elem = layer_data[loop];
#ifdef USE_COMP_MESH_DATA
- normal_float_to_short_v4(GPU_vertbuf_raw_step(&tangent_step[j]), elem);
+ normal_float_to_short_v4(GPU_vertbuf_raw_step(&tangent_step[j]), elem);
#else
- copy_v4_v4(GPU_vertbuf_raw_step(&tangent_step[j]), elem);
+ copy_v4_v4(GPU_vertbuf_raw_step(&tangent_step[j]), elem);
#endif
- }
- }
- }
+ }
+ }
+ }
#ifndef NDEBUG
- /* Check all layers are write aligned. */
- if (layers_combined_len > 0) {
- int vbo_len_used = GPU_vertbuf_raw_used(&layers_combined_step[0]);
- for (uint i = 0; i < layers_combined_len; i++) {
- BLI_assert(vbo_len_used == GPU_vertbuf_raw_used(&layers_combined_step[i]));
- }
- }
+ /* Check all layers are write aligned. */
+ if (layers_combined_len > 0) {
+ int vbo_len_used = GPU_vertbuf_raw_used(&layers_combined_step[0]);
+ for (uint i = 0; i < layers_combined_len; i++) {
+ BLI_assert(vbo_len_used == GPU_vertbuf_raw_used(&layers_combined_step[i]));
+ }
+ }
#endif
#undef USE_COMP_MESH_DATA
@@ -3141,999 +3235,1011 @@ static void mesh_create_loop_uv_and_tan(MeshRenderData *rdata, GPUVertBuf *vbo)
static void mesh_create_loop_vcol(MeshRenderData *rdata, GPUVertBuf *vbo)
{
- const uint loops_len = mesh_render_data_loops_len_get(rdata);
- const uint vcol_len = rdata->cd.layers.vcol_len;
-
- GPUVertBufRaw *vcol_step = BLI_array_alloca(vcol_step, vcol_len);
- uint *vcol_id = BLI_array_alloca(vcol_id, vcol_len);
-
- /* initialize vertex format */
- GPUVertFormat format = { 0 };
-
- for (uint i = 0; i < vcol_len; i++) {
- const char *attr_name = mesh_render_data_vcol_layer_uuid_get(rdata, i);
- vcol_id[i] = GPU_vertformat_attr_add(&format, attr_name, GPU_COMP_U8, 3, GPU_FETCH_INT_TO_FLOAT_UNIT);
- /* Auto layer */
- if (rdata->cd.layers.auto_vcol[i]) {
- attr_name = mesh_render_data_vcol_auto_layer_uuid_get(rdata, i);
- GPU_vertformat_alias_add(&format, attr_name);
- }
- if (i == rdata->cd.layers.vcol_active) {
- GPU_vertformat_alias_add(&format, "c");
- }
- }
-
- GPU_vertbuf_init_with_format(vbo, &format);
- GPU_vertbuf_data_alloc(vbo, loops_len);
-
- for (uint i = 0; i < vcol_len; i++) {
- GPU_vertbuf_attr_get_raw_data(vbo, vcol_id[i], &vcol_step[i]);
- }
-
- if (rdata->edit_bmesh) {
- BMesh *bm = rdata->edit_bmesh->bm;
- BMIter iter_efa, iter_loop;
- BMFace *efa;
- BMLoop *loop;
-
- BM_ITER_MESH (efa, &iter_efa, bm, BM_FACES_OF_MESH) {
- BM_ITER_ELEM (loop, &iter_loop, efa, BM_LOOPS_OF_FACE) {
- for (uint j = 0; j < vcol_len; j++) {
- const uint layer_offset = rdata->cd.offset.vcol[j];
- const uchar *elem = &((MLoopCol *)BM_ELEM_CD_GET_VOID_P(loop, layer_offset))->r;
- copy_v3_v3_uchar(GPU_vertbuf_raw_step(&vcol_step[j]), elem);
- }
- }
- }
- }
- else {
- for (uint loop = 0; loop < loops_len; loop++) {
- for (uint j = 0; j < vcol_len; j++) {
- const MLoopCol *layer_data = rdata->cd.layers.vcol[j];
- const uchar *elem = &layer_data[loop].r;
- copy_v3_v3_uchar(GPU_vertbuf_raw_step(&vcol_step[j]), elem);
- }
- }
- }
+ const uint loops_len = mesh_render_data_loops_len_get(rdata);
+ const uint vcol_len = rdata->cd.layers.vcol_len;
+
+ GPUVertBufRaw *vcol_step = BLI_array_alloca(vcol_step, vcol_len);
+ uint *vcol_id = BLI_array_alloca(vcol_id, vcol_len);
+
+ /* initialize vertex format */
+ GPUVertFormat format = {0};
+
+ for (uint i = 0; i < vcol_len; i++) {
+ const char *attr_name = mesh_render_data_vcol_layer_uuid_get(rdata, i);
+ vcol_id[i] = GPU_vertformat_attr_add(
+ &format, attr_name, GPU_COMP_U8, 3, GPU_FETCH_INT_TO_FLOAT_UNIT);
+ /* Auto layer */
+ if (rdata->cd.layers.auto_vcol[i]) {
+ attr_name = mesh_render_data_vcol_auto_layer_uuid_get(rdata, i);
+ GPU_vertformat_alias_add(&format, attr_name);
+ }
+ if (i == rdata->cd.layers.vcol_active) {
+ GPU_vertformat_alias_add(&format, "c");
+ }
+ }
+
+ GPU_vertbuf_init_with_format(vbo, &format);
+ GPU_vertbuf_data_alloc(vbo, loops_len);
+
+ for (uint i = 0; i < vcol_len; i++) {
+ GPU_vertbuf_attr_get_raw_data(vbo, vcol_id[i], &vcol_step[i]);
+ }
+
+ if (rdata->edit_bmesh) {
+ BMesh *bm = rdata->edit_bmesh->bm;
+ BMIter iter_efa, iter_loop;
+ BMFace *efa;
+ BMLoop *loop;
+
+ BM_ITER_MESH (efa, &iter_efa, bm, BM_FACES_OF_MESH) {
+ BM_ITER_ELEM (loop, &iter_loop, efa, BM_LOOPS_OF_FACE) {
+ for (uint j = 0; j < vcol_len; j++) {
+ const uint layer_offset = rdata->cd.offset.vcol[j];
+ const uchar *elem = &((MLoopCol *)BM_ELEM_CD_GET_VOID_P(loop, layer_offset))->r;
+ copy_v3_v3_uchar(GPU_vertbuf_raw_step(&vcol_step[j]), elem);
+ }
+ }
+ }
+ }
+ else {
+ for (uint loop = 0; loop < loops_len; loop++) {
+ for (uint j = 0; j < vcol_len; j++) {
+ const MLoopCol *layer_data = rdata->cd.layers.vcol[j];
+ const uchar *elem = &layer_data[loop].r;
+ copy_v3_v3_uchar(GPU_vertbuf_raw_step(&vcol_step[j]), elem);
+ }
+ }
+ }
#ifndef NDEBUG
- /* Check all layers are write aligned. */
- if (vcol_len > 0) {
- int vbo_len_used = GPU_vertbuf_raw_used(&vcol_step[0]);
- for (uint i = 0; i < vcol_len; i++) {
- BLI_assert(vbo_len_used == GPU_vertbuf_raw_used(&vcol_step[i]));
- }
- }
+ /* Check all layers are write aligned. */
+ if (vcol_len > 0) {
+ int vbo_len_used = GPU_vertbuf_raw_used(&vcol_step[0]);
+ for (uint i = 0; i < vcol_len; i++) {
+ BLI_assert(vbo_len_used == GPU_vertbuf_raw_used(&vcol_step[i]));
+ }
+ }
#endif
#undef USE_COMP_MESH_DATA
}
-static void mesh_create_edit_facedots(
- MeshRenderData *rdata,
- GPUVertBuf *vbo_facedots_pos_nor_data)
+static void mesh_create_edit_facedots(MeshRenderData *rdata, GPUVertBuf *vbo_facedots_pos_nor_data)
{
- const int poly_len = mesh_render_data_polys_len_get_maybe_mapped(rdata);
- const int verts_facedot_len = poly_len;
- int facedot_len_used = 0;
-
- static struct { uint fdot_pos, fdot_nor_flag; } attr_id;
- static GPUVertFormat facedot_format = { 0 };
- if (facedot_format.attr_len == 0) {
- attr_id.fdot_pos = GPU_vertformat_attr_add(&facedot_format, "pos", GPU_COMP_F32, 3, GPU_FETCH_FLOAT);
- attr_id.fdot_nor_flag = GPU_vertformat_attr_add(&facedot_format, "norAndFlag", GPU_COMP_I10, 4, GPU_FETCH_INT_TO_FLOAT_UNIT);
- }
-
- if (DRW_TEST_ASSIGN_VBO(vbo_facedots_pos_nor_data)) {
- GPU_vertbuf_init_with_format(vbo_facedots_pos_nor_data, &facedot_format);
- GPU_vertbuf_data_alloc(vbo_facedots_pos_nor_data, verts_facedot_len);
- /* TODO(fclem): Maybe move data generation to mesh_render_data_create() */
- if (rdata->edit_bmesh) {
- if (rdata->edit_data && rdata->edit_data->vertexCos != NULL) {
- BKE_editmesh_cache_ensure_poly_normals(rdata->edit_bmesh, rdata->edit_data);
- BKE_editmesh_cache_ensure_poly_centers(rdata->edit_bmesh, rdata->edit_data);
- }
- }
- }
-
- if (rdata->mapped.use == false) {
- for (int i = 0; i < poly_len; i++) {
- if (add_edit_facedot(rdata, vbo_facedots_pos_nor_data,
- attr_id.fdot_pos, attr_id.fdot_nor_flag,
- i, facedot_len_used))
- {
- facedot_len_used += 1;
- }
- }
- }
- else {
+ const int poly_len = mesh_render_data_polys_len_get_maybe_mapped(rdata);
+ const int verts_facedot_len = poly_len;
+ int facedot_len_used = 0;
+
+ static struct {
+ uint fdot_pos, fdot_nor_flag;
+ } attr_id;
+ static GPUVertFormat facedot_format = {0};
+ if (facedot_format.attr_len == 0) {
+ attr_id.fdot_pos = GPU_vertformat_attr_add(
+ &facedot_format, "pos", GPU_COMP_F32, 3, GPU_FETCH_FLOAT);
+ attr_id.fdot_nor_flag = GPU_vertformat_attr_add(
+ &facedot_format, "norAndFlag", GPU_COMP_I10, 4, GPU_FETCH_INT_TO_FLOAT_UNIT);
+ }
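+ /* The vertex format and attribute ids are built once and cached in
+ * function-local statics; 'attr_len == 0' above detects the first call. */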
+
+ if (DRW_TEST_ASSIGN_VBO(vbo_facedots_pos_nor_data)) {
+ GPU_vertbuf_init_with_format(vbo_facedots_pos_nor_data, &facedot_format);
+ GPU_vertbuf_data_alloc(vbo_facedots_pos_nor_data, verts_facedot_len);
+ /* TODO(fclem): Maybe move data generation to mesh_render_data_create() */
+ if (rdata->edit_bmesh) {
+ if (rdata->edit_data && rdata->edit_data->vertexCos != NULL) {
+ BKE_editmesh_cache_ensure_poly_normals(rdata->edit_bmesh, rdata->edit_data);
+ BKE_editmesh_cache_ensure_poly_centers(rdata->edit_bmesh, rdata->edit_data);
+ }
+ }
+ }
+
+ if (rdata->mapped.use == false) {
+ for (int i = 0; i < poly_len; i++) {
+ if (add_edit_facedot(rdata,
+ vbo_facedots_pos_nor_data,
+ attr_id.fdot_pos,
+ attr_id.fdot_nor_flag,
+ i,
+ facedot_len_used)) {
+ facedot_len_used += 1;
+ }
+ }
+ }
+ else {
#if 0 /* TODO(fclem): Mapped facedots are not following the original face. */
- Mesh *me_cage = rdata->mapped.me_cage;
- const MVert *mvert = me_cage->mvert;
- const MEdge *medge = me_cage->medge;
- const int *e_origindex = rdata->mapped.e_origindex;
- const int *v_origindex = rdata->mapped.v_origindex;
+ Mesh *me_cage = rdata->mapped.me_cage;
+ const MVert *mvert = me_cage->mvert;
+ const MEdge *medge = me_cage->medge;
+ const int *e_origindex = rdata->mapped.e_origindex;
+ const int *v_origindex = rdata->mapped.v_origindex;
#endif
- for (int i = 0; i < poly_len; i++) {
- if (add_edit_facedot_mapped(rdata, vbo_facedots_pos_nor_data,
- attr_id.fdot_pos, attr_id.fdot_nor_flag,
- i, facedot_len_used))
- {
- facedot_len_used += 1;
- }
- }
- }
-
- /* Resize & Finish */
- if (facedot_len_used != verts_facedot_len) {
- if (vbo_facedots_pos_nor_data != NULL) {
- GPU_vertbuf_data_resize(vbo_facedots_pos_nor_data, facedot_len_used);
- }
- }
+ for (int i = 0; i < poly_len; i++) {
+ if (add_edit_facedot_mapped(rdata,
+ vbo_facedots_pos_nor_data,
+ attr_id.fdot_pos,
+ attr_id.fdot_nor_flag,
+ i,
+ facedot_len_used)) {
+ facedot_len_used += 1;
+ }
+ }
+ }
+
+ /* Resize & Finish */
+ if (facedot_len_used != verts_facedot_len) {
+ if (vbo_facedots_pos_nor_data != NULL) {
+ GPU_vertbuf_data_resize(vbo_facedots_pos_nor_data, facedot_len_used);
+ }
+ }
}
/* Indices */
#define NO_EDGE INT_MAX
-static void mesh_create_edges_adjacency_lines(
- MeshRenderData *rdata, GPUIndexBuf *ibo, bool *r_is_manifold, const bool use_hide)
+static void mesh_create_edges_adjacency_lines(MeshRenderData *rdata,
+ GPUIndexBuf *ibo,
+ bool *r_is_manifold,
+ const bool use_hide)
{
- const MLoopTri *mlooptri;
- const int vert_len = mesh_render_data_verts_len_get_maybe_mapped(rdata);
- const int tri_len = mesh_render_data_looptri_len_get_maybe_mapped(rdata);
-
- *r_is_manifold = true;
-
- /* Allocate the maximum; only the used indices are sent to the GPU. */
- GPUIndexBufBuilder elb;
- GPU_indexbuf_init(&elb, GPU_PRIM_LINES_ADJ, tri_len * 3, vert_len);
-
- if (rdata->mapped.use) {
- Mesh *me_cage = rdata->mapped.me_cage;
- mlooptri = BKE_mesh_runtime_looptri_ensure(me_cage);
- }
- else {
- mlooptri = rdata->mlooptri;
- }
-
- EdgeHash *eh = BLI_edgehash_new_ex(__func__, tri_len * 3);
- /* Create edges for each pair of triangles sharing an edge. */
- for (int i = 0; i < tri_len; i++) {
- for (int e = 0; e < 3; e++) {
- uint v0, v1, v2;
- if (rdata->mapped.use) {
- const MLoop *mloop = rdata->mloop;
- const MLoopTri *mlt = mlooptri + i;
- const int p_orig = rdata->mapped.p_origindex[mlt->poly];
- if (p_orig != ORIGINDEX_NONE) {
- BMesh *bm = rdata->edit_bmesh->bm;
- BMFace *efa = BM_face_at_index(bm, p_orig);
- /* Assume 'use_hide' */
- if (BM_elem_flag_test(efa, BM_ELEM_HIDDEN)) {
- break;
- }
- }
- v0 = mloop[mlt->tri[e]].v;
- v1 = mloop[mlt->tri[(e + 1) % 3]].v;
- v2 = mloop[mlt->tri[(e + 2) % 3]].v;
- }
- else if (rdata->edit_bmesh) {
- const BMLoop **bm_looptri = (const BMLoop **)rdata->edit_bmesh->looptris[i];
- if (BM_elem_flag_test(bm_looptri[0]->f, BM_ELEM_HIDDEN)) {
- break;
- }
- v0 = BM_elem_index_get(bm_looptri[e]->v);
- v1 = BM_elem_index_get(bm_looptri[(e + 1) % 3]->v);
- v2 = BM_elem_index_get(bm_looptri[(e + 2) % 3]->v);
- }
- else {
- const MLoop *mloop = rdata->mloop;
- const MLoopTri *mlt = mlooptri + i;
- const MPoly *mp = &rdata->mpoly[mlt->poly];
- if (use_hide && (mp->flag & ME_HIDE)) {
- break;
- }
- v0 = mloop[mlt->tri[e]].v;
- v1 = mloop[mlt->tri[(e + 1) % 3]].v;
- v2 = mloop[mlt->tri[(e + 2) % 3]].v;
- }
- bool inv_indices = (v1 > v2);
- void **pval;
- bool value_is_init = BLI_edgehash_ensure_p(eh, v1, v2, &pval);
- int v_data = POINTER_AS_INT(*pval);
- if (!value_is_init || v_data == NO_EDGE) {
- /* Save the winding order inside the sign bit, because the
- * edgehash sorts the keys and we need to compare winding later. */
- int value = (int)v0 + 1; /* Int 0 cannot be signed */
- *pval = POINTER_FROM_INT((inv_indices) ? -value : value);
- }
- else {
- /* HACK: Tag as unused to avoid the overhead of BLI_edgehash_remove. */
- *pval = POINTER_FROM_INT(NO_EDGE);
- bool inv_opposite = (v_data < 0);
- uint v_opposite = (uint)abs(v_data) - 1;
-
- if (inv_opposite == inv_indices) {
- /* Don't share the edge if the triangles have non-matching winding. */
- GPU_indexbuf_add_line_adj_verts(&elb, v0, v1, v2, v0);
- GPU_indexbuf_add_line_adj_verts(&elb, v_opposite, v1, v2, v_opposite);
- *r_is_manifold = false;
- }
- else {
- GPU_indexbuf_add_line_adj_verts(&elb, v0, v1, v2, v_opposite);
- }
- }
- }
- }
- /* Create edges for remaining non-manifold edges. */
- EdgeHashIterator *ehi;
- for (ehi = BLI_edgehashIterator_new(eh);
- BLI_edgehashIterator_isDone(ehi) == false;
- BLI_edgehashIterator_step(ehi))
- {
- uint v1, v2;
- int v_data = POINTER_AS_INT(BLI_edgehashIterator_getValue(ehi));
- if (v_data == NO_EDGE) {
- continue;
- }
- BLI_edgehashIterator_getKey(ehi, &v1, &v2);
- uint v0 = (uint)abs(v_data) - 1;
- if (v_data < 0) { /* inv_opposite */
- SWAP(uint, v1, v2);
- }
- GPU_indexbuf_add_line_adj_verts(&elb, v0, v1, v2, v0);
- *r_is_manifold = false;
- }
- BLI_edgehashIterator_free(ehi);
- BLI_edgehash_free(eh, NULL);
-
- GPU_indexbuf_build_in_place(&elb, ibo);
+ const MLoopTri *mlooptri;
+ const int vert_len = mesh_render_data_verts_len_get_maybe_mapped(rdata);
+ const int tri_len = mesh_render_data_looptri_len_get_maybe_mapped(rdata);
+
+ *r_is_manifold = true;
+
+ /* Allocate the maximum; only the used indices are sent to the GPU. */
+ GPUIndexBufBuilder elb;
+ GPU_indexbuf_init(&elb, GPU_PRIM_LINES_ADJ, tri_len * 3, vert_len);
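+ /* GPU_PRIM_LINES_ADJ stores four indices per edge: the two endpoints in the
+ * middle, flanked by one vertex of each adjacent triangle (the layout
+ * geometry shaders expect from GL_LINES_ADJACENCY). */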
+
+ if (rdata->mapped.use) {
+ Mesh *me_cage = rdata->mapped.me_cage;
+ mlooptri = BKE_mesh_runtime_looptri_ensure(me_cage);
+ }
+ else {
+ mlooptri = rdata->mlooptri;
+ }
+
+ EdgeHash *eh = BLI_edgehash_new_ex(__func__, tri_len * 3);
+ /* Create edges for each pair of triangles sharing an edge. */
+ for (int i = 0; i < tri_len; i++) {
+ for (int e = 0; e < 3; e++) {
+ uint v0, v1, v2;
+ if (rdata->mapped.use) {
+ const MLoop *mloop = rdata->mloop;
+ const MLoopTri *mlt = mlooptri + i;
+ const int p_orig = rdata->mapped.p_origindex[mlt->poly];
+ if (p_orig != ORIGINDEX_NONE) {
+ BMesh *bm = rdata->edit_bmesh->bm;
+ BMFace *efa = BM_face_at_index(bm, p_orig);
+ /* Assume 'use_hide' */
+ if (BM_elem_flag_test(efa, BM_ELEM_HIDDEN)) {
+ break;
+ }
+ }
+ v0 = mloop[mlt->tri[e]].v;
+ v1 = mloop[mlt->tri[(e + 1) % 3]].v;
+ v2 = mloop[mlt->tri[(e + 2) % 3]].v;
+ }
+ else if (rdata->edit_bmesh) {
+ const BMLoop **bm_looptri = (const BMLoop **)rdata->edit_bmesh->looptris[i];
+ if (BM_elem_flag_test(bm_looptri[0]->f, BM_ELEM_HIDDEN)) {
+ break;
+ }
+ v0 = BM_elem_index_get(bm_looptri[e]->v);
+ v1 = BM_elem_index_get(bm_looptri[(e + 1) % 3]->v);
+ v2 = BM_elem_index_get(bm_looptri[(e + 2) % 3]->v);
+ }
+ else {
+ const MLoop *mloop = rdata->mloop;
+ const MLoopTri *mlt = mlooptri + i;
+ const MPoly *mp = &rdata->mpoly[mlt->poly];
+ if (use_hide && (mp->flag & ME_HIDE)) {
+ break;
+ }
+ v0 = mloop[mlt->tri[e]].v;
+ v1 = mloop[mlt->tri[(e + 1) % 3]].v;
+ v2 = mloop[mlt->tri[(e + 2) % 3]].v;
+ }
+ bool inv_indices = (v1 > v2);
+ void **pval;
+ bool value_is_init = BLI_edgehash_ensure_p(eh, v1, v2, &pval);
+ int v_data = POINTER_AS_INT(*pval);
+ if (!value_is_init || v_data == NO_EDGE) {
+ /* Save the winding order inside the sign bit, because the
+ * edgehash sorts the keys and we need to compare winding later. */
+ int value = (int)v0 + 1; /* Int 0 cannot be signed */
+ *pval = POINTER_FROM_INT((inv_indices) ? -value : value);
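+ /* e.g. v0 = 4 is stored as +5 when (v1, v2) already matches this
+ * triangle's winding, or -5 when inverted; abs(stored) - 1 recovers v0
+ * and the sign recovers the winding. */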
+ }
+ else {
+ /* HACK: Tag as unused to avoid the overhead of BLI_edgehash_remove. */
+ *pval = POINTER_FROM_INT(NO_EDGE);
+ bool inv_opposite = (v_data < 0);
+ uint v_opposite = (uint)abs(v_data) - 1;
+
+ if (inv_opposite == inv_indices) {
+ /* Don't share the edge if the triangles have non-matching winding. */
+ GPU_indexbuf_add_line_adj_verts(&elb, v0, v1, v2, v0);
+ GPU_indexbuf_add_line_adj_verts(&elb, v_opposite, v1, v2, v_opposite);
+ *r_is_manifold = false;
+ }
+ else {
+ GPU_indexbuf_add_line_adj_verts(&elb, v0, v1, v2, v_opposite);
+ }
+ }
+ }
+ }
+ /* Create edges for remaining non-manifold edges. */
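+ /* Entries still holding a vertex were seen by only one triangle, i.e.
+ * boundary edges: emit them with their own v0 filling both adjacency slots. */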
+ EdgeHashIterator *ehi;
+ for (ehi = BLI_edgehashIterator_new(eh); BLI_edgehashIterator_isDone(ehi) == false;
+ BLI_edgehashIterator_step(ehi)) {
+ uint v1, v2;
+ int v_data = POINTER_AS_INT(BLI_edgehashIterator_getValue(ehi));
+ if (v_data == NO_EDGE) {
+ continue;
+ }
+ BLI_edgehashIterator_getKey(ehi, &v1, &v2);
+ uint v0 = (uint)abs(v_data) - 1;
+ if (v_data < 0) { /* inv_opposite */
+ SWAP(uint, v1, v2);
+ }
+ GPU_indexbuf_add_line_adj_verts(&elb, v0, v1, v2, v0);
+ *r_is_manifold = false;
+ }
+ BLI_edgehashIterator_free(ehi);
+ BLI_edgehash_free(eh, NULL);
+
+ GPU_indexbuf_build_in_place(&elb, ibo);
}
#undef NO_EDGE
static void mesh_create_edges_lines(MeshRenderData *rdata, GPUIndexBuf *ibo, const bool use_hide)
{
- const int verts_len = mesh_render_data_verts_len_get_maybe_mapped(rdata);
- const int edges_len = mesh_render_data_edges_len_get_maybe_mapped(rdata);
-
- GPUIndexBufBuilder elb;
- GPU_indexbuf_init(&elb, GPU_PRIM_LINES, edges_len, verts_len);
-
- if (rdata->mapped.use == false) {
- if (rdata->edit_bmesh) {
- BMesh *bm = rdata->edit_bmesh->bm;
- BMIter iter;
- BMEdge *eed;
-
- BM_ITER_MESH (eed, &iter, bm, BM_EDGES_OF_MESH) {
- /* use_hide always for edit-mode */
- if (BM_elem_flag_test(eed, BM_ELEM_HIDDEN)) {
- continue;
- }
- GPU_indexbuf_add_line_verts(&elb, BM_elem_index_get(eed->v1), BM_elem_index_get(eed->v2));
- }
- }
- else {
- const MEdge *ed = rdata->medge;
- for (int i = 0; i < edges_len; i++, ed++) {
- if ((ed->flag & ME_EDGERENDER) == 0) {
- continue;
- }
- if (!(use_hide && (ed->flag & ME_HIDE))) {
- GPU_indexbuf_add_line_verts(&elb, ed->v1, ed->v2);
- }
- }
- }
- }
- else {
- BMesh *bm = rdata->edit_bmesh->bm;
- const MEdge *edge = rdata->medge;
- for (int i = 0; i < edges_len; i++, edge++) {
- const int p_orig = rdata->mapped.e_origindex[i];
- if (p_orig != ORIGINDEX_NONE) {
- BMEdge *eed = BM_edge_at_index(bm, p_orig);
- if (!BM_elem_flag_test(eed, BM_ELEM_HIDDEN)) {
- GPU_indexbuf_add_line_verts(&elb, edge->v1, edge->v2);
- }
- }
- }
- }
-
- GPU_indexbuf_build_in_place(&elb, ibo);
+ const int verts_len = mesh_render_data_verts_len_get_maybe_mapped(rdata);
+ const int edges_len = mesh_render_data_edges_len_get_maybe_mapped(rdata);
+
+ GPUIndexBufBuilder elb;
+ GPU_indexbuf_init(&elb, GPU_PRIM_LINES, edges_len, verts_len);
+
+ if (rdata->mapped.use == false) {
+ if (rdata->edit_bmesh) {
+ BMesh *bm = rdata->edit_bmesh->bm;
+ BMIter iter;
+ BMEdge *eed;
+
+ BM_ITER_MESH (eed, &iter, bm, BM_EDGES_OF_MESH) {
+ /* use_hide always for edit-mode */
+ if (BM_elem_flag_test(eed, BM_ELEM_HIDDEN)) {
+ continue;
+ }
+ GPU_indexbuf_add_line_verts(&elb, BM_elem_index_get(eed->v1), BM_elem_index_get(eed->v2));
+ }
+ }
+ else {
+ const MEdge *ed = rdata->medge;
+ for (int i = 0; i < edges_len; i++, ed++) {
+ if ((ed->flag & ME_EDGERENDER) == 0) {
+ continue;
+ }
+ if (!(use_hide && (ed->flag & ME_HIDE))) {
+ GPU_indexbuf_add_line_verts(&elb, ed->v1, ed->v2);
+ }
+ }
+ }
+ }
+ else {
+ BMesh *bm = rdata->edit_bmesh->bm;
+ const MEdge *edge = rdata->medge;
+ for (int i = 0; i < edges_len; i++, edge++) {
+ const int p_orig = rdata->mapped.e_origindex[i];
+ if (p_orig != ORIGINDEX_NONE) {
+ BMEdge *eed = BM_edge_at_index(bm, p_orig);
+ if (!BM_elem_flag_test(eed, BM_ELEM_HIDDEN)) {
+ GPU_indexbuf_add_line_verts(&elb, edge->v1, edge->v2);
+ }
+ }
+ }
+ }
+
+ GPU_indexbuf_build_in_place(&elb, ibo);
}
static void mesh_create_surf_tris(MeshRenderData *rdata, GPUIndexBuf *ibo, const bool use_hide)
{
- const int vert_len = mesh_render_data_verts_len_get_maybe_mapped(rdata);
- const int tri_len = mesh_render_data_looptri_len_get(rdata);
-
- GPUIndexBufBuilder elb;
- GPU_indexbuf_init(&elb, GPU_PRIM_TRIS, tri_len, vert_len * 3);
-
- if (rdata->mapped.use == false) {
- if (rdata->edit_bmesh) {
- for (int i = 0; i < tri_len; i++) {
- const BMLoop **bm_looptri = (const BMLoop **)rdata->edit_bmesh->looptris[i];
- const BMFace *bm_face = bm_looptri[0]->f;
- /* use_hide always for edit-mode */
- if (BM_elem_flag_test(bm_face, BM_ELEM_HIDDEN)) {
- continue;
- }
- GPU_indexbuf_add_tri_verts(
- &elb,
- BM_elem_index_get(bm_looptri[0]->v),
- BM_elem_index_get(bm_looptri[1]->v),
- BM_elem_index_get(bm_looptri[2]->v));
- }
- }
- else {
- const MLoop *loops = rdata->mloop;
- for (int i = 0; i < tri_len; i++) {
- const MLoopTri *mlt = &rdata->mlooptri[i];
- const MPoly *mp = &rdata->mpoly[mlt->poly];
- if (use_hide && (mp->flag & ME_HIDE)) {
- continue;
- }
- GPU_indexbuf_add_tri_verts(&elb, loops[mlt->tri[0]].v, loops[mlt->tri[1]].v, loops[mlt->tri[2]].v);
- }
- }
- }
- else {
- /* Note: mapped doesn't support lnors yet. */
- BMesh *bm = rdata->edit_bmesh->bm;
- Mesh *me_cage = rdata->mapped.me_cage;
-
- const MLoop *loops = rdata->mloop;
- const MLoopTri *mlooptri = BKE_mesh_runtime_looptri_ensure(me_cage);
- for (int i = 0; i < tri_len; i++) {
- const MLoopTri *mlt = &mlooptri[i];
- const int p_orig = rdata->mapped.p_origindex[mlt->poly];
- if (p_orig != ORIGINDEX_NONE) {
- /* Assume 'use_hide' */
- BMFace *efa = BM_face_at_index(bm, p_orig);
- if (!BM_elem_flag_test(efa, BM_ELEM_HIDDEN)) {
- GPU_indexbuf_add_tri_verts(&elb, loops[mlt->tri[0]].v, loops[mlt->tri[1]].v, loops[mlt->tri[2]].v);
- }
- }
- }
- }
-
- GPU_indexbuf_build_in_place(&elb, ibo);
+ const int vert_len = mesh_render_data_verts_len_get_maybe_mapped(rdata);
+ const int tri_len = mesh_render_data_looptri_len_get(rdata);
+
+ GPUIndexBufBuilder elb;
+ GPU_indexbuf_init(&elb, GPU_PRIM_TRIS, tri_len, vert_len * 3);
+
+ if (rdata->mapped.use == false) {
+ if (rdata->edit_bmesh) {
+ for (int i = 0; i < tri_len; i++) {
+ const BMLoop **bm_looptri = (const BMLoop **)rdata->edit_bmesh->looptris[i];
+ const BMFace *bm_face = bm_looptri[0]->f;
+ /* use_hide always for edit-mode */
+ if (BM_elem_flag_test(bm_face, BM_ELEM_HIDDEN)) {
+ continue;
+ }
+ GPU_indexbuf_add_tri_verts(&elb,
+ BM_elem_index_get(bm_looptri[0]->v),
+ BM_elem_index_get(bm_looptri[1]->v),
+ BM_elem_index_get(bm_looptri[2]->v));
+ }
+ }
+ else {
+ const MLoop *loops = rdata->mloop;
+ for (int i = 0; i < tri_len; i++) {
+ const MLoopTri *mlt = &rdata->mlooptri[i];
+ const MPoly *mp = &rdata->mpoly[mlt->poly];
+ if (use_hide && (mp->flag & ME_HIDE)) {
+ continue;
+ }
+ GPU_indexbuf_add_tri_verts(
+ &elb, loops[mlt->tri[0]].v, loops[mlt->tri[1]].v, loops[mlt->tri[2]].v);
+ }
+ }
+ }
+ else {
+ /* Note: mapped doesn't support lnors yet. */
+ BMesh *bm = rdata->edit_bmesh->bm;
+ Mesh *me_cage = rdata->mapped.me_cage;
+
+ const MLoop *loops = rdata->mloop;
+ const MLoopTri *mlooptri = BKE_mesh_runtime_looptri_ensure(me_cage);
+ for (int i = 0; i < tri_len; i++) {
+ const MLoopTri *mlt = &mlooptri[i];
+ const int p_orig = rdata->mapped.p_origindex[mlt->poly];
+ if (p_orig != ORIGINDEX_NONE) {
+ /* Assume 'use_hide' */
+ BMFace *efa = BM_face_at_index(bm, p_orig);
+ if (!BM_elem_flag_test(efa, BM_ELEM_HIDDEN)) {
+ GPU_indexbuf_add_tri_verts(
+ &elb, loops[mlt->tri[0]].v, loops[mlt->tri[1]].v, loops[mlt->tri[2]].v);
+ }
+ }
+ }
+ }
+
+ GPU_indexbuf_build_in_place(&elb, ibo);
}
-static void mesh_create_loops_lines(
- MeshRenderData *rdata, GPUIndexBuf *ibo, const bool use_hide)
+static void mesh_create_loops_lines(MeshRenderData *rdata, GPUIndexBuf *ibo, const bool use_hide)
{
- const int edge_len = mesh_render_data_edges_len_get(rdata);
- const int loop_len = mesh_render_data_loops_len_get(rdata);
- const int poly_len = mesh_render_data_polys_len_get(rdata);
-
- GPUIndexBufBuilder elb;
- GPU_indexbuf_init(&elb, GPU_PRIM_LINES, edge_len, loop_len);
-
- if (rdata->mapped.use == false) {
- if (rdata->edit_bmesh) {
- BMesh *bm = rdata->edit_bmesh->bm;
- BMIter iter;
- BMEdge *bm_edge;
-
- BM_ITER_MESH (bm_edge, &iter, bm, BM_EDGES_OF_MESH) {
- /* use_hide always for edit-mode */
- if (!BM_elem_flag_test(bm_edge, BM_ELEM_HIDDEN) &&
- bm_edge->l != NULL)
- {
- BMLoop *bm_loop1 = bm_vert_find_first_loop_visible_inline(bm_edge->v1);
- BMLoop *bm_loop2 = bm_vert_find_first_loop_visible_inline(bm_edge->v2);
- int v1 = BM_elem_index_get(bm_loop1);
- int v2 = BM_elem_index_get(bm_loop2);
- if (v1 > v2) {
- SWAP(int, v1, v2);
- }
- GPU_indexbuf_add_line_verts(&elb, v1, v2);
- }
- }
- }
- else {
- MLoop *mloop = (MLoop *)rdata->mloop;
- MEdge *medge = (MEdge *)rdata->medge;
-
- /* Reset flag */
- for (int edge = 0; edge < edge_len; ++edge) {
- /* NOTE: not thread safe. */
- medge[edge].flag &= ~ME_EDGE_TMP_TAG;
- }
-
- for (int poly = 0; poly < poly_len; poly++) {
- const MPoly *mp = &rdata->mpoly[poly];
- if (!(use_hide && (mp->flag & ME_HIDE))) {
- for (int j = 0; j < mp->totloop; j++) {
- MEdge *ed = (MEdge *)rdata->medge + mloop[mp->loopstart + j].e;
- if ((ed->flag & ME_EDGE_TMP_TAG) == 0) {
- ed->flag |= ME_EDGE_TMP_TAG;
- int v1 = mp->loopstart + j;
- int v2 = mp->loopstart + (j + 1) % mp->totloop;
- GPU_indexbuf_add_line_verts(&elb, v1, v2);
- }
- }
- }
- }
- }
- }
- else {
- /* Implement ... eventually if needed. */
- BLI_assert(0);
- }
-
- GPU_indexbuf_build_in_place(&elb, ibo);
+ const int edge_len = mesh_render_data_edges_len_get(rdata);
+ const int loop_len = mesh_render_data_loops_len_get(rdata);
+ const int poly_len = mesh_render_data_polys_len_get(rdata);
+
+ GPUIndexBufBuilder elb;
+ GPU_indexbuf_init(&elb, GPU_PRIM_LINES, edge_len, loop_len);
+
+ if (rdata->mapped.use == false) {
+ if (rdata->edit_bmesh) {
+ BMesh *bm = rdata->edit_bmesh->bm;
+ BMIter iter;
+ BMEdge *bm_edge;
+
+ BM_ITER_MESH (bm_edge, &iter, bm, BM_EDGES_OF_MESH) {
+ /* use_hide always for edit-mode */
+ if (!BM_elem_flag_test(bm_edge, BM_ELEM_HIDDEN) && bm_edge->l != NULL) {
+ BMLoop *bm_loop1 = bm_vert_find_first_loop_visible_inline(bm_edge->v1);
+ BMLoop *bm_loop2 = bm_vert_find_first_loop_visible_inline(bm_edge->v2);
+ int v1 = BM_elem_index_get(bm_loop1);
+ int v2 = BM_elem_index_get(bm_loop2);
+ if (v1 > v2) {
+ SWAP(int, v1, v2);
+ }
+ GPU_indexbuf_add_line_verts(&elb, v1, v2);
+ }
+ }
+ }
+ else {
+ MLoop *mloop = (MLoop *)rdata->mloop;
+ MEdge *medge = (MEdge *)rdata->medge;
+
+ /* Reset flag */
+ for (int edge = 0; edge < edge_len; ++edge) {
+ /* NOTE: not thread safe. */
+ medge[edge].flag &= ~ME_EDGE_TMP_TAG;
+ }
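+ /* ME_EDGE_TMP_TAG marks edges already emitted, so an edge shared by
+ * several polygons produces a single line. */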
+
+ for (int poly = 0; poly < poly_len; poly++) {
+ const MPoly *mp = &rdata->mpoly[poly];
+ if (!(use_hide && (mp->flag & ME_HIDE))) {
+ for (int j = 0; j < mp->totloop; j++) {
+ MEdge *ed = (MEdge *)rdata->medge + mloop[mp->loopstart + j].e;
+ if ((ed->flag & ME_EDGE_TMP_TAG) == 0) {
+ ed->flag |= ME_EDGE_TMP_TAG;
+ int v1 = mp->loopstart + j;
+ int v2 = mp->loopstart + (j + 1) % mp->totloop;
+ GPU_indexbuf_add_line_verts(&elb, v1, v2);
+ }
+ }
+ }
+ }
+ }
+ }
+ else {
+ /* Implement ... eventually if needed. */
+ BLI_assert(0);
+ }
+
+ GPU_indexbuf_build_in_place(&elb, ibo);
}
-static void mesh_create_loops_line_strips(
- MeshRenderData *rdata, GPUIndexBuf *ibo, const bool use_hide)
+static void mesh_create_loops_line_strips(MeshRenderData *rdata,
+ GPUIndexBuf *ibo,
+ const bool use_hide)
{
- const int loop_len = mesh_render_data_loops_len_get(rdata);
- const int poly_len = mesh_render_data_polys_len_get(rdata);
-
- GPUIndexBufBuilder elb;
- GPU_indexbuf_init_ex(&elb, GPU_PRIM_LINE_STRIP, loop_len + poly_len * 2, loop_len, true);
-
- uint v_index = 0;
- if (rdata->mapped.use == false) {
- if (rdata->edit_bmesh) {
- BMesh *bm = rdata->edit_bmesh->bm;
- BMIter iter;
- BMFace *bm_face;
-
- BM_ITER_MESH (bm_face, &iter, bm, BM_FACES_OF_MESH) {
- /* use_hide always for edit-mode */
- if (!BM_elem_flag_test(bm_face, BM_ELEM_HIDDEN)) {
- for (int i = 0; i < bm_face->len; i++) {
- GPU_indexbuf_add_generic_vert(&elb, v_index + i);
- }
- /* Finish loop and restart primitive. */
- GPU_indexbuf_add_generic_vert(&elb, v_index);
- GPU_indexbuf_add_primitive_restart(&elb);
- }
- v_index += bm_face->len;
- }
- }
- else {
- for (int poly = 0; poly < poly_len; poly++) {
- const MPoly *mp = &rdata->mpoly[poly];
- if (!(use_hide && (mp->flag & ME_HIDE))) {
- const int loopend = mp->loopstart + mp->totloop;
- for (int j = mp->loopstart; j < loopend; j++) {
- GPU_indexbuf_add_generic_vert(&elb, j);
- }
- /* Finish loop and restart primitive. */
- GPU_indexbuf_add_generic_vert(&elb, mp->loopstart);
- GPU_indexbuf_add_primitive_restart(&elb);
- }
- v_index += mp->totloop;
- }
- }
- }
- else {
- /* Implement ... eventually if needed. */
- BLI_assert(0);
- }
-
- GPU_indexbuf_build_in_place(&elb, ibo);
+ const int loop_len = mesh_render_data_loops_len_get(rdata);
+ const int poly_len = mesh_render_data_polys_len_get(rdata);
+
+ GPUIndexBufBuilder elb;
+ GPU_indexbuf_init_ex(&elb, GPU_PRIM_LINE_STRIP, loop_len + poly_len * 2, loop_len, true);
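+ /* The final 'true' enables primitive restart: each face below is closed by
+ * repeating its first loop and then a restart index, hence the
+ * 'loop_len + poly_len * 2' allocation. */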
+
+ uint v_index = 0;
+ if (rdata->mapped.use == false) {
+ if (rdata->edit_bmesh) {
+ BMesh *bm = rdata->edit_bmesh->bm;
+ BMIter iter;
+ BMFace *bm_face;
+
+ BM_ITER_MESH (bm_face, &iter, bm, BM_FACES_OF_MESH) {
+ /* use_hide always for edit-mode */
+ if (!BM_elem_flag_test(bm_face, BM_ELEM_HIDDEN)) {
+ for (int i = 0; i < bm_face->len; i++) {
+ GPU_indexbuf_add_generic_vert(&elb, v_index + i);
+ }
+ /* Finish loop and restart primitive. */
+ GPU_indexbuf_add_generic_vert(&elb, v_index);
+ GPU_indexbuf_add_primitive_restart(&elb);
+ }
+ v_index += bm_face->len;
+ }
+ }
+ else {
+ for (int poly = 0; poly < poly_len; poly++) {
+ const MPoly *mp = &rdata->mpoly[poly];
+ if (!(use_hide && (mp->flag & ME_HIDE))) {
+ const int loopend = mp->loopstart + mp->totloop;
+ for (int j = mp->loopstart; j < loopend; j++) {
+ GPU_indexbuf_add_generic_vert(&elb, j);
+ }
+ /* Finish loop and restart primitive. */
+ GPU_indexbuf_add_generic_vert(&elb, mp->loopstart);
+ GPU_indexbuf_add_primitive_restart(&elb);
+ }
+ v_index += mp->totloop;
+ }
+ }
+ }
+ else {
+ /* Implement ... eventually if needed. */
+ BLI_assert(0);
+ }
+
+ GPU_indexbuf_build_in_place(&elb, ibo);
}
-static void mesh_create_loose_edges_lines(
- MeshRenderData *rdata, GPUIndexBuf *ibo, const bool use_hide)
+static void mesh_create_loose_edges_lines(MeshRenderData *rdata,
+ GPUIndexBuf *ibo,
+ const bool use_hide)
{
- const int vert_len = mesh_render_data_verts_len_get_maybe_mapped(rdata);
- const int edge_len = mesh_render_data_edges_len_get_maybe_mapped(rdata);
-
- /* Alloc max (edge_len) and upload only needed range. */
- GPUIndexBufBuilder elb;
- GPU_indexbuf_init(&elb, GPU_PRIM_LINES, edge_len, vert_len);
-
- if (rdata->mapped.use == false) {
- if (rdata->edit_bmesh) {
- /* No need to support this since edit mesh already draws them,
- * but some engines may want them ... */
- BMesh *bm = rdata->edit_bmesh->bm;
- BMIter eiter;
- BMEdge *eed;
- BM_ITER_MESH(eed, &eiter, bm, BM_EDGES_OF_MESH) {
- if (bm_edge_is_loose_and_visible(eed)) {
- GPU_indexbuf_add_line_verts(&elb, BM_elem_index_get(eed->v1), BM_elem_index_get(eed->v2));
- }
- }
- }
- else {
- for (int i = 0; i < edge_len; i++) {
- const MEdge *medge = &rdata->medge[i];
- if ((medge->flag & ME_LOOSEEDGE) &&
- !(use_hide && (medge->flag & ME_HIDE)))
- {
- GPU_indexbuf_add_line_verts(&elb, medge->v1, medge->v2);
- }
- }
- }
- }
- else {
- /* Hidden checks are already done when creating the loose edge list. */
- Mesh *me_cage = rdata->mapped.me_cage;
- for (int i_iter = 0; i_iter < rdata->mapped.loose_edge_len; i_iter++) {
- const int i = rdata->mapped.loose_edges[i_iter];
- const MEdge *medge = &me_cage->medge[i];
- GPU_indexbuf_add_line_verts(&elb, medge->v1, medge->v2);
- }
- }
-
- GPU_indexbuf_build_in_place(&elb, ibo);
+ const int vert_len = mesh_render_data_verts_len_get_maybe_mapped(rdata);
+ const int edge_len = mesh_render_data_edges_len_get_maybe_mapped(rdata);
+
+ /* Alloc max (edge_len) and upload only needed range. */
+ GPUIndexBufBuilder elb;
+ GPU_indexbuf_init(&elb, GPU_PRIM_LINES, edge_len, vert_len);
+
+ if (rdata->mapped.use == false) {
+ if (rdata->edit_bmesh) {
+ /* No need to support this since edit mesh already draws them,
+ * but some engines may want them ... */
+ BMesh *bm = rdata->edit_bmesh->bm;
+ BMIter eiter;
+ BMEdge *eed;
+ BM_ITER_MESH (eed, &eiter, bm, BM_EDGES_OF_MESH) {
+ if (bm_edge_is_loose_and_visible(eed)) {
+ GPU_indexbuf_add_line_verts(
+ &elb, BM_elem_index_get(eed->v1), BM_elem_index_get(eed->v2));
+ }
+ }
+ }
+ else {
+ for (int i = 0; i < edge_len; i++) {
+ const MEdge *medge = &rdata->medge[i];
+ if ((medge->flag & ME_LOOSEEDGE) && !(use_hide && (medge->flag & ME_HIDE))) {
+ GPU_indexbuf_add_line_verts(&elb, medge->v1, medge->v2);
+ }
+ }
+ }
+ }
+ else {
+ /* Hidden checks are already done when creating the loose edge list. */
+ Mesh *me_cage = rdata->mapped.me_cage;
+ for (int i_iter = 0; i_iter < rdata->mapped.loose_edge_len; i_iter++) {
+ const int i = rdata->mapped.loose_edges[i_iter];
+ const MEdge *medge = &me_cage->medge[i];
+ GPU_indexbuf_add_line_verts(&elb, medge->v1, medge->v2);
+ }
+ }
+
+ GPU_indexbuf_build_in_place(&elb, ibo);
}
-static void mesh_create_loops_tris(
- MeshRenderData *rdata, GPUIndexBuf **ibo, int ibo_len, const bool use_hide)
+static void mesh_create_loops_tris(MeshRenderData *rdata,
+ GPUIndexBuf **ibo,
+ int ibo_len,
+ const bool use_hide)
{
- const int loop_len = mesh_render_data_loops_len_get(rdata);
- const int tri_len = mesh_render_data_looptri_len_get(rdata);
-
- GPUIndexBufBuilder *elb = BLI_array_alloca(elb, ibo_len);
-
- for (int i = 0; i < ibo_len; ++i) {
- /* TODO alloc minimum necessary. */
- GPU_indexbuf_init(&elb[i], GPU_PRIM_TRIS, tri_len, loop_len * 3);
- }
-
- if (rdata->mapped.use == false) {
- if (rdata->edit_bmesh) {
- for (int i = 0; i < tri_len; i++) {
- const BMLoop **bm_looptri = (const BMLoop **)rdata->edit_bmesh->looptris[i];
- const BMFace *bm_face = bm_looptri[0]->f;
- /* use_hide always for edit-mode */
- if (BM_elem_flag_test(bm_face, BM_ELEM_HIDDEN)) {
- continue;
- }
- int mat = min_ii(ibo_len - 1, bm_face->mat_nr);
- GPU_indexbuf_add_tri_verts(
- &elb[mat],
- BM_elem_index_get(bm_looptri[0]),
- BM_elem_index_get(bm_looptri[1]),
- BM_elem_index_get(bm_looptri[2]));
- }
- }
- else {
- for (int i = 0; i < tri_len; i++) {
- const MLoopTri *mlt = &rdata->mlooptri[i];
- const MPoly *mp = &rdata->mpoly[mlt->poly];
- if (use_hide && (mp->flag & ME_HIDE)) {
- continue;
- }
- int mat = min_ii(ibo_len - 1, mp->mat_nr);
- GPU_indexbuf_add_tri_verts(&elb[mat], mlt->tri[0], mlt->tri[1], mlt->tri[2]);
- }
- }
- }
- else {
- /* Note: mapped doesn't support lnors yet. */
- BMesh *bm = rdata->edit_bmesh->bm;
- Mesh *me_cage = rdata->mapped.me_cage;
-
- const MLoopTri *mlooptri = BKE_mesh_runtime_looptri_ensure(me_cage);
- for (int i = 0; i < tri_len; i++) {
- const MLoopTri *mlt = &mlooptri[i];
- const int p_orig = rdata->mapped.p_origindex[mlt->poly];
- if (p_orig != ORIGINDEX_NONE) {
- /* Assume 'use_hide' */
- BMFace *efa = BM_face_at_index(bm, p_orig);
- if (!BM_elem_flag_test(efa, BM_ELEM_HIDDEN)) {
- int mat = min_ii(ibo_len - 1, efa->mat_nr);
- GPU_indexbuf_add_tri_verts(&elb[mat], mlt->tri[0], mlt->tri[1], mlt->tri[2]);
- }
- }
- }
- }
-
- for (int i = 0; i < ibo_len; ++i) {
- GPU_indexbuf_build_in_place(&elb[i], ibo[i]);
- }
+ const int loop_len = mesh_render_data_loops_len_get(rdata);
+ const int tri_len = mesh_render_data_looptri_len_get(rdata);
+
+ GPUIndexBufBuilder *elb = BLI_array_alloca(elb, ibo_len);
+
+ for (int i = 0; i < ibo_len; ++i) {
+ /* TODO alloc minimum necessary. */
+ GPU_indexbuf_init(&elb[i], GPU_PRIM_TRIS, tri_len, loop_len * 3);
+ }
+
+ if (rdata->mapped.use == false) {
+ if (rdata->edit_bmesh) {
+ for (int i = 0; i < tri_len; i++) {
+ const BMLoop **bm_looptri = (const BMLoop **)rdata->edit_bmesh->looptris[i];
+ const BMFace *bm_face = bm_looptri[0]->f;
+ /* use_hide always for edit-mode */
+ if (BM_elem_flag_test(bm_face, BM_ELEM_HIDDEN)) {
+ continue;
+ }
+ int mat = min_ii(ibo_len - 1, bm_face->mat_nr);
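+ /* Clamp the material index so faces with an out-of-range mat_nr land
+ * in the last per-material index buffer. */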
+ GPU_indexbuf_add_tri_verts(&elb[mat],
+ BM_elem_index_get(bm_looptri[0]),
+ BM_elem_index_get(bm_looptri[1]),
+ BM_elem_index_get(bm_looptri[2]));
+ }
+ }
+ else {
+ for (int i = 0; i < tri_len; i++) {
+ const MLoopTri *mlt = &rdata->mlooptri[i];
+ const MPoly *mp = &rdata->mpoly[mlt->poly];
+ if (use_hide && (mp->flag & ME_HIDE)) {
+ continue;
+ }
+ int mat = min_ii(ibo_len - 1, mp->mat_nr);
+ GPU_indexbuf_add_tri_verts(&elb[mat], mlt->tri[0], mlt->tri[1], mlt->tri[2]);
+ }
+ }
+ }
+ else {
+ /* Note: mapped doesn't support lnors yet. */
+ BMesh *bm = rdata->edit_bmesh->bm;
+ Mesh *me_cage = rdata->mapped.me_cage;
+
+ const MLoopTri *mlooptri = BKE_mesh_runtime_looptri_ensure(me_cage);
+ for (int i = 0; i < tri_len; i++) {
+ const MLoopTri *mlt = &mlooptri[i];
+ const int p_orig = rdata->mapped.p_origindex[mlt->poly];
+ if (p_orig != ORIGINDEX_NONE) {
+ /* Assume 'use_hide' */
+ BMFace *efa = BM_face_at_index(bm, p_orig);
+ if (!BM_elem_flag_test(efa, BM_ELEM_HIDDEN)) {
+ int mat = min_ii(ibo_len - 1, efa->mat_nr);
+ GPU_indexbuf_add_tri_verts(&elb[mat], mlt->tri[0], mlt->tri[1], mlt->tri[2]);
+ }
+ }
+ }
+ }
+
+ for (int i = 0; i < ibo_len; ++i) {
+ GPU_indexbuf_build_in_place(&elb[i], ibo[i]);
+ }
}
/* Warning! this function is not thread safe!
* It writes to MEdge->flag with ME_EDGE_TMP_TAG. */
-static void mesh_create_edit_loops_points_lines(MeshRenderData *rdata, GPUIndexBuf *ibo_verts, GPUIndexBuf *ibo_edges)
+static void mesh_create_edit_loops_points_lines(MeshRenderData *rdata,
+ GPUIndexBuf *ibo_verts,
+ GPUIndexBuf *ibo_edges)
{
- BMIter iter;
- int i;
-
- const int vert_len = mesh_render_data_verts_len_get_maybe_mapped(rdata);
- const int edge_len = mesh_render_data_edges_len_get_maybe_mapped(rdata);
- const int loop_len = mesh_render_data_loops_len_get_maybe_mapped(rdata);
- const int poly_len = mesh_render_data_polys_len_get_maybe_mapped(rdata);
- const int lvert_len = mesh_render_data_loose_verts_len_get_maybe_mapped(rdata);
- const int ledge_len = mesh_render_data_loose_edges_len_get_maybe_mapped(rdata);
- const int tot_loop_len = loop_len + ledge_len * 2 + lvert_len;
-
- GPUIndexBufBuilder elb_vert, elb_edge;
- if (DRW_TEST_ASSIGN_IBO(ibo_edges)) {
- GPU_indexbuf_init(&elb_edge, GPU_PRIM_LINES, edge_len, tot_loop_len);
- }
- if (DRW_TEST_ASSIGN_IBO(ibo_verts)) {
- GPU_indexbuf_init(&elb_vert, GPU_PRIM_POINTS, tot_loop_len, tot_loop_len);
- }
-
- int loop_idx = 0;
- if (rdata->edit_bmesh && (rdata->mapped.use == false)) {
- BMesh *bm = rdata->edit_bmesh->bm;
- /* Edges not loose. */
- if (ibo_edges) {
- BMEdge *eed;
- BM_ITER_MESH (eed, &iter, bm, BM_EDGES_OF_MESH) {
- if (!BM_elem_flag_test(eed, BM_ELEM_HIDDEN)) {
- BMLoop *l = bm_edge_find_first_loop_visible_inline(eed);
- if (l != NULL) {
- int v1 = BM_elem_index_get(eed->l);
- int v2 = BM_elem_index_get(eed->l->next);
- GPU_indexbuf_add_line_verts(&elb_edge, v1, v2);
- }
- }
- }
- }
- /* Face Loops */
- if (ibo_verts) {
- BMVert *eve;
- BM_ITER_MESH (eve, &iter, bm, BM_VERTS_OF_MESH) {
- if (!BM_elem_flag_test(eve, BM_ELEM_HIDDEN)) {
- BMLoop *l = bm_vert_find_first_loop_visible_inline(eve);
- if (l != NULL) {
- int v = BM_elem_index_get(l);
- GPU_indexbuf_add_generic_vert(&elb_vert, v);
- }
- }
- }
- }
- loop_idx = loop_len;
- /* Loose edges */
- for (i = 0; i < ledge_len; ++i) {
- if (ibo_verts) {
- GPU_indexbuf_add_generic_vert(&elb_vert, loop_idx + 0);
- GPU_indexbuf_add_generic_vert(&elb_vert, loop_idx + 1);
- }
- if (ibo_edges) {
- GPU_indexbuf_add_line_verts(&elb_edge, loop_idx + 0, loop_idx + 1);
- }
- loop_idx += 2;
- }
- /* Loose verts */
- if (ibo_verts) {
- for (i = 0; i < lvert_len; ++i) {
- GPU_indexbuf_add_generic_vert(&elb_vert, loop_idx);
- loop_idx += 1;
- }
- }
- }
- else if (rdata->mapped.use) {
- const MPoly *mpoly = rdata->mapped.me_cage->mpoly;
- MVert *mvert = rdata->mapped.me_cage->mvert;
- MEdge *medge = rdata->mapped.me_cage->medge;
- BMesh *bm = rdata->edit_bmesh->bm;
-
- const int *v_origindex = rdata->mapped.v_origindex;
- const int *e_origindex = rdata->mapped.e_origindex;
- const int *p_origindex = rdata->mapped.p_origindex;
-
- /* Reset flag */
- for (int edge = 0; edge < edge_len; ++edge) {
- /* NOTE: not thread safe. */
- medge[edge].flag &= ~ME_EDGE_TMP_TAG;
- }
- for (int vert = 0; vert < vert_len; ++vert) {
- /* NOTE: not thread safe. */
- mvert[vert].flag &= ~ME_VERT_TMP_TAG;
- }
-
- /* Face Loops */
- for (int poly = 0; poly < poly_len; poly++, mpoly++) {
- int fidx = p_origindex[poly];
- if (fidx != ORIGINDEX_NONE) {
- BMFace *efa = BM_face_at_index(bm, fidx);
- if (!BM_elem_flag_test(efa, BM_ELEM_HIDDEN)) {
- const MLoop *mloop = &rdata->mapped.me_cage->mloop[mpoly->loopstart];
- for (i = 0; i < mpoly->totloop; ++i, ++mloop) {
- if (ibo_verts && (v_origindex[mloop->v] != ORIGINDEX_NONE) &&
- (mvert[mloop->v].flag & ME_VERT_TMP_TAG) == 0)
- {
- mvert[mloop->v].flag |= ME_VERT_TMP_TAG;
- GPU_indexbuf_add_generic_vert(&elb_vert, loop_idx + i);
- }
- if (ibo_edges && (e_origindex[mloop->e] != ORIGINDEX_NONE) &&
- ((medge[mloop->e].flag & ME_EDGE_TMP_TAG) == 0))
- {
- medge[mloop->e].flag |= ME_EDGE_TMP_TAG;
- int v1 = loop_idx + i;
- int v2 = loop_idx + ((i + 1) % mpoly->totloop);
- GPU_indexbuf_add_line_verts(&elb_edge, v1, v2);
- }
- }
- }
- }
- loop_idx += mpoly->totloop;
- }
- /* Loose edges */
- for (i = 0; i < ledge_len; ++i) {
- int eidx = e_origindex[rdata->mapped.loose_edges[i]];
- if (eidx != ORIGINDEX_NONE) {
- if (ibo_verts) {
- const MEdge *ed = &medge[rdata->mapped.loose_edges[i]];
- if (v_origindex[ed->v1] != ORIGINDEX_NONE) {
- GPU_indexbuf_add_generic_vert(&elb_vert, loop_idx + 0);
- }
- if (v_origindex[ed->v2] != ORIGINDEX_NONE) {
- GPU_indexbuf_add_generic_vert(&elb_vert, loop_idx + 1);
- }
- }
- if (ibo_edges) {
- GPU_indexbuf_add_line_verts(&elb_edge, loop_idx + 0, loop_idx + 1);
- }
- }
- loop_idx += 2;
- }
- /* Loose verts */
- if (ibo_verts) {
- for (i = 0; i < lvert_len; ++i) {
- int vidx = v_origindex[rdata->mapped.loose_verts[i]];
- if (vidx != ORIGINDEX_NONE) {
- GPU_indexbuf_add_generic_vert(&elb_vert, loop_idx);
- }
- loop_idx += 1;
- }
- }
- }
- else {
- const MPoly *mpoly = rdata->mpoly;
-
- /* Face Loops */
- for (int poly = 0; poly < poly_len; poly++, mpoly++) {
- if ((mpoly->flag & ME_HIDE) == 0) {
- for (i = 0; i < mpoly->totloop; ++i) {
- if (ibo_verts) {
- GPU_indexbuf_add_generic_vert(&elb_vert, loop_idx + i);
- }
- if (ibo_edges) {
- int v1 = loop_idx + i;
- int v2 = loop_idx + ((i + 1) % mpoly->totloop);
- GPU_indexbuf_add_line_verts(&elb_edge, v1, v2);
- }
- }
- }
- loop_idx += mpoly->totloop;
- }
- /* TODO(fclem): Until we find a way to detect
- * loose verts easily outside of edit mode, this
- * will remain disabled. */
+ BMIter iter;
+ int i;
+
+ const int vert_len = mesh_render_data_verts_len_get_maybe_mapped(rdata);
+ const int edge_len = mesh_render_data_edges_len_get_maybe_mapped(rdata);
+ const int loop_len = mesh_render_data_loops_len_get_maybe_mapped(rdata);
+ const int poly_len = mesh_render_data_polys_len_get_maybe_mapped(rdata);
+ const int lvert_len = mesh_render_data_loose_verts_len_get_maybe_mapped(rdata);
+ const int ledge_len = mesh_render_data_loose_edges_len_get_maybe_mapped(rdata);
+ const int tot_loop_len = loop_len + ledge_len * 2 + lvert_len;
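+ /* Index space layout: face corners first, then two indices per loose edge,
+ * then one per loose vertex (the order in which loop_idx advances below). */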
+
+ GPUIndexBufBuilder elb_vert, elb_edge;
+ if (DRW_TEST_ASSIGN_IBO(ibo_edges)) {
+ GPU_indexbuf_init(&elb_edge, GPU_PRIM_LINES, edge_len, tot_loop_len);
+ }
+ if (DRW_TEST_ASSIGN_IBO(ibo_verts)) {
+ GPU_indexbuf_init(&elb_vert, GPU_PRIM_POINTS, tot_loop_len, tot_loop_len);
+ }
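+ /* The builders are only initialized for the index buffers actually
+ * requested (non-NULL); the later 'if (ibo_verts)' / 'if (ibo_edges)'
+ * guards rely on this. */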
+
+ int loop_idx = 0;
+ if (rdata->edit_bmesh && (rdata->mapped.use == false)) {
+ BMesh *bm = rdata->edit_bmesh->bm;
+ /* Edges not loose. */
+ if (ibo_edges) {
+ BMEdge *eed;
+ BM_ITER_MESH (eed, &iter, bm, BM_EDGES_OF_MESH) {
+ if (!BM_elem_flag_test(eed, BM_ELEM_HIDDEN)) {
+ BMLoop *l = bm_edge_find_first_loop_visible_inline(eed);
+ if (l != NULL) {
+ int v1 = BM_elem_index_get(eed->l);
+ int v2 = BM_elem_index_get(eed->l->next);
+ GPU_indexbuf_add_line_verts(&elb_edge, v1, v2);
+ }
+ }
+ }
+ }
+ /* Face Loops */
+ if (ibo_verts) {
+ BMVert *eve;
+ BM_ITER_MESH (eve, &iter, bm, BM_VERTS_OF_MESH) {
+ if (!BM_elem_flag_test(eve, BM_ELEM_HIDDEN)) {
+ BMLoop *l = bm_vert_find_first_loop_visible_inline(eve);
+ if (l != NULL) {
+ int v = BM_elem_index_get(l);
+ GPU_indexbuf_add_generic_vert(&elb_vert, v);
+ }
+ }
+ }
+ }
+ loop_idx = loop_len;
+ /* Loose edges */
+ for (i = 0; i < ledge_len; ++i) {
+ if (ibo_verts) {
+ GPU_indexbuf_add_generic_vert(&elb_vert, loop_idx + 0);
+ GPU_indexbuf_add_generic_vert(&elb_vert, loop_idx + 1);
+ }
+ if (ibo_edges) {
+ GPU_indexbuf_add_line_verts(&elb_edge, loop_idx + 0, loop_idx + 1);
+ }
+ loop_idx += 2;
+ }
+ /* Loose verts */
+ if (ibo_verts) {
+ for (i = 0; i < lvert_len; ++i) {
+ GPU_indexbuf_add_generic_vert(&elb_vert, loop_idx);
+ loop_idx += 1;
+ }
+ }
+ }
+ else if (rdata->mapped.use) {
+ const MPoly *mpoly = rdata->mapped.me_cage->mpoly;
+ MVert *mvert = rdata->mapped.me_cage->mvert;
+ MEdge *medge = rdata->mapped.me_cage->medge;
+ BMesh *bm = rdata->edit_bmesh->bm;
+
+ const int *v_origindex = rdata->mapped.v_origindex;
+ const int *e_origindex = rdata->mapped.e_origindex;
+ const int *p_origindex = rdata->mapped.p_origindex;
+
+ /* Reset flag */
+ for (int edge = 0; edge < edge_len; ++edge) {
+ /* NOTE: not thread safe. */
+ medge[edge].flag &= ~ME_EDGE_TMP_TAG;
+ }
+ for (int vert = 0; vert < vert_len; ++vert) {
+ /* NOTE: not thread safe. */
+ mvert[vert].flag &= ~ME_VERT_TMP_TAG;
+ }
+
+ /* Face Loops */
+ for (int poly = 0; poly < poly_len; poly++, mpoly++) {
+ int fidx = p_origindex[poly];
+ if (fidx != ORIGINDEX_NONE) {
+ BMFace *efa = BM_face_at_index(bm, fidx);
+ if (!BM_elem_flag_test(efa, BM_ELEM_HIDDEN)) {
+ const MLoop *mloop = &rdata->mapped.me_cage->mloop[mpoly->loopstart];
+ for (i = 0; i < mpoly->totloop; ++i, ++mloop) {
+ if (ibo_verts && (v_origindex[mloop->v] != ORIGINDEX_NONE) &&
+ (mvert[mloop->v].flag & ME_VERT_TMP_TAG) == 0) {
+ mvert[mloop->v].flag |= ME_VERT_TMP_TAG;
+ GPU_indexbuf_add_generic_vert(&elb_vert, loop_idx + i);
+ }
+ if (ibo_edges && (e_origindex[mloop->e] != ORIGINDEX_NONE) &&
+ ((medge[mloop->e].flag & ME_EDGE_TMP_TAG) == 0)) {
+ medge[mloop->e].flag |= ME_EDGE_TMP_TAG;
+ int v1 = loop_idx + i;
+ int v2 = loop_idx + ((i + 1) % mpoly->totloop);
+ GPU_indexbuf_add_line_verts(&elb_edge, v1, v2);
+ }
+ }
+ }
+ }
+ loop_idx += mpoly->totloop;
+ }
+ /* Loose edges */
+ for (i = 0; i < ledge_len; ++i) {
+ int eidx = e_origindex[rdata->mapped.loose_edges[i]];
+ if (eidx != ORIGINDEX_NONE) {
+ if (ibo_verts) {
+ const MEdge *ed = &medge[rdata->mapped.loose_edges[i]];
+ if (v_origindex[ed->v1] != ORIGINDEX_NONE) {
+ GPU_indexbuf_add_generic_vert(&elb_vert, loop_idx + 0);
+ }
+ if (v_origindex[ed->v2] != ORIGINDEX_NONE) {
+ GPU_indexbuf_add_generic_vert(&elb_vert, loop_idx + 1);
+ }
+ }
+ if (ibo_edges) {
+ GPU_indexbuf_add_line_verts(&elb_edge, loop_idx + 0, loop_idx + 1);
+ }
+ }
+ loop_idx += 2;
+ }
+ /* Loose verts */
+ if (ibo_verts) {
+ for (i = 0; i < lvert_len; ++i) {
+ int vidx = v_origindex[rdata->mapped.loose_verts[i]];
+ if (vidx != ORIGINDEX_NONE) {
+ GPU_indexbuf_add_generic_vert(&elb_vert, loop_idx);
+ }
+ loop_idx += 1;
+ }
+ }
+ }
+ else {
+ const MPoly *mpoly = rdata->mpoly;
+
+ /* Face Loops */
+ for (int poly = 0; poly < poly_len; poly++, mpoly++) {
+ if ((mpoly->flag & ME_HIDE) == 0) {
+ for (i = 0; i < mpoly->totloop; ++i) {
+ if (ibo_verts) {
+ GPU_indexbuf_add_generic_vert(&elb_vert, loop_idx + i);
+ }
+ if (ibo_edges) {
+ int v1 = loop_idx + i;
+ int v2 = loop_idx + ((i + 1) % mpoly->totloop);
+ GPU_indexbuf_add_line_verts(&elb_edge, v1, v2);
+ }
+ }
+ }
+ loop_idx += mpoly->totloop;
+ }
+ /* TODO(fclem): Until we find a way to detect
+ * loose verts easily outside of edit mode, this
+ * will remain disabled. */
#if 0
- /* Loose edges */
- for (int e = 0; e < edge_len; e++, medge++) {
- if (medge->flag & ME_LOOSEEDGE) {
- int eidx = e_origindex[e];
- if (eidx != ORIGINDEX_NONE) {
- if ((medge->flag & ME_HIDE) == 0) {
- for (int j = 0; j < 2; ++j) {
- if (ibo_verts) {
- GPU_indexbuf_add_generic_vert(&elb_vert, loop_idx + j);
- }
- if (ibo_edges) {
- GPU_indexbuf_add_generic_vert(&elb_edge, loop_idx + j);
- }
- }
- }
- }
- loop_idx += 2;
- }
- }
- /* Loose verts */
- for (int v = 0; v < vert_len; v++, mvert++) {
- int vidx = v_origindex[v];
- if (vidx != ORIGINDEX_NONE) {
- if ((mvert->flag & ME_HIDE) == 0) {
- if (ibo_verts) {
- GPU_indexbuf_add_generic_vert(&elb_vert, loop_idx);
- }
- if (ibo_edges) {
- GPU_indexbuf_add_generic_vert(&elb_edge, loop_idx);
- }
- }
- loop_idx += 1;
- }
- }
+ /* Loose edges */
+ for (int e = 0; e < edge_len; e++, medge++) {
+ if (medge->flag & ME_LOOSEEDGE) {
+ int eidx = e_origindex[e];
+ if (eidx != ORIGINDEX_NONE) {
+ if ((medge->flag & ME_HIDE) == 0) {
+ for (int j = 0; j < 2; ++j) {
+ if (ibo_verts) {
+ GPU_indexbuf_add_generic_vert(&elb_vert, loop_idx + j);
+ }
+ if (ibo_edges) {
+ GPU_indexbuf_add_generic_vert(&elb_edge, loop_idx + j);
+ }
+ }
+ }
+ }
+ loop_idx += 2;
+ }
+ }
+ /* Loose verts */
+ for (int v = 0; v < vert_len; v++, mvert++) {
+ int vidx = v_origindex[v];
+ if (vidx != ORIGINDEX_NONE) {
+ if ((mvert->flag & ME_HIDE) == 0) {
+ if (ibo_verts) {
+ GPU_indexbuf_add_generic_vert(&elb_vert, loop_idx);
+ }
+ if (ibo_edges) {
+ GPU_indexbuf_add_generic_vert(&elb_edge, loop_idx);
+ }
+ }
+ loop_idx += 1;
+ }
+ }
#endif
- }
-
- if (ibo_verts) {
- GPU_indexbuf_build_in_place(&elb_vert, ibo_verts);
- }
- if (ibo_edges) {
- GPU_indexbuf_build_in_place(&elb_edge, ibo_edges);
- }
+ }
+
+ if (ibo_verts) {
+ GPU_indexbuf_build_in_place(&elb_vert, ibo_verts);
+ }
+ if (ibo_edges) {
+ GPU_indexbuf_build_in_place(&elb_edge, ibo_edges);
+ }
}
static void mesh_create_edit_loops_tris(MeshRenderData *rdata, GPUIndexBuf *ibo)
{
- const int loop_len = mesh_render_data_loops_len_get_maybe_mapped(rdata);
- const int tri_len = mesh_render_data_looptri_len_get_maybe_mapped(rdata);
-
- GPUIndexBufBuilder elb;
- /* TODO alloc minmum necessary. */
- GPU_indexbuf_init(&elb, GPU_PRIM_TRIS, tri_len, loop_len * 3);
-
- if (rdata->edit_bmesh && (rdata->mapped.use == false)) {
- for (int i = 0; i < tri_len; i++) {
- const BMLoop **bm_looptri = (const BMLoop **)rdata->edit_bmesh->looptris[i];
- const BMFace *bm_face = bm_looptri[0]->f;
- /* use_hide always for edit-mode */
- if (BM_elem_flag_test(bm_face, BM_ELEM_HIDDEN)) {
- continue;
- }
- GPU_indexbuf_add_tri_verts(&elb, BM_elem_index_get(bm_looptri[0]),
- BM_elem_index_get(bm_looptri[1]),
- BM_elem_index_get(bm_looptri[2]));
- }
- }
- else if (rdata->mapped.use == true) {
- BMesh *bm = rdata->edit_bmesh->bm;
- Mesh *me_cage = rdata->mapped.me_cage;
-
- const MLoopTri *mlooptri = BKE_mesh_runtime_looptri_ensure(me_cage);
- for (int i = 0; i < tri_len; i++) {
- const MLoopTri *mlt = &mlooptri[i];
- const int p_orig = rdata->mapped.p_origindex[mlt->poly];
- if (p_orig != ORIGINDEX_NONE) {
- /* Assume 'use_hide' */
- BMFace *efa = BM_face_at_index(bm, p_orig);
- if (!BM_elem_flag_test(efa, BM_ELEM_HIDDEN)) {
- GPU_indexbuf_add_tri_verts(&elb, mlt->tri[0], mlt->tri[1], mlt->tri[2]);
- }
- }
- }
- }
- else {
- const MLoopTri *mlt = rdata->mlooptri;
- for (int i = 0; i < tri_len; i++, mlt++) {
- const MPoly *mpoly = &rdata->mpoly[mlt->poly];
- /* Assume 'use_hide' */
- if ((mpoly->flag & ME_HIDE) == 0) {
- GPU_indexbuf_add_tri_verts(&elb, mlt->tri[0], mlt->tri[1], mlt->tri[2]);
- }
- }
- }
-
- GPU_indexbuf_build_in_place(&elb, ibo);
+ const int loop_len = mesh_render_data_loops_len_get_maybe_mapped(rdata);
+ const int tri_len = mesh_render_data_looptri_len_get_maybe_mapped(rdata);
+
+ GPUIndexBufBuilder elb;
+  /* TODO alloc minimum necessary. */
+ GPU_indexbuf_init(&elb, GPU_PRIM_TRIS, tri_len, loop_len * 3);
+
+ if (rdata->edit_bmesh && (rdata->mapped.use == false)) {
+ for (int i = 0; i < tri_len; i++) {
+ const BMLoop **bm_looptri = (const BMLoop **)rdata->edit_bmesh->looptris[i];
+ const BMFace *bm_face = bm_looptri[0]->f;
+ /* use_hide always for edit-mode */
+ if (BM_elem_flag_test(bm_face, BM_ELEM_HIDDEN)) {
+ continue;
+ }
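+      /* BMesh loop indices double as vertex-buffer indices here, so the triangle
+       * is emitted directly from the looptri's loop indices. */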
+ GPU_indexbuf_add_tri_verts(&elb,
+ BM_elem_index_get(bm_looptri[0]),
+ BM_elem_index_get(bm_looptri[1]),
+ BM_elem_index_get(bm_looptri[2]));
+ }
+ }
+ else if (rdata->mapped.use == true) {
+ BMesh *bm = rdata->edit_bmesh->bm;
+ Mesh *me_cage = rdata->mapped.me_cage;
+
+ const MLoopTri *mlooptri = BKE_mesh_runtime_looptri_ensure(me_cage);
+ for (int i = 0; i < tri_len; i++) {
+ const MLoopTri *mlt = &mlooptri[i];
+ const int p_orig = rdata->mapped.p_origindex[mlt->poly];
+ if (p_orig != ORIGINDEX_NONE) {
+ /* Assume 'use_hide' */
+ BMFace *efa = BM_face_at_index(bm, p_orig);
+ if (!BM_elem_flag_test(efa, BM_ELEM_HIDDEN)) {
+ GPU_indexbuf_add_tri_verts(&elb, mlt->tri[0], mlt->tri[1], mlt->tri[2]);
+ }
+ }
+ }
+ }
+ else {
+ const MLoopTri *mlt = rdata->mlooptri;
+ for (int i = 0; i < tri_len; i++, mlt++) {
+ const MPoly *mpoly = &rdata->mpoly[mlt->poly];
+ /* Assume 'use_hide' */
+ if ((mpoly->flag & ME_HIDE) == 0) {
+ GPU_indexbuf_add_tri_verts(&elb, mlt->tri[0], mlt->tri[1], mlt->tri[2]);
+ }
+ }
+ }
+
+ GPU_indexbuf_build_in_place(&elb, ibo);
}
/** \} */
-
/* ---------------------------------------------------------------------- */
/** \name Public API
* \{ */
static void texpaint_request_active_uv(MeshBatchCache *cache, Mesh *me)
{
- DRW_MeshCDMask cd_needed;
- mesh_cd_layers_type_clear(&cd_needed);
- mesh_cd_calc_active_uv_layer(me, &cd_needed);
+ DRW_MeshCDMask cd_needed;
+ mesh_cd_layers_type_clear(&cd_needed);
+ mesh_cd_calc_active_uv_layer(me, &cd_needed);
- BLI_assert(cd_needed.uv != 0 && "No uv layer available in texpaint, but batches requested anyway!");
+ BLI_assert(cd_needed.uv != 0 &&
+ "No uv layer available in texpaint, but batches requested anyway!");
- mesh_cd_calc_active_mask_uv_layer(me, &cd_needed);
- mesh_cd_layers_type_merge(&cache->cd_needed, cd_needed);
+ mesh_cd_calc_active_mask_uv_layer(me, &cd_needed);
+ mesh_cd_layers_type_merge(&cache->cd_needed, cd_needed);
}
static void texpaint_request_active_vcol(MeshBatchCache *cache, Mesh *me)
{
- DRW_MeshCDMask cd_needed;
- mesh_cd_layers_type_clear(&cd_needed);
- mesh_cd_calc_active_vcol_layer(me, &cd_needed);
+ DRW_MeshCDMask cd_needed;
+ mesh_cd_layers_type_clear(&cd_needed);
+ mesh_cd_calc_active_vcol_layer(me, &cd_needed);
- BLI_assert(cd_needed.vcol != 0 && "No vcol layer available in vertpaint, but batches requested anyway!");
+ BLI_assert(cd_needed.vcol != 0 &&
+ "No vcol layer available in vertpaint, but batches requested anyway!");
- mesh_cd_layers_type_merge(&cache->cd_needed, cd_needed);
+ mesh_cd_layers_type_merge(&cache->cd_needed, cd_needed);
}
GPUBatch *DRW_mesh_batch_cache_get_all_verts(Mesh *me)
{
- MeshBatchCache *cache = mesh_batch_cache_get(me);
- return DRW_batch_request(&cache->batch.all_verts);
+ MeshBatchCache *cache = mesh_batch_cache_get(me);
+ return DRW_batch_request(&cache->batch.all_verts);
}
GPUBatch *DRW_mesh_batch_cache_get_all_edges(Mesh *me)
{
- MeshBatchCache *cache = mesh_batch_cache_get(me);
- return DRW_batch_request(&cache->batch.all_edges);
+ MeshBatchCache *cache = mesh_batch_cache_get(me);
+ return DRW_batch_request(&cache->batch.all_edges);
}
GPUBatch *DRW_mesh_batch_cache_get_surface(Mesh *me)
{
- MeshBatchCache *cache = mesh_batch_cache_get(me);
- return DRW_batch_request(&cache->batch.surface);
+ MeshBatchCache *cache = mesh_batch_cache_get(me);
+ return DRW_batch_request(&cache->batch.surface);
}
GPUBatch *DRW_mesh_batch_cache_get_loose_edges(Mesh *me)
{
- MeshBatchCache *cache = mesh_batch_cache_get(me);
- return DRW_batch_request(&cache->batch.loose_edges);
+ MeshBatchCache *cache = mesh_batch_cache_get(me);
+ return DRW_batch_request(&cache->batch.loose_edges);
}
GPUBatch *DRW_mesh_batch_cache_get_surface_weights(Mesh *me)
{
- MeshBatchCache *cache = mesh_batch_cache_get(me);
- return DRW_batch_request(&cache->batch.surface_weights);
+ MeshBatchCache *cache = mesh_batch_cache_get(me);
+ return DRW_batch_request(&cache->batch.surface_weights);
}
GPUBatch *DRW_mesh_batch_cache_get_edge_detection(Mesh *me, bool *r_is_manifold)
{
- MeshBatchCache *cache = mesh_batch_cache_get(me);
- /* Even if is_manifold is not correct (not updated),
- * the default (not manifold) is just the worst case. */
- if (r_is_manifold) {
- *r_is_manifold = cache->is_manifold;
- }
- return DRW_batch_request(&cache->batch.edge_detection);
+ MeshBatchCache *cache = mesh_batch_cache_get(me);
+ /* Even if is_manifold is not correct (not updated),
+ * the default (not manifold) is just the worst case. */
+ if (r_is_manifold) {
+ *r_is_manifold = cache->is_manifold;
+ }
+ return DRW_batch_request(&cache->batch.edge_detection);
}
GPUBatch *DRW_mesh_batch_cache_get_wireframes_face(Mesh *me)
{
- MeshBatchCache *cache = mesh_batch_cache_get(me);
- return DRW_batch_request(&cache->batch.wire_edges);
+ MeshBatchCache *cache = mesh_batch_cache_get(me);
+ return DRW_batch_request(&cache->batch.wire_edges);
}
-GPUBatch **DRW_mesh_batch_cache_get_surface_shaded(
- Mesh *me, struct GPUMaterial **gpumat_array, uint gpumat_array_len,
- char **auto_layer_names, int **auto_layer_is_srgb, int *auto_layer_count)
+GPUBatch **DRW_mesh_batch_cache_get_surface_shaded(Mesh *me,
+ struct GPUMaterial **gpumat_array,
+ uint gpumat_array_len,
+ char **auto_layer_names,
+ int **auto_layer_is_srgb,
+ int *auto_layer_count)
{
- MeshBatchCache *cache = mesh_batch_cache_get(me);
- DRW_MeshCDMask cd_needed = mesh_cd_calc_used_gpu_layers(me, gpumat_array, gpumat_array_len);
-
- BLI_assert(gpumat_array_len == cache->mat_len);
-
- bool cd_overlap = mesh_cd_layers_type_overlap(cache->cd_used, cd_needed);
- if (!cd_overlap) {
- mesh_cd_layers_type_merge(&cache->cd_needed, cd_needed);
-
- mesh_cd_extract_auto_layers_names_and_srgb(me,
- cache->cd_needed,
- &cache->auto_layer_names,
- &cache->auto_layer_is_srgb,
- &cache->auto_layer_len);
- }
- if (auto_layer_names) {
- *auto_layer_names = cache->auto_layer_names;
- *auto_layer_is_srgb = cache->auto_layer_is_srgb;
- *auto_layer_count = cache->auto_layer_len;
- }
- for (int i = 0; i < cache->mat_len; ++i) {
- DRW_batch_request(&cache->surf_per_mat[i]);
- }
- return cache->surf_per_mat;
+ MeshBatchCache *cache = mesh_batch_cache_get(me);
+ DRW_MeshCDMask cd_needed = mesh_cd_calc_used_gpu_layers(me, gpumat_array, gpumat_array_len);
+
+ BLI_assert(gpumat_array_len == cache->mat_len);
+
+ bool cd_overlap = mesh_cd_layers_type_overlap(cache->cd_used, cd_needed);
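+  /* When the cached layers do not cover the request, merge the request into
+   * 'cd_needed' so the buffers are rebuilt with the missing layers. */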
+ if (!cd_overlap) {
+ mesh_cd_layers_type_merge(&cache->cd_needed, cd_needed);
+
+ mesh_cd_extract_auto_layers_names_and_srgb(me,
+ cache->cd_needed,
+ &cache->auto_layer_names,
+ &cache->auto_layer_is_srgb,
+ &cache->auto_layer_len);
+ }
+ if (auto_layer_names) {
+ *auto_layer_names = cache->auto_layer_names;
+ *auto_layer_is_srgb = cache->auto_layer_is_srgb;
+ *auto_layer_count = cache->auto_layer_len;
+ }
+ for (int i = 0; i < cache->mat_len; ++i) {
+ DRW_batch_request(&cache->surf_per_mat[i]);
+ }
+ return cache->surf_per_mat;
}
GPUBatch **DRW_mesh_batch_cache_get_surface_texpaint(Mesh *me)
{
- MeshBatchCache *cache = mesh_batch_cache_get(me);
- texpaint_request_active_uv(cache, me);
- for (int i = 0; i < cache->mat_len; ++i) {
- DRW_batch_request(&cache->surf_per_mat[i]);
- }
- return cache->surf_per_mat;
+ MeshBatchCache *cache = mesh_batch_cache_get(me);
+ texpaint_request_active_uv(cache, me);
+ for (int i = 0; i < cache->mat_len; ++i) {
+ DRW_batch_request(&cache->surf_per_mat[i]);
+ }
+ return cache->surf_per_mat;
}
GPUBatch *DRW_mesh_batch_cache_get_surface_texpaint_single(Mesh *me)
{
- MeshBatchCache *cache = mesh_batch_cache_get(me);
- texpaint_request_active_uv(cache, me);
- return DRW_batch_request(&cache->batch.surface);
+ MeshBatchCache *cache = mesh_batch_cache_get(me);
+ texpaint_request_active_uv(cache, me);
+ return DRW_batch_request(&cache->batch.surface);
}
GPUBatch *DRW_mesh_batch_cache_get_surface_vertpaint(Mesh *me)
{
- MeshBatchCache *cache = mesh_batch_cache_get(me);
- texpaint_request_active_vcol(cache, me);
- return DRW_batch_request(&cache->batch.surface);
+ MeshBatchCache *cache = mesh_batch_cache_get(me);
+ texpaint_request_active_vcol(cache, me);
+ return DRW_batch_request(&cache->batch.surface);
}
/** \} */
@@ -4144,32 +4250,32 @@ GPUBatch *DRW_mesh_batch_cache_get_surface_vertpaint(Mesh *me)
GPUBatch *DRW_mesh_batch_cache_get_edit_triangles(Mesh *me)
{
- MeshBatchCache *cache = mesh_batch_cache_get(me);
- return DRW_batch_request(&cache->batch.edit_triangles);
+ MeshBatchCache *cache = mesh_batch_cache_get(me);
+ return DRW_batch_request(&cache->batch.edit_triangles);
}
GPUBatch *DRW_mesh_batch_cache_get_edit_edges(Mesh *me)
{
- MeshBatchCache *cache = mesh_batch_cache_get(me);
- return DRW_batch_request(&cache->batch.edit_edges);
+ MeshBatchCache *cache = mesh_batch_cache_get(me);
+ return DRW_batch_request(&cache->batch.edit_edges);
}
GPUBatch *DRW_mesh_batch_cache_get_edit_vertices(Mesh *me)
{
- MeshBatchCache *cache = mesh_batch_cache_get(me);
- return DRW_batch_request(&cache->batch.edit_vertices);
+ MeshBatchCache *cache = mesh_batch_cache_get(me);
+ return DRW_batch_request(&cache->batch.edit_vertices);
}
GPUBatch *DRW_mesh_batch_cache_get_edit_lnors(Mesh *me)
{
- MeshBatchCache *cache = mesh_batch_cache_get(me);
- return DRW_batch_request(&cache->batch.edit_lnor);
+ MeshBatchCache *cache = mesh_batch_cache_get(me);
+ return DRW_batch_request(&cache->batch.edit_lnor);
}
GPUBatch *DRW_mesh_batch_cache_get_edit_facedots(Mesh *me)
{
- MeshBatchCache *cache = mesh_batch_cache_get(me);
- return DRW_batch_request(&cache->batch.edit_facedots);
+ MeshBatchCache *cache = mesh_batch_cache_get(me);
+ return DRW_batch_request(&cache->batch.edit_facedots);
}
/** \} */
@@ -4180,26 +4286,26 @@ GPUBatch *DRW_mesh_batch_cache_get_edit_facedots(Mesh *me)
GPUBatch *DRW_mesh_batch_cache_get_triangles_with_select_id(Mesh *me)
{
- MeshBatchCache *cache = mesh_batch_cache_get(me);
- return DRW_batch_request(&cache->batch.edit_selection_faces);
+ MeshBatchCache *cache = mesh_batch_cache_get(me);
+ return DRW_batch_request(&cache->batch.edit_selection_faces);
}
GPUBatch *DRW_mesh_batch_cache_get_facedots_with_select_id(Mesh *me)
{
- MeshBatchCache *cache = mesh_batch_cache_get(me);
- return DRW_batch_request(&cache->batch.edit_selection_facedots);
+ MeshBatchCache *cache = mesh_batch_cache_get(me);
+ return DRW_batch_request(&cache->batch.edit_selection_facedots);
}
GPUBatch *DRW_mesh_batch_cache_get_edges_with_select_id(Mesh *me)
{
- MeshBatchCache *cache = mesh_batch_cache_get(me);
- return DRW_batch_request(&cache->batch.edit_selection_edges);
+ MeshBatchCache *cache = mesh_batch_cache_get(me);
+ return DRW_batch_request(&cache->batch.edit_selection_edges);
}
GPUBatch *DRW_mesh_batch_cache_get_verts_with_select_id(Mesh *me)
{
- MeshBatchCache *cache = mesh_batch_cache_get(me);
- return DRW_batch_request(&cache->batch.edit_selection_verts);
+ MeshBatchCache *cache = mesh_batch_cache_get(me);
+ return DRW_batch_request(&cache->batch.edit_selection_verts);
}
/** \} */
@@ -4210,51 +4316,51 @@ GPUBatch *DRW_mesh_batch_cache_get_verts_with_select_id(Mesh *me)
GPUBatch *DRW_mesh_batch_cache_get_edituv_faces_strech_area(Mesh *me)
{
- MeshBatchCache *cache = mesh_batch_cache_get(me);
- return DRW_batch_request(&cache->batch.edituv_faces_strech_area);
+ MeshBatchCache *cache = mesh_batch_cache_get(me);
+ return DRW_batch_request(&cache->batch.edituv_faces_strech_area);
}
GPUBatch *DRW_mesh_batch_cache_get_edituv_faces_strech_angle(Mesh *me)
{
- MeshBatchCache *cache = mesh_batch_cache_get(me);
- return DRW_batch_request(&cache->batch.edituv_faces_strech_angle);
+ MeshBatchCache *cache = mesh_batch_cache_get(me);
+ return DRW_batch_request(&cache->batch.edituv_faces_strech_angle);
}
GPUBatch *DRW_mesh_batch_cache_get_edituv_faces(Mesh *me)
{
- MeshBatchCache *cache = mesh_batch_cache_get(me);
- return DRW_batch_request(&cache->batch.edituv_faces);
+ MeshBatchCache *cache = mesh_batch_cache_get(me);
+ return DRW_batch_request(&cache->batch.edituv_faces);
}
GPUBatch *DRW_mesh_batch_cache_get_edituv_edges(Mesh *me)
{
- MeshBatchCache *cache = mesh_batch_cache_get(me);
- return DRW_batch_request(&cache->batch.edituv_edges);
+ MeshBatchCache *cache = mesh_batch_cache_get(me);
+ return DRW_batch_request(&cache->batch.edituv_edges);
}
GPUBatch *DRW_mesh_batch_cache_get_edituv_verts(Mesh *me)
{
- MeshBatchCache *cache = mesh_batch_cache_get(me);
- return DRW_batch_request(&cache->batch.edituv_verts);
+ MeshBatchCache *cache = mesh_batch_cache_get(me);
+ return DRW_batch_request(&cache->batch.edituv_verts);
}
GPUBatch *DRW_mesh_batch_cache_get_edituv_facedots(Mesh *me)
{
- MeshBatchCache *cache = mesh_batch_cache_get(me);
- return DRW_batch_request(&cache->batch.edituv_facedots);
+ MeshBatchCache *cache = mesh_batch_cache_get(me);
+ return DRW_batch_request(&cache->batch.edituv_facedots);
}
GPUBatch *DRW_mesh_batch_cache_get_uv_edges(Mesh *me)
{
- MeshBatchCache *cache = mesh_batch_cache_get(me);
- texpaint_request_active_uv(cache, me);
- return DRW_batch_request(&cache->batch.wire_loops_uvs);
+ MeshBatchCache *cache = mesh_batch_cache_get(me);
+ texpaint_request_active_uv(cache, me);
+ return DRW_batch_request(&cache->batch.wire_loops_uvs);
}
GPUBatch *DRW_mesh_batch_cache_get_surface_edges(Mesh *me)
{
- MeshBatchCache *cache = mesh_batch_cache_get(me);
- return DRW_batch_request(&cache->batch.wire_loops);
+ MeshBatchCache *cache = mesh_batch_cache_get(me);
+ return DRW_batch_request(&cache->batch.wire_loops);
}
/**
@@ -4263,759 +4369,818 @@ GPUBatch *DRW_mesh_batch_cache_get_surface_edges(Mesh *me)
void DRW_mesh_cache_sculpt_coords_ensure(Mesh *UNUSED(me))
{
#if 0 /* Unused for now */
- if (me->runtime.batch_cache) {
- MeshBatchCache *cache = mesh_batch_cache_get(me);
- if (cache && cache->pos_with_normals && cache->is_sculpt_points_tag) {
- /* XXX Force update of all the batches that contains the pos_with_normals buffer.
- * TODO(fclem): Ideally, Gawain should provide a way to update a buffer without destroying it. */
- mesh_batch_cache_clear_selective(me, cache->pos_with_normals);
- GPU_VERTBUF_DISCARD_SAFE(cache->pos_with_normals);
- }
- cache->is_sculpt_points_tag = false;
- }
+ if (me->runtime.batch_cache) {
+ MeshBatchCache *cache = mesh_batch_cache_get(me);
+ if (cache && cache->pos_with_normals && cache->is_sculpt_points_tag) {
+      /* XXX Force update of all the batches that contain the pos_with_normals buffer.
+ * TODO(fclem): Ideally, Gawain should provide a way to update a buffer without destroying it. */
+ mesh_batch_cache_clear_selective(me, cache->pos_with_normals);
+ GPU_VERTBUF_DISCARD_SAFE(cache->pos_with_normals);
+ }
+ cache->is_sculpt_points_tag = false;
+ }
#endif
}
/* Compute 3D & 2D areas and their sum. */
-BLI_INLINE void edit_uv_preprocess_stretch_area(
- BMFace *efa, const int cd_loop_uv_offset, uint fidx,
- float *totarea, float *totuvarea, float (*faces_areas)[2])
+BLI_INLINE void edit_uv_preprocess_stretch_area(BMFace *efa,
+ const int cd_loop_uv_offset,
+ uint fidx,
+ float *totarea,
+ float *totuvarea,
+ float (*faces_areas)[2])
{
- faces_areas[fidx][0] = BM_face_calc_area(efa);
- faces_areas[fidx][1] = BM_face_calc_area_uv(efa, cd_loop_uv_offset);
+ faces_areas[fidx][0] = BM_face_calc_area(efa);
+ faces_areas[fidx][1] = BM_face_calc_area_uv(efa, cd_loop_uv_offset);
- *totarea += faces_areas[fidx][0];
- *totuvarea += faces_areas[fidx][1];
+ *totarea += faces_areas[fidx][0];
+ *totuvarea += faces_areas[fidx][1];
}
BLI_INLINE float edit_uv_get_stretch_area(float area, float uvarea)
{
- if (area < FLT_EPSILON || uvarea < FLT_EPSILON) {
- return 1.0f;
- }
- else if (area > uvarea) {
- return 1.0f - (uvarea / area);
- }
- else {
- return 1.0f - (area / uvarea);
- }
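+  /* 0.0 means the 3D and UV areas match (no stretch); the value approaches 1.0 as
+   * they diverge. Degenerate faces are treated as fully stretched. */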
+ if (area < FLT_EPSILON || uvarea < FLT_EPSILON) {
+ return 1.0f;
+ }
+ else if (area > uvarea) {
+ return 1.0f - (uvarea / area);
+ }
+ else {
+ return 1.0f - (area / uvarea);
+ }
}
/* Compute face's normalized contour vectors. */
-BLI_INLINE void edit_uv_preprocess_stretch_angle(
- float (*auv)[2], float (*av)[3], const int cd_loop_uv_offset, BMFace *efa)
+BLI_INLINE void edit_uv_preprocess_stretch_angle(float (*auv)[2],
+ float (*av)[3],
+ const int cd_loop_uv_offset,
+ BMFace *efa)
{
- BMLoop *l;
- BMIter liter;
- int i;
- BM_ITER_ELEM_INDEX(l, &liter, efa, BM_LOOPS_OF_FACE, i) {
- MLoopUV *luv = BM_ELEM_CD_GET_VOID_P(l, cd_loop_uv_offset);
- MLoopUV *luv_prev = BM_ELEM_CD_GET_VOID_P(l->prev, cd_loop_uv_offset);
-
- sub_v2_v2v2(auv[i], luv_prev->uv, luv->uv);
- normalize_v2(auv[i]);
-
- sub_v3_v3v3(av[i], l->prev->v->co, l->v->co);
- normalize_v3(av[i]);
- }
+ BMLoop *l;
+ BMIter liter;
+ int i;
+ BM_ITER_ELEM_INDEX(l, &liter, efa, BM_LOOPS_OF_FACE, i)
+ {
+ MLoopUV *luv = BM_ELEM_CD_GET_VOID_P(l, cd_loop_uv_offset);
+ MLoopUV *luv_prev = BM_ELEM_CD_GET_VOID_P(l->prev, cd_loop_uv_offset);
+
+ sub_v2_v2v2(auv[i], luv_prev->uv, luv->uv);
+ normalize_v2(auv[i]);
+
+ sub_v3_v3v3(av[i], l->prev->v->co, l->v->co);
+ normalize_v3(av[i]);
+ }
}
#if 0 /* here for reference, this is done in shader now. */
BLI_INLINE float edit_uv_get_loop_stretch_angle(
const float auv0[2], const float auv1[2], const float av0[3], const float av1[3])
{
- float uvang = angle_normalized_v2v2(auv0, auv1);
- float ang = angle_normalized_v3v3(av0, av1);
- float stretch = fabsf(uvang - ang) / (float)M_PI;
- return 1.0f - pow2f(1.0f - stretch);
+ float uvang = angle_normalized_v2v2(auv0, auv1);
+ float ang = angle_normalized_v3v3(av0, av1);
+ float stretch = fabsf(uvang - ang) / (float)M_PI;
+ return 1.0f - pow2f(1.0f - stretch);
}
#endif
static struct EditUVFormatIndex {
- uint area, angle, uv_adj, flag, fdots_uvs, fdots_flag;
+ uint area, angle, uv_adj, flag, fdots_uvs, fdots_flag;
} uv_attr_id = {0};
-static void uvedit_fill_buffer_data(
- MeshRenderData *rdata,
- GPUVertBuf *vbo_area, GPUVertBuf *vbo_angle,
- GPUVertBuf *vbo_fdots_pos, GPUVertBuf *vbo_fdots_data,
- GPUIndexBufBuilder *elb_vert,
- GPUIndexBufBuilder *elb_edge,
- GPUIndexBufBuilder *elb_face)
+static void uvedit_fill_buffer_data(MeshRenderData *rdata,
+ GPUVertBuf *vbo_area,
+ GPUVertBuf *vbo_angle,
+ GPUVertBuf *vbo_fdots_pos,
+ GPUVertBuf *vbo_fdots_data,
+ GPUIndexBufBuilder *elb_vert,
+ GPUIndexBufBuilder *elb_edge,
+ GPUIndexBufBuilder *elb_face)
{
- BMesh *bm = rdata->edit_bmesh->bm;
- BMIter iter, liter;
- BMFace *efa;
- uint vidx, fidx, fdot_idx, i;
- const int poly_len = mesh_render_data_polys_len_get_maybe_mapped(rdata);
- float (*faces_areas)[2] = NULL;
- float totarea = 0.0f, totuvarea = 0.0f;
- const int cd_loop_uv_offset = CustomData_get_offset(&bm->ldata, CD_MLOOPUV);
-
- BLI_buffer_declare_static(vec3f, vec3_buf, BLI_BUFFER_NOP, BM_DEFAULT_NGON_STACK_SIZE);
- BLI_buffer_declare_static(vec2f, vec2_buf, BLI_BUFFER_NOP, BM_DEFAULT_NGON_STACK_SIZE);
-
- if (vbo_area) {
- faces_areas = MEM_mallocN(sizeof(float) * 2 * bm->totface, "EDITUV faces areas");
- }
-
- /* Preprocess */
- fidx = 0;
- BM_ITER_MESH(efa, &iter, bm, BM_FACES_OF_MESH) {
- /* Tag hidden faces */
- BM_elem_flag_set(efa, BM_ELEM_TAG, uvedit_face_visible_nolocal_ex(rdata->toolsettings, efa));
-
- if (vbo_area && BM_elem_flag_test(efa, BM_ELEM_TAG)) {
- edit_uv_preprocess_stretch_area(efa, cd_loop_uv_offset, fidx++,
- &totarea, &totuvarea, faces_areas);
- }
- }
-
- vidx = 0;
- fidx = 0;
- fdot_idx = 0;
- if (rdata->mapped.use == false && rdata->edit_bmesh) {
- BMLoop *l;
- BM_ITER_MESH(efa, &iter, bm, BM_FACES_OF_MESH) {
- const bool face_visible = BM_elem_flag_test(efa, BM_ELEM_TAG);
- const int efa_len = efa->len;
- float fdot[2] = {0.0f, 0.0f};
- float (*av)[3], (*auv)[2];
- ushort area_stretch;
-
- /* Face preprocess */
- if (vbo_area) {
- area_stretch = edit_uv_get_stretch_area(faces_areas[fidx][0] / totarea,
- faces_areas[fidx][1] / totuvarea) * 65534.0f;
- }
- if (vbo_angle) {
- av = (float (*)[3])BLI_buffer_reinit_data(&vec3_buf, vec3f, efa_len);
- auv = (float (*)[2])BLI_buffer_reinit_data(&vec2_buf, vec2f, efa_len);
- edit_uv_preprocess_stretch_angle(auv, av, cd_loop_uv_offset, efa);
- }
-
- /* Skip hidden faces. */
- if (elb_face && face_visible) {
- for (i = 0; i < efa->len; ++i) {
- GPU_indexbuf_add_generic_vert(elb_face, vidx + i);
- GPU_indexbuf_add_generic_vert(elb_vert, vidx + i);
- GPU_indexbuf_add_line_verts(elb_edge, vidx + i, vidx + (i + 1) % efa->len);
- }
- }
-
- BM_ITER_ELEM_INDEX(l, &liter, efa, BM_LOOPS_OF_FACE, i) {
- MLoopUV *luv = BM_ELEM_CD_GET_VOID_P(l, cd_loop_uv_offset);
- if (vbo_area) {
- GPU_vertbuf_attr_set(vbo_area, uv_attr_id.area, vidx, &area_stretch);
- }
- if (vbo_angle) {
- int i_next = (i + 1) % efa_len;
- short suv[4];
- /* Send uvs to the shader and let it compute the aspect corrected angle. */
- normal_float_to_short_v2(&suv[0], auv[i]);
- normal_float_to_short_v2(&suv[2], auv[i_next]);
- GPU_vertbuf_attr_set(vbo_angle, uv_attr_id.uv_adj, vidx, suv);
- /* Compute 3D angle here */
- short angle = 32767.0f * angle_normalized_v3v3(av[i], av[i_next]) / (float)M_PI;
- GPU_vertbuf_attr_set(vbo_angle, uv_attr_id.angle, vidx, &angle);
- }
- if (vbo_fdots_pos) {
- add_v2_v2(fdot, luv->uv);
- }
- vidx++;
- }
-
- if (elb_face && face_visible) {
- GPU_indexbuf_add_generic_vert(elb_face, vidx - efa->len);
- GPU_indexbuf_add_primitive_restart(elb_face);
- }
- if (vbo_fdots_pos && face_visible) {
- mul_v2_fl(fdot, 1.0f / (float)efa->len);
- GPU_vertbuf_attr_set(vbo_fdots_pos, uv_attr_id.fdots_uvs, fdot_idx, fdot);
- }
- if (vbo_fdots_data && face_visible) {
- uchar face_flag = mesh_render_data_face_flag(rdata, efa, cd_loop_uv_offset);
- GPU_vertbuf_attr_set(vbo_fdots_data, uv_attr_id.fdots_flag, fdot_idx, &face_flag);
- }
- fdot_idx += face_visible ? 1 : 0;
- fidx++;
- }
- }
- else {
- const MPoly *mpoly = rdata->mapped.me_cage->mpoly;
- // const MEdge *medge = rdata->mapped.me_cage->medge;
- // const MVert *mvert = rdata->mapped.me_cage->mvert;
- const MLoop *mloop = rdata->mapped.me_cage->mloop;
-
- const int *v_origindex = rdata->mapped.v_origindex;
- const int *e_origindex = rdata->mapped.e_origindex;
- const int *p_origindex = rdata->mapped.p_origindex;
-
- /* Face Loops */
- for (int poly = 0; poly < poly_len; poly++, mpoly++) {
- float fdot[2] = {0.0f, 0.0f};
- const MLoop *l = &mloop[mpoly->loopstart];
- int fidx_ori = p_origindex[poly];
- efa = (fidx_ori != ORIGINDEX_NONE) ? BM_face_at_index(bm, fidx_ori) : NULL;
- const bool face_visible = efa != NULL && BM_elem_flag_test(efa, BM_ELEM_TAG);
- if (efa && vbo_fdots_data) {
- uchar face_flag = mesh_render_data_face_flag(rdata, efa, cd_loop_uv_offset);
- GPU_vertbuf_attr_set(vbo_fdots_data, uv_attr_id.fdots_flag, fdot_idx, &face_flag);
- }
- /* Skip hidden faces. */
- if (elb_face && face_visible) {
- for (i = 0; i < mpoly->totloop; ++i) {
- GPU_indexbuf_add_generic_vert(elb_face, vidx + i);
- if (e_origindex[l[i].e] != ORIGINDEX_NONE) {
- GPU_indexbuf_add_line_verts(elb_edge, vidx + i, vidx + (i + 1) % mpoly->totloop);
- }
- if (v_origindex[l[i].v] != ORIGINDEX_NONE) {
- GPU_indexbuf_add_generic_vert(elb_vert, vidx + i);
- }
- }
- GPU_indexbuf_add_generic_vert(elb_face, vidx);
- GPU_indexbuf_add_primitive_restart(elb_face);
- }
- for (i = 0; i < mpoly->totloop; i++, l++) {
- /* TODO support stretch. */
- if (vbo_fdots_pos) {
- MLoopUV *luv = &rdata->mloopuv[mpoly->loopstart + i];
- add_v2_v2(fdot, luv->uv);
- }
- vidx++;
- }
- if (vbo_fdots_pos && face_visible) {
- mul_v2_fl(fdot, 1.0f / mpoly->totloop);
- GPU_vertbuf_attr_set(vbo_fdots_pos, uv_attr_id.fdots_uvs, fdot_idx, fdot);
- }
- fidx++;
- fdot_idx += face_visible ? 1 : 0;
- }
- }
-
- if (faces_areas) {
- MEM_freeN(faces_areas);
- }
-
- BLI_buffer_free(&vec3_buf);
- BLI_buffer_free(&vec2_buf);
-
- if (fdot_idx < poly_len) {
- if (vbo_fdots_pos) {
- GPU_vertbuf_data_resize(vbo_fdots_pos, fdot_idx);
- }
- if (vbo_fdots_data) {
- GPU_vertbuf_data_resize(vbo_fdots_data, fdot_idx);
- }
- }
+ BMesh *bm = rdata->edit_bmesh->bm;
+ BMIter iter, liter;
+ BMFace *efa;
+ uint vidx, fidx, fdot_idx, i;
+ const int poly_len = mesh_render_data_polys_len_get_maybe_mapped(rdata);
+ float(*faces_areas)[2] = NULL;
+ float totarea = 0.0f, totuvarea = 0.0f;
+ const int cd_loop_uv_offset = CustomData_get_offset(&bm->ldata, CD_MLOOPUV);
+
+ BLI_buffer_declare_static(vec3f, vec3_buf, BLI_BUFFER_NOP, BM_DEFAULT_NGON_STACK_SIZE);
+ BLI_buffer_declare_static(vec2f, vec2_buf, BLI_BUFFER_NOP, BM_DEFAULT_NGON_STACK_SIZE);
+
+ if (vbo_area) {
+ faces_areas = MEM_mallocN(sizeof(float) * 2 * bm->totface, "EDITUV faces areas");
+ }
+
+ /* Preprocess */
+ fidx = 0;
+ BM_ITER_MESH (efa, &iter, bm, BM_FACES_OF_MESH) {
+ /* Tag hidden faces */
+ BM_elem_flag_set(efa, BM_ELEM_TAG, uvedit_face_visible_nolocal_ex(rdata->toolsettings, efa));
+
+ if (vbo_area && BM_elem_flag_test(efa, BM_ELEM_TAG)) {
+ edit_uv_preprocess_stretch_area(
+ efa, cd_loop_uv_offset, fidx++, &totarea, &totuvarea, faces_areas);
+ }
+ }
+
+ vidx = 0;
+ fidx = 0;
+ fdot_idx = 0;
+ if (rdata->mapped.use == false && rdata->edit_bmesh) {
+ BMLoop *l;
+ BM_ITER_MESH (efa, &iter, bm, BM_FACES_OF_MESH) {
+ const bool face_visible = BM_elem_flag_test(efa, BM_ELEM_TAG);
+ const int efa_len = efa->len;
+ float fdot[2] = {0.0f, 0.0f};
+ float(*av)[3], (*auv)[2];
+ ushort area_stretch;
+
+ /* Face preprocess */
+ if (vbo_area) {
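+        /* Scale the normalized stretch to the ushort range; the "stretch" attribute
+         * is fetched back as a unit float (GPU_FETCH_INT_TO_FLOAT_UNIT). */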
+ area_stretch = edit_uv_get_stretch_area(faces_areas[fidx][0] / totarea,
+ faces_areas[fidx][1] / totuvarea) *
+ 65534.0f;
+ }
+ if (vbo_angle) {
+ av = (float(*)[3])BLI_buffer_reinit_data(&vec3_buf, vec3f, efa_len);
+ auv = (float(*)[2])BLI_buffer_reinit_data(&vec2_buf, vec2f, efa_len);
+ edit_uv_preprocess_stretch_angle(auv, av, cd_loop_uv_offset, efa);
+ }
+
+ /* Skip hidden faces. */
+ if (elb_face && face_visible) {
+ for (i = 0; i < efa->len; ++i) {
+ GPU_indexbuf_add_generic_vert(elb_face, vidx + i);
+ GPU_indexbuf_add_generic_vert(elb_vert, vidx + i);
+ GPU_indexbuf_add_line_verts(elb_edge, vidx + i, vidx + (i + 1) % efa->len);
+ }
+ }
+
+ BM_ITER_ELEM_INDEX(l, &liter, efa, BM_LOOPS_OF_FACE, i)
+ {
+ MLoopUV *luv = BM_ELEM_CD_GET_VOID_P(l, cd_loop_uv_offset);
+ if (vbo_area) {
+ GPU_vertbuf_attr_set(vbo_area, uv_attr_id.area, vidx, &area_stretch);
+ }
+ if (vbo_angle) {
+ int i_next = (i + 1) % efa_len;
+ short suv[4];
+          /* Send UVs to the shader and let it compute the aspect-corrected angle. */
+ normal_float_to_short_v2(&suv[0], auv[i]);
+ normal_float_to_short_v2(&suv[2], auv[i_next]);
+ GPU_vertbuf_attr_set(vbo_angle, uv_attr_id.uv_adj, vidx, suv);
+ /* Compute 3D angle here */
+ short angle = 32767.0f * angle_normalized_v3v3(av[i], av[i_next]) / (float)M_PI;
+ GPU_vertbuf_attr_set(vbo_angle, uv_attr_id.angle, vidx, &angle);
+ }
+ if (vbo_fdots_pos) {
+ add_v2_v2(fdot, luv->uv);
+ }
+ vidx++;
+ }
+
+ if (elb_face && face_visible) {
+ GPU_indexbuf_add_generic_vert(elb_face, vidx - efa->len);
+ GPU_indexbuf_add_primitive_restart(elb_face);
+ }
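+      /* The face dot UV is the average of the face's loop UVs accumulated above. */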
+ if (vbo_fdots_pos && face_visible) {
+ mul_v2_fl(fdot, 1.0f / (float)efa->len);
+ GPU_vertbuf_attr_set(vbo_fdots_pos, uv_attr_id.fdots_uvs, fdot_idx, fdot);
+ }
+ if (vbo_fdots_data && face_visible) {
+ uchar face_flag = mesh_render_data_face_flag(rdata, efa, cd_loop_uv_offset);
+ GPU_vertbuf_attr_set(vbo_fdots_data, uv_attr_id.fdots_flag, fdot_idx, &face_flag);
+ }
+ fdot_idx += face_visible ? 1 : 0;
+ fidx++;
+ }
+ }
+ else {
+ const MPoly *mpoly = rdata->mapped.me_cage->mpoly;
+ // const MEdge *medge = rdata->mapped.me_cage->medge;
+ // const MVert *mvert = rdata->mapped.me_cage->mvert;
+ const MLoop *mloop = rdata->mapped.me_cage->mloop;
+
+ const int *v_origindex = rdata->mapped.v_origindex;
+ const int *e_origindex = rdata->mapped.e_origindex;
+ const int *p_origindex = rdata->mapped.p_origindex;
+
+ /* Face Loops */
+ for (int poly = 0; poly < poly_len; poly++, mpoly++) {
+ float fdot[2] = {0.0f, 0.0f};
+ const MLoop *l = &mloop[mpoly->loopstart];
+ int fidx_ori = p_origindex[poly];
+ efa = (fidx_ori != ORIGINDEX_NONE) ? BM_face_at_index(bm, fidx_ori) : NULL;
+ const bool face_visible = efa != NULL && BM_elem_flag_test(efa, BM_ELEM_TAG);
+ if (efa && vbo_fdots_data) {
+ uchar face_flag = mesh_render_data_face_flag(rdata, efa, cd_loop_uv_offset);
+ GPU_vertbuf_attr_set(vbo_fdots_data, uv_attr_id.fdots_flag, fdot_idx, &face_flag);
+ }
+ /* Skip hidden faces. */
+ if (elb_face && face_visible) {
+ for (i = 0; i < mpoly->totloop; ++i) {
+ GPU_indexbuf_add_generic_vert(elb_face, vidx + i);
+ if (e_origindex[l[i].e] != ORIGINDEX_NONE) {
+ GPU_indexbuf_add_line_verts(elb_edge, vidx + i, vidx + (i + 1) % mpoly->totloop);
+ }
+ if (v_origindex[l[i].v] != ORIGINDEX_NONE) {
+ GPU_indexbuf_add_generic_vert(elb_vert, vidx + i);
+ }
+ }
+ GPU_indexbuf_add_generic_vert(elb_face, vidx);
+ GPU_indexbuf_add_primitive_restart(elb_face);
+ }
+ for (i = 0; i < mpoly->totloop; i++, l++) {
+ /* TODO support stretch. */
+ if (vbo_fdots_pos) {
+ MLoopUV *luv = &rdata->mloopuv[mpoly->loopstart + i];
+ add_v2_v2(fdot, luv->uv);
+ }
+ vidx++;
+ }
+ if (vbo_fdots_pos && face_visible) {
+ mul_v2_fl(fdot, 1.0f / mpoly->totloop);
+ GPU_vertbuf_attr_set(vbo_fdots_pos, uv_attr_id.fdots_uvs, fdot_idx, fdot);
+ }
+ fidx++;
+ fdot_idx += face_visible ? 1 : 0;
+ }
+ }
+
+ if (faces_areas) {
+ MEM_freeN(faces_areas);
+ }
+
+ BLI_buffer_free(&vec3_buf);
+ BLI_buffer_free(&vec2_buf);
+
+ if (fdot_idx < poly_len) {
+ if (vbo_fdots_pos) {
+ GPU_vertbuf_data_resize(vbo_fdots_pos, fdot_idx);
+ }
+ if (vbo_fdots_data) {
+ GPU_vertbuf_data_resize(vbo_fdots_data, fdot_idx);
+ }
+ }
}
-static void mesh_create_uvedit_buffers(
- MeshRenderData *rdata,
- GPUVertBuf *vbo_area, GPUVertBuf *vbo_angle,
- GPUVertBuf *vbo_fdots_pos, GPUVertBuf *vbo_fdots_data,
- GPUIndexBuf *ibo_vert, GPUIndexBuf *ibo_edge, GPUIndexBuf *ibo_face)
+static void mesh_create_uvedit_buffers(MeshRenderData *rdata,
+ GPUVertBuf *vbo_area,
+ GPUVertBuf *vbo_angle,
+ GPUVertBuf *vbo_fdots_pos,
+ GPUVertBuf *vbo_fdots_data,
+ GPUIndexBuf *ibo_vert,
+ GPUIndexBuf *ibo_edge,
+ GPUIndexBuf *ibo_face)
{
- static GPUVertFormat format_area = { 0 };
- static GPUVertFormat format_angle = { 0 };
- static GPUVertFormat format_fdots_pos = { 0 };
- static GPUVertFormat format_fdots_flag = { 0 };
-
- if (format_area.attr_len == 0) {
- uv_attr_id.area = GPU_vertformat_attr_add(&format_area, "stretch", GPU_COMP_U16, 1, GPU_FETCH_INT_TO_FLOAT_UNIT);
- uv_attr_id.angle = GPU_vertformat_attr_add(&format_angle, "angle", GPU_COMP_I16, 1, GPU_FETCH_INT_TO_FLOAT_UNIT);
- uv_attr_id.uv_adj = GPU_vertformat_attr_add(&format_angle, "uv_adj", GPU_COMP_I16, 4, GPU_FETCH_INT_TO_FLOAT_UNIT);
-
- uv_attr_id.fdots_flag = GPU_vertformat_attr_add(&format_fdots_flag, "flag", GPU_COMP_U8, 1, GPU_FETCH_INT);
- uv_attr_id.fdots_uvs = GPU_vertformat_attr_add(&format_fdots_pos, "u", GPU_COMP_F32, 2, GPU_FETCH_FLOAT);
- GPU_vertformat_alias_add(&format_fdots_pos, "pos");
- }
-
- const int loop_len = mesh_render_data_loops_len_get_maybe_mapped(rdata);
- const int face_len = mesh_render_data_polys_len_get_maybe_mapped(rdata);
- const int idx_len = loop_len + face_len * 2;
-
- if (DRW_TEST_ASSIGN_VBO(vbo_area)) {
- GPU_vertbuf_init_with_format(vbo_area, &format_area);
- GPU_vertbuf_data_alloc(vbo_area, loop_len);
- }
- if (DRW_TEST_ASSIGN_VBO(vbo_angle)) {
- GPU_vertbuf_init_with_format(vbo_angle, &format_angle);
- GPU_vertbuf_data_alloc(vbo_angle, loop_len);
- }
- if (DRW_TEST_ASSIGN_VBO(vbo_fdots_pos)) {
- GPU_vertbuf_init_with_format(vbo_fdots_pos, &format_fdots_pos);
- GPU_vertbuf_data_alloc(vbo_fdots_pos, face_len);
- }
- if (DRW_TEST_ASSIGN_VBO(vbo_fdots_data)) {
- GPU_vertbuf_init_with_format(vbo_fdots_data, &format_fdots_flag);
- GPU_vertbuf_data_alloc(vbo_fdots_data, face_len);
- }
-
- GPUIndexBufBuilder elb_vert, elb_edge, elb_face;
- if (DRW_TEST_ASSIGN_IBO(ibo_vert)) {
- GPU_indexbuf_init_ex(&elb_vert, GPU_PRIM_POINTS, loop_len, loop_len, false);
- }
- if (DRW_TEST_ASSIGN_IBO(ibo_edge)) {
- GPU_indexbuf_init_ex(&elb_edge, GPU_PRIM_LINES, loop_len * 2, loop_len, false);
- }
- if (DRW_TEST_ASSIGN_IBO(ibo_face)) {
- GPU_indexbuf_init_ex(&elb_face, GPU_PRIM_TRI_FAN, idx_len, loop_len, true);
- }
-
- uvedit_fill_buffer_data(rdata,
- vbo_area, vbo_angle, vbo_fdots_pos, vbo_fdots_data,
- ibo_vert ? &elb_vert : NULL,
- ibo_edge ? &elb_edge : NULL,
- ibo_face ? &elb_face : NULL);
-
- if (ibo_vert) {
- GPU_indexbuf_build_in_place(&elb_vert, ibo_vert);
- }
-
- if (ibo_edge) {
- GPU_indexbuf_build_in_place(&elb_edge, ibo_edge);
- }
-
- if (ibo_face) {
- GPU_indexbuf_build_in_place(&elb_face, ibo_face);
- }
+ static GPUVertFormat format_area = {0};
+ static GPUVertFormat format_angle = {0};
+ static GPUVertFormat format_fdots_pos = {0};
+ static GPUVertFormat format_fdots_flag = {0};
+
+ if (format_area.attr_len == 0) {
+ uv_attr_id.area = GPU_vertformat_attr_add(
+ &format_area, "stretch", GPU_COMP_U16, 1, GPU_FETCH_INT_TO_FLOAT_UNIT);
+ uv_attr_id.angle = GPU_vertformat_attr_add(
+ &format_angle, "angle", GPU_COMP_I16, 1, GPU_FETCH_INT_TO_FLOAT_UNIT);
+ uv_attr_id.uv_adj = GPU_vertformat_attr_add(
+ &format_angle, "uv_adj", GPU_COMP_I16, 4, GPU_FETCH_INT_TO_FLOAT_UNIT);
+
+ uv_attr_id.fdots_flag = GPU_vertformat_attr_add(
+ &format_fdots_flag, "flag", GPU_COMP_U8, 1, GPU_FETCH_INT);
+ uv_attr_id.fdots_uvs = GPU_vertformat_attr_add(
+ &format_fdots_pos, "u", GPU_COMP_F32, 2, GPU_FETCH_FLOAT);
+ GPU_vertformat_alias_add(&format_fdots_pos, "pos");
+ }
+
+ const int loop_len = mesh_render_data_loops_len_get_maybe_mapped(rdata);
+ const int face_len = mesh_render_data_polys_len_get_maybe_mapped(rdata);
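+  /* Tri-fan index count: one index per loop, plus a repeated start vertex and a
+   * primitive-restart entry for each face. */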
+ const int idx_len = loop_len + face_len * 2;
+
+ if (DRW_TEST_ASSIGN_VBO(vbo_area)) {
+ GPU_vertbuf_init_with_format(vbo_area, &format_area);
+ GPU_vertbuf_data_alloc(vbo_area, loop_len);
+ }
+ if (DRW_TEST_ASSIGN_VBO(vbo_angle)) {
+ GPU_vertbuf_init_with_format(vbo_angle, &format_angle);
+ GPU_vertbuf_data_alloc(vbo_angle, loop_len);
+ }
+ if (DRW_TEST_ASSIGN_VBO(vbo_fdots_pos)) {
+ GPU_vertbuf_init_with_format(vbo_fdots_pos, &format_fdots_pos);
+ GPU_vertbuf_data_alloc(vbo_fdots_pos, face_len);
+ }
+ if (DRW_TEST_ASSIGN_VBO(vbo_fdots_data)) {
+ GPU_vertbuf_init_with_format(vbo_fdots_data, &format_fdots_flag);
+ GPU_vertbuf_data_alloc(vbo_fdots_data, face_len);
+ }
+
+ GPUIndexBufBuilder elb_vert, elb_edge, elb_face;
+ if (DRW_TEST_ASSIGN_IBO(ibo_vert)) {
+ GPU_indexbuf_init_ex(&elb_vert, GPU_PRIM_POINTS, loop_len, loop_len, false);
+ }
+ if (DRW_TEST_ASSIGN_IBO(ibo_edge)) {
+ GPU_indexbuf_init_ex(&elb_edge, GPU_PRIM_LINES, loop_len * 2, loop_len, false);
+ }
+ if (DRW_TEST_ASSIGN_IBO(ibo_face)) {
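+    /* The last argument enables primitive restart, used to separate the per-face fans. */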
+ GPU_indexbuf_init_ex(&elb_face, GPU_PRIM_TRI_FAN, idx_len, loop_len, true);
+ }
+
+ uvedit_fill_buffer_data(rdata,
+ vbo_area,
+ vbo_angle,
+ vbo_fdots_pos,
+ vbo_fdots_data,
+ ibo_vert ? &elb_vert : NULL,
+ ibo_edge ? &elb_edge : NULL,
+ ibo_face ? &elb_face : NULL);
+
+ if (ibo_vert) {
+ GPU_indexbuf_build_in_place(&elb_vert, ibo_vert);
+ }
+
+ if (ibo_edge) {
+ GPU_indexbuf_build_in_place(&elb_edge, ibo_edge);
+ }
+
+ if (ibo_face) {
+ GPU_indexbuf_build_in_place(&elb_face, ibo_face);
+ }
}
/** \} */
-
/* ---------------------------------------------------------------------- */
/** \name Grouped batch generation
* \{ */
/* Can be called for any surface type. Mesh *me is the final mesh. */
void DRW_mesh_batch_cache_create_requested(
- Object *ob, Mesh *me,
- const ToolSettings *ts, const bool is_paint_mode, const bool use_hide)
+ Object *ob, Mesh *me, const ToolSettings *ts, const bool is_paint_mode, const bool use_hide)
{
- MeshBatchCache *cache = mesh_batch_cache_get(me);
-
- /* Check vertex weights. */
- if ((cache->batch.surface_weights != 0) && (ts != NULL)) {
- struct DRW_MeshWeightState wstate;
- BLI_assert(ob->type == OB_MESH);
- drw_mesh_weight_state_extract(ob, me, ts, is_paint_mode, &wstate);
- mesh_batch_cache_check_vertex_group(cache, &wstate);
- drw_mesh_weight_state_copy(&cache->weight_state, &wstate);
- drw_mesh_weight_state_clear(&wstate);
- }
-
- /* Verify that all surface batches have needed attribute layers. */
- /* TODO(fclem): We could be a bit smarter here and only do it per material. */
- bool cd_overlap = mesh_cd_layers_type_overlap(cache->cd_used, cache->cd_needed);
- if (cd_overlap == false) {
- if ((cache->cd_used.uv & cache->cd_needed.uv) != cache->cd_needed.uv ||
- (cache->cd_used.tan & cache->cd_needed.tan) != cache->cd_needed.tan ||
- cache->cd_used.tan_orco != cache->cd_needed.tan_orco)
- {
- GPU_VERTBUF_DISCARD_SAFE(cache->ordered.loop_uv_tan);
- }
- if (cache->cd_used.orco != cache->cd_needed.orco) {
- GPU_VERTBUF_DISCARD_SAFE(cache->ordered.loop_orco);
- }
- if ((cache->cd_used.vcol & cache->cd_needed.vcol) != cache->cd_needed.vcol) {
- GPU_VERTBUF_DISCARD_SAFE(cache->ordered.loop_vcol);
- }
- /* We can't discard batches at this point as they have been
- * referenced for drawing. Just clear them in place. */
- for (int i = 0; i < cache->mat_len; ++i) {
- GPU_BATCH_CLEAR_SAFE(cache->surf_per_mat[i]);
- }
- GPU_BATCH_CLEAR_SAFE(cache->batch.surface);
-
- mesh_cd_layers_type_merge(&cache->cd_used, cache->cd_needed);
- }
- mesh_cd_layers_type_clear(&cache->cd_needed);
-
- /* Discard UV batches if sync_selection changes */
- if (ts != NULL) {
- const bool is_uvsyncsel = (ts->uv_flag & UV_SYNC_SELECTION);
- if (cache->is_uvsyncsel != is_uvsyncsel) {
- cache->is_uvsyncsel = is_uvsyncsel;
- GPU_VERTBUF_DISCARD_SAFE(cache->edit.loop_uv_data);
- GPU_VERTBUF_DISCARD_SAFE(cache->edit.loop_stretch_angle);
- GPU_VERTBUF_DISCARD_SAFE(cache->edit.loop_stretch_area);
- GPU_VERTBUF_DISCARD_SAFE(cache->edit.loop_uv);
- GPU_VERTBUF_DISCARD_SAFE(cache->edit.facedots_uv);
- GPU_INDEXBUF_DISCARD_SAFE(cache->ibo.edituv_loops_tri_fans);
- GPU_INDEXBUF_DISCARD_SAFE(cache->ibo.edituv_loops_line_strips);
- GPU_INDEXBUF_DISCARD_SAFE(cache->ibo.edituv_loops_points);
- /* We only clear the batches as they may already have been referenced. */
- GPU_BATCH_CLEAR_SAFE(cache->batch.edituv_faces_strech_area);
- GPU_BATCH_CLEAR_SAFE(cache->batch.edituv_faces_strech_angle);
- GPU_BATCH_CLEAR_SAFE(cache->batch.edituv_faces);
- GPU_BATCH_CLEAR_SAFE(cache->batch.edituv_edges);
- GPU_BATCH_CLEAR_SAFE(cache->batch.edituv_verts);
- GPU_BATCH_CLEAR_SAFE(cache->batch.edituv_facedots);
- }
- }
-
- /* Init batches and request VBOs & IBOs */
- if (DRW_batch_requested(cache->batch.surface, GPU_PRIM_TRIS)) {
- DRW_ibo_request(cache->batch.surface, &cache->ibo.loops_tris);
- DRW_vbo_request(cache->batch.surface, &cache->ordered.loop_pos_nor);
- /* For paint overlay. Active layer should have been queried. */
- if (cache->cd_used.uv != 0) {
- DRW_vbo_request(cache->batch.surface, &cache->ordered.loop_uv_tan);
- }
- if (cache->cd_used.vcol != 0) {
- DRW_vbo_request(cache->batch.surface, &cache->ordered.loop_vcol);
- }
- }
- if (DRW_batch_requested(cache->batch.all_verts, GPU_PRIM_POINTS)) {
- DRW_vbo_request(cache->batch.all_verts, &cache->ordered.pos_nor);
- }
- if (DRW_batch_requested(cache->batch.all_edges, GPU_PRIM_LINES)) {
- DRW_ibo_request(cache->batch.all_edges, &cache->ibo.edges_lines);
- DRW_vbo_request(cache->batch.all_edges, &cache->ordered.pos_nor);
- }
- if (DRW_batch_requested(cache->batch.loose_edges, GPU_PRIM_LINES)) {
- DRW_ibo_request(cache->batch.loose_edges, &cache->ibo.loose_edges_lines);
- DRW_vbo_request(cache->batch.loose_edges, &cache->ordered.pos_nor);
- }
- if (DRW_batch_requested(cache->batch.edge_detection, GPU_PRIM_LINES_ADJ)) {
- DRW_ibo_request(cache->batch.edge_detection, &cache->ibo.edges_adj_lines);
- DRW_vbo_request(cache->batch.edge_detection, &cache->ordered.pos_nor);
- }
- if (DRW_batch_requested(cache->batch.surface_weights, GPU_PRIM_TRIS)) {
- DRW_ibo_request(cache->batch.surface_weights, &cache->ibo.surf_tris);
- DRW_vbo_request(cache->batch.surface_weights, &cache->ordered.pos_nor);
- DRW_vbo_request(cache->batch.surface_weights, &cache->ordered.weights);
- }
- if (DRW_batch_requested(cache->batch.wire_loops, GPU_PRIM_LINE_STRIP)) {
- DRW_ibo_request(cache->batch.wire_loops, &cache->ibo.loops_line_strips);
- DRW_vbo_request(cache->batch.wire_loops, &cache->ordered.loop_pos_nor);
- }
- if (DRW_batch_requested(cache->batch.wire_edges, GPU_PRIM_LINES)) {
- DRW_ibo_request(cache->batch.wire_edges, &cache->ibo.loops_lines);
- DRW_vbo_request(cache->batch.wire_edges, &cache->ordered.loop_pos_nor);
- DRW_vbo_request(cache->batch.wire_edges, &cache->ordered.loop_edge_fac);
- }
- if (DRW_batch_requested(cache->batch.wire_loops_uvs, GPU_PRIM_LINE_STRIP)) {
- DRW_ibo_request(cache->batch.wire_loops_uvs, &cache->ibo.loops_line_strips);
- /* For paint overlay. Active layer should have been queried. */
- if (cache->cd_used.uv != 0) {
- DRW_vbo_request(cache->batch.wire_loops_uvs, &cache->ordered.loop_uv_tan);
- }
- }
-
- /* Edit Mesh */
- if (DRW_batch_requested(cache->batch.edit_triangles, GPU_PRIM_TRIS)) {
- DRW_ibo_request(cache->batch.edit_triangles, &cache->ibo.edit_loops_tris);
- DRW_vbo_request(cache->batch.edit_triangles, &cache->edit.loop_pos_nor);
- DRW_vbo_request(cache->batch.edit_triangles, &cache->edit.loop_data);
- }
- if (DRW_batch_requested(cache->batch.edit_vertices, GPU_PRIM_POINTS)) {
- DRW_ibo_request(cache->batch.edit_vertices, &cache->ibo.edit_loops_points);
- DRW_vbo_request(cache->batch.edit_vertices, &cache->edit.loop_pos_nor);
- DRW_vbo_request(cache->batch.edit_vertices, &cache->edit.loop_data);
- }
- if (DRW_batch_requested(cache->batch.edit_edges, GPU_PRIM_LINES)) {
- DRW_ibo_request(cache->batch.edit_edges, &cache->ibo.edit_loops_lines);
- DRW_vbo_request(cache->batch.edit_edges, &cache->edit.loop_pos_nor);
- DRW_vbo_request(cache->batch.edit_edges, &cache->edit.loop_data);
- }
- if (DRW_batch_requested(cache->batch.edit_lnor, GPU_PRIM_POINTS)) {
- DRW_ibo_request(cache->batch.edit_lnor, &cache->ibo.edit_loops_tris);
- DRW_vbo_request(cache->batch.edit_lnor, &cache->edit.loop_pos_nor);
- DRW_vbo_request(cache->batch.edit_lnor, &cache->edit.loop_lnor);
- }
- if (DRW_batch_requested(cache->batch.edit_facedots, GPU_PRIM_POINTS)) {
- DRW_vbo_request(cache->batch.edit_facedots, &cache->edit.facedots_pos_nor_data);
- }
-
- /* Edit UV */
- if (DRW_batch_requested(cache->batch.edituv_faces, GPU_PRIM_TRI_FAN)) {
- DRW_ibo_request(cache->batch.edituv_faces, &cache->ibo.edituv_loops_tri_fans);
- DRW_vbo_request(cache->batch.edituv_faces, &cache->edit.loop_uv);
- DRW_vbo_request(cache->batch.edituv_faces, &cache->edit.loop_uv_data);
- }
- if (DRW_batch_requested(cache->batch.edituv_faces_strech_area, GPU_PRIM_TRI_FAN)) {
- DRW_ibo_request(cache->batch.edituv_faces_strech_area, &cache->ibo.edituv_loops_tri_fans);
- DRW_vbo_request(cache->batch.edituv_faces_strech_area, &cache->edit.loop_uv);
- DRW_vbo_request(cache->batch.edituv_faces_strech_area, &cache->edit.loop_uv_data);
- DRW_vbo_request(cache->batch.edituv_faces_strech_area, &cache->edit.loop_stretch_area);
- }
- if (DRW_batch_requested(cache->batch.edituv_faces_strech_angle, GPU_PRIM_TRI_FAN)) {
- DRW_ibo_request(cache->batch.edituv_faces_strech_angle, &cache->ibo.edituv_loops_tri_fans);
- DRW_vbo_request(cache->batch.edituv_faces_strech_angle, &cache->edit.loop_uv);
- DRW_vbo_request(cache->batch.edituv_faces_strech_angle, &cache->edit.loop_uv_data);
- DRW_vbo_request(cache->batch.edituv_faces_strech_angle, &cache->edit.loop_stretch_angle);
- }
- if (DRW_batch_requested(cache->batch.edituv_edges, GPU_PRIM_LINES)) {
- DRW_ibo_request(cache->batch.edituv_edges, &cache->ibo.edituv_loops_line_strips);
- DRW_vbo_request(cache->batch.edituv_edges, &cache->edit.loop_uv);
- DRW_vbo_request(cache->batch.edituv_edges, &cache->edit.loop_uv_data);
- }
- if (DRW_batch_requested(cache->batch.edituv_verts, GPU_PRIM_POINTS)) {
- DRW_ibo_request(cache->batch.edituv_verts, &cache->ibo.edituv_loops_points);
- DRW_vbo_request(cache->batch.edituv_verts, &cache->edit.loop_uv);
- DRW_vbo_request(cache->batch.edituv_verts, &cache->edit.loop_uv_data);
- }
- if (DRW_batch_requested(cache->batch.edituv_facedots, GPU_PRIM_POINTS)) {
- DRW_vbo_request(cache->batch.edituv_facedots, &cache->edit.facedots_uv);
- DRW_vbo_request(cache->batch.edituv_facedots, &cache->edit.facedots_uv_data);
- }
-
- /* Selection */
- /* TODO reuse ordered.loop_pos_nor if possible. */
- if (DRW_batch_requested(cache->batch.edit_selection_verts, GPU_PRIM_POINTS)) {
- DRW_ibo_request(cache->batch.edit_selection_verts, &cache->ibo.edit_loops_points);
- DRW_vbo_request(cache->batch.edit_selection_verts, &cache->edit.loop_pos_nor);
- DRW_vbo_request(cache->batch.edit_selection_verts, &cache->edit.loop_vert_idx);
- }
- if (DRW_batch_requested(cache->batch.edit_selection_edges, GPU_PRIM_LINES)) {
- DRW_ibo_request(cache->batch.edit_selection_edges, &cache->ibo.edit_loops_lines);
- DRW_vbo_request(cache->batch.edit_selection_edges, &cache->edit.loop_pos_nor);
- DRW_vbo_request(cache->batch.edit_selection_edges, &cache->edit.loop_edge_idx);
- }
- if (DRW_batch_requested(cache->batch.edit_selection_faces, GPU_PRIM_TRIS)) {
- DRW_ibo_request(cache->batch.edit_selection_faces, &cache->ibo.edit_loops_tris);
- DRW_vbo_request(cache->batch.edit_selection_faces, &cache->edit.loop_pos_nor);
- DRW_vbo_request(cache->batch.edit_selection_faces, &cache->edit.loop_face_idx);
- }
- if (DRW_batch_requested(cache->batch.edit_selection_facedots, GPU_PRIM_POINTS)) {
- DRW_vbo_request(cache->batch.edit_selection_facedots, &cache->edit.facedots_pos_nor_data);
- DRW_vbo_request(cache->batch.edit_selection_facedots, &cache->edit.facedots_idx);
- }
-
- /* Per Material */
- for (int i = 0; i < cache->mat_len; ++i) {
- if (DRW_batch_requested(cache->surf_per_mat[i], GPU_PRIM_TRIS)) {
- if (cache->mat_len > 1) {
- DRW_ibo_request(cache->surf_per_mat[i], &cache->surf_per_mat_tris[i]);
- }
- else {
- DRW_ibo_request(cache->surf_per_mat[i], &cache->ibo.loops_tris);
- }
- DRW_vbo_request(cache->surf_per_mat[i], &cache->ordered.loop_pos_nor);
- if ((cache->cd_used.uv != 0) ||
- (cache->cd_used.tan != 0) ||
- (cache->cd_used.tan_orco != 0))
- {
- DRW_vbo_request(cache->surf_per_mat[i], &cache->ordered.loop_uv_tan);
- }
- if (cache->cd_used.vcol != 0) {
- DRW_vbo_request(cache->surf_per_mat[i], &cache->ordered.loop_vcol);
- }
- if (cache->cd_used.orco != 0) {
- /* OPTI : Only do that if there is modifiers that modify orcos. */
- CustomData *cd_vdata = (me->edit_mesh) ? &me->edit_mesh->bm->vdata : &me->vdata;
- if (CustomData_get_layer(cd_vdata, CD_ORCO) != NULL &&
- ob->modifiers.first != NULL)
- {
- DRW_vbo_request(cache->surf_per_mat[i], &cache->ordered.loop_orco);
- }
- else if (cache->cd_used.tan_orco == 0) {
- /* Skip orco calculation if not needed by tangent generation. */
- cache->cd_used.orco = 0;
- }
- }
- }
- }
-
- /* Generate MeshRenderData flags */
- int mr_flag = 0, mr_edit_flag = 0;
- DRW_ADD_FLAG_FROM_VBO_REQUEST(mr_flag, cache->ordered.pos_nor, MR_DATATYPE_VERT);
- DRW_ADD_FLAG_FROM_VBO_REQUEST(mr_flag, cache->ordered.weights, MR_DATATYPE_VERT | MR_DATATYPE_DVERT);
- DRW_ADD_FLAG_FROM_VBO_REQUEST(mr_flag, cache->ordered.loop_pos_nor, MR_DATATYPE_VERT | MR_DATATYPE_POLY | MR_DATATYPE_LOOP);
- DRW_ADD_FLAG_FROM_VBO_REQUEST(mr_flag, cache->ordered.loop_uv_tan, MR_DATATYPE_VERT | MR_DATATYPE_POLY | MR_DATATYPE_LOOP | MR_DATATYPE_SHADING | MR_DATATYPE_LOOPTRI);
- DRW_ADD_FLAG_FROM_VBO_REQUEST(mr_flag, cache->ordered.loop_orco, MR_DATATYPE_VERT | MR_DATATYPE_POLY | MR_DATATYPE_LOOP | MR_DATATYPE_SHADING);
- DRW_ADD_FLAG_FROM_VBO_REQUEST(mr_flag, cache->ordered.loop_vcol, MR_DATATYPE_VERT | MR_DATATYPE_POLY | MR_DATATYPE_LOOP | MR_DATATYPE_SHADING);
- DRW_ADD_FLAG_FROM_VBO_REQUEST(mr_flag, cache->ordered.loop_edge_fac, MR_DATATYPE_VERT | MR_DATATYPE_POLY | MR_DATATYPE_EDGE | MR_DATATYPE_LOOP);
- DRW_ADD_FLAG_FROM_IBO_REQUEST(mr_flag, cache->ibo.surf_tris, MR_DATATYPE_VERT | MR_DATATYPE_LOOP | MR_DATATYPE_POLY | MR_DATATYPE_LOOPTRI);
- DRW_ADD_FLAG_FROM_IBO_REQUEST(mr_flag, cache->ibo.loops_tris, MR_DATATYPE_LOOP | MR_DATATYPE_POLY | MR_DATATYPE_LOOPTRI);
- DRW_ADD_FLAG_FROM_IBO_REQUEST(mr_flag, cache->ibo.loops_lines, MR_DATATYPE_LOOP | MR_DATATYPE_EDGE | MR_DATATYPE_POLY);
- DRW_ADD_FLAG_FROM_IBO_REQUEST(mr_flag, cache->ibo.loops_line_strips, MR_DATATYPE_LOOP | MR_DATATYPE_POLY);
- DRW_ADD_FLAG_FROM_IBO_REQUEST(mr_flag, cache->ibo.edges_lines, MR_DATATYPE_VERT | MR_DATATYPE_EDGE);
- DRW_ADD_FLAG_FROM_IBO_REQUEST(mr_flag, cache->ibo.edges_adj_lines, MR_DATATYPE_VERT | MR_DATATYPE_LOOP | MR_DATATYPE_POLY | MR_DATATYPE_LOOPTRI);
- DRW_ADD_FLAG_FROM_IBO_REQUEST(mr_flag, cache->ibo.loose_edges_lines, MR_DATATYPE_VERT | MR_DATATYPE_EDGE);
- for (int i = 0; i < cache->mat_len; ++i) {
- DRW_ADD_FLAG_FROM_IBO_REQUEST(mr_flag, cache->surf_per_mat_tris[i], MR_DATATYPE_LOOP | MR_DATATYPE_POLY | MR_DATATYPE_LOOPTRI);
- }
-
- int combined_edit_flag = MR_DATATYPE_VERT | MR_DATATYPE_EDGE | MR_DATATYPE_LOOP | MR_DATATYPE_POLY |
- MR_DATATYPE_LOOSE_VERT | MR_DATATYPE_LOOSE_EDGE | MR_DATATYPE_OVERLAY;
- DRW_ADD_FLAG_FROM_VBO_REQUEST(mr_edit_flag, cache->edit.loop_pos_nor, combined_edit_flag);
- DRW_ADD_FLAG_FROM_VBO_REQUEST(mr_edit_flag, cache->edit.loop_lnor, combined_edit_flag);
- DRW_ADD_FLAG_FROM_VBO_REQUEST(mr_edit_flag, cache->edit.loop_data, combined_edit_flag);
- DRW_ADD_FLAG_FROM_VBO_REQUEST(mr_edit_flag, cache->edit.loop_uv_data, combined_edit_flag);
- DRW_ADD_FLAG_FROM_VBO_REQUEST(mr_edit_flag, cache->edit.loop_uv, combined_edit_flag | MR_DATATYPE_LOOPUV);
- DRW_ADD_FLAG_FROM_VBO_REQUEST(mr_edit_flag, cache->edit.loop_vert_idx, MR_DATATYPE_VERT | MR_DATATYPE_EDGE | MR_DATATYPE_LOOSE_VERT | MR_DATATYPE_LOOSE_EDGE | MR_DATATYPE_POLY | MR_DATATYPE_LOOP);
- DRW_ADD_FLAG_FROM_VBO_REQUEST(mr_edit_flag, cache->edit.loop_edge_idx, MR_DATATYPE_VERT | MR_DATATYPE_EDGE | MR_DATATYPE_LOOSE_VERT | MR_DATATYPE_LOOSE_EDGE | MR_DATATYPE_POLY | MR_DATATYPE_LOOP);
- DRW_ADD_FLAG_FROM_VBO_REQUEST(mr_edit_flag, cache->edit.loop_face_idx, MR_DATATYPE_VERT | MR_DATATYPE_EDGE | MR_DATATYPE_LOOSE_VERT | MR_DATATYPE_LOOSE_EDGE | MR_DATATYPE_POLY | MR_DATATYPE_LOOP);
- DRW_ADD_FLAG_FROM_VBO_REQUEST(mr_edit_flag, cache->edit.facedots_idx, MR_DATATYPE_POLY);
- DRW_ADD_FLAG_FROM_VBO_REQUEST(mr_edit_flag, cache->edit.facedots_pos_nor_data, MR_DATATYPE_VERT | MR_DATATYPE_LOOP | MR_DATATYPE_POLY | MR_DATATYPE_OVERLAY);
- DRW_ADD_FLAG_FROM_VBO_REQUEST(mr_edit_flag, cache->edit.loop_stretch_angle, combined_edit_flag);
- DRW_ADD_FLAG_FROM_VBO_REQUEST(mr_edit_flag, cache->edit.loop_stretch_area, combined_edit_flag);
- DRW_ADD_FLAG_FROM_VBO_REQUEST(mr_edit_flag, cache->edit.facedots_uv, combined_edit_flag | MR_DATATYPE_LOOPUV);
- DRW_ADD_FLAG_FROM_VBO_REQUEST(mr_edit_flag, cache->edit.facedots_uv_data, combined_edit_flag);
- DRW_ADD_FLAG_FROM_IBO_REQUEST(mr_edit_flag, cache->ibo.edituv_loops_points, combined_edit_flag);
- DRW_ADD_FLAG_FROM_IBO_REQUEST(mr_edit_flag, cache->ibo.edituv_loops_line_strips, combined_edit_flag);
- DRW_ADD_FLAG_FROM_IBO_REQUEST(mr_edit_flag, cache->ibo.edituv_loops_tri_fans, combined_edit_flag);
- /* TODO: Some of the flags here may not be needed. */
- DRW_ADD_FLAG_FROM_IBO_REQUEST(mr_edit_flag, cache->ibo.edit_loops_points, MR_DATATYPE_VERT | MR_DATATYPE_EDGE | MR_DATATYPE_LOOSE_VERT | MR_DATATYPE_LOOSE_EDGE | MR_DATATYPE_POLY | MR_DATATYPE_LOOP | MR_DATATYPE_LOOPTRI);
- DRW_ADD_FLAG_FROM_IBO_REQUEST(mr_edit_flag, cache->ibo.edit_loops_lines, MR_DATATYPE_VERT | MR_DATATYPE_EDGE | MR_DATATYPE_LOOSE_VERT | MR_DATATYPE_LOOSE_EDGE | MR_DATATYPE_POLY | MR_DATATYPE_LOOP | MR_DATATYPE_LOOPTRI);
- DRW_ADD_FLAG_FROM_IBO_REQUEST(mr_edit_flag, cache->ibo.edit_loops_tris, MR_DATATYPE_VERT | MR_DATATYPE_EDGE | MR_DATATYPE_LOOSE_VERT | MR_DATATYPE_LOOSE_EDGE | MR_DATATYPE_POLY | MR_DATATYPE_LOOP | MR_DATATYPE_LOOPTRI);
-
- Mesh *me_original = me;
- MBC_GET_FINAL_MESH(me);
-
- if (me_original == me) {
- mr_flag |= mr_edit_flag;
- }
-
- MeshRenderData *rdata = NULL;
-
- if (mr_flag != 0) {
- rdata = mesh_render_data_create_ex(me, mr_flag, &cache->cd_used, ts);
- }
-
- /* Generate VBOs */
- if (DRW_vbo_requested(cache->ordered.pos_nor)) {
- mesh_create_pos_and_nor(rdata, cache->ordered.pos_nor);
- }
- if (DRW_vbo_requested(cache->ordered.weights)) {
- mesh_create_weights(rdata, cache->ordered.weights, &cache->weight_state);
- }
- if (DRW_vbo_requested(cache->ordered.loop_pos_nor)) {
- mesh_create_loop_pos_and_nor(rdata, cache->ordered.loop_pos_nor);
- }
- if (DRW_vbo_requested(cache->ordered.loop_edge_fac)) {
- mesh_create_loop_edge_fac(rdata, cache->ordered.loop_edge_fac);
- }
- if (DRW_vbo_requested(cache->ordered.loop_uv_tan)) {
- mesh_create_loop_uv_and_tan(rdata, cache->ordered.loop_uv_tan);
- }
- if (DRW_vbo_requested(cache->ordered.loop_orco)) {
- mesh_create_loop_orco(rdata, cache->ordered.loop_orco);
- }
- if (DRW_vbo_requested(cache->ordered.loop_vcol)) {
- mesh_create_loop_vcol(rdata, cache->ordered.loop_vcol);
- }
- if (DRW_ibo_requested(cache->ibo.edges_lines)) {
- mesh_create_edges_lines(rdata, cache->ibo.edges_lines, use_hide);
- }
- if (DRW_ibo_requested(cache->ibo.edges_adj_lines)) {
- mesh_create_edges_adjacency_lines(rdata, cache->ibo.edges_adj_lines, &cache->is_manifold, use_hide);
- }
- if (DRW_ibo_requested(cache->ibo.loose_edges_lines)) {
- mesh_create_loose_edges_lines(rdata, cache->ibo.loose_edges_lines, use_hide);
- }
- if (DRW_ibo_requested(cache->ibo.surf_tris)) {
- mesh_create_surf_tris(rdata, cache->ibo.surf_tris, use_hide);
- }
- if (DRW_ibo_requested(cache->ibo.loops_lines)) {
- mesh_create_loops_lines(rdata, cache->ibo.loops_lines, use_hide);
- }
- if (DRW_ibo_requested(cache->ibo.loops_line_strips)) {
- mesh_create_loops_line_strips(rdata, cache->ibo.loops_line_strips, use_hide);
- }
- if (DRW_ibo_requested(cache->ibo.loops_tris)) {
- mesh_create_loops_tris(rdata, &cache->ibo.loops_tris, 1, use_hide);
- }
- if (DRW_ibo_requested(cache->surf_per_mat_tris[0])) {
- mesh_create_loops_tris(rdata, cache->surf_per_mat_tris, cache->mat_len, use_hide);
- }
-
- /* Use original Mesh* to have the correct edit cage. */
- if (me_original != me && mr_edit_flag != 0) {
- if (rdata) {
- mesh_render_data_free(rdata);
- }
- rdata = mesh_render_data_create_ex(me_original, mr_edit_flag, NULL, ts);
- }
-
- if (rdata && rdata->mapped.supported) {
- rdata->mapped.use = true;
- }
-
- if (DRW_vbo_requested(cache->edit.loop_pos_nor) ||
- DRW_vbo_requested(cache->edit.loop_lnor) ||
- DRW_vbo_requested(cache->edit.loop_data) ||
- DRW_vbo_requested(cache->edit.loop_vert_idx) ||
- DRW_vbo_requested(cache->edit.loop_edge_idx) ||
- DRW_vbo_requested(cache->edit.loop_face_idx))
- {
- mesh_create_edit_vertex_loops(
- rdata,
- cache->edit.loop_pos_nor,
- cache->edit.loop_lnor,
- NULL,
- cache->edit.loop_data,
- cache->edit.loop_vert_idx,
- cache->edit.loop_edge_idx,
- cache->edit.loop_face_idx);
- }
- if (DRW_vbo_requested(cache->edit.facedots_pos_nor_data)) {
- mesh_create_edit_facedots(rdata, cache->edit.facedots_pos_nor_data);
- }
- if (DRW_vbo_requested(cache->edit.facedots_idx)) {
- mesh_create_edit_facedots_select_id(rdata, cache->edit.facedots_idx);
- }
- if (DRW_ibo_requested(cache->ibo.edit_loops_points) ||
- DRW_ibo_requested(cache->ibo.edit_loops_lines))
- {
- mesh_create_edit_loops_points_lines(rdata, cache->ibo.edit_loops_points, cache->ibo.edit_loops_lines);
- }
- if (DRW_ibo_requested(cache->ibo.edit_loops_tris)) {
- mesh_create_edit_loops_tris(rdata, cache->ibo.edit_loops_tris);
- }
-
- /* UV editor */
- /**
- * TODO: The code and data structure are ready to support modified UV display
- * but the selection code for UVs needs to support it first. So for now, only
- * display the cage in all cases.
- */
- if (rdata && rdata->mapped.supported) {
- rdata->mapped.use = false;
- }
-
- if (DRW_vbo_requested(cache->edit.loop_uv_data) ||
- DRW_vbo_requested(cache->edit.loop_uv))
- {
- mesh_create_edit_vertex_loops(
- rdata,
- NULL,
- NULL,
- cache->edit.loop_uv,
- cache->edit.loop_uv_data,
- NULL,
- NULL,
- NULL);
- }
- if (DRW_vbo_requested(cache->edit.loop_stretch_angle) ||
- DRW_vbo_requested(cache->edit.loop_stretch_area) ||
- DRW_vbo_requested(cache->edit.facedots_uv) ||
- DRW_vbo_requested(cache->edit.facedots_uv_data) ||
- DRW_ibo_requested(cache->ibo.edituv_loops_points) ||
- DRW_ibo_requested(cache->ibo.edituv_loops_line_strips) ||
- DRW_ibo_requested(cache->ibo.edituv_loops_tri_fans))
- {
- mesh_create_uvedit_buffers(rdata,
- cache->edit.loop_stretch_area, cache->edit.loop_stretch_angle,
- cache->edit.facedots_uv, cache->edit.facedots_uv_data,
- cache->ibo.edituv_loops_points,
- cache->ibo.edituv_loops_line_strips,
- cache->ibo.edituv_loops_tri_fans);
- }
-
- if (rdata) {
- mesh_render_data_free(rdata);
- }
+ MeshBatchCache *cache = mesh_batch_cache_get(me);
+
+ /* Check vertex weights. */
+ if ((cache->batch.surface_weights != 0) && (ts != NULL)) {
+ struct DRW_MeshWeightState wstate;
+ BLI_assert(ob->type == OB_MESH);
+ drw_mesh_weight_state_extract(ob, me, ts, is_paint_mode, &wstate);
+ mesh_batch_cache_check_vertex_group(cache, &wstate);
+ drw_mesh_weight_state_copy(&cache->weight_state, &wstate);
+ drw_mesh_weight_state_clear(&wstate);
+ }
+
+ /* Verify that all surface batches have needed attribute layers. */
+ /* TODO(fclem): We could be a bit smarter here and only do it per material. */
+ bool cd_overlap = mesh_cd_layers_type_overlap(cache->cd_used, cache->cd_needed);
+ if (cd_overlap == false) {
+ if ((cache->cd_used.uv & cache->cd_needed.uv) != cache->cd_needed.uv ||
+ (cache->cd_used.tan & cache->cd_needed.tan) != cache->cd_needed.tan ||
+ cache->cd_used.tan_orco != cache->cd_needed.tan_orco) {
+ GPU_VERTBUF_DISCARD_SAFE(cache->ordered.loop_uv_tan);
+ }
+ if (cache->cd_used.orco != cache->cd_needed.orco) {
+ GPU_VERTBUF_DISCARD_SAFE(cache->ordered.loop_orco);
+ }
+ if ((cache->cd_used.vcol & cache->cd_needed.vcol) != cache->cd_needed.vcol) {
+ GPU_VERTBUF_DISCARD_SAFE(cache->ordered.loop_vcol);
+ }
+ /* We can't discard batches at this point as they have been
+ * referenced for drawing. Just clear them in place. */
+ for (int i = 0; i < cache->mat_len; ++i) {
+ GPU_BATCH_CLEAR_SAFE(cache->surf_per_mat[i]);
+ }
+ GPU_BATCH_CLEAR_SAFE(cache->batch.surface);
+
+ mesh_cd_layers_type_merge(&cache->cd_used, cache->cd_needed);
+ }
+ mesh_cd_layers_type_clear(&cache->cd_needed);
+
+ /* Discard UV batches if sync_selection changes */
+ if (ts != NULL) {
+ const bool is_uvsyncsel = (ts->uv_flag & UV_SYNC_SELECTION);
+ if (cache->is_uvsyncsel != is_uvsyncsel) {
+ cache->is_uvsyncsel = is_uvsyncsel;
+ GPU_VERTBUF_DISCARD_SAFE(cache->edit.loop_uv_data);
+ GPU_VERTBUF_DISCARD_SAFE(cache->edit.loop_stretch_angle);
+ GPU_VERTBUF_DISCARD_SAFE(cache->edit.loop_stretch_area);
+ GPU_VERTBUF_DISCARD_SAFE(cache->edit.loop_uv);
+ GPU_VERTBUF_DISCARD_SAFE(cache->edit.facedots_uv);
+ GPU_INDEXBUF_DISCARD_SAFE(cache->ibo.edituv_loops_tri_fans);
+ GPU_INDEXBUF_DISCARD_SAFE(cache->ibo.edituv_loops_line_strips);
+ GPU_INDEXBUF_DISCARD_SAFE(cache->ibo.edituv_loops_points);
+ /* We only clear the batches as they may already have been referenced. */
+ GPU_BATCH_CLEAR_SAFE(cache->batch.edituv_faces_strech_area);
+ GPU_BATCH_CLEAR_SAFE(cache->batch.edituv_faces_strech_angle);
+ GPU_BATCH_CLEAR_SAFE(cache->batch.edituv_faces);
+ GPU_BATCH_CLEAR_SAFE(cache->batch.edituv_edges);
+ GPU_BATCH_CLEAR_SAFE(cache->batch.edituv_verts);
+ GPU_BATCH_CLEAR_SAFE(cache->batch.edituv_facedots);
+ }
+ }
+
+ /* Init batches and request VBOs & IBOs */
+ if (DRW_batch_requested(cache->batch.surface, GPU_PRIM_TRIS)) {
+ DRW_ibo_request(cache->batch.surface, &cache->ibo.loops_tris);
+ DRW_vbo_request(cache->batch.surface, &cache->ordered.loop_pos_nor);
+ /* For paint overlay. Active layer should have been queried. */
+ if (cache->cd_used.uv != 0) {
+ DRW_vbo_request(cache->batch.surface, &cache->ordered.loop_uv_tan);
+ }
+ if (cache->cd_used.vcol != 0) {
+ DRW_vbo_request(cache->batch.surface, &cache->ordered.loop_vcol);
+ }
+ }
+ if (DRW_batch_requested(cache->batch.all_verts, GPU_PRIM_POINTS)) {
+ DRW_vbo_request(cache->batch.all_verts, &cache->ordered.pos_nor);
+ }
+ if (DRW_batch_requested(cache->batch.all_edges, GPU_PRIM_LINES)) {
+ DRW_ibo_request(cache->batch.all_edges, &cache->ibo.edges_lines);
+ DRW_vbo_request(cache->batch.all_edges, &cache->ordered.pos_nor);
+ }
+ if (DRW_batch_requested(cache->batch.loose_edges, GPU_PRIM_LINES)) {
+ DRW_ibo_request(cache->batch.loose_edges, &cache->ibo.loose_edges_lines);
+ DRW_vbo_request(cache->batch.loose_edges, &cache->ordered.pos_nor);
+ }
+ if (DRW_batch_requested(cache->batch.edge_detection, GPU_PRIM_LINES_ADJ)) {
+ DRW_ibo_request(cache->batch.edge_detection, &cache->ibo.edges_adj_lines);
+ DRW_vbo_request(cache->batch.edge_detection, &cache->ordered.pos_nor);
+ }
+ if (DRW_batch_requested(cache->batch.surface_weights, GPU_PRIM_TRIS)) {
+ DRW_ibo_request(cache->batch.surface_weights, &cache->ibo.surf_tris);
+ DRW_vbo_request(cache->batch.surface_weights, &cache->ordered.pos_nor);
+ DRW_vbo_request(cache->batch.surface_weights, &cache->ordered.weights);
+ }
+ if (DRW_batch_requested(cache->batch.wire_loops, GPU_PRIM_LINE_STRIP)) {
+ DRW_ibo_request(cache->batch.wire_loops, &cache->ibo.loops_line_strips);
+ DRW_vbo_request(cache->batch.wire_loops, &cache->ordered.loop_pos_nor);
+ }
+ if (DRW_batch_requested(cache->batch.wire_edges, GPU_PRIM_LINES)) {
+ DRW_ibo_request(cache->batch.wire_edges, &cache->ibo.loops_lines);
+ DRW_vbo_request(cache->batch.wire_edges, &cache->ordered.loop_pos_nor);
+ DRW_vbo_request(cache->batch.wire_edges, &cache->ordered.loop_edge_fac);
+ }
+ if (DRW_batch_requested(cache->batch.wire_loops_uvs, GPU_PRIM_LINE_STRIP)) {
+ DRW_ibo_request(cache->batch.wire_loops_uvs, &cache->ibo.loops_line_strips);
+ /* For paint overlay. Active layer should have been queried. */
+ if (cache->cd_used.uv != 0) {
+ DRW_vbo_request(cache->batch.wire_loops_uvs, &cache->ordered.loop_uv_tan);
+ }
+ }
+
+ /* Edit Mesh */
+ if (DRW_batch_requested(cache->batch.edit_triangles, GPU_PRIM_TRIS)) {
+ DRW_ibo_request(cache->batch.edit_triangles, &cache->ibo.edit_loops_tris);
+ DRW_vbo_request(cache->batch.edit_triangles, &cache->edit.loop_pos_nor);
+ DRW_vbo_request(cache->batch.edit_triangles, &cache->edit.loop_data);
+ }
+ if (DRW_batch_requested(cache->batch.edit_vertices, GPU_PRIM_POINTS)) {
+ DRW_ibo_request(cache->batch.edit_vertices, &cache->ibo.edit_loops_points);
+ DRW_vbo_request(cache->batch.edit_vertices, &cache->edit.loop_pos_nor);
+ DRW_vbo_request(cache->batch.edit_vertices, &cache->edit.loop_data);
+ }
+ if (DRW_batch_requested(cache->batch.edit_edges, GPU_PRIM_LINES)) {
+ DRW_ibo_request(cache->batch.edit_edges, &cache->ibo.edit_loops_lines);
+ DRW_vbo_request(cache->batch.edit_edges, &cache->edit.loop_pos_nor);
+ DRW_vbo_request(cache->batch.edit_edges, &cache->edit.loop_data);
+ }
+ if (DRW_batch_requested(cache->batch.edit_lnor, GPU_PRIM_POINTS)) {
+ DRW_ibo_request(cache->batch.edit_lnor, &cache->ibo.edit_loops_tris);
+ DRW_vbo_request(cache->batch.edit_lnor, &cache->edit.loop_pos_nor);
+ DRW_vbo_request(cache->batch.edit_lnor, &cache->edit.loop_lnor);
+ }
+ if (DRW_batch_requested(cache->batch.edit_facedots, GPU_PRIM_POINTS)) {
+ DRW_vbo_request(cache->batch.edit_facedots, &cache->edit.facedots_pos_nor_data);
+ }
+
+ /* Edit UV */
+ if (DRW_batch_requested(cache->batch.edituv_faces, GPU_PRIM_TRI_FAN)) {
+ DRW_ibo_request(cache->batch.edituv_faces, &cache->ibo.edituv_loops_tri_fans);
+ DRW_vbo_request(cache->batch.edituv_faces, &cache->edit.loop_uv);
+ DRW_vbo_request(cache->batch.edituv_faces, &cache->edit.loop_uv_data);
+ }
+ if (DRW_batch_requested(cache->batch.edituv_faces_strech_area, GPU_PRIM_TRI_FAN)) {
+ DRW_ibo_request(cache->batch.edituv_faces_strech_area, &cache->ibo.edituv_loops_tri_fans);
+ DRW_vbo_request(cache->batch.edituv_faces_strech_area, &cache->edit.loop_uv);
+ DRW_vbo_request(cache->batch.edituv_faces_strech_area, &cache->edit.loop_uv_data);
+ DRW_vbo_request(cache->batch.edituv_faces_strech_area, &cache->edit.loop_stretch_area);
+ }
+ if (DRW_batch_requested(cache->batch.edituv_faces_strech_angle, GPU_PRIM_TRI_FAN)) {
+ DRW_ibo_request(cache->batch.edituv_faces_strech_angle, &cache->ibo.edituv_loops_tri_fans);
+ DRW_vbo_request(cache->batch.edituv_faces_strech_angle, &cache->edit.loop_uv);
+ DRW_vbo_request(cache->batch.edituv_faces_strech_angle, &cache->edit.loop_uv_data);
+ DRW_vbo_request(cache->batch.edituv_faces_strech_angle, &cache->edit.loop_stretch_angle);
+ }
+ if (DRW_batch_requested(cache->batch.edituv_edges, GPU_PRIM_LINES)) {
+ DRW_ibo_request(cache->batch.edituv_edges, &cache->ibo.edituv_loops_line_strips);
+ DRW_vbo_request(cache->batch.edituv_edges, &cache->edit.loop_uv);
+ DRW_vbo_request(cache->batch.edituv_edges, &cache->edit.loop_uv_data);
+ }
+ if (DRW_batch_requested(cache->batch.edituv_verts, GPU_PRIM_POINTS)) {
+ DRW_ibo_request(cache->batch.edituv_verts, &cache->ibo.edituv_loops_points);
+ DRW_vbo_request(cache->batch.edituv_verts, &cache->edit.loop_uv);
+ DRW_vbo_request(cache->batch.edituv_verts, &cache->edit.loop_uv_data);
+ }
+ if (DRW_batch_requested(cache->batch.edituv_facedots, GPU_PRIM_POINTS)) {
+ DRW_vbo_request(cache->batch.edituv_facedots, &cache->edit.facedots_uv);
+ DRW_vbo_request(cache->batch.edituv_facedots, &cache->edit.facedots_uv_data);
+ }
+
+ /* Selection */
+ /* TODO reuse ordered.loop_pos_nor if possible. */
+ if (DRW_batch_requested(cache->batch.edit_selection_verts, GPU_PRIM_POINTS)) {
+ DRW_ibo_request(cache->batch.edit_selection_verts, &cache->ibo.edit_loops_points);
+ DRW_vbo_request(cache->batch.edit_selection_verts, &cache->edit.loop_pos_nor);
+ DRW_vbo_request(cache->batch.edit_selection_verts, &cache->edit.loop_vert_idx);
+ }
+ if (DRW_batch_requested(cache->batch.edit_selection_edges, GPU_PRIM_LINES)) {
+ DRW_ibo_request(cache->batch.edit_selection_edges, &cache->ibo.edit_loops_lines);
+ DRW_vbo_request(cache->batch.edit_selection_edges, &cache->edit.loop_pos_nor);
+ DRW_vbo_request(cache->batch.edit_selection_edges, &cache->edit.loop_edge_idx);
+ }
+ if (DRW_batch_requested(cache->batch.edit_selection_faces, GPU_PRIM_TRIS)) {
+ DRW_ibo_request(cache->batch.edit_selection_faces, &cache->ibo.edit_loops_tris);
+ DRW_vbo_request(cache->batch.edit_selection_faces, &cache->edit.loop_pos_nor);
+ DRW_vbo_request(cache->batch.edit_selection_faces, &cache->edit.loop_face_idx);
+ }
+ if (DRW_batch_requested(cache->batch.edit_selection_facedots, GPU_PRIM_POINTS)) {
+ DRW_vbo_request(cache->batch.edit_selection_facedots, &cache->edit.facedots_pos_nor_data);
+ DRW_vbo_request(cache->batch.edit_selection_facedots, &cache->edit.facedots_idx);
+ }
+
+ /* Per Material */
+ for (int i = 0; i < cache->mat_len; ++i) {
+ if (DRW_batch_requested(cache->surf_per_mat[i], GPU_PRIM_TRIS)) {
+ if (cache->mat_len > 1) {
+ DRW_ibo_request(cache->surf_per_mat[i], &cache->surf_per_mat_tris[i]);
+ }
+ else {
+ DRW_ibo_request(cache->surf_per_mat[i], &cache->ibo.loops_tris);
+ }
+ DRW_vbo_request(cache->surf_per_mat[i], &cache->ordered.loop_pos_nor);
+ if ((cache->cd_used.uv != 0) || (cache->cd_used.tan != 0) ||
+ (cache->cd_used.tan_orco != 0)) {
+ DRW_vbo_request(cache->surf_per_mat[i], &cache->ordered.loop_uv_tan);
+ }
+ if (cache->cd_used.vcol != 0) {
+ DRW_vbo_request(cache->surf_per_mat[i], &cache->ordered.loop_vcol);
+ }
+ if (cache->cd_used.orco != 0) {
+        /* OPTI: Only do this if there are modifiers that modify orcos. */
+ CustomData *cd_vdata = (me->edit_mesh) ? &me->edit_mesh->bm->vdata : &me->vdata;
+ if (CustomData_get_layer(cd_vdata, CD_ORCO) != NULL && ob->modifiers.first != NULL) {
+ DRW_vbo_request(cache->surf_per_mat[i], &cache->ordered.loop_orco);
+ }
+ else if (cache->cd_used.tan_orco == 0) {
+ /* Skip orco calculation if not needed by tangent generation. */
+ cache->cd_used.orco = 0;
+ }
+ }
+ }
+ }
+
+ /* Generate MeshRenderData flags */
+ int mr_flag = 0, mr_edit_flag = 0;
+ DRW_ADD_FLAG_FROM_VBO_REQUEST(mr_flag, cache->ordered.pos_nor, MR_DATATYPE_VERT);
+ DRW_ADD_FLAG_FROM_VBO_REQUEST(
+ mr_flag, cache->ordered.weights, MR_DATATYPE_VERT | MR_DATATYPE_DVERT);
+ DRW_ADD_FLAG_FROM_VBO_REQUEST(mr_flag,
+ cache->ordered.loop_pos_nor,
+ MR_DATATYPE_VERT | MR_DATATYPE_POLY | MR_DATATYPE_LOOP);
+ DRW_ADD_FLAG_FROM_VBO_REQUEST(mr_flag,
+ cache->ordered.loop_uv_tan,
+ MR_DATATYPE_VERT | MR_DATATYPE_POLY | MR_DATATYPE_LOOP |
+ MR_DATATYPE_SHADING | MR_DATATYPE_LOOPTRI);
+ DRW_ADD_FLAG_FROM_VBO_REQUEST(mr_flag,
+ cache->ordered.loop_orco,
+ MR_DATATYPE_VERT | MR_DATATYPE_POLY | MR_DATATYPE_LOOP |
+ MR_DATATYPE_SHADING);
+ DRW_ADD_FLAG_FROM_VBO_REQUEST(mr_flag,
+ cache->ordered.loop_vcol,
+ MR_DATATYPE_VERT | MR_DATATYPE_POLY | MR_DATATYPE_LOOP |
+ MR_DATATYPE_SHADING);
+ DRW_ADD_FLAG_FROM_VBO_REQUEST(mr_flag,
+ cache->ordered.loop_edge_fac,
+ MR_DATATYPE_VERT | MR_DATATYPE_POLY | MR_DATATYPE_EDGE |
+ MR_DATATYPE_LOOP);
+ DRW_ADD_FLAG_FROM_IBO_REQUEST(mr_flag,
+ cache->ibo.surf_tris,
+ MR_DATATYPE_VERT | MR_DATATYPE_LOOP | MR_DATATYPE_POLY |
+ MR_DATATYPE_LOOPTRI);
+ DRW_ADD_FLAG_FROM_IBO_REQUEST(
+ mr_flag, cache->ibo.loops_tris, MR_DATATYPE_LOOP | MR_DATATYPE_POLY | MR_DATATYPE_LOOPTRI);
+ DRW_ADD_FLAG_FROM_IBO_REQUEST(
+ mr_flag, cache->ibo.loops_lines, MR_DATATYPE_LOOP | MR_DATATYPE_EDGE | MR_DATATYPE_POLY);
+ DRW_ADD_FLAG_FROM_IBO_REQUEST(
+ mr_flag, cache->ibo.loops_line_strips, MR_DATATYPE_LOOP | MR_DATATYPE_POLY);
+ DRW_ADD_FLAG_FROM_IBO_REQUEST(
+ mr_flag, cache->ibo.edges_lines, MR_DATATYPE_VERT | MR_DATATYPE_EDGE);
+ DRW_ADD_FLAG_FROM_IBO_REQUEST(mr_flag,
+ cache->ibo.edges_adj_lines,
+ MR_DATATYPE_VERT | MR_DATATYPE_LOOP | MR_DATATYPE_POLY |
+ MR_DATATYPE_LOOPTRI);
+ DRW_ADD_FLAG_FROM_IBO_REQUEST(
+ mr_flag, cache->ibo.loose_edges_lines, MR_DATATYPE_VERT | MR_DATATYPE_EDGE);
+ for (int i = 0; i < cache->mat_len; ++i) {
+ DRW_ADD_FLAG_FROM_IBO_REQUEST(mr_flag,
+ cache->surf_per_mat_tris[i],
+ MR_DATATYPE_LOOP | MR_DATATYPE_POLY | MR_DATATYPE_LOOPTRI);
+ }
+
+ int combined_edit_flag = MR_DATATYPE_VERT | MR_DATATYPE_EDGE | MR_DATATYPE_LOOP |
+ MR_DATATYPE_POLY | MR_DATATYPE_LOOSE_VERT | MR_DATATYPE_LOOSE_EDGE |
+ MR_DATATYPE_OVERLAY;
+ DRW_ADD_FLAG_FROM_VBO_REQUEST(mr_edit_flag, cache->edit.loop_pos_nor, combined_edit_flag);
+ DRW_ADD_FLAG_FROM_VBO_REQUEST(mr_edit_flag, cache->edit.loop_lnor, combined_edit_flag);
+ DRW_ADD_FLAG_FROM_VBO_REQUEST(mr_edit_flag, cache->edit.loop_data, combined_edit_flag);
+ DRW_ADD_FLAG_FROM_VBO_REQUEST(mr_edit_flag, cache->edit.loop_uv_data, combined_edit_flag);
+ DRW_ADD_FLAG_FROM_VBO_REQUEST(
+ mr_edit_flag, cache->edit.loop_uv, combined_edit_flag | MR_DATATYPE_LOOPUV);
+ DRW_ADD_FLAG_FROM_VBO_REQUEST(mr_edit_flag,
+ cache->edit.loop_vert_idx,
+ MR_DATATYPE_VERT | MR_DATATYPE_EDGE | MR_DATATYPE_LOOSE_VERT |
+ MR_DATATYPE_LOOSE_EDGE | MR_DATATYPE_POLY | MR_DATATYPE_LOOP);
+ DRW_ADD_FLAG_FROM_VBO_REQUEST(mr_edit_flag,
+ cache->edit.loop_edge_idx,
+ MR_DATATYPE_VERT | MR_DATATYPE_EDGE | MR_DATATYPE_LOOSE_VERT |
+ MR_DATATYPE_LOOSE_EDGE | MR_DATATYPE_POLY | MR_DATATYPE_LOOP);
+ DRW_ADD_FLAG_FROM_VBO_REQUEST(mr_edit_flag,
+ cache->edit.loop_face_idx,
+ MR_DATATYPE_VERT | MR_DATATYPE_EDGE | MR_DATATYPE_LOOSE_VERT |
+ MR_DATATYPE_LOOSE_EDGE | MR_DATATYPE_POLY | MR_DATATYPE_LOOP);
+ DRW_ADD_FLAG_FROM_VBO_REQUEST(mr_edit_flag, cache->edit.facedots_idx, MR_DATATYPE_POLY);
+ DRW_ADD_FLAG_FROM_VBO_REQUEST(mr_edit_flag,
+ cache->edit.facedots_pos_nor_data,
+ MR_DATATYPE_VERT | MR_DATATYPE_LOOP | MR_DATATYPE_POLY |
+ MR_DATATYPE_OVERLAY);
+ DRW_ADD_FLAG_FROM_VBO_REQUEST(mr_edit_flag, cache->edit.loop_stretch_angle, combined_edit_flag);
+ DRW_ADD_FLAG_FROM_VBO_REQUEST(mr_edit_flag, cache->edit.loop_stretch_area, combined_edit_flag);
+ DRW_ADD_FLAG_FROM_VBO_REQUEST(
+ mr_edit_flag, cache->edit.facedots_uv, combined_edit_flag | MR_DATATYPE_LOOPUV);
+ DRW_ADD_FLAG_FROM_VBO_REQUEST(mr_edit_flag, cache->edit.facedots_uv_data, combined_edit_flag);
+ DRW_ADD_FLAG_FROM_IBO_REQUEST(mr_edit_flag, cache->ibo.edituv_loops_points, combined_edit_flag);
+ DRW_ADD_FLAG_FROM_IBO_REQUEST(
+ mr_edit_flag, cache->ibo.edituv_loops_line_strips, combined_edit_flag);
+ DRW_ADD_FLAG_FROM_IBO_REQUEST(
+ mr_edit_flag, cache->ibo.edituv_loops_tri_fans, combined_edit_flag);
+ /* TODO: Some of the flags here may not be needed. */
+ DRW_ADD_FLAG_FROM_IBO_REQUEST(mr_edit_flag,
+ cache->ibo.edit_loops_points,
+ MR_DATATYPE_VERT | MR_DATATYPE_EDGE | MR_DATATYPE_LOOSE_VERT |
+ MR_DATATYPE_LOOSE_EDGE | MR_DATATYPE_POLY | MR_DATATYPE_LOOP |
+ MR_DATATYPE_LOOPTRI);
+ DRW_ADD_FLAG_FROM_IBO_REQUEST(mr_edit_flag,
+ cache->ibo.edit_loops_lines,
+ MR_DATATYPE_VERT | MR_DATATYPE_EDGE | MR_DATATYPE_LOOSE_VERT |
+ MR_DATATYPE_LOOSE_EDGE | MR_DATATYPE_POLY | MR_DATATYPE_LOOP |
+ MR_DATATYPE_LOOPTRI);
+ DRW_ADD_FLAG_FROM_IBO_REQUEST(mr_edit_flag,
+ cache->ibo.edit_loops_tris,
+ MR_DATATYPE_VERT | MR_DATATYPE_EDGE | MR_DATATYPE_LOOSE_VERT |
+ MR_DATATYPE_LOOSE_EDGE | MR_DATATYPE_POLY | MR_DATATYPE_LOOP |
+ MR_DATATYPE_LOOPTRI);
+
+ Mesh *me_original = me;
+ MBC_GET_FINAL_MESH(me);
+
+ if (me_original == me) {
+ mr_flag |= mr_edit_flag;
+ }
+
+ MeshRenderData *rdata = NULL;
+
+ if (mr_flag != 0) {
+ rdata = mesh_render_data_create_ex(me, mr_flag, &cache->cd_used, ts);
+ }
+
+ /* Generate VBOs */
+ if (DRW_vbo_requested(cache->ordered.pos_nor)) {
+ mesh_create_pos_and_nor(rdata, cache->ordered.pos_nor);
+ }
+ if (DRW_vbo_requested(cache->ordered.weights)) {
+ mesh_create_weights(rdata, cache->ordered.weights, &cache->weight_state);
+ }
+ if (DRW_vbo_requested(cache->ordered.loop_pos_nor)) {
+ mesh_create_loop_pos_and_nor(rdata, cache->ordered.loop_pos_nor);
+ }
+ if (DRW_vbo_requested(cache->ordered.loop_edge_fac)) {
+ mesh_create_loop_edge_fac(rdata, cache->ordered.loop_edge_fac);
+ }
+ if (DRW_vbo_requested(cache->ordered.loop_uv_tan)) {
+ mesh_create_loop_uv_and_tan(rdata, cache->ordered.loop_uv_tan);
+ }
+ if (DRW_vbo_requested(cache->ordered.loop_orco)) {
+ mesh_create_loop_orco(rdata, cache->ordered.loop_orco);
+ }
+ if (DRW_vbo_requested(cache->ordered.loop_vcol)) {
+ mesh_create_loop_vcol(rdata, cache->ordered.loop_vcol);
+ }
+ if (DRW_ibo_requested(cache->ibo.edges_lines)) {
+ mesh_create_edges_lines(rdata, cache->ibo.edges_lines, use_hide);
+ }
+ if (DRW_ibo_requested(cache->ibo.edges_adj_lines)) {
+ mesh_create_edges_adjacency_lines(
+ rdata, cache->ibo.edges_adj_lines, &cache->is_manifold, use_hide);
+ }
+ if (DRW_ibo_requested(cache->ibo.loose_edges_lines)) {
+ mesh_create_loose_edges_lines(rdata, cache->ibo.loose_edges_lines, use_hide);
+ }
+ if (DRW_ibo_requested(cache->ibo.surf_tris)) {
+ mesh_create_surf_tris(rdata, cache->ibo.surf_tris, use_hide);
+ }
+ if (DRW_ibo_requested(cache->ibo.loops_lines)) {
+ mesh_create_loops_lines(rdata, cache->ibo.loops_lines, use_hide);
+ }
+ if (DRW_ibo_requested(cache->ibo.loops_line_strips)) {
+ mesh_create_loops_line_strips(rdata, cache->ibo.loops_line_strips, use_hide);
+ }
+ if (DRW_ibo_requested(cache->ibo.loops_tris)) {
+ mesh_create_loops_tris(rdata, &cache->ibo.loops_tris, 1, use_hide);
+ }
+ if (DRW_ibo_requested(cache->surf_per_mat_tris[0])) {
+ mesh_create_loops_tris(rdata, cache->surf_per_mat_tris, cache->mat_len, use_hide);
+ }
+
+ /* Use original Mesh* to have the correct edit cage. */
+ if (me_original != me && mr_edit_flag != 0) {
+ if (rdata) {
+ mesh_render_data_free(rdata);
+ }
+ rdata = mesh_render_data_create_ex(me_original, mr_edit_flag, NULL, ts);
+ }
+
+ if (rdata && rdata->mapped.supported) {
+ rdata->mapped.use = true;
+ }
+
+ if (DRW_vbo_requested(cache->edit.loop_pos_nor) || DRW_vbo_requested(cache->edit.loop_lnor) ||
+ DRW_vbo_requested(cache->edit.loop_data) || DRW_vbo_requested(cache->edit.loop_vert_idx) ||
+ DRW_vbo_requested(cache->edit.loop_edge_idx) ||
+ DRW_vbo_requested(cache->edit.loop_face_idx)) {
+ mesh_create_edit_vertex_loops(rdata,
+ cache->edit.loop_pos_nor,
+ cache->edit.loop_lnor,
+ NULL,
+ cache->edit.loop_data,
+ cache->edit.loop_vert_idx,
+ cache->edit.loop_edge_idx,
+ cache->edit.loop_face_idx);
+ }
+ if (DRW_vbo_requested(cache->edit.facedots_pos_nor_data)) {
+ mesh_create_edit_facedots(rdata, cache->edit.facedots_pos_nor_data);
+ }
+ if (DRW_vbo_requested(cache->edit.facedots_idx)) {
+ mesh_create_edit_facedots_select_id(rdata, cache->edit.facedots_idx);
+ }
+ if (DRW_ibo_requested(cache->ibo.edit_loops_points) ||
+ DRW_ibo_requested(cache->ibo.edit_loops_lines)) {
+ mesh_create_edit_loops_points_lines(
+ rdata, cache->ibo.edit_loops_points, cache->ibo.edit_loops_lines);
+ }
+ if (DRW_ibo_requested(cache->ibo.edit_loops_tris)) {
+ mesh_create_edit_loops_tris(rdata, cache->ibo.edit_loops_tris);
+ }
+
+ /* UV editor */
+ /**
+   * TODO: The code and data structure are ready to support modified UV display
+ * but the selection code for UVs needs to support it first. So for now, only
+ * display the cage in all cases.
+ */
+ if (rdata && rdata->mapped.supported) {
+ rdata->mapped.use = false;
+ }
+
+ if (DRW_vbo_requested(cache->edit.loop_uv_data) || DRW_vbo_requested(cache->edit.loop_uv)) {
+ mesh_create_edit_vertex_loops(
+ rdata, NULL, NULL, cache->edit.loop_uv, cache->edit.loop_uv_data, NULL, NULL, NULL);
+ }
+ if (DRW_vbo_requested(cache->edit.loop_stretch_angle) ||
+ DRW_vbo_requested(cache->edit.loop_stretch_area) ||
+ DRW_vbo_requested(cache->edit.facedots_uv) ||
+ DRW_vbo_requested(cache->edit.facedots_uv_data) ||
+ DRW_ibo_requested(cache->ibo.edituv_loops_points) ||
+ DRW_ibo_requested(cache->ibo.edituv_loops_line_strips) ||
+ DRW_ibo_requested(cache->ibo.edituv_loops_tri_fans)) {
+ mesh_create_uvedit_buffers(rdata,
+ cache->edit.loop_stretch_area,
+ cache->edit.loop_stretch_angle,
+ cache->edit.facedots_uv,
+ cache->edit.facedots_uv_data,
+ cache->ibo.edituv_loops_points,
+ cache->ibo.edituv_loops_line_strips,
+ cache->ibo.edituv_loops_tri_fans);
+ }
+
+ if (rdata) {
+ mesh_render_data_free(rdata);
+ }
#ifdef DEBUG
- /* Make sure all requested batches have been setup. */
- for (int i = 0; i < sizeof(cache->batch) / sizeof(void *); ++i) {
- BLI_assert(!DRW_batch_requested(((GPUBatch **)&cache->batch)[i], 0));
- }
+ /* Make sure all requested batches have been setup. */
+ for (int i = 0; i < sizeof(cache->batch) / sizeof(void *); ++i) {
+ BLI_assert(!DRW_batch_requested(((GPUBatch **)&cache->batch)[i], 0));
+ }
#endif
}
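The hunk above reformats the whole body of the mesh batch-cache creation function, which follows a two-phase pattern: batches first register which vertex/index buffers they need (DRW_vbo_request / DRW_ibo_request), and a second pass then builds only the buffers that were actually requested. Below is a minimal self-contained sketch of that idea; Buf, buf_request and cache_fill_requested are hypothetical names for illustration, not Blender API.

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

/* Hypothetical stand-in for a GPU vertex or index buffer. */
typedef struct Buf {
  bool requested; /* Phase 1: some batch declared it needs this buffer. */
  bool filled;    /* Phase 2: the expensive extraction ran. */
} Buf;

/* Phase 1: record interest only; nothing is extracted yet. */
static void buf_request(Buf *buf)
{
  buf->requested = true;
}

/* A buffer needs filling only if requested and still empty. */
static bool buf_requested(const Buf *buf)
{
  return buf->requested && !buf->filled;
}

/* Phase 2: one pass builds exactly what was requested, so callers
 * never pay extraction cost for buffers nobody asked for. */
static void cache_fill_requested(Buf *bufs, size_t len)
{
  for (size_t i = 0; i < len; i++) {
    if (buf_requested(&bufs[i])) {
      bufs[i].filled = true; /* Real code would extract mesh data here. */
    }
  }
}

int main(void)
{
  Buf bufs[3] = {{false, false}, {false, false}, {false, false}};
  buf_request(&bufs[1]); /* Only one batch needs buffer 1. */
  cache_fill_requested(bufs, 3);
  printf("filled: %d %d %d\n", bufs[0].filled, bufs[1].filled, bufs[2].filled);
  return 0;
}

The payoff of the split is that several engines sharing one cache can each request what they draw, while the extraction cost is paid once and only for buffers somebody asked for.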
diff --git a/source/blender/draw/intern/draw_cache_impl_metaball.c b/source/blender/draw/intern/draw_cache_impl_metaball.c
index 46c247d67ea..2c06d536f3d 100644
--- a/source/blender/draw/intern/draw_cache_impl_metaball.c
+++ b/source/blender/draw/intern/draw_cache_impl_metaball.c
@@ -35,9 +35,7 @@
#include "GPU_batch.h"
-
-#include "draw_cache_impl.h" /* own include */
-
+#include "draw_cache_impl.h" /* own include */
static void metaball_batch_cache_clear(MetaBall *mb);
@@ -45,127 +43,128 @@ static void metaball_batch_cache_clear(MetaBall *mb);
/* MetaBall GPUBatch Cache */
typedef struct MetaBallBatchCache {
- GPUBatch *batch;
- GPUBatch **shaded_triangles;
+ GPUBatch *batch;
+ GPUBatch **shaded_triangles;
- int mat_len;
+ int mat_len;
- /* Shared */
- GPUVertBuf *pos_nor_in_order;
+ /* Shared */
+ GPUVertBuf *pos_nor_in_order;
- /* Wireframe */
- struct {
- GPUBatch *batch;
- } face_wire;
+ /* Wireframe */
+ struct {
+ GPUBatch *batch;
+ } face_wire;
- /* Edge detection */
- GPUBatch *edge_detection;
- GPUIndexBuf *edges_adj_lines;
+ /* Edge detection */
+ GPUBatch *edge_detection;
+ GPUIndexBuf *edges_adj_lines;
- /* settings to determine if cache is invalid */
- bool is_dirty;
+ /* settings to determine if cache is invalid */
+ bool is_dirty;
- /* Valid only if edge_detection is up to date. */
- bool is_manifold;
+ /* Valid only if edge_detection is up to date. */
+ bool is_manifold;
} MetaBallBatchCache;
/* GPUBatch cache management. */
static bool metaball_batch_cache_valid(MetaBall *mb)
{
- MetaBallBatchCache *cache = mb->batch_cache;
+ MetaBallBatchCache *cache = mb->batch_cache;
- if (cache == NULL) {
- return false;
- }
+ if (cache == NULL) {
+ return false;
+ }
- return cache->is_dirty == false;
+ return cache->is_dirty == false;
}
static void metaball_batch_cache_init(MetaBall *mb)
{
- MetaBallBatchCache *cache = mb->batch_cache;
-
- if (!cache) {
- cache = mb->batch_cache = MEM_mallocN(sizeof(*cache), __func__);
- }
- cache->batch = NULL;
- cache->mat_len = 0;
- cache->shaded_triangles = NULL;
- cache->is_dirty = false;
- cache->pos_nor_in_order = NULL;
- cache->face_wire.batch = NULL;
- cache->edge_detection = NULL;
- cache->edges_adj_lines = NULL;
- cache->is_manifold = false;
+ MetaBallBatchCache *cache = mb->batch_cache;
+
+ if (!cache) {
+ cache = mb->batch_cache = MEM_mallocN(sizeof(*cache), __func__);
+ }
+ cache->batch = NULL;
+ cache->mat_len = 0;
+ cache->shaded_triangles = NULL;
+ cache->is_dirty = false;
+ cache->pos_nor_in_order = NULL;
+ cache->face_wire.batch = NULL;
+ cache->edge_detection = NULL;
+ cache->edges_adj_lines = NULL;
+ cache->is_manifold = false;
}
static MetaBallBatchCache *metaball_batch_cache_get(MetaBall *mb)
{
- if (!metaball_batch_cache_valid(mb)) {
- metaball_batch_cache_clear(mb);
- metaball_batch_cache_init(mb);
- }
- return mb->batch_cache;
+ if (!metaball_batch_cache_valid(mb)) {
+ metaball_batch_cache_clear(mb);
+ metaball_batch_cache_init(mb);
+ }
+ return mb->batch_cache;
}
void DRW_mball_batch_cache_dirty_tag(MetaBall *mb, int mode)
{
- MetaBallBatchCache *cache = mb->batch_cache;
- if (cache == NULL) {
- return;
- }
- switch (mode) {
- case BKE_MBALL_BATCH_DIRTY_ALL:
- cache->is_dirty = true;
- break;
- default:
- BLI_assert(0);
- }
+ MetaBallBatchCache *cache = mb->batch_cache;
+ if (cache == NULL) {
+ return;
+ }
+ switch (mode) {
+ case BKE_MBALL_BATCH_DIRTY_ALL:
+ cache->is_dirty = true;
+ break;
+ default:
+ BLI_assert(0);
+ }
}
static void metaball_batch_cache_clear(MetaBall *mb)
{
- MetaBallBatchCache *cache = mb->batch_cache;
- if (!cache) {
- return;
- }
-
- GPU_BATCH_DISCARD_SAFE(cache->face_wire.batch);
- GPU_BATCH_DISCARD_SAFE(cache->batch);
- GPU_BATCH_DISCARD_SAFE(cache->edge_detection);
- GPU_VERTBUF_DISCARD_SAFE(cache->pos_nor_in_order);
- GPU_INDEXBUF_DISCARD_SAFE(cache->edges_adj_lines);
- /* Note: shaded_triangles[0] is already freed by cache->batch */
- MEM_SAFE_FREE(cache->shaded_triangles);
- cache->mat_len = 0;
- cache->is_manifold = false;
+ MetaBallBatchCache *cache = mb->batch_cache;
+ if (!cache) {
+ return;
+ }
+
+ GPU_BATCH_DISCARD_SAFE(cache->face_wire.batch);
+ GPU_BATCH_DISCARD_SAFE(cache->batch);
+ GPU_BATCH_DISCARD_SAFE(cache->edge_detection);
+ GPU_VERTBUF_DISCARD_SAFE(cache->pos_nor_in_order);
+ GPU_INDEXBUF_DISCARD_SAFE(cache->edges_adj_lines);
+ /* Note: shaded_triangles[0] is already freed by cache->batch */
+ MEM_SAFE_FREE(cache->shaded_triangles);
+ cache->mat_len = 0;
+ cache->is_manifold = false;
}
void DRW_mball_batch_cache_free(MetaBall *mb)
{
- metaball_batch_cache_clear(mb);
- MEM_SAFE_FREE(mb->batch_cache);
+ metaball_batch_cache_clear(mb);
+ MEM_SAFE_FREE(mb->batch_cache);
}
static GPUVertBuf *mball_batch_cache_get_pos_and_normals(Object *ob, MetaBallBatchCache *cache)
{
- if (cache->pos_nor_in_order == NULL) {
- ListBase *lb = &ob->runtime.curve_cache->disp;
- cache->pos_nor_in_order = MEM_callocN(sizeof(GPUVertBuf), __func__);
- DRW_displist_vertbuf_create_pos_and_nor(lb, cache->pos_nor_in_order);
- }
- return cache->pos_nor_in_order;
+ if (cache->pos_nor_in_order == NULL) {
+ ListBase *lb = &ob->runtime.curve_cache->disp;
+ cache->pos_nor_in_order = MEM_callocN(sizeof(GPUVertBuf), __func__);
+ DRW_displist_vertbuf_create_pos_and_nor(lb, cache->pos_nor_in_order);
+ }
+ return cache->pos_nor_in_order;
}
static GPUIndexBuf *mball_batch_cache_get_edges_adj_lines(Object *ob, MetaBallBatchCache *cache)
{
- if (cache->edges_adj_lines == NULL) {
- ListBase *lb = &ob->runtime.curve_cache->disp;
- cache->edges_adj_lines = MEM_callocN(sizeof(GPUVertBuf), __func__);
- DRW_displist_indexbuf_create_edges_adjacency_lines(lb, cache->edges_adj_lines, &cache->is_manifold);
- }
- return cache->edges_adj_lines;
+ if (cache->edges_adj_lines == NULL) {
+ ListBase *lb = &ob->runtime.curve_cache->disp;
+ cache->edges_adj_lines = MEM_callocN(sizeof(GPUVertBuf), __func__);
+ DRW_displist_indexbuf_create_edges_adjacency_lines(
+ lb, cache->edges_adj_lines, &cache->is_manifold);
+ }
+ return cache->edges_adj_lines;
}
/* -------------------------------------------------------------------- */
@@ -174,95 +173,96 @@ static GPUIndexBuf *mball_batch_cache_get_edges_adj_lines(Object *ob, MetaBallBa
GPUBatch *DRW_metaball_batch_cache_get_triangles_with_normals(Object *ob)
{
- if (!BKE_mball_is_basis(ob)) {
- return NULL;
- }
-
- MetaBall *mb = ob->data;
- MetaBallBatchCache *cache = metaball_batch_cache_get(mb);
-
- if (cache->batch == NULL) {
- ListBase *lb = &ob->runtime.curve_cache->disp;
- GPUIndexBuf *ibo = MEM_callocN(sizeof(GPUIndexBuf), __func__);
- DRW_displist_indexbuf_create_triangles_in_order(lb, ibo);
- cache->batch = GPU_batch_create_ex(
- GPU_PRIM_TRIS,
- mball_batch_cache_get_pos_and_normals(ob, cache),
- ibo,
- GPU_BATCH_OWNS_INDEX);
- }
-
- return cache->batch;
+ if (!BKE_mball_is_basis(ob)) {
+ return NULL;
+ }
+
+ MetaBall *mb = ob->data;
+ MetaBallBatchCache *cache = metaball_batch_cache_get(mb);
+
+ if (cache->batch == NULL) {
+ ListBase *lb = &ob->runtime.curve_cache->disp;
+ GPUIndexBuf *ibo = MEM_callocN(sizeof(GPUIndexBuf), __func__);
+ DRW_displist_indexbuf_create_triangles_in_order(lb, ibo);
+ cache->batch = GPU_batch_create_ex(GPU_PRIM_TRIS,
+ mball_batch_cache_get_pos_and_normals(ob, cache),
+ ibo,
+ GPU_BATCH_OWNS_INDEX);
+ }
+
+ return cache->batch;
}
-GPUBatch **DRW_metaball_batch_cache_get_surface_shaded(Object *ob, MetaBall *mb, struct GPUMaterial **UNUSED(gpumat_array), uint gpumat_array_len)
+GPUBatch **DRW_metaball_batch_cache_get_surface_shaded(Object *ob,
+ MetaBall *mb,
+ struct GPUMaterial **UNUSED(gpumat_array),
+ uint gpumat_array_len)
{
- if (!BKE_mball_is_basis(ob)) {
- return NULL;
- }
-
- MetaBallBatchCache *cache = metaball_batch_cache_get(mb);
- if (cache->shaded_triangles == NULL) {
- cache->mat_len = gpumat_array_len;
- cache->shaded_triangles = MEM_callocN(sizeof(*cache->shaded_triangles) * cache->mat_len, __func__);
- cache->shaded_triangles[0] = DRW_metaball_batch_cache_get_triangles_with_normals(ob);
- for (int i = 1; i < cache->mat_len; ++i) {
- cache->shaded_triangles[i] = NULL;
- }
- }
- return cache->shaded_triangles;
-
+ if (!BKE_mball_is_basis(ob)) {
+ return NULL;
+ }
+
+ MetaBallBatchCache *cache = metaball_batch_cache_get(mb);
+ if (cache->shaded_triangles == NULL) {
+ cache->mat_len = gpumat_array_len;
+ cache->shaded_triangles = MEM_callocN(sizeof(*cache->shaded_triangles) * cache->mat_len,
+ __func__);
+ cache->shaded_triangles[0] = DRW_metaball_batch_cache_get_triangles_with_normals(ob);
+ for (int i = 1; i < cache->mat_len; ++i) {
+ cache->shaded_triangles[i] = NULL;
+ }
+ }
+ return cache->shaded_triangles;
}
GPUBatch *DRW_metaball_batch_cache_get_wireframes_face(Object *ob)
{
- if (!BKE_mball_is_basis(ob)) {
- return NULL;
- }
+ if (!BKE_mball_is_basis(ob)) {
+ return NULL;
+ }
- MetaBall *mb = ob->data;
- MetaBallBatchCache *cache = metaball_batch_cache_get(mb);
+ MetaBall *mb = ob->data;
+ MetaBallBatchCache *cache = metaball_batch_cache_get(mb);
- if (cache->face_wire.batch == NULL) {
- ListBase *lb = &ob->runtime.curve_cache->disp;
+ if (cache->face_wire.batch == NULL) {
+ ListBase *lb = &ob->runtime.curve_cache->disp;
- GPUVertBuf *vbo_wiredata = MEM_callocN(sizeof(GPUVertBuf), __func__);
- DRW_displist_vertbuf_create_wiredata(lb, vbo_wiredata);
+ GPUVertBuf *vbo_wiredata = MEM_callocN(sizeof(GPUVertBuf), __func__);
+ DRW_displist_vertbuf_create_wiredata(lb, vbo_wiredata);
- GPUIndexBuf *ibo = MEM_callocN(sizeof(GPUIndexBuf), __func__);
- DRW_displist_indexbuf_create_lines_in_order(lb, ibo);
+ GPUIndexBuf *ibo = MEM_callocN(sizeof(GPUIndexBuf), __func__);
+ DRW_displist_indexbuf_create_lines_in_order(lb, ibo);
- cache->face_wire.batch = GPU_batch_create_ex(
- GPU_PRIM_LINES,
- mball_batch_cache_get_pos_and_normals(ob, cache),
- ibo,
- GPU_BATCH_OWNS_INDEX);
+ cache->face_wire.batch = GPU_batch_create_ex(GPU_PRIM_LINES,
+ mball_batch_cache_get_pos_and_normals(ob, cache),
+ ibo,
+ GPU_BATCH_OWNS_INDEX);
- GPU_batch_vertbuf_add_ex(cache->face_wire.batch, vbo_wiredata, true);
- }
+ GPU_batch_vertbuf_add_ex(cache->face_wire.batch, vbo_wiredata, true);
+ }
- return cache->face_wire.batch;
+ return cache->face_wire.batch;
}
-struct GPUBatch *DRW_metaball_batch_cache_get_edge_detection(struct Object *ob, bool *r_is_manifold)
+struct GPUBatch *DRW_metaball_batch_cache_get_edge_detection(struct Object *ob,
+ bool *r_is_manifold)
{
- if (!BKE_mball_is_basis(ob)) {
- return NULL;
- }
+ if (!BKE_mball_is_basis(ob)) {
+ return NULL;
+ }
- MetaBall *mb = ob->data;
- MetaBallBatchCache *cache = metaball_batch_cache_get(mb);
+ MetaBall *mb = ob->data;
+ MetaBallBatchCache *cache = metaball_batch_cache_get(mb);
- if (cache->edge_detection == NULL) {
- cache->edge_detection = GPU_batch_create(
- GPU_PRIM_LINES_ADJ,
- mball_batch_cache_get_pos_and_normals(ob, cache),
- mball_batch_cache_get_edges_adj_lines(ob, cache));
- }
+ if (cache->edge_detection == NULL) {
+ cache->edge_detection = GPU_batch_create(GPU_PRIM_LINES_ADJ,
+ mball_batch_cache_get_pos_and_normals(ob, cache),
+ mball_batch_cache_get_edges_adj_lines(ob, cache));
+ }
- if (r_is_manifold) {
- *r_is_manifold = cache->is_manifold;
- }
+ if (r_is_manifold) {
+ *r_is_manifold = cache->is_manifold;
+ }
- return cache->edge_detection;
+ return cache->edge_detection;
}
\ No newline at end of file
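The metaball file above uses the same batch-cache lifecycle as the other draw caches in this directory: a dirty tag only marks the cache invalid, and the accessor clears and rebuilds it lazily on the next access. A minimal sketch under those assumptions follows; Thing, Cache and the function names are hypothetical, not Blender API.

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

/* Hypothetical owner of a lazily rebuilt cache; names mirror the
 * metaball code above but are illustrative only. */
typedef struct Cache {
  bool is_dirty;
  int payload; /* Stands in for the GPU batches and buffers. */
} Cache;

typedef struct Thing {
  Cache *cache;
} Thing;

static bool cache_valid(const Thing *t)
{
  return (t->cache != NULL) && (t->cache->is_dirty == false);
}

static void cache_clear(Thing *t)
{
  if (t->cache) {
    t->cache->payload = 0; /* GPU resources would be freed here. */
  }
}

static void cache_init(Thing *t)
{
  if (!t->cache) {
    t->cache = calloc(1, sizeof(Cache));
  }
  t->cache->is_dirty = false;
}

/* Accessor: clear and rebuild only when a dirty tag invalidated the cache. */
static Cache *cache_get(Thing *t)
{
  if (!cache_valid(t)) {
    cache_clear(t);
    cache_init(t);
  }
  return t->cache;
}

/* Change notifications tag instead of freeing, keeping them cheap. */
static void cache_dirty_tag(Thing *t)
{
  if (t->cache) {
    t->cache->is_dirty = true;
  }
}

int main(void)
{
  Thing t = {NULL};
  Cache *c1 = cache_get(&t); /* First access allocates and initializes. */
  cache_dirty_tag(&t);       /* Data changed somewhere. */
  Cache *c2 = cache_get(&t); /* Rebuilt in place, same allocation. */
  printf("same=%d dirty=%d\n", c1 == c2, c2->is_dirty);
  free(t.cache);
  return 0;
}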
diff --git a/source/blender/draw/intern/draw_cache_impl_particles.c b/source/blender/draw/intern/draw_cache_impl_particles.c
index b23d5e0fb9c..e998b17a44f 100644
--- a/source/blender/draw/intern/draw_cache_impl_particles.c
+++ b/source/blender/draw/intern/draw_cache_impl_particles.c
@@ -48,7 +48,7 @@
#include "DEG_depsgraph_query.h"
-#include "draw_cache_impl.h" /* own include */
+#include "draw_cache_impl.h" /* own include */
#include "draw_hair_private.h"
static void particle_batch_cache_clear(ParticleSystem *psys);
@@ -57,1640 +57,1624 @@ static void particle_batch_cache_clear(ParticleSystem *psys);
/* Particle GPUBatch Cache */
typedef struct ParticlePointCache {
- GPUVertBuf *pos;
- GPUBatch *points;
- int elems_len;
- int point_len;
+ GPUVertBuf *pos;
+ GPUBatch *points;
+ int elems_len;
+ int point_len;
} ParticlePointCache;
typedef struct ParticleBatchCache {
- /* Object mode strands for hair and points for particles,
- * strands for paths when in edit mode.
- */
- ParticleHairCache hair; /* Used for hair strands */
- ParticlePointCache point; /* Used for particle points. */
+ /* Object mode strands for hair and points for particles,
+ * strands for paths when in edit mode.
+ */
+ ParticleHairCache hair; /* Used for hair strands */
+ ParticlePointCache point; /* Used for particle points. */
- /* Control points when in edit mode. */
- ParticleHairCache edit_hair;
+ /* Control points when in edit mode. */
+ ParticleHairCache edit_hair;
- GPUVertBuf *edit_pos;
- GPUBatch *edit_strands;
+ GPUVertBuf *edit_pos;
+ GPUBatch *edit_strands;
- GPUVertBuf *edit_inner_pos;
- GPUBatch *edit_inner_points;
- int edit_inner_point_len;
+ GPUVertBuf *edit_inner_pos;
+ GPUBatch *edit_inner_points;
+ int edit_inner_point_len;
- GPUVertBuf *edit_tip_pos;
- GPUBatch *edit_tip_points;
- int edit_tip_point_len;
+ GPUVertBuf *edit_tip_pos;
+ GPUBatch *edit_tip_points;
+ int edit_tip_point_len;
- /* Settings to determine if cache is invalid. */
- bool is_dirty;
- bool edit_is_weight;
+ /* Settings to determine if cache is invalid. */
+ bool is_dirty;
+ bool edit_is_weight;
} ParticleBatchCache;
/* GPUBatch cache management. */
typedef struct HairAttributeID {
- uint pos;
- uint tan;
- uint ind;
+ uint pos;
+ uint tan;
+ uint ind;
} HairAttributeID;
typedef struct EditStrandData {
- float pos[3];
- uchar color;
+ float pos[3];
+ uchar color;
} EditStrandData;
static GPUVertFormat *edit_points_vert_format_get(uint *r_pos_id, uint *r_color_id)
{
- static GPUVertFormat edit_point_format = { 0 };
- static uint pos_id, color_id;
- if (edit_point_format.attr_len == 0) {
- /* Keep in sync with EditStrandData */
- pos_id = GPU_vertformat_attr_add(&edit_point_format, "pos", GPU_COMP_F32, 3, GPU_FETCH_FLOAT);
- color_id = GPU_vertformat_attr_add(&edit_point_format, "color", GPU_COMP_U8, 1, GPU_FETCH_INT_TO_FLOAT_UNIT);
- }
- *r_pos_id = pos_id;
- *r_color_id = color_id;
- return &edit_point_format;
+ static GPUVertFormat edit_point_format = {0};
+ static uint pos_id, color_id;
+ if (edit_point_format.attr_len == 0) {
+ /* Keep in sync with EditStrandData */
+ pos_id = GPU_vertformat_attr_add(&edit_point_format, "pos", GPU_COMP_F32, 3, GPU_FETCH_FLOAT);
+ color_id = GPU_vertformat_attr_add(
+ &edit_point_format, "color", GPU_COMP_U8, 1, GPU_FETCH_INT_TO_FLOAT_UNIT);
+ }
+ *r_pos_id = pos_id;
+ *r_color_id = color_id;
+ return &edit_point_format;
}
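edit_points_vert_format_get() above relies on a function-local static format whose attr_len == 0 state means "not built yet", so the format is constructed on the first call and reused afterwards. A small sketch of the same lazy-init pattern follows; Format and format_attr_add are illustrative stand-ins, and it assumes single-threaded use, as the drawing code presumably is.

#include <stdio.h>

/* Illustrative stand-in for GPUVertFormat: attr_len == 0 marks
 * "not yet initialized", exactly the check used above. */
typedef struct Format {
  int attr_len;
  int comp_len[8]; /* Per-attribute component count. */
} Format;

static int format_attr_add(Format *fmt, int comp_len)
{
  int id = fmt->attr_len++;
  fmt->comp_len[id] = comp_len;
  return id;
}

/* First call builds the format, later calls reuse the static copy. */
static Format *edit_points_format_get(int *r_pos_id, int *r_color_id)
{
  static Format fmt = {0};
  static int pos_id, color_id;
  if (fmt.attr_len == 0) {
    pos_id = format_attr_add(&fmt, 3);   /* "pos": three floats. */
    color_id = format_attr_add(&fmt, 1); /* "color": one byte. */
  }
  *r_pos_id = pos_id;
  *r_color_id = color_id;
  return &fmt;
}

int main(void)
{
  int pos, col;
  Format *fmt = edit_points_format_get(&pos, &col);
  printf("attrs=%d pos=%d color=%d\n", fmt->attr_len, pos, col);
  return 0;
}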
static bool particle_batch_cache_valid(ParticleSystem *psys)
{
- ParticleBatchCache *cache = psys->batch_cache;
+ ParticleBatchCache *cache = psys->batch_cache;
- if (cache == NULL) {
- return false;
- }
+ if (cache == NULL) {
+ return false;
+ }
- if (cache->is_dirty == false) {
- return true;
- }
- else {
- return false;
- }
+ if (cache->is_dirty == false) {
+ return true;
+ }
+ else {
+ return false;
+ }
- return true;
+ return true;
}
static void particle_batch_cache_init(ParticleSystem *psys)
{
- ParticleBatchCache *cache = psys->batch_cache;
+ ParticleBatchCache *cache = psys->batch_cache;
- if (!cache) {
- cache = psys->batch_cache = MEM_callocN(sizeof(*cache), __func__);
- }
- else {
- memset(cache, 0, sizeof(*cache));
- }
+ if (!cache) {
+ cache = psys->batch_cache = MEM_callocN(sizeof(*cache), __func__);
+ }
+ else {
+ memset(cache, 0, sizeof(*cache));
+ }
- cache->is_dirty = false;
+ cache->is_dirty = false;
}
static ParticleBatchCache *particle_batch_cache_get(ParticleSystem *psys)
{
- if (!particle_batch_cache_valid(psys)) {
- particle_batch_cache_clear(psys);
- particle_batch_cache_init(psys);
- }
- return psys->batch_cache;
+ if (!particle_batch_cache_valid(psys)) {
+ particle_batch_cache_clear(psys);
+ particle_batch_cache_init(psys);
+ }
+ return psys->batch_cache;
}
void DRW_particle_batch_cache_dirty_tag(ParticleSystem *psys, int mode)
{
- ParticleBatchCache *cache = psys->batch_cache;
- if (cache == NULL) {
- return;
- }
- switch (mode) {
- case BKE_PARTICLE_BATCH_DIRTY_ALL:
- cache->is_dirty = true;
- break;
- default:
- BLI_assert(0);
- }
+ ParticleBatchCache *cache = psys->batch_cache;
+ if (cache == NULL) {
+ return;
+ }
+ switch (mode) {
+ case BKE_PARTICLE_BATCH_DIRTY_ALL:
+ cache->is_dirty = true;
+ break;
+ default:
+ BLI_assert(0);
+ }
}
static void particle_batch_cache_clear_point(ParticlePointCache *point_cache)
{
- GPU_BATCH_DISCARD_SAFE(point_cache->points);
- GPU_VERTBUF_DISCARD_SAFE(point_cache->pos);
+ GPU_BATCH_DISCARD_SAFE(point_cache->points);
+ GPU_VERTBUF_DISCARD_SAFE(point_cache->pos);
}
static void particle_batch_cache_clear_hair(ParticleHairCache *hair_cache)
{
- /* TODO more granular update tagging. */
- GPU_VERTBUF_DISCARD_SAFE(hair_cache->proc_point_buf);
- DRW_TEXTURE_FREE_SAFE(hair_cache->point_tex);
-
- GPU_VERTBUF_DISCARD_SAFE(hair_cache->proc_strand_buf);
- GPU_VERTBUF_DISCARD_SAFE(hair_cache->proc_strand_seg_buf);
- DRW_TEXTURE_FREE_SAFE(hair_cache->strand_tex);
- DRW_TEXTURE_FREE_SAFE(hair_cache->strand_seg_tex);
-
- for (int i = 0; i < MAX_MTFACE; ++i) {
- GPU_VERTBUF_DISCARD_SAFE(hair_cache->proc_uv_buf[i]);
- DRW_TEXTURE_FREE_SAFE(hair_cache->uv_tex[i]);
- }
- for (int i = 0; i < MAX_MCOL; ++i) {
- GPU_VERTBUF_DISCARD_SAFE(hair_cache->proc_col_buf[i]);
- DRW_TEXTURE_FREE_SAFE(hair_cache->col_tex[i]);
- }
- for (int i = 0; i < MAX_HAIR_SUBDIV; ++i) {
- GPU_VERTBUF_DISCARD_SAFE(hair_cache->final[i].proc_buf);
- DRW_TEXTURE_FREE_SAFE(hair_cache->final[i].proc_tex);
- for (int j = 0; j < MAX_THICKRES; ++j) {
- GPU_BATCH_DISCARD_SAFE(hair_cache->final[i].proc_hairs[j]);
- }
- }
-
- /* "Normal" legacy hairs */
- GPU_BATCH_DISCARD_SAFE(hair_cache->hairs);
- GPU_VERTBUF_DISCARD_SAFE(hair_cache->pos);
- GPU_INDEXBUF_DISCARD_SAFE(hair_cache->indices);
+ /* TODO more granular update tagging. */
+ GPU_VERTBUF_DISCARD_SAFE(hair_cache->proc_point_buf);
+ DRW_TEXTURE_FREE_SAFE(hair_cache->point_tex);
+
+ GPU_VERTBUF_DISCARD_SAFE(hair_cache->proc_strand_buf);
+ GPU_VERTBUF_DISCARD_SAFE(hair_cache->proc_strand_seg_buf);
+ DRW_TEXTURE_FREE_SAFE(hair_cache->strand_tex);
+ DRW_TEXTURE_FREE_SAFE(hair_cache->strand_seg_tex);
+
+ for (int i = 0; i < MAX_MTFACE; ++i) {
+ GPU_VERTBUF_DISCARD_SAFE(hair_cache->proc_uv_buf[i]);
+ DRW_TEXTURE_FREE_SAFE(hair_cache->uv_tex[i]);
+ }
+ for (int i = 0; i < MAX_MCOL; ++i) {
+ GPU_VERTBUF_DISCARD_SAFE(hair_cache->proc_col_buf[i]);
+ DRW_TEXTURE_FREE_SAFE(hair_cache->col_tex[i]);
+ }
+ for (int i = 0; i < MAX_HAIR_SUBDIV; ++i) {
+ GPU_VERTBUF_DISCARD_SAFE(hair_cache->final[i].proc_buf);
+ DRW_TEXTURE_FREE_SAFE(hair_cache->final[i].proc_tex);
+ for (int j = 0; j < MAX_THICKRES; ++j) {
+ GPU_BATCH_DISCARD_SAFE(hair_cache->final[i].proc_hairs[j]);
+ }
+ }
+
+ /* "Normal" legacy hairs */
+ GPU_BATCH_DISCARD_SAFE(hair_cache->hairs);
+ GPU_VERTBUF_DISCARD_SAFE(hair_cache->pos);
+ GPU_INDEXBUF_DISCARD_SAFE(hair_cache->indices);
}
static void particle_batch_cache_clear(ParticleSystem *psys)
{
- ParticleBatchCache *cache = psys->batch_cache;
- if (!cache) {
- return;
- }
+ ParticleBatchCache *cache = psys->batch_cache;
+ if (!cache) {
+ return;
+ }
- particle_batch_cache_clear_point(&cache->point);
- particle_batch_cache_clear_hair(&cache->hair);
+ particle_batch_cache_clear_point(&cache->point);
+ particle_batch_cache_clear_hair(&cache->hair);
- particle_batch_cache_clear_hair(&cache->edit_hair);
+ particle_batch_cache_clear_hair(&cache->edit_hair);
- GPU_BATCH_DISCARD_SAFE(cache->edit_inner_points);
- GPU_VERTBUF_DISCARD_SAFE(cache->edit_inner_pos);
- GPU_BATCH_DISCARD_SAFE(cache->edit_tip_points);
- GPU_VERTBUF_DISCARD_SAFE(cache->edit_tip_pos);
+ GPU_BATCH_DISCARD_SAFE(cache->edit_inner_points);
+ GPU_VERTBUF_DISCARD_SAFE(cache->edit_inner_pos);
+ GPU_BATCH_DISCARD_SAFE(cache->edit_tip_points);
+ GPU_VERTBUF_DISCARD_SAFE(cache->edit_tip_pos);
}
void DRW_particle_batch_cache_free(ParticleSystem *psys)
{
- particle_batch_cache_clear(psys);
- MEM_SAFE_FREE(psys->batch_cache);
+ particle_batch_cache_clear(psys);
+ MEM_SAFE_FREE(psys->batch_cache);
}
-static void count_cache_segment_keys(
- ParticleCacheKey **pathcache,
- const int num_path_cache_keys,
- ParticleHairCache *hair_cache)
+static void count_cache_segment_keys(ParticleCacheKey **pathcache,
+ const int num_path_cache_keys,
+ ParticleHairCache *hair_cache)
{
- for (int i = 0; i < num_path_cache_keys; i++) {
- ParticleCacheKey *path = pathcache[i];
- if (path->segments > 0) {
- hair_cache->strands_len++;
- hair_cache->elems_len += path->segments + 2;
- hair_cache->point_len += path->segments + 1;
- }
- }
+ for (int i = 0; i < num_path_cache_keys; i++) {
+ ParticleCacheKey *path = pathcache[i];
+ if (path->segments > 0) {
+ hair_cache->strands_len++;
+ hair_cache->elems_len += path->segments + 2;
+ hair_cache->point_len += path->segments + 1;
+ }
+ }
}
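count_cache_segment_keys() above sizes the buffers before they are filled: a strand with S segments contributes S + 1 points and S + 2 index elements, consistent with one extra index per strand (assumed here to be a line-strip restart index; the actual layout is defined by the fill code). A worked sketch:

#include <stdio.h>

/* Counting sketch mirroring count_cache_segment_keys() above. */
typedef struct Counts {
  int strands, points, elems;
} Counts;

static void count_strand(Counts *c, int segments)
{
  if (segments > 0) {
    c->strands += 1;
    c->points += segments + 1; /* A strand of S segments has S + 1 keys. */
    c->elems += segments + 2;  /* Plus one assumed restart index. */
  }
}

int main(void)
{
  Counts c = {0, 0, 0};
  const int segs[3] = {4, 0, 2}; /* Zero-segment strands are skipped. */
  for (int i = 0; i < 3; i++) {
    count_strand(&c, segs[i]);
  }
  /* Expected: strands=2, points=(4+1)+(2+1)=8, elems=(4+2)+(2+2)=10. */
  printf("strands=%d points=%d elems=%d\n", c.strands, c.points, c.elems);
  return 0;
}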
-static void ensure_seg_pt_count(
- PTCacheEdit *edit,
- ParticleSystem *psys,
- ParticleHairCache *hair_cache)
+static void ensure_seg_pt_count(PTCacheEdit *edit,
+ ParticleSystem *psys,
+ ParticleHairCache *hair_cache)
{
- if ((hair_cache->pos != NULL && hair_cache->indices != NULL) ||
- (hair_cache->proc_point_buf != NULL))
- {
- return;
- }
-
- hair_cache->strands_len = 0;
- hair_cache->elems_len = 0;
- hair_cache->point_len = 0;
-
- if (edit != NULL && edit->pathcache != NULL) {
- count_cache_segment_keys(edit->pathcache, edit->totcached, hair_cache);
- }
- else {
- if (psys->pathcache &&
- (!psys->childcache || (psys->part->draw & PART_DRAW_PARENT)))
- {
- count_cache_segment_keys(psys->pathcache, psys->totpart, hair_cache);
- }
- if (psys->childcache) {
- const int child_count = psys->totchild * psys->part->disp / 100;
- count_cache_segment_keys(psys->childcache, child_count, hair_cache);
- }
- }
+ if ((hair_cache->pos != NULL && hair_cache->indices != NULL) ||
+ (hair_cache->proc_point_buf != NULL)) {
+ return;
+ }
+
+ hair_cache->strands_len = 0;
+ hair_cache->elems_len = 0;
+ hair_cache->point_len = 0;
+
+ if (edit != NULL && edit->pathcache != NULL) {
+ count_cache_segment_keys(edit->pathcache, edit->totcached, hair_cache);
+ }
+ else {
+ if (psys->pathcache && (!psys->childcache || (psys->part->draw & PART_DRAW_PARENT))) {
+ count_cache_segment_keys(psys->pathcache, psys->totpart, hair_cache);
+ }
+ if (psys->childcache) {
+ const int child_count = psys->totchild * psys->part->disp / 100;
+ count_cache_segment_keys(psys->childcache, child_count, hair_cache);
+ }
+ }
}
static void particle_pack_mcol(MCol *mcol, ushort r_scol[3])
{
- /* Convert to linear ushort and swizzle */
- r_scol[0] = unit_float_to_ushort_clamp(BLI_color_from_srgb_table[mcol->b]);
- r_scol[1] = unit_float_to_ushort_clamp(BLI_color_from_srgb_table[mcol->g]);
- r_scol[2] = unit_float_to_ushort_clamp(BLI_color_from_srgb_table[mcol->r]);
+ /* Convert to linear ushort and swizzle */
+ r_scol[0] = unit_float_to_ushort_clamp(BLI_color_from_srgb_table[mcol->b]);
+ r_scol[1] = unit_float_to_ushort_clamp(BLI_color_from_srgb_table[mcol->g]);
+ r_scol[2] = unit_float_to_ushort_clamp(BLI_color_from_srgb_table[mcol->r]);
}
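particle_pack_mcol() above turns each 8-bit sRGB channel into a linear 16-bit value via a precomputed table, reading the MCol fields in b, g, r order (the swizzle its comment mentions). The sketch below computes the same conversion directly instead of through Blender's BLI_color_from_srgb_table; MColLike and the helper names are illustrative.

#include <math.h>
#include <stdio.h>

/* Illustrative stand-in for MCol (assumed field order a, r, g, b). */
typedef struct MColLike {
  unsigned char a, r, g, b;
} MColLike;

/* Standard sRGB decode, replacing the precomputed lookup table. */
static float srgb_to_linear(unsigned char v)
{
  const float c = v / 255.0f;
  return (c <= 0.04045f) ? c / 12.92f : powf((c + 0.055f) / 1.055f, 2.4f);
}

static unsigned short unit_float_to_ushort(float f)
{
  if (f <= 0.0f) {
    return 0;
  }
  if (f >= 1.0f) {
    return 65535;
  }
  return (unsigned short)(f * 65535.0f + 0.5f);
}

static void pack_mcol(const MColLike *mcol, unsigned short r_scol[3])
{
  /* Same index pattern as the real function: read b, g, r in order. */
  r_scol[0] = unit_float_to_ushort(srgb_to_linear(mcol->b));
  r_scol[1] = unit_float_to_ushort(srgb_to_linear(mcol->g));
  r_scol[2] = unit_float_to_ushort(srgb_to_linear(mcol->r));
}

int main(void)
{
  const MColLike c = {255, 255, 0, 0}; /* Fields a, r, g, b. */
  unsigned short out[3];
  pack_mcol(&c, out);
  printf("%u %u %u\n", out[0], out[1], out[2]);
  return 0;
}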
/* Used by parent particles and simple children. */
-static void particle_calculate_parent_uvs(
- ParticleSystem *psys,
- ParticleSystemModifierData *psmd,
- const int num_uv_layers,
- const int parent_index,
- /*const*/ MTFace **mtfaces,
- float (*r_uv)[2])
+static void particle_calculate_parent_uvs(ParticleSystem *psys,
+ ParticleSystemModifierData *psmd,
+ const int num_uv_layers,
+ const int parent_index,
+ /*const*/ MTFace **mtfaces,
+ float (*r_uv)[2])
{
- if (psmd == NULL) {
- return;
- }
- const int emit_from = psmd->psys->part->from;
- if (!ELEM(emit_from, PART_FROM_FACE, PART_FROM_VOLUME)) {
- return;
- }
- ParticleData *particle = &psys->particles[parent_index];
- int num = particle->num_dmcache;
- if (num == DMCACHE_NOTFOUND || num == DMCACHE_ISCHILD) {
- if (particle->num < psmd->mesh_final->totface) {
- num = particle->num;
- }
- }
- if (num != DMCACHE_NOTFOUND && num != DMCACHE_ISCHILD) {
- MFace *mface = &psmd->mesh_final->mface[num];
- for (int j = 0; j < num_uv_layers; j++) {
- psys_interpolate_uvs(
- mtfaces[j] + num,
- mface->v4,
- particle->fuv,
- r_uv[j]);
- }
- }
+ if (psmd == NULL) {
+ return;
+ }
+ const int emit_from = psmd->psys->part->from;
+ if (!ELEM(emit_from, PART_FROM_FACE, PART_FROM_VOLUME)) {
+ return;
+ }
+ ParticleData *particle = &psys->particles[parent_index];
+ int num = particle->num_dmcache;
+ if (num == DMCACHE_NOTFOUND || num == DMCACHE_ISCHILD) {
+ if (particle->num < psmd->mesh_final->totface) {
+ num = particle->num;
+ }
+ }
+ if (num != DMCACHE_NOTFOUND && num != DMCACHE_ISCHILD) {
+ MFace *mface = &psmd->mesh_final->mface[num];
+ for (int j = 0; j < num_uv_layers; j++) {
+ psys_interpolate_uvs(mtfaces[j] + num, mface->v4, particle->fuv, r_uv[j]);
+ }
+ }
}
-static void particle_calculate_parent_mcol(
- ParticleSystem *psys,
- ParticleSystemModifierData *psmd,
- const int num_uv_layers,
- const int parent_index,
- /*const*/ MCol **mcols,
- MCol *r_mcol)
+static void particle_calculate_parent_mcol(ParticleSystem *psys,
+ ParticleSystemModifierData *psmd,
+ const int num_uv_layers,
+ const int parent_index,
+ /*const*/ MCol **mcols,
+ MCol *r_mcol)
{
- if (psmd == NULL) {
- return;
- }
- const int emit_from = psmd->psys->part->from;
- if (!ELEM(emit_from, PART_FROM_FACE, PART_FROM_VOLUME)) {
- return;
- }
- ParticleData *particle = &psys->particles[parent_index];
- int num = particle->num_dmcache;
- if (num == DMCACHE_NOTFOUND || num == DMCACHE_ISCHILD) {
- if (particle->num < psmd->mesh_final->totface) {
- num = particle->num;
- }
- }
- if (num != DMCACHE_NOTFOUND && num != DMCACHE_ISCHILD) {
- MFace *mface = &psmd->mesh_final->mface[num];
- for (int j = 0; j < num_uv_layers; j++) {
- psys_interpolate_mcol(
- mcols[j] + num,
- mface->v4,
- particle->fuv,
- &r_mcol[j]);
- }
- }
+ if (psmd == NULL) {
+ return;
+ }
+ const int emit_from = psmd->psys->part->from;
+ if (!ELEM(emit_from, PART_FROM_FACE, PART_FROM_VOLUME)) {
+ return;
+ }
+ ParticleData *particle = &psys->particles[parent_index];
+ int num = particle->num_dmcache;
+ if (num == DMCACHE_NOTFOUND || num == DMCACHE_ISCHILD) {
+ if (particle->num < psmd->mesh_final->totface) {
+ num = particle->num;
+ }
+ }
+ if (num != DMCACHE_NOTFOUND && num != DMCACHE_ISCHILD) {
+ MFace *mface = &psmd->mesh_final->mface[num];
+ for (int j = 0; j < num_uv_layers; j++) {
+ psys_interpolate_mcol(mcols[j] + num, mface->v4, particle->fuv, &r_mcol[j]);
+ }
+ }
}
/* Used by interpolated children. */
-static void particle_interpolate_children_uvs(
- ParticleSystem *psys,
- ParticleSystemModifierData *psmd,
- const int num_uv_layers,
- const int child_index,
- /*const*/ MTFace **mtfaces,
- float (*r_uv)[2])
+static void particle_interpolate_children_uvs(ParticleSystem *psys,
+ ParticleSystemModifierData *psmd,
+ const int num_uv_layers,
+ const int child_index,
+ /*const*/ MTFace **mtfaces,
+ float (*r_uv)[2])
{
- if (psmd == NULL) {
- return;
- }
- const int emit_from = psmd->psys->part->from;
- if (!ELEM(emit_from, PART_FROM_FACE, PART_FROM_VOLUME)) {
- return;
- }
- ChildParticle *particle = &psys->child[child_index];
- int num = particle->num;
- if (num != DMCACHE_NOTFOUND) {
- MFace *mface = &psmd->mesh_final->mface[num];
- for (int j = 0; j < num_uv_layers; j++) {
- psys_interpolate_uvs(
- mtfaces[j] + num,
- mface->v4,
- particle->fuv,
- r_uv[j]);
- }
- }
+ if (psmd == NULL) {
+ return;
+ }
+ const int emit_from = psmd->psys->part->from;
+ if (!ELEM(emit_from, PART_FROM_FACE, PART_FROM_VOLUME)) {
+ return;
+ }
+ ChildParticle *particle = &psys->child[child_index];
+ int num = particle->num;
+ if (num != DMCACHE_NOTFOUND) {
+ MFace *mface = &psmd->mesh_final->mface[num];
+ for (int j = 0; j < num_uv_layers; j++) {
+ psys_interpolate_uvs(mtfaces[j] + num, mface->v4, particle->fuv, r_uv[j]);
+ }
+ }
}
-static void particle_interpolate_children_mcol(
- ParticleSystem *psys,
- ParticleSystemModifierData *psmd,
- const int num_col_layers,
- const int child_index,
- /*const*/ MCol **mcols,
- MCol *r_mcol)
+static void particle_interpolate_children_mcol(ParticleSystem *psys,
+ ParticleSystemModifierData *psmd,
+ const int num_col_layers,
+ const int child_index,
+ /*const*/ MCol **mcols,
+ MCol *r_mcol)
{
- if (psmd == NULL) {
- return;
- }
- const int emit_from = psmd->psys->part->from;
- if (!ELEM(emit_from, PART_FROM_FACE, PART_FROM_VOLUME)) {
- return;
- }
- ChildParticle *particle = &psys->child[child_index];
- int num = particle->num;
- if (num != DMCACHE_NOTFOUND) {
- MFace *mface = &psmd->mesh_final->mface[num];
- for (int j = 0; j < num_col_layers; j++) {
- psys_interpolate_mcol(
- mcols[j] + num,
- mface->v4,
- particle->fuv,
- &r_mcol[j]);
- }
- }
+ if (psmd == NULL) {
+ return;
+ }
+ const int emit_from = psmd->psys->part->from;
+ if (!ELEM(emit_from, PART_FROM_FACE, PART_FROM_VOLUME)) {
+ return;
+ }
+ ChildParticle *particle = &psys->child[child_index];
+ int num = particle->num;
+ if (num != DMCACHE_NOTFOUND) {
+ MFace *mface = &psmd->mesh_final->mface[num];
+ for (int j = 0; j < num_col_layers; j++) {
+ psys_interpolate_mcol(mcols[j] + num, mface->v4, particle->fuv, &r_mcol[j]);
+ }
+ }
}
-static void particle_calculate_uvs(
- ParticleSystem *psys,
- ParticleSystemModifierData *psmd,
- const bool is_simple,
- const int num_uv_layers,
- const int parent_index,
- const int child_index,
- /*const*/ MTFace **mtfaces,
- float (**r_parent_uvs)[2],
- float (**r_uv)[2])
+static void particle_calculate_uvs(ParticleSystem *psys,
+ ParticleSystemModifierData *psmd,
+ const bool is_simple,
+ const int num_uv_layers,
+ const int parent_index,
+ const int child_index,
+ /*const*/ MTFace **mtfaces,
+ float (**r_parent_uvs)[2],
+ float (**r_uv)[2])
{
- if (psmd == NULL) {
- return;
- }
- if (is_simple) {
- if (r_parent_uvs[parent_index] != NULL) {
- *r_uv = r_parent_uvs[parent_index];
- }
- else {
- *r_uv = MEM_callocN(sizeof(**r_uv) * num_uv_layers, "Particle UVs");
- }
- }
- else {
- *r_uv = MEM_callocN(sizeof(**r_uv) * num_uv_layers, "Particle UVs");
- }
- if (child_index == -1) {
- /* Calculate UVs for parent particles. */
- if (is_simple) {
- r_parent_uvs[parent_index] = *r_uv;
- }
- particle_calculate_parent_uvs(
- psys, psmd, num_uv_layers, parent_index, mtfaces, *r_uv);
- }
- else {
- /* Calculate UVs for child particles. */
- if (!is_simple) {
- particle_interpolate_children_uvs(
- psys, psmd, num_uv_layers, child_index, mtfaces, *r_uv);
- }
- else if (!r_parent_uvs[psys->child[child_index].parent]) {
- r_parent_uvs[psys->child[child_index].parent] = *r_uv;
- particle_calculate_parent_uvs(
- psys, psmd, num_uv_layers, parent_index, mtfaces, *r_uv);
- }
- }
+ if (psmd == NULL) {
+ return;
+ }
+ if (is_simple) {
+ if (r_parent_uvs[parent_index] != NULL) {
+ *r_uv = r_parent_uvs[parent_index];
+ }
+ else {
+ *r_uv = MEM_callocN(sizeof(**r_uv) * num_uv_layers, "Particle UVs");
+ }
+ }
+ else {
+ *r_uv = MEM_callocN(sizeof(**r_uv) * num_uv_layers, "Particle UVs");
+ }
+ if (child_index == -1) {
+ /* Calculate UVs for parent particles. */
+ if (is_simple) {
+ r_parent_uvs[parent_index] = *r_uv;
+ }
+ particle_calculate_parent_uvs(psys, psmd, num_uv_layers, parent_index, mtfaces, *r_uv);
+ }
+ else {
+ /* Calculate UVs for child particles. */
+ if (!is_simple) {
+ particle_interpolate_children_uvs(psys, psmd, num_uv_layers, child_index, mtfaces, *r_uv);
+ }
+ else if (!r_parent_uvs[psys->child[child_index].parent]) {
+ r_parent_uvs[psys->child[child_index].parent] = *r_uv;
+ particle_calculate_parent_uvs(psys, psmd, num_uv_layers, parent_index, mtfaces, *r_uv);
+ }
+ }
}
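For "simple" children, particle_calculate_uvs() above memoizes per parent: the first child of a parent computes the UV array, and every later sibling reuses the same pointer through r_parent_uvs. A minimal sketch of that memoization follows; the names are hypothetical.

#include <stdio.h>
#include <stdlib.h>

/* Per-loop UV pair, standing in for float[2]. */
typedef struct UV {
  float u, v;
} UV;

/* Compute the parent's UV array once, then hand out the cached pointer. */
static UV *parent_uv_get(UV **parent_cache, int parent, int num_layers)
{
  if (parent_cache[parent] == NULL) {
    parent_cache[parent] = calloc(num_layers, sizeof(UV));
    /* The expensive interpolation from the emitting face goes here. */
    parent_cache[parent][0].u = 0.5f; /* Dummy fill. */
  }
  return parent_cache[parent];
}

int main(void)
{
  UV *cache[2] = {NULL, NULL};
  UV *a = parent_uv_get(cache, 0, 1);
  UV *b = parent_uv_get(cache, 0, 1); /* Reused, not recomputed. */
  printf("shared=%d u=%.1f\n", a == b, a->u);
  free(cache[0]);
  free(cache[1]); /* free(NULL) is a no-op. */
  return 0;
}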
-static void particle_calculate_mcol(
- ParticleSystem *psys,
- ParticleSystemModifierData *psmd,
- const bool is_simple,
- const int num_col_layers,
- const int parent_index,
- const int child_index,
- /*const*/ MCol **mcols,
- MCol **r_parent_mcol,
- MCol **r_mcol)
+static void particle_calculate_mcol(ParticleSystem *psys,
+ ParticleSystemModifierData *psmd,
+ const bool is_simple,
+ const int num_col_layers,
+ const int parent_index,
+ const int child_index,
+ /*const*/ MCol **mcols,
+ MCol **r_parent_mcol,
+ MCol **r_mcol)
{
- if (psmd == NULL) {
- return;
- }
- if (is_simple) {
- if (r_parent_mcol[parent_index] != NULL) {
- *r_mcol = r_parent_mcol[parent_index];
- }
- else {
- *r_mcol = MEM_callocN(sizeof(**r_mcol) * num_col_layers, "Particle MCol");
- }
- }
- else {
- *r_mcol = MEM_callocN(sizeof(**r_mcol) * num_col_layers, "Particle MCol");
- }
- if (child_index == -1) {
- /* Calculate MCols for parent particles. */
- if (is_simple) {
- r_parent_mcol[parent_index] = *r_mcol;
- }
- particle_calculate_parent_mcol(
- psys, psmd, num_col_layers, parent_index, mcols, *r_mcol);
- }
- else {
- /* Calculate MCols for child particles. */
- if (!is_simple) {
- particle_interpolate_children_mcol(
- psys, psmd, num_col_layers, child_index, mcols, *r_mcol);
- }
- else if (!r_parent_mcol[psys->child[child_index].parent]) {
- r_parent_mcol[psys->child[child_index].parent] = *r_mcol;
- particle_calculate_parent_mcol(
- psys, psmd, num_col_layers, parent_index, mcols, *r_mcol);
- }
- }
+ if (psmd == NULL) {
+ return;
+ }
+ if (is_simple) {
+ if (r_parent_mcol[parent_index] != NULL) {
+ *r_mcol = r_parent_mcol[parent_index];
+ }
+ else {
+ *r_mcol = MEM_callocN(sizeof(**r_mcol) * num_col_layers, "Particle MCol");
+ }
+ }
+ else {
+ *r_mcol = MEM_callocN(sizeof(**r_mcol) * num_col_layers, "Particle MCol");
+ }
+ if (child_index == -1) {
+ /* Calculate MCols for parent particles. */
+ if (is_simple) {
+ r_parent_mcol[parent_index] = *r_mcol;
+ }
+ particle_calculate_parent_mcol(psys, psmd, num_col_layers, parent_index, mcols, *r_mcol);
+ }
+ else {
+ /* Calculate MCols for child particles. */
+ if (!is_simple) {
+ particle_interpolate_children_mcol(psys, psmd, num_col_layers, child_index, mcols, *r_mcol);
+ }
+ else if (!r_parent_mcol[psys->child[child_index].parent]) {
+ r_parent_mcol[psys->child[child_index].parent] = *r_mcol;
+ particle_calculate_parent_mcol(psys, psmd, num_col_layers, parent_index, mcols, *r_mcol);
+ }
+ }
}
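
A short sketch of the lazy sharing pattern both helpers above rely on, under assumed names: for simple children every child reuses its parent's per-layer array, which is computed at most once and cached.

#include <stdlib.h>

typedef float DemoUV[2];

/* Return the parent's cached UV array, allocating it on first use
 * (the analogue of the r_parent_uvs handling above). */
static DemoUV *demo_ensure_parent_uvs(DemoUV **parent_cache, int parent, int num_layers)
{
  if (parent_cache[parent] == NULL) {
    parent_cache[parent] = calloc((size_t)num_layers, sizeof(DemoUV));
    /* ... fill it here, as particle_calculate_parent_uvs() does ... */
  }
  return parent_cache[parent];
}

/* Mirror of the cleanup loops later in this file (MEM_SAFE_FREE analogue). */
static void demo_free_parent_uvs(DemoUV **parent_cache, int totpart)
{
  for (int i = 0; i < totpart; i++) {
    free(parent_cache[i]);
  }
  free(parent_cache);
}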
/* Will return last filled index. */
typedef enum ParticleSource {
- PARTICLE_SOURCE_PARENT,
- PARTICLE_SOURCE_CHILDREN,
+ PARTICLE_SOURCE_PARENT,
+ PARTICLE_SOURCE_CHILDREN,
} ParticleSource;
-static int particle_batch_cache_fill_segments(
- ParticleSystem *psys,
- ParticleSystemModifierData *psmd,
- ParticleCacheKey **path_cache,
- const ParticleSource particle_source,
- const int global_offset,
- const int start_index,
- const int num_path_keys,
- const int num_uv_layers,
- const int num_col_layers,
- /*const*/ MTFace **mtfaces,
- /*const*/ MCol **mcols,
- uint *uv_id,
- uint *col_id,
- float (***r_parent_uvs)[2],
- MCol ***r_parent_mcol,
- GPUIndexBufBuilder *elb,
- HairAttributeID *attr_id,
- ParticleHairCache *hair_cache)
+static int particle_batch_cache_fill_segments(ParticleSystem *psys,
+ ParticleSystemModifierData *psmd,
+ ParticleCacheKey **path_cache,
+ const ParticleSource particle_source,
+ const int global_offset,
+ const int start_index,
+ const int num_path_keys,
+ const int num_uv_layers,
+ const int num_col_layers,
+ /*const*/ MTFace **mtfaces,
+ /*const*/ MCol **mcols,
+ uint *uv_id,
+ uint *col_id,
+ float (***r_parent_uvs)[2],
+ MCol ***r_parent_mcol,
+ GPUIndexBufBuilder *elb,
+ HairAttributeID *attr_id,
+ ParticleHairCache *hair_cache)
{
- const bool is_simple = (psys->part->childtype == PART_CHILD_PARTICLES);
- const bool is_child = (particle_source == PARTICLE_SOURCE_CHILDREN);
- if (is_simple && *r_parent_uvs == NULL) {
- /* TODO(sergey): For edit mode it should be edit->totcached. */
- *r_parent_uvs = MEM_callocN(
- sizeof(*r_parent_uvs) * psys->totpart,
- "Parent particle UVs");
- }
- if (is_simple && *r_parent_mcol == NULL) {
- *r_parent_mcol = MEM_callocN(
- sizeof(*r_parent_mcol) * psys->totpart,
- "Parent particle MCol");
- }
- int curr_point = start_index;
- for (int i = 0; i < num_path_keys; i++) {
- ParticleCacheKey *path = path_cache[i];
- if (path->segments <= 0) {
- continue;
- }
- float tangent[3];
- float (*uv)[2] = NULL;
- MCol *mcol = NULL;
- particle_calculate_mcol(
- psys, psmd,
- is_simple, num_col_layers,
- is_child ? psys->child[i].parent : i,
- is_child ? i : -1,
- mcols,
- *r_parent_mcol, &mcol);
- particle_calculate_uvs(
- psys, psmd,
- is_simple, num_uv_layers,
- is_child ? psys->child[i].parent : i,
- is_child ? i : -1,
- mtfaces,
- *r_parent_uvs, &uv);
- for (int j = 0; j < path->segments; j++) {
- if (j == 0) {
- sub_v3_v3v3(tangent, path[j + 1].co, path[j].co);
- }
- else {
- sub_v3_v3v3(tangent, path[j + 1].co, path[j - 1].co);
- }
- GPU_vertbuf_attr_set(hair_cache->pos, attr_id->pos, curr_point, path[j].co);
- GPU_vertbuf_attr_set(hair_cache->pos, attr_id->tan, curr_point, tangent);
- GPU_vertbuf_attr_set(hair_cache->pos, attr_id->ind, curr_point, &i);
- if (psmd != NULL) {
- for (int k = 0; k < num_uv_layers; k++) {
- GPU_vertbuf_attr_set(
- hair_cache->pos, uv_id[k], curr_point,
- (is_simple && is_child) ?
- (*r_parent_uvs)[psys->child[i].parent][k] : uv[k]);
- }
- for (int k = 0; k < num_col_layers; k++) {
- /* TODO Put the conversion outside the loop */
- ushort scol[4];
- particle_pack_mcol(
- (is_simple && is_child) ?
- &(*r_parent_mcol)[psys->child[i].parent][k] : &mcol[k],
- scol);
- GPU_vertbuf_attr_set(hair_cache->pos, col_id[k], curr_point, scol);
- }
- }
- GPU_indexbuf_add_generic_vert(elb, curr_point);
- curr_point++;
- }
- sub_v3_v3v3(tangent, path[path->segments].co, path[path->segments - 1].co);
-
- int global_index = i + global_offset;
- GPU_vertbuf_attr_set(hair_cache->pos, attr_id->pos, curr_point, path[path->segments].co);
- GPU_vertbuf_attr_set(hair_cache->pos, attr_id->tan, curr_point, tangent);
- GPU_vertbuf_attr_set(hair_cache->pos, attr_id->ind, curr_point, &global_index);
-
- if (psmd != NULL) {
- for (int k = 0; k < num_uv_layers; k++) {
- GPU_vertbuf_attr_set(
- hair_cache->pos, uv_id[k], curr_point,
- (is_simple && is_child) ?
- (*r_parent_uvs)[psys->child[i].parent][k] : uv[k]);
- }
- for (int k = 0; k < num_col_layers; k++) {
- /* TODO Put the conversion outside the loop */
- ushort scol[4];
- particle_pack_mcol(
- (is_simple && is_child) ?
- &(*r_parent_mcol)[psys->child[i].parent][k] : &mcol[k],
- scol);
- GPU_vertbuf_attr_set(hair_cache->pos, col_id[k], curr_point, scol);
- }
- if (!is_simple) {
- MEM_freeN(uv);
- MEM_freeN(mcol);
- }
- }
- /* Finish the segment and add restart primitive. */
- GPU_indexbuf_add_generic_vert(elb, curr_point);
- GPU_indexbuf_add_primitive_restart(elb);
- curr_point++;
- }
- return curr_point;
+ const bool is_simple = (psys->part->childtype == PART_CHILD_PARTICLES);
+ const bool is_child = (particle_source == PARTICLE_SOURCE_CHILDREN);
+ if (is_simple && *r_parent_uvs == NULL) {
+ /* TODO(sergey): For edit mode it should be edit->totcached. */
+ *r_parent_uvs = MEM_callocN(sizeof(*r_parent_uvs) * psys->totpart, "Parent particle UVs");
+ }
+ if (is_simple && *r_parent_mcol == NULL) {
+ *r_parent_mcol = MEM_callocN(sizeof(*r_parent_mcol) * psys->totpart, "Parent particle MCol");
+ }
+ int curr_point = start_index;
+ for (int i = 0; i < num_path_keys; i++) {
+ ParticleCacheKey *path = path_cache[i];
+ if (path->segments <= 0) {
+ continue;
+ }
+ float tangent[3];
+ float(*uv)[2] = NULL;
+ MCol *mcol = NULL;
+ particle_calculate_mcol(psys,
+ psmd,
+ is_simple,
+ num_col_layers,
+ is_child ? psys->child[i].parent : i,
+ is_child ? i : -1,
+ mcols,
+ *r_parent_mcol,
+ &mcol);
+ particle_calculate_uvs(psys,
+ psmd,
+ is_simple,
+ num_uv_layers,
+ is_child ? psys->child[i].parent : i,
+ is_child ? i : -1,
+ mtfaces,
+ *r_parent_uvs,
+ &uv);
+ for (int j = 0; j < path->segments; j++) {
+ if (j == 0) {
+ sub_v3_v3v3(tangent, path[j + 1].co, path[j].co);
+ }
+ else {
+ sub_v3_v3v3(tangent, path[j + 1].co, path[j - 1].co);
+ }
+ GPU_vertbuf_attr_set(hair_cache->pos, attr_id->pos, curr_point, path[j].co);
+ GPU_vertbuf_attr_set(hair_cache->pos, attr_id->tan, curr_point, tangent);
+ GPU_vertbuf_attr_set(hair_cache->pos, attr_id->ind, curr_point, &i);
+ if (psmd != NULL) {
+ for (int k = 0; k < num_uv_layers; k++) {
+ GPU_vertbuf_attr_set(
+ hair_cache->pos,
+ uv_id[k],
+ curr_point,
+ (is_simple && is_child) ? (*r_parent_uvs)[psys->child[i].parent][k] : uv[k]);
+ }
+ for (int k = 0; k < num_col_layers; k++) {
+ /* TODO Put the conversion outside the loop */
+ ushort scol[4];
+ particle_pack_mcol(
+ (is_simple && is_child) ? &(*r_parent_mcol)[psys->child[i].parent][k] : &mcol[k],
+ scol);
+ GPU_vertbuf_attr_set(hair_cache->pos, col_id[k], curr_point, scol);
+ }
+ }
+ GPU_indexbuf_add_generic_vert(elb, curr_point);
+ curr_point++;
+ }
+ sub_v3_v3v3(tangent, path[path->segments].co, path[path->segments - 1].co);
+
+ int global_index = i + global_offset;
+ GPU_vertbuf_attr_set(hair_cache->pos, attr_id->pos, curr_point, path[path->segments].co);
+ GPU_vertbuf_attr_set(hair_cache->pos, attr_id->tan, curr_point, tangent);
+ GPU_vertbuf_attr_set(hair_cache->pos, attr_id->ind, curr_point, &global_index);
+
+ if (psmd != NULL) {
+ for (int k = 0; k < num_uv_layers; k++) {
+ GPU_vertbuf_attr_set(hair_cache->pos,
+ uv_id[k],
+ curr_point,
+ (is_simple && is_child) ? (*r_parent_uvs)[psys->child[i].parent][k] :
+ uv[k]);
+ }
+ for (int k = 0; k < num_col_layers; k++) {
+ /* TODO Put the conversion outside the loop */
+ ushort scol[4];
+ particle_pack_mcol((is_simple && is_child) ? &(*r_parent_mcol)[psys->child[i].parent][k] :
+ &mcol[k],
+ scol);
+ GPU_vertbuf_attr_set(hair_cache->pos, col_id[k], curr_point, scol);
+ }
+ if (!is_simple) {
+ MEM_freeN(uv);
+ MEM_freeN(mcol);
+ }
+ }
+ /* Finish the segment and add restart primitive. */
+ GPU_indexbuf_add_generic_vert(elb, curr_point);
+ GPU_indexbuf_add_primitive_restart(elb);
+ curr_point++;
+ }
+ return curr_point;
}
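
A standalone sketch of the tangent rule used in the vertex loop above, with illustrative names and data: forward difference at the first key, central difference for interior keys, and a backward difference at the strand tip.

#include <stdio.h>

typedef struct DemoKey {
  float co[3];
} DemoKey;

static void demo_sub_v3(float r[3], const float a[3], const float b[3])
{
  r[0] = a[0] - b[0];
  r[1] = a[1] - b[1];
  r[2] = a[2] - b[2];
}

static void demo_tangent(const DemoKey *path, int segments, int j, float r_tan[3])
{
  if (j == 0) {
    demo_sub_v3(r_tan, path[1].co, path[0].co); /* forward difference */
  }
  else if (j == segments) {
    demo_sub_v3(r_tan, path[j].co, path[j - 1].co); /* backward difference (tip) */
  }
  else {
    demo_sub_v3(r_tan, path[j + 1].co, path[j - 1].co); /* central difference */
  }
}

int main(void)
{
  const DemoKey path[3] = {{{0.0f, 0.0f, 0.0f}}, {{1.0f, 0.0f, 0.0f}}, {{2.0f, 1.0f, 0.0f}}};
  float tan[3];
  demo_tangent(path, 2, 1, tan);
  printf("tangent at middle key: %.1f %.1f %.1f\n", tan[0], tan[1], tan[2]);
  return 0;
}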
-static void particle_batch_cache_fill_segments_proc_pos(
- ParticleCacheKey **path_cache,
- const int num_path_keys,
- GPUVertBufRaw *attr_step)
+static void particle_batch_cache_fill_segments_proc_pos(ParticleCacheKey **path_cache,
+ const int num_path_keys,
+ GPUVertBufRaw *attr_step)
{
- for (int i = 0; i < num_path_keys; i++) {
- ParticleCacheKey *path = path_cache[i];
- if (path->segments <= 0) {
- continue;
- }
- float total_len = 0.0f;
- float *co_prev = NULL, *seg_data_first;
- for (int j = 0; j <= path->segments; j++) {
- float *seg_data = (float *)GPU_vertbuf_raw_step(attr_step);
- copy_v3_v3(seg_data, path[j].co);
- if (co_prev) {
- total_len += len_v3v3(co_prev, path[j].co);
- }
- else {
- seg_data_first = seg_data;
- }
- seg_data[3] = total_len;
- co_prev = path[j].co;
- }
- if (total_len > 0.0f) {
- /* Divide by total length to have a [0-1] number. */
- for (int j = 0; j <= path->segments; j++, seg_data_first += 4) {
- seg_data_first[3] /= total_len;
- }
- }
- }
+ for (int i = 0; i < num_path_keys; i++) {
+ ParticleCacheKey *path = path_cache[i];
+ if (path->segments <= 0) {
+ continue;
+ }
+ float total_len = 0.0f;
+    float *co_prev = NULL, *seg_data_first = NULL;
+ for (int j = 0; j <= path->segments; j++) {
+ float *seg_data = (float *)GPU_vertbuf_raw_step(attr_step);
+ copy_v3_v3(seg_data, path[j].co);
+ if (co_prev) {
+ total_len += len_v3v3(co_prev, path[j].co);
+ }
+ else {
+ seg_data_first = seg_data;
+ }
+ seg_data[3] = total_len;
+ co_prev = path[j].co;
+ }
+ if (total_len > 0.0f) {
+ /* Divide by total length to have a [0-1] number. */
+ for (int j = 0; j <= path->segments; j++, seg_data_first += 4) {
+ seg_data_first[3] /= total_len;
+ }
+ }
+ }
}
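
A sketch of the two-pass arc-length parameterization above, with plain arrays standing in for the GPUVertBufRaw stepping: the first pass accumulates the length into the fourth float of each key, the second divides by the total so the value lands in [0..1].

#include <math.h>

/* key[j][0..2] is the position, key[j][3] receives the normalized length. */
static void demo_param_strand(float (*key)[4], int key_count)
{
  float total = 0.0f;
  for (int j = 0; j < key_count; j++) {
    if (j > 0) {
      const float dx = key[j][0] - key[j - 1][0];
      const float dy = key[j][1] - key[j - 1][1];
      const float dz = key[j][2] - key[j - 1][2];
      total += sqrtf(dx * dx + dy * dy + dz * dz);
    }
    key[j][3] = total; /* accumulated length, pass one */
  }
  if (total > 0.0f) {
    for (int j = 0; j < key_count; j++) {
      key[j][3] /= total; /* normalize to [0..1], pass two */
    }
  }
}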
static float particle_key_select_ratio(const PTCacheEdit *edit, int strand, float t)
{
- const PTCacheEditPoint *point = &edit->points[strand];
- float edit_key_seg_t = 1.0f / (point->totkey - 1);
- if (t == 1.0) {
- return (point->keys[point->totkey - 1].flag & PEK_SELECT) ? 1.0f : 0.0;
- }
- else {
- float interp = t / edit_key_seg_t;
- int index = (int)interp;
- interp -= floorf(interp); /* Time between 2 edit key */
- float s1 = (point->keys[index].flag & PEK_SELECT) ? 1.0f : 0.0;
- float s2 = (point->keys[index + 1].flag & PEK_SELECT) ? 1.0f : 0.0;
- return s1 + interp * (s2 - s1);
- }
+ const PTCacheEditPoint *point = &edit->points[strand];
+ float edit_key_seg_t = 1.0f / (point->totkey - 1);
+  if (t == 1.0f) {
+    return (point->keys[point->totkey - 1].flag & PEK_SELECT) ? 1.0f : 0.0f;
+  }
+  else {
+    float interp = t / edit_key_seg_t;
+    int index = (int)interp;
+    interp -= floorf(interp); /* Time between 2 edit keys. */
+    float s1 = (point->keys[index].flag & PEK_SELECT) ? 1.0f : 0.0f;
+    float s2 = (point->keys[index + 1].flag & PEK_SELECT) ? 1.0f : 0.0f;
+    return s1 + interp * (s2 - s1);
+ }
}
static float particle_key_weight(const ParticleData *particle, int strand, float t)
{
- const ParticleData *part = particle + strand;
- const HairKey *hkeys = part->hair;
- float edit_key_seg_t = 1.0f / (part->totkey - 1);
- if (t == 1.0) {
- return hkeys[part->totkey - 1].weight;
- }
- else {
- float interp = t / edit_key_seg_t;
- int index = (int)interp;
- interp -= floorf(interp); /* Time between 2 edit key */
- float s1 = hkeys[index].weight;
- float s2 = hkeys[index + 1].weight;
- return s1 + interp * (s2 - s1);
- }
+ const ParticleData *part = particle + strand;
+ const HairKey *hkeys = part->hair;
+ float edit_key_seg_t = 1.0f / (part->totkey - 1);
+  if (t == 1.0f) {
+    return hkeys[part->totkey - 1].weight;
+  }
+  else {
+    float interp = t / edit_key_seg_t;
+    int index = (int)interp;
+    interp -= floorf(interp); /* Time between 2 edit keys. */
+ float s1 = hkeys[index].weight;
+ float s2 = hkeys[index + 1].weight;
+ return s1 + interp * (s2 - s1);
+ }
}
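
Both helpers above share the same lookup, sketched here with assumed names: map a parametric t in [0..1] to a key interval, then linearly interpolate between the two bounding keys.

#include <math.h>

/* `key_values` stands in for per-key select flags or hair-key weights. */
static float demo_key_lerp(const float *key_values, int totkey, float t)
{
  if (t >= 1.0f) {
    return key_values[totkey - 1];
  }
  const float seg_t = 1.0f / (float)(totkey - 1);
  float interp = t / seg_t;
  const int index = (int)interp;
  interp -= floorf(interp); /* fraction between the two keys */
  return key_values[index] + interp * (key_values[index + 1] - key_values[index]);
}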
static int particle_batch_cache_fill_segments_edit(
- const PTCacheEdit *edit, /* NULL for weight data */
- const ParticleData *particle, /* NULL for select data */
- ParticleCacheKey **path_cache,
- const int start_index,
- const int num_path_keys,
- GPUIndexBufBuilder *elb,
- GPUVertBufRaw *attr_step)
+ const PTCacheEdit *edit, /* NULL for weight data */
+ const ParticleData *particle, /* NULL for select data */
+ ParticleCacheKey **path_cache,
+ const int start_index,
+ const int num_path_keys,
+ GPUIndexBufBuilder *elb,
+ GPUVertBufRaw *attr_step)
{
- int curr_point = start_index;
- for (int i = 0; i < num_path_keys; i++) {
- ParticleCacheKey *path = path_cache[i];
- if (path->segments <= 0) {
- continue;
- }
- for (int j = 0; j <= path->segments; j++) {
- EditStrandData *seg_data = (EditStrandData *)GPU_vertbuf_raw_step(attr_step);
- copy_v3_v3(seg_data->pos, path[j].co);
- float strand_t = (float)(j) / path->segments;
- if (particle) {
- float weight = particle_key_weight(particle, i, strand_t);
- /* NaN or unclamped become 0xFF */
- seg_data->color = (uchar)((weight <= 1.0f) ? 0xFE * weight : 0xFF);
- }
- else {
- float selected = particle_key_select_ratio(edit, i, strand_t);
- seg_data->color = (uchar)(0xFF * selected);
- }
- GPU_indexbuf_add_generic_vert(elb, curr_point);
- curr_point++;
- }
- /* Finish the segment and add restart primitive. */
- GPU_indexbuf_add_primitive_restart(elb);
- }
- return curr_point;
+ int curr_point = start_index;
+ for (int i = 0; i < num_path_keys; i++) {
+ ParticleCacheKey *path = path_cache[i];
+ if (path->segments <= 0) {
+ continue;
+ }
+ for (int j = 0; j <= path->segments; j++) {
+ EditStrandData *seg_data = (EditStrandData *)GPU_vertbuf_raw_step(attr_step);
+ copy_v3_v3(seg_data->pos, path[j].co);
+ float strand_t = (float)(j) / path->segments;
+ if (particle) {
+ float weight = particle_key_weight(particle, i, strand_t);
+        /* NaN or unclamped values become 0xFF. */
+ seg_data->color = (uchar)((weight <= 1.0f) ? 0xFE * weight : 0xFF);
+ }
+ else {
+ float selected = particle_key_select_ratio(edit, i, strand_t);
+ seg_data->color = (uchar)(0xFF * selected);
+ }
+ GPU_indexbuf_add_generic_vert(elb, curr_point);
+ curr_point++;
+ }
+ /* Finish the segment and add restart primitive. */
+ GPU_indexbuf_add_primitive_restart(elb);
+ }
+ return curr_point;
}
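
A sketch of the color encoding above: a weight in [0..1] scales to 0..0xFE, so 0xFF is left free to flag bad input.

/* NaN fails the `<=` comparison, so it also lands on 0xFF. */
static unsigned char demo_encode_weight(float weight)
{
  return (unsigned char)((weight <= 1.0f) ? 0xFE * weight : 0xFF);
}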
-static int particle_batch_cache_fill_segments_indices(
- ParticleCacheKey **path_cache,
- const int start_index,
- const int num_path_keys,
- const int res,
- GPUIndexBufBuilder *elb)
+static int particle_batch_cache_fill_segments_indices(ParticleCacheKey **path_cache,
+ const int start_index,
+ const int num_path_keys,
+ const int res,
+ GPUIndexBufBuilder *elb)
{
- int curr_point = start_index;
- for (int i = 0; i < num_path_keys; i++) {
- ParticleCacheKey *path = path_cache[i];
- if (path->segments <= 0) {
- continue;
- }
- for (int k = 0; k < res; k++) {
- GPU_indexbuf_add_generic_vert(elb, curr_point++);
- }
- GPU_indexbuf_add_primitive_restart(elb);
- }
- return curr_point;
+ int curr_point = start_index;
+ for (int i = 0; i < num_path_keys; i++) {
+ ParticleCacheKey *path = path_cache[i];
+ if (path->segments <= 0) {
+ continue;
+ }
+ for (int k = 0; k < res; k++) {
+ GPU_indexbuf_add_generic_vert(elb, curr_point++);
+ }
+ GPU_indexbuf_add_primitive_restart(elb);
+ }
+ return curr_point;
}
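
A sketch of the index stream the builder above produces, with an illustrative restart sentinel: consecutive vertex indices per strand, separated by a restart so a single draw call renders many line strips.

#define DEMO_RESTART 0xFFFFFFFFu

/* Emit `verts_per_strand` consecutive indices per strand, each strip closed
 * by a restart sentinel, and return the last filled index as above. */
static int demo_fill_indices(unsigned int *out, int start, int strands, int verts_per_strand)
{
  int n = 0;
  int curr = start;
  for (int i = 0; i < strands; i++) {
    for (int k = 0; k < verts_per_strand; k++) {
      out[n++] = (unsigned int)curr++;
    }
    out[n++] = DEMO_RESTART;
  }
  return curr;
}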
-static int particle_batch_cache_fill_strands_data(
- ParticleSystem *psys,
- ParticleSystemModifierData *psmd,
- ParticleCacheKey **path_cache,
- const ParticleSource particle_source,
- const int start_index,
- const int num_path_keys,
- GPUVertBufRaw *data_step, GPUVertBufRaw *seg_step,
- float (***r_parent_uvs)[2], GPUVertBufRaw *uv_step, MTFace **mtfaces, int num_uv_layers,
- MCol ***r_parent_mcol, GPUVertBufRaw *col_step, MCol **mcols, int num_col_layers)
+static int particle_batch_cache_fill_strands_data(ParticleSystem *psys,
+ ParticleSystemModifierData *psmd,
+ ParticleCacheKey **path_cache,
+ const ParticleSource particle_source,
+ const int start_index,
+ const int num_path_keys,
+ GPUVertBufRaw *data_step,
+ GPUVertBufRaw *seg_step,
+ float (***r_parent_uvs)[2],
+ GPUVertBufRaw *uv_step,
+ MTFace **mtfaces,
+ int num_uv_layers,
+ MCol ***r_parent_mcol,
+ GPUVertBufRaw *col_step,
+ MCol **mcols,
+ int num_col_layers)
{
- const bool is_simple = (psys->part->childtype == PART_CHILD_PARTICLES);
- const bool is_child = (particle_source == PARTICLE_SOURCE_CHILDREN);
- if (is_simple && *r_parent_uvs == NULL) {
- /* TODO(sergey): For edit mode it should be edit->totcached. */
- *r_parent_uvs = MEM_callocN(
- sizeof(*r_parent_uvs) * psys->totpart,
- "Parent particle UVs");
- }
- if (is_simple && *r_parent_mcol == NULL) {
- *r_parent_mcol = MEM_callocN(
- sizeof(*r_parent_mcol) * psys->totpart,
- "Parent particle MCol");
- }
- int curr_point = start_index;
- for (int i = 0; i < num_path_keys; i++) {
- ParticleCacheKey *path = path_cache[i];
- if (path->segments <= 0) {
- continue;
- }
-
- *(uint *)GPU_vertbuf_raw_step(data_step) = curr_point;
- *(ushort *)GPU_vertbuf_raw_step(seg_step) = path->segments;
- curr_point += path->segments + 1;
-
- if (psmd != NULL) {
- float (*uv)[2] = NULL;
- MCol *mcol = NULL;
-
- particle_calculate_uvs(
- psys, psmd,
- is_simple, num_uv_layers,
- is_child ? psys->child[i].parent : i,
- is_child ? i : -1,
- mtfaces,
- *r_parent_uvs, &uv);
-
- particle_calculate_mcol(
- psys, psmd,
- is_simple, num_col_layers,
- is_child ? psys->child[i].parent : i,
- is_child ? i : -1,
- mcols,
- *r_parent_mcol, &mcol);
-
- for (int k = 0; k < num_uv_layers; k++) {
- float *t_uv = (float *)GPU_vertbuf_raw_step(uv_step + k);
- copy_v2_v2(t_uv, uv[k]);
- }
- for (int k = 0; k < num_col_layers; k++) {
- ushort *scol = (ushort *)GPU_vertbuf_raw_step(col_step + k);
- particle_pack_mcol(
- (is_simple && is_child) ?
- &(*r_parent_mcol)[psys->child[i].parent][k] : &mcol[k],
- scol);
- }
- if (!is_simple) {
- MEM_freeN(uv);
- MEM_freeN(mcol);
- }
- }
- }
- return curr_point;
+ const bool is_simple = (psys->part->childtype == PART_CHILD_PARTICLES);
+ const bool is_child = (particle_source == PARTICLE_SOURCE_CHILDREN);
+ if (is_simple && *r_parent_uvs == NULL) {
+ /* TODO(sergey): For edit mode it should be edit->totcached. */
+ *r_parent_uvs = MEM_callocN(sizeof(*r_parent_uvs) * psys->totpart, "Parent particle UVs");
+ }
+ if (is_simple && *r_parent_mcol == NULL) {
+ *r_parent_mcol = MEM_callocN(sizeof(*r_parent_mcol) * psys->totpart, "Parent particle MCol");
+ }
+ int curr_point = start_index;
+ for (int i = 0; i < num_path_keys; i++) {
+ ParticleCacheKey *path = path_cache[i];
+ if (path->segments <= 0) {
+ continue;
+ }
+
+ *(uint *)GPU_vertbuf_raw_step(data_step) = curr_point;
+ *(ushort *)GPU_vertbuf_raw_step(seg_step) = path->segments;
+ curr_point += path->segments + 1;
+
+ if (psmd != NULL) {
+ float(*uv)[2] = NULL;
+ MCol *mcol = NULL;
+
+ particle_calculate_uvs(psys,
+ psmd,
+ is_simple,
+ num_uv_layers,
+ is_child ? psys->child[i].parent : i,
+ is_child ? i : -1,
+ mtfaces,
+ *r_parent_uvs,
+ &uv);
+
+ particle_calculate_mcol(psys,
+ psmd,
+ is_simple,
+ num_col_layers,
+ is_child ? psys->child[i].parent : i,
+ is_child ? i : -1,
+ mcols,
+ *r_parent_mcol,
+ &mcol);
+
+ for (int k = 0; k < num_uv_layers; k++) {
+ float *t_uv = (float *)GPU_vertbuf_raw_step(uv_step + k);
+ copy_v2_v2(t_uv, uv[k]);
+ }
+ for (int k = 0; k < num_col_layers; k++) {
+ ushort *scol = (ushort *)GPU_vertbuf_raw_step(col_step + k);
+ particle_pack_mcol((is_simple && is_child) ? &(*r_parent_mcol)[psys->child[i].parent][k] :
+ &mcol[k],
+ scol);
+ }
+ if (!is_simple) {
+ MEM_freeN(uv);
+ MEM_freeN(mcol);
+ }
+ }
+ }
+ return curr_point;
}
-static void particle_batch_cache_ensure_procedural_final_points(
- ParticleHairCache *cache,
- int subdiv)
+static void particle_batch_cache_ensure_procedural_final_points(ParticleHairCache *cache,
+ int subdiv)
{
- /* Same format as point_tex. */
- GPUVertFormat format = { 0 };
- GPU_vertformat_attr_add(&format, "pos", GPU_COMP_F32, 4, GPU_FETCH_FLOAT);
+ /* Same format as point_tex. */
+ GPUVertFormat format = {0};
+ GPU_vertformat_attr_add(&format, "pos", GPU_COMP_F32, 4, GPU_FETCH_FLOAT);
- cache->final[subdiv].proc_buf = GPU_vertbuf_create_with_format(&format);
+ cache->final[subdiv].proc_buf = GPU_vertbuf_create_with_format(&format);
- /* Create a destination buffer for the transform feedback. Sized appropriately */
- /* Those are points! not line segments. */
- GPU_vertbuf_data_alloc(cache->final[subdiv].proc_buf, cache->final[subdiv].strands_res * cache->strands_len);
+  /* Create an appropriately sized destination buffer for the transform feedback. */
+  /* Those are points, not line segments. */
+ GPU_vertbuf_data_alloc(cache->final[subdiv].proc_buf,
+ cache->final[subdiv].strands_res * cache->strands_len);
- /* Create vbo immediately to bind to texture buffer. */
- GPU_vertbuf_use(cache->final[subdiv].proc_buf);
+ /* Create vbo immediately to bind to texture buffer. */
+ GPU_vertbuf_use(cache->final[subdiv].proc_buf);
- cache->final[subdiv].proc_tex = GPU_texture_create_from_vertbuf(cache->final[subdiv].proc_buf);
+ cache->final[subdiv].proc_tex = GPU_texture_create_from_vertbuf(cache->final[subdiv].proc_buf);
}
-static void particle_batch_cache_ensure_procedural_strand_data(
- PTCacheEdit *edit,
- ParticleSystem *psys,
- ModifierData *md,
- ParticleHairCache *cache)
+static void particle_batch_cache_ensure_procedural_strand_data(PTCacheEdit *edit,
+ ParticleSystem *psys,
+ ModifierData *md,
+ ParticleHairCache *cache)
{
- int active_uv = 0;
- int active_col = 0;
-
- ParticleSystemModifierData *psmd = (ParticleSystemModifierData *)md;
-
- if (psmd != NULL && psmd->mesh_final != NULL) {
- if (CustomData_has_layer(&psmd->mesh_final->ldata, CD_MLOOPUV)) {
- cache->num_uv_layers = CustomData_number_of_layers(&psmd->mesh_final->ldata, CD_MLOOPUV);
- active_uv = CustomData_get_active_layer(&psmd->mesh_final->ldata, CD_MLOOPUV);
- }
- if (CustomData_has_layer(&psmd->mesh_final->ldata, CD_MLOOPCOL)) {
- cache->num_col_layers = CustomData_number_of_layers(&psmd->mesh_final->ldata, CD_MLOOPCOL);
- active_col = CustomData_get_active_layer(&psmd->mesh_final->ldata, CD_MLOOPCOL);
- }
- }
-
- GPUVertBufRaw data_step, seg_step;
- GPUVertBufRaw uv_step[MAX_MTFACE];
- GPUVertBufRaw col_step[MAX_MCOL];
-
- MTFace *mtfaces[MAX_MTFACE] = {NULL};
- MCol *mcols[MAX_MCOL] = {NULL};
- float (**parent_uvs)[2] = NULL;
- MCol **parent_mcol = NULL;
-
- GPUVertFormat format_data = {0};
- uint data_id = GPU_vertformat_attr_add(&format_data, "data", GPU_COMP_U32, 1, GPU_FETCH_INT);
-
- GPUVertFormat format_seg = {0};
- uint seg_id = GPU_vertformat_attr_add(&format_seg, "data", GPU_COMP_U16, 1, GPU_FETCH_INT);
-
- GPUVertFormat format_uv = {0};
- uint uv_id = GPU_vertformat_attr_add(&format_uv, "uv", GPU_COMP_F32, 2, GPU_FETCH_FLOAT);
-
- GPUVertFormat format_col = {0};
- uint col_id = GPU_vertformat_attr_add(&format_col, "col", GPU_COMP_U16, 4, GPU_FETCH_INT_TO_FLOAT_UNIT);
-
- memset(cache->uv_layer_names, 0, sizeof(cache->uv_layer_names));
- memset(cache->col_layer_names, 0, sizeof(cache->col_layer_names));
-
- /* Strand Data */
- cache->proc_strand_buf = GPU_vertbuf_create_with_format(&format_data);
- GPU_vertbuf_data_alloc(cache->proc_strand_buf, cache->strands_len);
- GPU_vertbuf_attr_get_raw_data(cache->proc_strand_buf, data_id, &data_step);
-
- cache->proc_strand_seg_buf = GPU_vertbuf_create_with_format(&format_seg);
- GPU_vertbuf_data_alloc(cache->proc_strand_seg_buf, cache->strands_len);
- GPU_vertbuf_attr_get_raw_data(cache->proc_strand_seg_buf, seg_id, &seg_step);
-
- /* UV layers */
- for (int i = 0; i < cache->num_uv_layers; i++) {
- cache->proc_uv_buf[i] = GPU_vertbuf_create_with_format(&format_uv);
- GPU_vertbuf_data_alloc(cache->proc_uv_buf[i], cache->strands_len);
- GPU_vertbuf_attr_get_raw_data(cache->proc_uv_buf[i], uv_id, &uv_step[i]);
-
- const char *name = CustomData_get_layer_name(&psmd->mesh_final->ldata, CD_MLOOPUV, i);
- uint hash = BLI_ghashutil_strhash_p(name);
- int n = 0;
- BLI_snprintf(cache->uv_layer_names[i][n++], MAX_LAYER_NAME_LEN, "u%u", hash);
- BLI_snprintf(cache->uv_layer_names[i][n++], MAX_LAYER_NAME_LEN, "a%u", hash);
-
- if (i == active_uv) {
- BLI_strncpy(cache->uv_layer_names[i][n], "u", MAX_LAYER_NAME_LEN);
- }
- }
- /* Vertex colors */
- for (int i = 0; i < cache->num_col_layers; i++) {
- cache->proc_col_buf[i] = GPU_vertbuf_create_with_format(&format_col);
- GPU_vertbuf_data_alloc(cache->proc_col_buf[i], cache->strands_len);
- GPU_vertbuf_attr_get_raw_data(cache->proc_col_buf[i], col_id, &col_step[i]);
-
- const char *name = CustomData_get_layer_name(&psmd->mesh_final->ldata, CD_MLOOPCOL, i);
- uint hash = BLI_ghashutil_strhash_p(name);
- int n = 0;
- BLI_snprintf(cache->col_layer_names[i][n++], MAX_LAYER_NAME_LEN, "c%u", hash);
-
- /* We only do vcols auto name that are not overridden by uvs */
- if (CustomData_get_named_layer_index(&psmd->mesh_final->ldata, CD_MLOOPUV, name) == -1) {
- BLI_snprintf(cache->col_layer_names[i][n++], MAX_LAYER_NAME_LEN, "a%u", hash);
- }
-
- if (i == active_col) {
- BLI_strncpy(cache->col_layer_names[i][n], "c", MAX_LAYER_NAME_LEN);
- }
- }
-
- if (cache->num_uv_layers || cache->num_col_layers) {
- BKE_mesh_tessface_ensure(psmd->mesh_final);
- if (cache->num_uv_layers) {
- for (int j = 0; j < cache->num_uv_layers; j++) {
- mtfaces[j] = (MTFace *)CustomData_get_layer_n(&psmd->mesh_final->fdata, CD_MTFACE, j);
- }
- }
- if (cache->num_col_layers) {
- for (int j = 0; j < cache->num_col_layers; j++) {
- mcols[j] = (MCol *)CustomData_get_layer_n(&psmd->mesh_final->fdata, CD_MCOL, j);
- }
- }
- }
-
- if (edit != NULL && edit->pathcache != NULL) {
- particle_batch_cache_fill_strands_data(
- psys, psmd, edit->pathcache, PARTICLE_SOURCE_PARENT,
- 0, edit->totcached,
- &data_step, &seg_step,
- &parent_uvs, uv_step, (MTFace **)mtfaces, cache->num_uv_layers,
- &parent_mcol, col_step, (MCol **)mcols, cache->num_col_layers);
- }
- else {
- int curr_point = 0;
- if ((psys->pathcache != NULL) &&
- (!psys->childcache || (psys->part->draw & PART_DRAW_PARENT)))
- {
- curr_point = particle_batch_cache_fill_strands_data(
- psys, psmd, psys->pathcache, PARTICLE_SOURCE_PARENT,
- 0, psys->totpart,
- &data_step, &seg_step,
- &parent_uvs, uv_step, (MTFace **)mtfaces, cache->num_uv_layers,
- &parent_mcol, col_step, (MCol **)mcols, cache->num_col_layers);
- }
- if (psys->childcache) {
- const int child_count = psys->totchild * psys->part->disp / 100;
- curr_point = particle_batch_cache_fill_strands_data(
- psys, psmd, psys->childcache, PARTICLE_SOURCE_CHILDREN,
- curr_point, child_count,
- &data_step, &seg_step,
- &parent_uvs, uv_step, (MTFace **)mtfaces, cache->num_uv_layers,
- &parent_mcol, col_step, (MCol **)mcols, cache->num_col_layers);
- }
- }
- /* Cleanup. */
- if (parent_uvs != NULL) {
- /* TODO(sergey): For edit mode it should be edit->totcached. */
- for (int i = 0; i < psys->totpart; i++) {
- MEM_SAFE_FREE(parent_uvs[i]);
- }
- MEM_freeN(parent_uvs);
- }
- if (parent_mcol != NULL) {
- for (int i = 0; i < psys->totpart; i++) {
- MEM_SAFE_FREE(parent_mcol[i]);
- }
- MEM_freeN(parent_mcol);
- }
-
- /* Create vbo immediately to bind to texture buffer. */
- GPU_vertbuf_use(cache->proc_strand_buf);
- cache->strand_tex = GPU_texture_create_from_vertbuf(cache->proc_strand_buf);
-
- GPU_vertbuf_use(cache->proc_strand_seg_buf);
- cache->strand_seg_tex = GPU_texture_create_from_vertbuf(cache->proc_strand_seg_buf);
-
- for (int i = 0; i < cache->num_uv_layers; i++) {
- GPU_vertbuf_use(cache->proc_uv_buf[i]);
- cache->uv_tex[i] = GPU_texture_create_from_vertbuf(cache->proc_uv_buf[i]);
- }
- for (int i = 0; i < cache->num_col_layers; i++) {
- GPU_vertbuf_use(cache->proc_col_buf[i]);
- cache->col_tex[i] = GPU_texture_create_from_vertbuf(cache->proc_col_buf[i]);
- }
+ int active_uv = 0;
+ int active_col = 0;
+
+ ParticleSystemModifierData *psmd = (ParticleSystemModifierData *)md;
+
+ if (psmd != NULL && psmd->mesh_final != NULL) {
+ if (CustomData_has_layer(&psmd->mesh_final->ldata, CD_MLOOPUV)) {
+ cache->num_uv_layers = CustomData_number_of_layers(&psmd->mesh_final->ldata, CD_MLOOPUV);
+ active_uv = CustomData_get_active_layer(&psmd->mesh_final->ldata, CD_MLOOPUV);
+ }
+ if (CustomData_has_layer(&psmd->mesh_final->ldata, CD_MLOOPCOL)) {
+ cache->num_col_layers = CustomData_number_of_layers(&psmd->mesh_final->ldata, CD_MLOOPCOL);
+ active_col = CustomData_get_active_layer(&psmd->mesh_final->ldata, CD_MLOOPCOL);
+ }
+ }
+
+ GPUVertBufRaw data_step, seg_step;
+ GPUVertBufRaw uv_step[MAX_MTFACE];
+ GPUVertBufRaw col_step[MAX_MCOL];
+
+ MTFace *mtfaces[MAX_MTFACE] = {NULL};
+ MCol *mcols[MAX_MCOL] = {NULL};
+ float(**parent_uvs)[2] = NULL;
+ MCol **parent_mcol = NULL;
+
+ GPUVertFormat format_data = {0};
+ uint data_id = GPU_vertformat_attr_add(&format_data, "data", GPU_COMP_U32, 1, GPU_FETCH_INT);
+
+ GPUVertFormat format_seg = {0};
+ uint seg_id = GPU_vertformat_attr_add(&format_seg, "data", GPU_COMP_U16, 1, GPU_FETCH_INT);
+
+ GPUVertFormat format_uv = {0};
+ uint uv_id = GPU_vertformat_attr_add(&format_uv, "uv", GPU_COMP_F32, 2, GPU_FETCH_FLOAT);
+
+ GPUVertFormat format_col = {0};
+ uint col_id = GPU_vertformat_attr_add(
+ &format_col, "col", GPU_COMP_U16, 4, GPU_FETCH_INT_TO_FLOAT_UNIT);
+
+ memset(cache->uv_layer_names, 0, sizeof(cache->uv_layer_names));
+ memset(cache->col_layer_names, 0, sizeof(cache->col_layer_names));
+
+ /* Strand Data */
+ cache->proc_strand_buf = GPU_vertbuf_create_with_format(&format_data);
+ GPU_vertbuf_data_alloc(cache->proc_strand_buf, cache->strands_len);
+ GPU_vertbuf_attr_get_raw_data(cache->proc_strand_buf, data_id, &data_step);
+
+ cache->proc_strand_seg_buf = GPU_vertbuf_create_with_format(&format_seg);
+ GPU_vertbuf_data_alloc(cache->proc_strand_seg_buf, cache->strands_len);
+ GPU_vertbuf_attr_get_raw_data(cache->proc_strand_seg_buf, seg_id, &seg_step);
+
+ /* UV layers */
+ for (int i = 0; i < cache->num_uv_layers; i++) {
+ cache->proc_uv_buf[i] = GPU_vertbuf_create_with_format(&format_uv);
+ GPU_vertbuf_data_alloc(cache->proc_uv_buf[i], cache->strands_len);
+ GPU_vertbuf_attr_get_raw_data(cache->proc_uv_buf[i], uv_id, &uv_step[i]);
+
+ const char *name = CustomData_get_layer_name(&psmd->mesh_final->ldata, CD_MLOOPUV, i);
+ uint hash = BLI_ghashutil_strhash_p(name);
+ int n = 0;
+ BLI_snprintf(cache->uv_layer_names[i][n++], MAX_LAYER_NAME_LEN, "u%u", hash);
+ BLI_snprintf(cache->uv_layer_names[i][n++], MAX_LAYER_NAME_LEN, "a%u", hash);
+
+ if (i == active_uv) {
+ BLI_strncpy(cache->uv_layer_names[i][n], "u", MAX_LAYER_NAME_LEN);
+ }
+ }
+ /* Vertex colors */
+ for (int i = 0; i < cache->num_col_layers; i++) {
+ cache->proc_col_buf[i] = GPU_vertbuf_create_with_format(&format_col);
+ GPU_vertbuf_data_alloc(cache->proc_col_buf[i], cache->strands_len);
+ GPU_vertbuf_attr_get_raw_data(cache->proc_col_buf[i], col_id, &col_step[i]);
+
+ const char *name = CustomData_get_layer_name(&psmd->mesh_final->ldata, CD_MLOOPCOL, i);
+ uint hash = BLI_ghashutil_strhash_p(name);
+ int n = 0;
+ BLI_snprintf(cache->col_layer_names[i][n++], MAX_LAYER_NAME_LEN, "c%u", hash);
+
+    /* Only auto-name vcol layers that are not overridden by UV layers. */
+ if (CustomData_get_named_layer_index(&psmd->mesh_final->ldata, CD_MLOOPUV, name) == -1) {
+ BLI_snprintf(cache->col_layer_names[i][n++], MAX_LAYER_NAME_LEN, "a%u", hash);
+ }
+
+ if (i == active_col) {
+ BLI_strncpy(cache->col_layer_names[i][n], "c", MAX_LAYER_NAME_LEN);
+ }
+ }
+
+ if (cache->num_uv_layers || cache->num_col_layers) {
+ BKE_mesh_tessface_ensure(psmd->mesh_final);
+ if (cache->num_uv_layers) {
+ for (int j = 0; j < cache->num_uv_layers; j++) {
+ mtfaces[j] = (MTFace *)CustomData_get_layer_n(&psmd->mesh_final->fdata, CD_MTFACE, j);
+ }
+ }
+ if (cache->num_col_layers) {
+ for (int j = 0; j < cache->num_col_layers; j++) {
+ mcols[j] = (MCol *)CustomData_get_layer_n(&psmd->mesh_final->fdata, CD_MCOL, j);
+ }
+ }
+ }
+
+ if (edit != NULL && edit->pathcache != NULL) {
+ particle_batch_cache_fill_strands_data(psys,
+ psmd,
+ edit->pathcache,
+ PARTICLE_SOURCE_PARENT,
+ 0,
+ edit->totcached,
+ &data_step,
+ &seg_step,
+ &parent_uvs,
+ uv_step,
+ (MTFace **)mtfaces,
+ cache->num_uv_layers,
+ &parent_mcol,
+ col_step,
+ (MCol **)mcols,
+ cache->num_col_layers);
+ }
+ else {
+ int curr_point = 0;
+ if ((psys->pathcache != NULL) &&
+ (!psys->childcache || (psys->part->draw & PART_DRAW_PARENT))) {
+ curr_point = particle_batch_cache_fill_strands_data(psys,
+ psmd,
+ psys->pathcache,
+ PARTICLE_SOURCE_PARENT,
+ 0,
+ psys->totpart,
+ &data_step,
+ &seg_step,
+ &parent_uvs,
+ uv_step,
+ (MTFace **)mtfaces,
+ cache->num_uv_layers,
+ &parent_mcol,
+ col_step,
+ (MCol **)mcols,
+ cache->num_col_layers);
+ }
+ if (psys->childcache) {
+ const int child_count = psys->totchild * psys->part->disp / 100;
+ curr_point = particle_batch_cache_fill_strands_data(psys,
+ psmd,
+ psys->childcache,
+ PARTICLE_SOURCE_CHILDREN,
+ curr_point,
+ child_count,
+ &data_step,
+ &seg_step,
+ &parent_uvs,
+ uv_step,
+ (MTFace **)mtfaces,
+ cache->num_uv_layers,
+ &parent_mcol,
+ col_step,
+ (MCol **)mcols,
+ cache->num_col_layers);
+ }
+ }
+ /* Cleanup. */
+ if (parent_uvs != NULL) {
+ /* TODO(sergey): For edit mode it should be edit->totcached. */
+ for (int i = 0; i < psys->totpart; i++) {
+ MEM_SAFE_FREE(parent_uvs[i]);
+ }
+ MEM_freeN(parent_uvs);
+ }
+ if (parent_mcol != NULL) {
+ for (int i = 0; i < psys->totpart; i++) {
+ MEM_SAFE_FREE(parent_mcol[i]);
+ }
+ MEM_freeN(parent_mcol);
+ }
+
+ /* Create vbo immediately to bind to texture buffer. */
+ GPU_vertbuf_use(cache->proc_strand_buf);
+ cache->strand_tex = GPU_texture_create_from_vertbuf(cache->proc_strand_buf);
+
+ GPU_vertbuf_use(cache->proc_strand_seg_buf);
+ cache->strand_seg_tex = GPU_texture_create_from_vertbuf(cache->proc_strand_seg_buf);
+
+ for (int i = 0; i < cache->num_uv_layers; i++) {
+ GPU_vertbuf_use(cache->proc_uv_buf[i]);
+ cache->uv_tex[i] = GPU_texture_create_from_vertbuf(cache->proc_uv_buf[i]);
+ }
+ for (int i = 0; i < cache->num_col_layers; i++) {
+ GPU_vertbuf_use(cache->proc_col_buf[i]);
+ cache->col_tex[i] = GPU_texture_create_from_vertbuf(cache->proc_col_buf[i]);
+ }
}
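
A sketch of the layer-name scheme used above, with demo_strhash() standing in for BLI_ghashutil_strhash_p(): each layer gets shader attribute names derived from a hash of its name ("u<hash>" / "c<hash>", plus "a<hash>" for the generic alias and a bare "u" / "c" for the active layer).

#include <stdio.h>

/* djb2-style stand-in for BLI_ghashutil_strhash_p(). */
static unsigned int demo_strhash(const char *s)
{
  unsigned int h = 5381;
  for (; *s; s++) {
    h = h * 33 + (unsigned char)*s;
  }
  return h;
}

int main(void)
{
  const char *layer_name = "UVMap";
  char uv_attr[16], generic_attr[16];
  snprintf(uv_attr, sizeof(uv_attr), "u%u", demo_strhash(layer_name));
  snprintf(generic_attr, sizeof(generic_attr), "a%u", demo_strhash(layer_name));
  printf("%s / %s (plus bare \"u\" when the layer is active)\n", uv_attr, generic_attr);
  return 0;
}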
-static void particle_batch_cache_ensure_procedural_indices(
- PTCacheEdit *edit,
- ParticleSystem *psys,
- ParticleHairCache *cache,
- int thickness_res,
- int subdiv)
+static void particle_batch_cache_ensure_procedural_indices(PTCacheEdit *edit,
+ ParticleSystem *psys,
+ ParticleHairCache *cache,
+ int thickness_res,
+ int subdiv)
{
- BLI_assert(thickness_res <= MAX_THICKRES); /* Cylinder strip not currently supported. */
-
- if (cache->final[subdiv].proc_hairs[thickness_res - 1] != NULL) {
- return;
- }
-
- int verts_per_hair = cache->final[subdiv].strands_res * thickness_res;
- /* +1 for primitive restart */
- int element_count = (verts_per_hair + 1) * cache->strands_len;
- GPUPrimType prim_type = (thickness_res == 1) ? GPU_PRIM_LINE_STRIP : GPU_PRIM_TRI_STRIP;
-
- static GPUVertFormat format = { 0 };
- GPU_vertformat_clear(&format);
-
- /* initialize vertex format */
- GPU_vertformat_attr_add(&format, "dummy", GPU_COMP_U8, 1, GPU_FETCH_INT_TO_FLOAT_UNIT);
-
- GPUVertBuf *vbo = GPU_vertbuf_create_with_format(&format);
- GPU_vertbuf_data_alloc(vbo, 1);
-
- GPUIndexBufBuilder elb;
- GPU_indexbuf_init_ex(&elb, prim_type, element_count, element_count, true);
-
- if (edit != NULL && edit->pathcache != NULL) {
- particle_batch_cache_fill_segments_indices(
- edit->pathcache, 0, edit->totcached, verts_per_hair, &elb);
- }
- else {
- int curr_point = 0;
- if ((psys->pathcache != NULL) &&
- (!psys->childcache || (psys->part->draw & PART_DRAW_PARENT)))
- {
- curr_point = particle_batch_cache_fill_segments_indices(
- psys->pathcache, 0, psys->totpart, verts_per_hair, &elb);
- }
- if (psys->childcache) {
- const int child_count = psys->totchild * psys->part->disp / 100;
- curr_point = particle_batch_cache_fill_segments_indices(
- psys->childcache, curr_point, child_count, verts_per_hair, &elb);
- }
- }
-
- cache->final[subdiv].proc_hairs[thickness_res - 1] = GPU_batch_create_ex(
- prim_type,
- vbo,
- GPU_indexbuf_build(&elb),
- GPU_BATCH_OWNS_VBO | GPU_BATCH_OWNS_INDEX);
+ BLI_assert(thickness_res <= MAX_THICKRES); /* Cylinder strip not currently supported. */
+
+ if (cache->final[subdiv].proc_hairs[thickness_res - 1] != NULL) {
+ return;
+ }
+
+ int verts_per_hair = cache->final[subdiv].strands_res * thickness_res;
+ /* +1 for primitive restart */
+ int element_count = (verts_per_hair + 1) * cache->strands_len;
+ GPUPrimType prim_type = (thickness_res == 1) ? GPU_PRIM_LINE_STRIP : GPU_PRIM_TRI_STRIP;
+
+ static GPUVertFormat format = {0};
+ GPU_vertformat_clear(&format);
+
+ /* initialize vertex format */
+ GPU_vertformat_attr_add(&format, "dummy", GPU_COMP_U8, 1, GPU_FETCH_INT_TO_FLOAT_UNIT);
+
+ GPUVertBuf *vbo = GPU_vertbuf_create_with_format(&format);
+ GPU_vertbuf_data_alloc(vbo, 1);
+
+ GPUIndexBufBuilder elb;
+ GPU_indexbuf_init_ex(&elb, prim_type, element_count, element_count, true);
+
+ if (edit != NULL && edit->pathcache != NULL) {
+ particle_batch_cache_fill_segments_indices(
+ edit->pathcache, 0, edit->totcached, verts_per_hair, &elb);
+ }
+ else {
+ int curr_point = 0;
+ if ((psys->pathcache != NULL) &&
+ (!psys->childcache || (psys->part->draw & PART_DRAW_PARENT))) {
+ curr_point = particle_batch_cache_fill_segments_indices(
+ psys->pathcache, 0, psys->totpart, verts_per_hair, &elb);
+ }
+ if (psys->childcache) {
+ const int child_count = psys->totchild * psys->part->disp / 100;
+ curr_point = particle_batch_cache_fill_segments_indices(
+ psys->childcache, curr_point, child_count, verts_per_hair, &elb);
+ }
+ }
+
+ cache->final[subdiv].proc_hairs[thickness_res - 1] = GPU_batch_create_ex(
+ prim_type, vbo, GPU_indexbuf_build(&elb), GPU_BATCH_OWNS_VBO | GPU_BATCH_OWNS_INDEX);
}
-static void particle_batch_cache_ensure_procedural_pos(
- PTCacheEdit *edit,
- ParticleSystem *psys,
- ParticleHairCache *cache)
+static void particle_batch_cache_ensure_procedural_pos(PTCacheEdit *edit,
+ ParticleSystem *psys,
+ ParticleHairCache *cache)
{
- if (cache->proc_point_buf != NULL) {
- return;
- }
-
- /* initialize vertex format */
- GPUVertFormat format = {0};
- uint pos_id = GPU_vertformat_attr_add(&format, "posTime", GPU_COMP_F32, 4, GPU_FETCH_FLOAT);
-
- cache->proc_point_buf = GPU_vertbuf_create_with_format(&format);
- GPU_vertbuf_data_alloc(cache->proc_point_buf, cache->point_len);
-
- GPUVertBufRaw pos_step;
- GPU_vertbuf_attr_get_raw_data(cache->proc_point_buf, pos_id, &pos_step);
-
- if (edit != NULL && edit->pathcache != NULL) {
- particle_batch_cache_fill_segments_proc_pos(
- edit->pathcache,
- edit->totcached,
- &pos_step);
- }
- else {
- if ((psys->pathcache != NULL) &&
- (!psys->childcache || (psys->part->draw & PART_DRAW_PARENT)))
- {
- particle_batch_cache_fill_segments_proc_pos(
- psys->pathcache,
- psys->totpart,
- &pos_step);
- }
- if (psys->childcache) {
- const int child_count = psys->totchild * psys->part->disp / 100;
- particle_batch_cache_fill_segments_proc_pos(
- psys->childcache,
- child_count,
- &pos_step);
- }
- }
-
- /* Create vbo immediately to bind to texture buffer. */
- GPU_vertbuf_use(cache->proc_point_buf);
-
- cache->point_tex = GPU_texture_create_from_vertbuf(cache->proc_point_buf);
+ if (cache->proc_point_buf != NULL) {
+ return;
+ }
+
+ /* initialize vertex format */
+ GPUVertFormat format = {0};
+ uint pos_id = GPU_vertformat_attr_add(&format, "posTime", GPU_COMP_F32, 4, GPU_FETCH_FLOAT);
+
+ cache->proc_point_buf = GPU_vertbuf_create_with_format(&format);
+ GPU_vertbuf_data_alloc(cache->proc_point_buf, cache->point_len);
+
+ GPUVertBufRaw pos_step;
+ GPU_vertbuf_attr_get_raw_data(cache->proc_point_buf, pos_id, &pos_step);
+
+ if (edit != NULL && edit->pathcache != NULL) {
+ particle_batch_cache_fill_segments_proc_pos(edit->pathcache, edit->totcached, &pos_step);
+ }
+ else {
+ if ((psys->pathcache != NULL) &&
+ (!psys->childcache || (psys->part->draw & PART_DRAW_PARENT))) {
+ particle_batch_cache_fill_segments_proc_pos(psys->pathcache, psys->totpart, &pos_step);
+ }
+ if (psys->childcache) {
+ const int child_count = psys->totchild * psys->part->disp / 100;
+ particle_batch_cache_fill_segments_proc_pos(psys->childcache, child_count, &pos_step);
+ }
+ }
+
+ /* Create vbo immediately to bind to texture buffer. */
+ GPU_vertbuf_use(cache->proc_point_buf);
+
+ cache->point_tex = GPU_texture_create_from_vertbuf(cache->proc_point_buf);
}
-static void particle_batch_cache_ensure_pos_and_seg(
- PTCacheEdit *edit,
- ParticleSystem *psys,
- ModifierData *md,
- ParticleHairCache *hair_cache)
+static void particle_batch_cache_ensure_pos_and_seg(PTCacheEdit *edit,
+ ParticleSystem *psys,
+ ModifierData *md,
+ ParticleHairCache *hair_cache)
{
- if (hair_cache->pos != NULL && hair_cache->indices != NULL) {
- return;
- }
-
- int curr_point = 0;
- ParticleSystemModifierData *psmd = (ParticleSystemModifierData *)md;
-
- GPU_VERTBUF_DISCARD_SAFE(hair_cache->pos);
- GPU_INDEXBUF_DISCARD_SAFE(hair_cache->indices);
-
- static GPUVertFormat format = { 0 };
- HairAttributeID attr_id;
- uint *uv_id = NULL;
- uint *col_id = NULL;
- int num_uv_layers = 0;
- int num_col_layers = 0;
- int active_uv = 0;
- int active_col = 0;
- MTFace **mtfaces = NULL;
- MCol **mcols = NULL;
- float (**parent_uvs)[2] = NULL;
- MCol **parent_mcol = NULL;
-
- if (psmd != NULL) {
- if (CustomData_has_layer(&psmd->mesh_final->ldata, CD_MLOOPUV)) {
- num_uv_layers = CustomData_number_of_layers(&psmd->mesh_final->ldata, CD_MLOOPUV);
- active_uv = CustomData_get_active_layer(&psmd->mesh_final->ldata, CD_MLOOPUV);
- }
- if (CustomData_has_layer(&psmd->mesh_final->ldata, CD_MLOOPCOL)) {
- num_col_layers = CustomData_number_of_layers(&psmd->mesh_final->ldata, CD_MLOOPCOL);
- active_col = CustomData_get_active_layer(&psmd->mesh_final->ldata, CD_MLOOPCOL);
- }
- }
-
- GPU_vertformat_clear(&format);
-
- /* initialize vertex format */
- attr_id.pos = GPU_vertformat_attr_add(&format, "pos", GPU_COMP_F32, 3, GPU_FETCH_FLOAT);
- attr_id.tan = GPU_vertformat_attr_add(&format, "nor", GPU_COMP_F32, 3, GPU_FETCH_FLOAT);
- attr_id.ind = GPU_vertformat_attr_add(&format, "ind", GPU_COMP_I32, 1, GPU_FETCH_INT);
-
- if (psmd) {
- uv_id = MEM_mallocN(sizeof(*uv_id) * num_uv_layers, "UV attr format");
- col_id = MEM_mallocN(sizeof(*col_id) * num_col_layers, "Col attr format");
-
- for (int i = 0; i < num_uv_layers; i++) {
- const char *name = CustomData_get_layer_name(&psmd->mesh_final->ldata, CD_MLOOPUV, i);
- char uuid[32];
-
- BLI_snprintf(uuid, sizeof(uuid), "u%u", BLI_ghashutil_strhash_p(name));
- uv_id[i] = GPU_vertformat_attr_add(&format, uuid, GPU_COMP_F32, 2, GPU_FETCH_FLOAT);
-
- if (i == active_uv) {
- GPU_vertformat_alias_add(&format, "u");
- }
- }
-
- for (int i = 0; i < num_uv_layers; i++) {
- const char *name = CustomData_get_layer_name(&psmd->mesh_final->ldata, CD_MLOOPUV, i);
- char uuid[32];
-
- BLI_snprintf(uuid, sizeof(uuid), "c%u", BLI_ghashutil_strhash_p(name));
- col_id[i] = GPU_vertformat_attr_add(&format, uuid, GPU_COMP_F32, 2, GPU_FETCH_FLOAT);
-
- if (i == active_col) {
- GPU_vertformat_alias_add(&format, "c");
- }
- }
- }
-
- hair_cache->pos = GPU_vertbuf_create_with_format(&format);
- GPU_vertbuf_data_alloc(hair_cache->pos, hair_cache->point_len);
-
- GPUIndexBufBuilder elb;
- GPU_indexbuf_init_ex(
- &elb,
- GPU_PRIM_LINE_STRIP,
- hair_cache->elems_len, hair_cache->point_len,
- true);
-
- if (num_uv_layers || num_col_layers) {
- BKE_mesh_tessface_ensure(psmd->mesh_final);
- if (num_uv_layers) {
- mtfaces = MEM_mallocN(sizeof(*mtfaces) * num_uv_layers, "Faces UV layers");
- for (int i = 0; i < num_uv_layers; i++) {
- mtfaces[i] = (MTFace *)CustomData_get_layer_n(&psmd->mesh_final->fdata, CD_MTFACE, i);
- }
- }
- if (num_col_layers) {
- mcols = MEM_mallocN(sizeof(*mcols) * num_col_layers, "Color layers");
- for (int i = 0; i < num_col_layers; i++) {
- mcols[i] = (MCol *)CustomData_get_layer_n(&psmd->mesh_final->fdata, CD_MCOL, i);
- }
- }
- }
-
- if (edit != NULL && edit->pathcache != NULL) {
- curr_point = particle_batch_cache_fill_segments(
- psys, psmd, edit->pathcache, PARTICLE_SOURCE_PARENT,
- 0, 0, edit->totcached,
- num_uv_layers, num_col_layers, mtfaces, mcols, uv_id, col_id, &parent_uvs, &parent_mcol,
- &elb, &attr_id, hair_cache);
- }
- else {
- if ((psys->pathcache != NULL) &&
- (!psys->childcache || (psys->part->draw & PART_DRAW_PARENT)))
- {
- curr_point = particle_batch_cache_fill_segments(
- psys, psmd, psys->pathcache, PARTICLE_SOURCE_PARENT,
- 0, 0, psys->totpart,
- num_uv_layers, num_col_layers, mtfaces, mcols, uv_id, col_id, &parent_uvs, &parent_mcol,
- &elb, &attr_id, hair_cache);
- }
- if (psys->childcache != NULL) {
- const int child_count = psys->totchild * psys->part->disp / 100;
- curr_point = particle_batch_cache_fill_segments(
- psys, psmd, psys->childcache, PARTICLE_SOURCE_CHILDREN,
- psys->totpart, curr_point, child_count,
- num_uv_layers, num_col_layers, mtfaces, mcols, uv_id, col_id, &parent_uvs, &parent_mcol,
- &elb, &attr_id, hair_cache);
- }
- }
- /* Cleanup. */
- if (parent_uvs != NULL) {
- /* TODO(sergey): For edit mode it should be edit->totcached. */
- for (int i = 0; i < psys->totpart; i++) {
- MEM_SAFE_FREE(parent_uvs[i]);
- }
- MEM_freeN(parent_uvs);
- }
- if (parent_mcol != NULL) {
- for (int i = 0; i < psys->totpart; i++) {
- MEM_SAFE_FREE(parent_mcol[i]);
- }
- MEM_freeN(parent_mcol);
- }
- if (num_uv_layers) {
- MEM_freeN(mtfaces);
- }
- if (num_col_layers) {
- MEM_freeN(mcols);
- }
- if (psmd != NULL) {
- MEM_freeN(uv_id);
- }
- hair_cache->indices = GPU_indexbuf_build(&elb);
+ if (hair_cache->pos != NULL && hair_cache->indices != NULL) {
+ return;
+ }
+
+ int curr_point = 0;
+ ParticleSystemModifierData *psmd = (ParticleSystemModifierData *)md;
+
+ GPU_VERTBUF_DISCARD_SAFE(hair_cache->pos);
+ GPU_INDEXBUF_DISCARD_SAFE(hair_cache->indices);
+
+ static GPUVertFormat format = {0};
+ HairAttributeID attr_id;
+ uint *uv_id = NULL;
+ uint *col_id = NULL;
+ int num_uv_layers = 0;
+ int num_col_layers = 0;
+ int active_uv = 0;
+ int active_col = 0;
+ MTFace **mtfaces = NULL;
+ MCol **mcols = NULL;
+ float(**parent_uvs)[2] = NULL;
+ MCol **parent_mcol = NULL;
+
+ if (psmd != NULL) {
+ if (CustomData_has_layer(&psmd->mesh_final->ldata, CD_MLOOPUV)) {
+ num_uv_layers = CustomData_number_of_layers(&psmd->mesh_final->ldata, CD_MLOOPUV);
+ active_uv = CustomData_get_active_layer(&psmd->mesh_final->ldata, CD_MLOOPUV);
+ }
+ if (CustomData_has_layer(&psmd->mesh_final->ldata, CD_MLOOPCOL)) {
+ num_col_layers = CustomData_number_of_layers(&psmd->mesh_final->ldata, CD_MLOOPCOL);
+ active_col = CustomData_get_active_layer(&psmd->mesh_final->ldata, CD_MLOOPCOL);
+ }
+ }
+
+ GPU_vertformat_clear(&format);
+
+ /* initialize vertex format */
+ attr_id.pos = GPU_vertformat_attr_add(&format, "pos", GPU_COMP_F32, 3, GPU_FETCH_FLOAT);
+ attr_id.tan = GPU_vertformat_attr_add(&format, "nor", GPU_COMP_F32, 3, GPU_FETCH_FLOAT);
+ attr_id.ind = GPU_vertformat_attr_add(&format, "ind", GPU_COMP_I32, 1, GPU_FETCH_INT);
+
+ if (psmd) {
+ uv_id = MEM_mallocN(sizeof(*uv_id) * num_uv_layers, "UV attr format");
+ col_id = MEM_mallocN(sizeof(*col_id) * num_col_layers, "Col attr format");
+
+ for (int i = 0; i < num_uv_layers; i++) {
+ const char *name = CustomData_get_layer_name(&psmd->mesh_final->ldata, CD_MLOOPUV, i);
+ char uuid[32];
+
+ BLI_snprintf(uuid, sizeof(uuid), "u%u", BLI_ghashutil_strhash_p(name));
+ uv_id[i] = GPU_vertformat_attr_add(&format, uuid, GPU_COMP_F32, 2, GPU_FETCH_FLOAT);
+
+ if (i == active_uv) {
+ GPU_vertformat_alias_add(&format, "u");
+ }
+ }
+
+    for (int i = 0; i < num_col_layers; i++) {
+      const char *name = CustomData_get_layer_name(&psmd->mesh_final->ldata, CD_MLOOPCOL, i);
+      char uuid[32];
+
+      BLI_snprintf(uuid, sizeof(uuid), "c%u", BLI_ghashutil_strhash_p(name));
+      col_id[i] = GPU_vertformat_attr_add(
+          &format, uuid, GPU_COMP_U16, 4, GPU_FETCH_INT_TO_FLOAT_UNIT);
+
+      if (i == active_col) {
+        GPU_vertformat_alias_add(&format, "c");
+      }
+    }
+ }
+
+ hair_cache->pos = GPU_vertbuf_create_with_format(&format);
+ GPU_vertbuf_data_alloc(hair_cache->pos, hair_cache->point_len);
+
+ GPUIndexBufBuilder elb;
+ GPU_indexbuf_init_ex(
+ &elb, GPU_PRIM_LINE_STRIP, hair_cache->elems_len, hair_cache->point_len, true);
+
+ if (num_uv_layers || num_col_layers) {
+ BKE_mesh_tessface_ensure(psmd->mesh_final);
+ if (num_uv_layers) {
+ mtfaces = MEM_mallocN(sizeof(*mtfaces) * num_uv_layers, "Faces UV layers");
+ for (int i = 0; i < num_uv_layers; i++) {
+ mtfaces[i] = (MTFace *)CustomData_get_layer_n(&psmd->mesh_final->fdata, CD_MTFACE, i);
+ }
+ }
+ if (num_col_layers) {
+ mcols = MEM_mallocN(sizeof(*mcols) * num_col_layers, "Color layers");
+ for (int i = 0; i < num_col_layers; i++) {
+ mcols[i] = (MCol *)CustomData_get_layer_n(&psmd->mesh_final->fdata, CD_MCOL, i);
+ }
+ }
+ }
+
+ if (edit != NULL && edit->pathcache != NULL) {
+ curr_point = particle_batch_cache_fill_segments(psys,
+ psmd,
+ edit->pathcache,
+ PARTICLE_SOURCE_PARENT,
+ 0,
+ 0,
+ edit->totcached,
+ num_uv_layers,
+ num_col_layers,
+ mtfaces,
+ mcols,
+ uv_id,
+ col_id,
+ &parent_uvs,
+ &parent_mcol,
+ &elb,
+ &attr_id,
+ hair_cache);
+ }
+ else {
+ if ((psys->pathcache != NULL) &&
+ (!psys->childcache || (psys->part->draw & PART_DRAW_PARENT))) {
+ curr_point = particle_batch_cache_fill_segments(psys,
+ psmd,
+ psys->pathcache,
+ PARTICLE_SOURCE_PARENT,
+ 0,
+ 0,
+ psys->totpart,
+ num_uv_layers,
+ num_col_layers,
+ mtfaces,
+ mcols,
+ uv_id,
+ col_id,
+ &parent_uvs,
+ &parent_mcol,
+ &elb,
+ &attr_id,
+ hair_cache);
+ }
+ if (psys->childcache != NULL) {
+ const int child_count = psys->totchild * psys->part->disp / 100;
+ curr_point = particle_batch_cache_fill_segments(psys,
+ psmd,
+ psys->childcache,
+ PARTICLE_SOURCE_CHILDREN,
+ psys->totpart,
+ curr_point,
+ child_count,
+ num_uv_layers,
+ num_col_layers,
+ mtfaces,
+ mcols,
+ uv_id,
+ col_id,
+ &parent_uvs,
+ &parent_mcol,
+ &elb,
+ &attr_id,
+ hair_cache);
+ }
+ }
+ /* Cleanup. */
+ if (parent_uvs != NULL) {
+ /* TODO(sergey): For edit mode it should be edit->totcached. */
+ for (int i = 0; i < psys->totpart; i++) {
+ MEM_SAFE_FREE(parent_uvs[i]);
+ }
+ MEM_freeN(parent_uvs);
+ }
+ if (parent_mcol != NULL) {
+ for (int i = 0; i < psys->totpart; i++) {
+ MEM_SAFE_FREE(parent_mcol[i]);
+ }
+ MEM_freeN(parent_mcol);
+ }
+ if (num_uv_layers) {
+ MEM_freeN(mtfaces);
+ }
+ if (num_col_layers) {
+ MEM_freeN(mcols);
+ }
+  if (psmd != NULL) {
+    MEM_freeN(uv_id);
+    MEM_freeN(col_id);
+  }
+ hair_cache->indices = GPU_indexbuf_build(&elb);
}
-static void particle_batch_cache_ensure_pos(
- Object *object,
- ParticleSystem *psys,
- ParticlePointCache *point_cache)
+static void particle_batch_cache_ensure_pos(Object *object,
+ ParticleSystem *psys,
+ ParticlePointCache *point_cache)
{
- if (point_cache->pos != NULL) {
- return;
- }
-
- static GPUVertFormat format = { 0 };
- static uint pos_id, rot_id, val_id;
- int i, curr_point;
- ParticleData *pa;
- ParticleKey state;
- ParticleSimulationData sim = {NULL};
- const DRWContextState *draw_ctx = DRW_context_state_get();
-
- sim.depsgraph = draw_ctx->depsgraph;
- sim.scene = draw_ctx->scene;
- sim.ob = object;
- sim.psys = psys;
- sim.psmd = psys_get_modifier(object, psys);
- sim.psys->lattice_deform_data = psys_create_lattice_deform_data(&sim);
-
- if (psys->part->phystype == PART_PHYS_KEYED) {
- if (psys->flag & PSYS_KEYED) {
- psys_count_keyed_targets(&sim);
- if (psys->totkeyed == 0) {
- return;
- }
- }
- }
-
- GPU_VERTBUF_DISCARD_SAFE(point_cache->pos);
-
- if (format.attr_len == 0) {
- /* initialize vertex format */
- pos_id = GPU_vertformat_attr_add(&format, "pos", GPU_COMP_F32, 4, GPU_FETCH_FLOAT);
- rot_id = GPU_vertformat_attr_add(&format, "rot", GPU_COMP_F32, 4, GPU_FETCH_FLOAT);
- val_id = GPU_vertformat_attr_add(&format, "val", GPU_COMP_F32, 1, GPU_FETCH_FLOAT);
- }
-
- point_cache->pos = GPU_vertbuf_create_with_format(&format);
- GPU_vertbuf_data_alloc(point_cache->pos, psys->totpart);
-
- for (curr_point = 0, i = 0, pa = psys->particles; i < psys->totpart; i++, pa++) {
- state.time = DEG_get_ctime(draw_ctx->depsgraph);
- if (!psys_get_particle_state(&sim, i, &state, 0)) {
- continue;
- }
-
- float val;
-
- GPU_vertbuf_attr_set(point_cache->pos, pos_id, curr_point, state.co);
- GPU_vertbuf_attr_set(point_cache->pos, rot_id, curr_point, state.rot);
-
- switch (psys->part->draw_col) {
- case PART_DRAW_COL_VEL:
- val = len_v3(state.vel) / psys->part->color_vec_max;
- break;
- case PART_DRAW_COL_ACC:
- val = len_v3v3(
- state.vel,
- pa->prev_state.vel) / ((state.time - pa->prev_state.time) * psys->part->color_vec_max);
- break;
- default:
- val = -1.0f;
- break;
- }
-
- GPU_vertbuf_attr_set(point_cache->pos, val_id, curr_point, &val);
-
- curr_point++;
- }
-
- if (curr_point != psys->totpart) {
- GPU_vertbuf_data_resize(point_cache->pos, curr_point);
- }
+ if (point_cache->pos != NULL) {
+ return;
+ }
+
+ static GPUVertFormat format = {0};
+ static uint pos_id, rot_id, val_id;
+ int i, curr_point;
+ ParticleData *pa;
+ ParticleKey state;
+ ParticleSimulationData sim = {NULL};
+ const DRWContextState *draw_ctx = DRW_context_state_get();
+
+ sim.depsgraph = draw_ctx->depsgraph;
+ sim.scene = draw_ctx->scene;
+ sim.ob = object;
+ sim.psys = psys;
+ sim.psmd = psys_get_modifier(object, psys);
+ sim.psys->lattice_deform_data = psys_create_lattice_deform_data(&sim);
+
+ if (psys->part->phystype == PART_PHYS_KEYED) {
+ if (psys->flag & PSYS_KEYED) {
+ psys_count_keyed_targets(&sim);
+ if (psys->totkeyed == 0) {
+ return;
+ }
+ }
+ }
+
+ GPU_VERTBUF_DISCARD_SAFE(point_cache->pos);
+
+ if (format.attr_len == 0) {
+ /* initialize vertex format */
+ pos_id = GPU_vertformat_attr_add(&format, "pos", GPU_COMP_F32, 4, GPU_FETCH_FLOAT);
+ rot_id = GPU_vertformat_attr_add(&format, "rot", GPU_COMP_F32, 4, GPU_FETCH_FLOAT);
+ val_id = GPU_vertformat_attr_add(&format, "val", GPU_COMP_F32, 1, GPU_FETCH_FLOAT);
+ }
+
+ point_cache->pos = GPU_vertbuf_create_with_format(&format);
+ GPU_vertbuf_data_alloc(point_cache->pos, psys->totpart);
+
+ for (curr_point = 0, i = 0, pa = psys->particles; i < psys->totpart; i++, pa++) {
+ state.time = DEG_get_ctime(draw_ctx->depsgraph);
+ if (!psys_get_particle_state(&sim, i, &state, 0)) {
+ continue;
+ }
+
+ float val;
+
+ GPU_vertbuf_attr_set(point_cache->pos, pos_id, curr_point, state.co);
+ GPU_vertbuf_attr_set(point_cache->pos, rot_id, curr_point, state.rot);
+
+ switch (psys->part->draw_col) {
+ case PART_DRAW_COL_VEL:
+ val = len_v3(state.vel) / psys->part->color_vec_max;
+ break;
+ case PART_DRAW_COL_ACC:
+ val = len_v3v3(state.vel, pa->prev_state.vel) /
+ ((state.time - pa->prev_state.time) * psys->part->color_vec_max);
+ break;
+ default:
+ val = -1.0f;
+ break;
+ }
+
+ GPU_vertbuf_attr_set(point_cache->pos, val_id, curr_point, &val);
+
+ curr_point++;
+ }
+
+ if (curr_point != psys->totpart) {
+ GPU_vertbuf_data_resize(point_cache->pos, curr_point);
+ }
}
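
A sketch of the per-particle scalar computed in the switch above, with plain structs replacing the particle state: PART_DRAW_COL_VEL normalizes speed by color_vec_max, PART_DRAW_COL_ACC normalizes the velocity change per time step, and -1.0f means no color ramp.

#include <math.h>

typedef struct DemoState {
  float vel[3];
  float time;
} DemoState;

static float demo_len_v3(const float v[3])
{
  return sqrtf(v[0] * v[0] + v[1] * v[1] + v[2] * v[2]);
}

/* draw_col: 1 ~ PART_DRAW_COL_VEL, 2 ~ PART_DRAW_COL_ACC (illustrative values). */
static float demo_draw_col_value(
    int draw_col, const DemoState *curr, const DemoState *prev, float color_vec_max)
{
  switch (draw_col) {
    case 1:
      return demo_len_v3(curr->vel) / color_vec_max; /* speed, normalized */
    case 2: {
      const float dv[3] = {curr->vel[0] - prev->vel[0],
                           curr->vel[1] - prev->vel[1],
                           curr->vel[2] - prev->vel[2]};
      /* |delta v| per time step, normalized. */
      return demo_len_v3(dv) / ((curr->time - prev->time) * color_vec_max);
    }
    default:
      return -1.0f; /* no color ramp */
  }
}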
-static void drw_particle_update_ptcache_edit(
- Object *object_eval,
- ParticleSystem *psys,
- PTCacheEdit *edit)
+static void drw_particle_update_ptcache_edit(Object *object_eval,
+ ParticleSystem *psys,
+ PTCacheEdit *edit)
{
- if (edit->psys == NULL) {
- return;
- }
- /* NOTE: Get the flag from the particle system of the drawing object,
-  * since this is where the depsgraph sets the flags. */
- const DRWContextState *draw_ctx = DRW_context_state_get();
- Scene *scene_orig = (Scene *)DEG_get_original_id(&draw_ctx->scene->id);
- Object *object_orig = DEG_get_original_object(object_eval);
- if (psys->flag & PSYS_HAIR_UPDATED) {
- PE_update_object(draw_ctx->depsgraph, scene_orig, object_orig, 0);
- psys->flag &= ~PSYS_HAIR_UPDATED;
- }
- if (edit->pathcache == NULL) {
- Depsgraph *depsgraph = draw_ctx->depsgraph;
- psys_cache_edit_paths(
- depsgraph,
- scene_orig, object_orig,
- edit,
- DEG_get_ctime(depsgraph),
- DEG_get_mode(depsgraph) == DAG_EVAL_RENDER);
- }
+ if (edit->psys == NULL) {
+ return;
+ }
+  /* NOTE: Get the flag from the particle system of the drawing object,
+   * since this is where the depsgraph sets the flags. */
+ const DRWContextState *draw_ctx = DRW_context_state_get();
+ Scene *scene_orig = (Scene *)DEG_get_original_id(&draw_ctx->scene->id);
+ Object *object_orig = DEG_get_original_object(object_eval);
+ if (psys->flag & PSYS_HAIR_UPDATED) {
+ PE_update_object(draw_ctx->depsgraph, scene_orig, object_orig, 0);
+ psys->flag &= ~PSYS_HAIR_UPDATED;
+ }
+ if (edit->pathcache == NULL) {
+ Depsgraph *depsgraph = draw_ctx->depsgraph;
+ psys_cache_edit_paths(depsgraph,
+ scene_orig,
+ object_orig,
+ edit,
+ DEG_get_ctime(depsgraph),
+ DEG_get_mode(depsgraph) == DAG_EVAL_RENDER);
+ }
}
-static void drw_particle_update_ptcache(
- Object *object_eval,
- ParticleSystem *psys)
+static void drw_particle_update_ptcache(Object *object_eval, ParticleSystem *psys)
{
- if ((object_eval->mode & OB_MODE_PARTICLE_EDIT) == 0) {
- return;
- }
- const DRWContextState *draw_ctx = DRW_context_state_get();
- Scene *scene_orig = (Scene *)DEG_get_original_id(&draw_ctx->scene->id);
- Object *object_orig = DEG_get_original_object(object_eval);
- PTCacheEdit *edit = PE_create_current(
- draw_ctx->depsgraph, scene_orig, object_orig);
- if (edit != NULL) {
- drw_particle_update_ptcache_edit(object_eval, psys, edit);
- }
+ if ((object_eval->mode & OB_MODE_PARTICLE_EDIT) == 0) {
+ return;
+ }
+ const DRWContextState *draw_ctx = DRW_context_state_get();
+ Scene *scene_orig = (Scene *)DEG_get_original_id(&draw_ctx->scene->id);
+ Object *object_orig = DEG_get_original_object(object_eval);
+ PTCacheEdit *edit = PE_create_current(draw_ctx->depsgraph, scene_orig, object_orig);
+ if (edit != NULL) {
+ drw_particle_update_ptcache_edit(object_eval, psys, edit);
+ }
}
typedef struct ParticleDrawSource {
- Object *object;
- ParticleSystem *psys;
- ModifierData *md;
- PTCacheEdit *edit;
+ Object *object;
+ ParticleSystem *psys;
+ ModifierData *md;
+ PTCacheEdit *edit;
} ParticleDrawSource;
-static void drw_particle_get_hair_source(
- Object *object,
- ParticleSystem *psys,
- ModifierData *md,
- PTCacheEdit *edit,
- ParticleDrawSource *r_draw_source)
+static void drw_particle_get_hair_source(Object *object,
+ ParticleSystem *psys,
+ ModifierData *md,
+ PTCacheEdit *edit,
+ ParticleDrawSource *r_draw_source)
{
- r_draw_source->object = object;
- r_draw_source->psys = psys;
- r_draw_source->md = md;
- r_draw_source->edit = edit;
- if ((object->mode & OB_MODE_PARTICLE_EDIT) != 0) {
- r_draw_source->object = DEG_get_original_object(object);
- r_draw_source->psys = psys_orig_get(psys);
- }
+ r_draw_source->object = object;
+ r_draw_source->psys = psys;
+ r_draw_source->md = md;
+ r_draw_source->edit = edit;
+ if ((object->mode & OB_MODE_PARTICLE_EDIT) != 0) {
+ r_draw_source->object = DEG_get_original_object(object);
+ r_draw_source->psys = psys_orig_get(psys);
+ }
}
-GPUBatch *DRW_particles_batch_cache_get_hair(
- Object *object,
- ParticleSystem *psys,
- ModifierData *md)
+GPUBatch *DRW_particles_batch_cache_get_hair(Object *object,
+ ParticleSystem *psys,
+ ModifierData *md)
{
- ParticleBatchCache *cache = particle_batch_cache_get(psys);
- if (cache->hair.hairs == NULL) {
- drw_particle_update_ptcache(object, psys);
- ParticleDrawSource source;
- drw_particle_get_hair_source(object, psys, md, NULL, &source);
- ensure_seg_pt_count(source.edit, source.psys, &cache->hair);
- particle_batch_cache_ensure_pos_and_seg(source.edit, source.psys, source.md, &cache->hair);
- cache->hair.hairs = GPU_batch_create(
- GPU_PRIM_LINE_STRIP,
- cache->hair.pos,
- cache->hair.indices);
- }
- return cache->hair.hairs;
+ ParticleBatchCache *cache = particle_batch_cache_get(psys);
+ if (cache->hair.hairs == NULL) {
+ drw_particle_update_ptcache(object, psys);
+ ParticleDrawSource source;
+ drw_particle_get_hair_source(object, psys, md, NULL, &source);
+ ensure_seg_pt_count(source.edit, source.psys, &cache->hair);
+ particle_batch_cache_ensure_pos_and_seg(source.edit, source.psys, source.md, &cache->hair);
+ cache->hair.hairs = GPU_batch_create(
+ GPU_PRIM_LINE_STRIP, cache->hair.pos, cache->hair.indices);
+ }
+ return cache->hair.hairs;
}
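DRW_particles_batch_cache_get_hair() above follows the draw manager's lazy batch-cache pattern: the GPU batch is built only on the first request and handed back unchanged afterwards. A sketch of the pattern under illustrative names (not Blender API):

#include <stdlib.h>

typedef struct DemoBatch {
  int vbo_id; /* stand-in for the real GPU handles */
} DemoBatch;

typedef struct DemoHairCache {
  DemoBatch *hairs; /* NULL until first requested */
} DemoHairCache;

static DemoBatch *demo_hair_batch_get(DemoHairCache *cache)
{
  if (cache->hairs == NULL) {
    /* Vertex/index buffer setup would happen here, exactly once. */
    cache->hairs = (DemoBatch *)calloc(1, sizeof(DemoBatch));
  }
  return cache->hairs;
}

Invalidation then reduces to freeing the batch and resetting the pointer to NULL, which is what the GPU_*_DISCARD_SAFE macros do throughout this file.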
GPUBatch *DRW_particles_batch_cache_get_dots(Object *object, ParticleSystem *psys)
{
- ParticleBatchCache *cache = particle_batch_cache_get(psys);
+ ParticleBatchCache *cache = particle_batch_cache_get(psys);
- if (cache->point.points == NULL) {
- particle_batch_cache_ensure_pos(object, psys, &cache->point);
- cache->point.points = GPU_batch_create(GPU_PRIM_POINTS, cache->point.pos, NULL);
- }
+ if (cache->point.points == NULL) {
+ particle_batch_cache_ensure_pos(object, psys, &cache->point);
+ cache->point.points = GPU_batch_create(GPU_PRIM_POINTS, cache->point.pos, NULL);
+ }
- return cache->point.points;
+ return cache->point.points;
}
-static void particle_batch_cache_ensure_edit_pos_and_seg(
- PTCacheEdit *edit,
- ParticleSystem *psys,
- ModifierData *UNUSED(md),
- ParticleHairCache *hair_cache,
- bool use_weight)
+static void particle_batch_cache_ensure_edit_pos_and_seg(PTCacheEdit *edit,
+ ParticleSystem *psys,
+ ModifierData *UNUSED(md),
+ ParticleHairCache *hair_cache,
+ bool use_weight)
{
- if (hair_cache->pos != NULL && hair_cache->indices != NULL) {
- return;
- }
-
- ParticleData *particle = (use_weight) ? psys->particles : NULL;
-
- GPU_VERTBUF_DISCARD_SAFE(hair_cache->pos);
- GPU_INDEXBUF_DISCARD_SAFE(hair_cache->indices);
-
- GPUVertBufRaw data_step;
- GPUIndexBufBuilder elb;
- uint pos_id, color_id;
- GPUVertFormat *edit_point_format = edit_points_vert_format_get(&pos_id, &color_id);
-
- hair_cache->pos = GPU_vertbuf_create_with_format(edit_point_format);
- GPU_vertbuf_data_alloc(hair_cache->pos, hair_cache->point_len);
- GPU_vertbuf_attr_get_raw_data(hair_cache->pos, pos_id, &data_step);
-
- GPU_indexbuf_init_ex(
- &elb,
- GPU_PRIM_LINE_STRIP,
- hair_cache->elems_len, hair_cache->point_len,
- true);
-
- if (edit != NULL && edit->pathcache != NULL) {
- particle_batch_cache_fill_segments_edit(
- edit, particle, edit->pathcache,
- 0, edit->totcached,
- &elb, &data_step);
- }
- else {
- BLI_assert(!"Hairs are not in edit mode!");
- }
- hair_cache->indices = GPU_indexbuf_build(&elb);
+ if (hair_cache->pos != NULL && hair_cache->indices != NULL) {
+ return;
+ }
+
+ ParticleData *particle = (use_weight) ? psys->particles : NULL;
+
+ GPU_VERTBUF_DISCARD_SAFE(hair_cache->pos);
+ GPU_INDEXBUF_DISCARD_SAFE(hair_cache->indices);
+
+ GPUVertBufRaw data_step;
+ GPUIndexBufBuilder elb;
+ uint pos_id, color_id;
+ GPUVertFormat *edit_point_format = edit_points_vert_format_get(&pos_id, &color_id);
+
+ hair_cache->pos = GPU_vertbuf_create_with_format(edit_point_format);
+ GPU_vertbuf_data_alloc(hair_cache->pos, hair_cache->point_len);
+ GPU_vertbuf_attr_get_raw_data(hair_cache->pos, pos_id, &data_step);
+
+ GPU_indexbuf_init_ex(
+ &elb, GPU_PRIM_LINE_STRIP, hair_cache->elems_len, hair_cache->point_len, true);
+
+ if (edit != NULL && edit->pathcache != NULL) {
+ particle_batch_cache_fill_segments_edit(
+ edit, particle, edit->pathcache, 0, edit->totcached, &elb, &data_step);
+ }
+ else {
+ BLI_assert(!"Hairs are not in edit mode!");
+ }
+ hair_cache->indices = GPU_indexbuf_build(&elb);
}
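One detail worth calling out in the hunk above: the index buffer is initialized with a trailing "true", which in the GPU module of this era enables primitive restart, letting every hair be its own line strip inside a single index buffer. A hedged sketch of what such an index stream looks like, with an assumed restart sentinel:

/* Assumed sentinel value; the real one is defined by the GPU module. */
#define DEMO_RESTART_INDEX 0xFFFFFFFFu

/* Two 4-point strips packed into one index stream; returns the index count. */
static unsigned int demo_fill_two_strips(unsigned int indices[9])
{
  unsigned int n = 0;
  for (unsigned int v = 0; v < 4; v++) {
    indices[n++] = v; /* strip A: vertices 0..3 */
  }
  indices[n++] = DEMO_RESTART_INDEX; /* terminate strip A */
  for (unsigned int v = 4; v < 8; v++) {
    indices[n++] = v; /* strip B: vertices 4..7 */
  }
  return n;
}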
-GPUBatch *DRW_particles_batch_cache_get_edit_strands(
- Object *object,
- ParticleSystem *psys,
- PTCacheEdit *edit,
- bool use_weight)
+GPUBatch *DRW_particles_batch_cache_get_edit_strands(Object *object,
+ ParticleSystem *psys,
+ PTCacheEdit *edit,
+ bool use_weight)
{
- ParticleBatchCache *cache = particle_batch_cache_get(psys);
- if (cache->edit_is_weight != use_weight) {
- GPU_VERTBUF_DISCARD_SAFE(cache->edit_hair.pos);
- GPU_BATCH_DISCARD_SAFE(cache->edit_hair.hairs);
- }
- if (cache->edit_hair.hairs != NULL) {
- return cache->edit_hair.hairs;
- }
- drw_particle_update_ptcache_edit(object, psys, edit);
- ensure_seg_pt_count(edit, psys, &cache->edit_hair);
- particle_batch_cache_ensure_edit_pos_and_seg(edit, psys, NULL, &cache->edit_hair, use_weight);
- cache->edit_hair.hairs = GPU_batch_create(
- GPU_PRIM_LINE_STRIP,
- cache->edit_hair.pos,
- cache->edit_hair.indices);
- cache->edit_is_weight = use_weight;
- return cache->edit_hair.hairs;
+ ParticleBatchCache *cache = particle_batch_cache_get(psys);
+ if (cache->edit_is_weight != use_weight) {
+ GPU_VERTBUF_DISCARD_SAFE(cache->edit_hair.pos);
+ GPU_BATCH_DISCARD_SAFE(cache->edit_hair.hairs);
+ }
+ if (cache->edit_hair.hairs != NULL) {
+ return cache->edit_hair.hairs;
+ }
+ drw_particle_update_ptcache_edit(object, psys, edit);
+ ensure_seg_pt_count(edit, psys, &cache->edit_hair);
+ particle_batch_cache_ensure_edit_pos_and_seg(edit, psys, NULL, &cache->edit_hair, use_weight);
+ cache->edit_hair.hairs = GPU_batch_create(
+ GPU_PRIM_LINE_STRIP, cache->edit_hair.pos, cache->edit_hair.indices);
+ cache->edit_is_weight = use_weight;
+ return cache->edit_hair.hairs;
}
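The weight toggle above acts as a single-slot cache key: when use_weight differs from the cached edit_is_weight, the buffers are discarded so the NULL check rebuilds them for the requested mode. As a sketch, with illustrative names and standard C in place of the GPU discard macros:

#include <stdbool.h>
#include <stdlib.h>

typedef struct DemoEditCache {
  void *hairs;    /* stand-in for the GPU batch */
  bool is_weight; /* mode the cached batch was built for */
} DemoEditCache;

static void *demo_edit_hairs_get(DemoEditCache *cache, bool use_weight)
{
  if (cache->is_weight != use_weight) {
    free(cache->hairs); /* stands in for GPU_BATCH_DISCARD_SAFE() */
    cache->hairs = NULL;
  }
  if (cache->hairs == NULL) {
    cache->hairs = calloc(1, 16); /* rebuild for the requested mode */
    cache->is_weight = use_weight;
  }
  return cache->hairs;
}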
-static void ensure_edit_inner_points_count(
- const PTCacheEdit *edit,
- ParticleBatchCache *cache)
+static void ensure_edit_inner_points_count(const PTCacheEdit *edit, ParticleBatchCache *cache)
{
- if (cache->edit_inner_pos != NULL) {
- return;
- }
- cache->edit_inner_point_len = 0;
- for (int point_index = 0; point_index < edit->totpoint; point_index++) {
- const PTCacheEditPoint *point = &edit->points[point_index];
- BLI_assert(point->totkey >= 1);
- cache->edit_inner_point_len += (point->totkey - 1);
- }
+ if (cache->edit_inner_pos != NULL) {
+ return;
+ }
+ cache->edit_inner_point_len = 0;
+ for (int point_index = 0; point_index < edit->totpoint; point_index++) {
+ const PTCacheEditPoint *point = &edit->points[point_index];
+ BLI_assert(point->totkey >= 1);
+ cache->edit_inner_point_len += (point->totkey - 1);
+ }
}
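The count above follows from every strand contributing all of its keys except the tip: a strand with totkey keys yields totkey - 1 inner points (so 5 keys give 4 inner points, while the tip buffer later holds exactly one point per strand). The same sum, stated as plain C:

static int demo_count_inner_points(const int *totkey_per_strand, int strand_count)
{
  int total = 0;
  for (int i = 0; i < strand_count; i++) {
    /* All keys but the last (tip) key are "inner" points. */
    total += totkey_per_strand[i] - 1;
  }
  return total;
}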
-static void particle_batch_cache_ensure_edit_inner_pos(
- PTCacheEdit *edit,
- ParticleBatchCache *cache)
+static void particle_batch_cache_ensure_edit_inner_pos(PTCacheEdit *edit,
+ ParticleBatchCache *cache)
{
- if (cache->edit_inner_pos != NULL) {
- return;
- }
-
- uint pos_id, color_id;
- GPUVertFormat *edit_point_format = edit_points_vert_format_get(&pos_id, &color_id);
-
- cache->edit_inner_pos = GPU_vertbuf_create_with_format(edit_point_format);
- GPU_vertbuf_data_alloc(cache->edit_inner_pos, cache->edit_inner_point_len);
-
- int global_key_index = 0;
- for (int point_index = 0; point_index < edit->totpoint; point_index++) {
- const PTCacheEditPoint *point = &edit->points[point_index];
- for (int key_index = 0; key_index < point->totkey - 1; key_index++) {
- PTCacheEditKey *key = &point->keys[key_index];
- uchar color = (key->flag & PEK_SELECT) ? 0xFF : 0x00;
- GPU_vertbuf_attr_set(cache->edit_inner_pos, pos_id, global_key_index, key->world_co);
- GPU_vertbuf_attr_set(cache->edit_inner_pos, color_id, global_key_index, &color);
- global_key_index++;
- }
- }
+ if (cache->edit_inner_pos != NULL) {
+ return;
+ }
+
+ uint pos_id, color_id;
+ GPUVertFormat *edit_point_format = edit_points_vert_format_get(&pos_id, &color_id);
+
+ cache->edit_inner_pos = GPU_vertbuf_create_with_format(edit_point_format);
+ GPU_vertbuf_data_alloc(cache->edit_inner_pos, cache->edit_inner_point_len);
+
+ int global_key_index = 0;
+ for (int point_index = 0; point_index < edit->totpoint; point_index++) {
+ const PTCacheEditPoint *point = &edit->points[point_index];
+ for (int key_index = 0; key_index < point->totkey - 1; key_index++) {
+ PTCacheEditKey *key = &point->keys[key_index];
+ uchar color = (key->flag & PEK_SELECT) ? 0xFF : 0x00;
+ GPU_vertbuf_attr_set(cache->edit_inner_pos, pos_id, global_key_index, key->world_co);
+ GPU_vertbuf_attr_set(cache->edit_inner_pos, color_id, global_key_index, &color);
+ global_key_index++;
+ }
+ }
}
-GPUBatch *DRW_particles_batch_cache_get_edit_inner_points(
- Object *object,
- ParticleSystem *psys,
- PTCacheEdit *edit)
+GPUBatch *DRW_particles_batch_cache_get_edit_inner_points(Object *object,
+ ParticleSystem *psys,
+ PTCacheEdit *edit)
{
- ParticleBatchCache *cache = particle_batch_cache_get(psys);
- if (cache->edit_inner_points != NULL) {
- return cache->edit_inner_points;
- }
- drw_particle_update_ptcache_edit(object, psys, edit);
- ensure_edit_inner_points_count(edit, cache);
- particle_batch_cache_ensure_edit_inner_pos(edit, cache);
- cache->edit_inner_points = GPU_batch_create(
- GPU_PRIM_POINTS,
- cache->edit_inner_pos,
- NULL);
- return cache->edit_inner_points;
+ ParticleBatchCache *cache = particle_batch_cache_get(psys);
+ if (cache->edit_inner_points != NULL) {
+ return cache->edit_inner_points;
+ }
+ drw_particle_update_ptcache_edit(object, psys, edit);
+ ensure_edit_inner_points_count(edit, cache);
+ particle_batch_cache_ensure_edit_inner_pos(edit, cache);
+ cache->edit_inner_points = GPU_batch_create(GPU_PRIM_POINTS, cache->edit_inner_pos, NULL);
+ return cache->edit_inner_points;
}
-static void ensure_edit_tip_points_count(
- const PTCacheEdit *edit,
- ParticleBatchCache *cache)
+static void ensure_edit_tip_points_count(const PTCacheEdit *edit, ParticleBatchCache *cache)
{
- if (cache->edit_tip_pos != NULL) {
- return;
- }
- cache->edit_tip_point_len = edit->totpoint;
+ if (cache->edit_tip_pos != NULL) {
+ return;
+ }
+ cache->edit_tip_point_len = edit->totpoint;
}
-static void particle_batch_cache_ensure_edit_tip_pos(
- PTCacheEdit *edit,
- ParticleBatchCache *cache)
+static void particle_batch_cache_ensure_edit_tip_pos(PTCacheEdit *edit, ParticleBatchCache *cache)
{
- if (cache->edit_tip_pos != NULL) {
- return;
- }
-
- uint pos_id, color_id;
- GPUVertFormat *edit_point_format = edit_points_vert_format_get(&pos_id, &color_id);
-
- cache->edit_tip_pos = GPU_vertbuf_create_with_format(edit_point_format);
- GPU_vertbuf_data_alloc(cache->edit_tip_pos, cache->edit_tip_point_len);
-
- for (int point_index = 0; point_index < edit->totpoint; point_index++) {
- const PTCacheEditPoint *point = &edit->points[point_index];
- PTCacheEditKey *key = &point->keys[point->totkey - 1];
- uchar color = (key->flag & PEK_SELECT) ? 0xFF : 0x00;
- GPU_vertbuf_attr_set(cache->edit_tip_pos, pos_id, point_index, key->world_co);
- GPU_vertbuf_attr_set(cache->edit_tip_pos, color_id, point_index, &color);
- }
+ if (cache->edit_tip_pos != NULL) {
+ return;
+ }
+
+ uint pos_id, color_id;
+ GPUVertFormat *edit_point_format = edit_points_vert_format_get(&pos_id, &color_id);
+
+ cache->edit_tip_pos = GPU_vertbuf_create_with_format(edit_point_format);
+ GPU_vertbuf_data_alloc(cache->edit_tip_pos, cache->edit_tip_point_len);
+
+ for (int point_index = 0; point_index < edit->totpoint; point_index++) {
+ const PTCacheEditPoint *point = &edit->points[point_index];
+ PTCacheEditKey *key = &point->keys[point->totkey - 1];
+ uchar color = (key->flag & PEK_SELECT) ? 0xFF : 0x00;
+ GPU_vertbuf_attr_set(cache->edit_tip_pos, pos_id, point_index, key->world_co);
+ GPU_vertbuf_attr_set(cache->edit_tip_pos, color_id, point_index, &color);
+ }
}
-GPUBatch *DRW_particles_batch_cache_get_edit_tip_points(
- Object *object,
- ParticleSystem *psys,
- PTCacheEdit *edit)
+GPUBatch *DRW_particles_batch_cache_get_edit_tip_points(Object *object,
+ ParticleSystem *psys,
+ PTCacheEdit *edit)
{
- ParticleBatchCache *cache = particle_batch_cache_get(psys);
- if (cache->edit_tip_points != NULL) {
- return cache->edit_tip_points;
- }
- drw_particle_update_ptcache_edit(object, psys, edit);
- ensure_edit_tip_points_count(edit, cache);
- particle_batch_cache_ensure_edit_tip_pos(edit, cache);
- cache->edit_tip_points = GPU_batch_create(
- GPU_PRIM_POINTS,
- cache->edit_tip_pos,
- NULL);
- return cache->edit_tip_points;
+ ParticleBatchCache *cache = particle_batch_cache_get(psys);
+ if (cache->edit_tip_points != NULL) {
+ return cache->edit_tip_points;
+ }
+ drw_particle_update_ptcache_edit(object, psys, edit);
+ ensure_edit_tip_points_count(edit, cache);
+ particle_batch_cache_ensure_edit_tip_pos(edit, cache);
+ cache->edit_tip_points = GPU_batch_create(GPU_PRIM_POINTS, cache->edit_tip_pos, NULL);
+ return cache->edit_tip_points;
}
/* Ensure all textures and buffers needed for GPU-accelerated drawing. */
-bool particles_ensure_procedural_data(
- Object *object,
- ParticleSystem *psys,
- ModifierData *md,
- ParticleHairCache **r_hair_cache,
- int subdiv,
- int thickness_res)
+bool particles_ensure_procedural_data(Object *object,
+ ParticleSystem *psys,
+ ModifierData *md,
+ ParticleHairCache **r_hair_cache,
+ int subdiv,
+ int thickness_res)
{
- bool need_ft_update = false;
-
- drw_particle_update_ptcache(object, psys);
-
- ParticleDrawSource source;
- drw_particle_get_hair_source(object, psys, md, NULL, &source);
-
- ParticleSettings *part = source.psys->part;
- ParticleBatchCache *cache = particle_batch_cache_get(source.psys);
- *r_hair_cache = &cache->hair;
-
- (*r_hair_cache)->final[subdiv].strands_res = 1 << (part->draw_step + subdiv);
-
- /* Refreshed on combing and simulation. */
- if ((*r_hair_cache)->proc_point_buf == NULL) {
- ensure_seg_pt_count(source.edit, source.psys, &cache->hair);
- particle_batch_cache_ensure_procedural_pos(source.edit, source.psys, &cache->hair);
- need_ft_update = true;
- }
-
- /* Refreshed if active layer or custom data changes. */
- if ((*r_hair_cache)->strand_tex == NULL) {
- particle_batch_cache_ensure_procedural_strand_data(source.edit, source.psys, source.md, &cache->hair);
- }
-
- /* Refreshed only on subdiv count change. */
- if ((*r_hair_cache)->final[subdiv].proc_buf == NULL) {
- particle_batch_cache_ensure_procedural_final_points(&cache->hair, subdiv);
- need_ft_update = true;
- }
- if ((*r_hair_cache)->final[subdiv].proc_hairs[thickness_res - 1] == NULL) {
- particle_batch_cache_ensure_procedural_indices(source.edit, source.psys, &cache->hair, thickness_res, subdiv);
- }
-
- return need_ft_update;
+ bool need_ft_update = false;
+
+ drw_particle_update_ptcache(object, psys);
+
+ ParticleDrawSource source;
+ drw_particle_get_hair_source(object, psys, md, NULL, &source);
+
+ ParticleSettings *part = source.psys->part;
+ ParticleBatchCache *cache = particle_batch_cache_get(source.psys);
+ *r_hair_cache = &cache->hair;
+
+ (*r_hair_cache)->final[subdiv].strands_res = 1 << (part->draw_step + subdiv);
+
+ /* Refreshed on combing and simulation. */
+ if ((*r_hair_cache)->proc_point_buf == NULL) {
+ ensure_seg_pt_count(source.edit, source.psys, &cache->hair);
+ particle_batch_cache_ensure_procedural_pos(source.edit, source.psys, &cache->hair);
+ need_ft_update = true;
+ }
+
+ /* Refreshed if active layer or custom data changes. */
+ if ((*r_hair_cache)->strand_tex == NULL) {
+ particle_batch_cache_ensure_procedural_strand_data(
+ source.edit, source.psys, source.md, &cache->hair);
+ }
+
+ /* Refreshed only on subdiv count change. */
+ if ((*r_hair_cache)->final[subdiv].proc_buf == NULL) {
+ particle_batch_cache_ensure_procedural_final_points(&cache->hair, subdiv);
+ need_ft_update = true;
+ }
+ if ((*r_hair_cache)->final[subdiv].proc_hairs[thickness_res - 1] == NULL) {
+ particle_batch_cache_ensure_procedural_indices(
+ source.edit, source.psys, &cache->hair, thickness_res, subdiv);
+ }
+
+ return need_ft_update;
}
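The strands_res assignment above doubles the per-strand curve resolution with every display step and every extra subdivision level, i.e. strands_res = 2^(draw_step + subdiv); for example draw_step = 3 with subdiv = 1 gives 1 << 4 = 16. A one-line sketch:

/* strands_res = 2^(draw_step + subdiv), e.g. (3, 1) -> 16. */
static int demo_strands_res(int draw_step, int subdiv)
{
  return 1 << (draw_step + subdiv);
}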
diff --git a/source/blender/draw/intern/draw_common.c b/source/blender/draw/intern/draw_common.c
index 573b906413f..465a4f7a897 100644
--- a/source/blender/draw/intern/draw_common.c
+++ b/source/blender/draw/intern/draw_common.c
@@ -34,11 +34,11 @@
#include "draw_common.h"
#if 0
-#define UI_COLOR_RGB_FROM_U8(r, g, b, v4) \
- ARRAY_SET_ITEMS(v4, (float)r / 255.0f, (float)g / 255.0f, (float)b / 255.0f, 1.0)
+# define UI_COLOR_RGB_FROM_U8(r, g, b, v4) \
+ ARRAY_SET_ITEMS(v4, (float)r / 255.0f, (float)g / 255.0f, (float)b / 255.0f, 1.0)
#endif
#define UI_COLOR_RGBA_FROM_U8(r, g, b, a, v4) \
- ARRAY_SET_ITEMS(v4, (float)r / 255.0f, (float)g / 255.0f, (float)b / 255.0f, (float)a / 255.0f)
+ ARRAY_SET_ITEMS(v4, (float)r / 255.0f, (float)g / 255.0f, (float)b / 255.0f, (float)a / 255.0f)
/* Colors & Constants */
struct DRW_Global G_draw = {{{0}}};
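For readers skimming the macro hunk above: UI_COLOR_RGBA_FROM_U8() simply rescales 8-bit channel values into normalized floats. Written out as a plain function it would read roughly:

static void demo_color_rgba_from_u8(int r, int g, int b, int a, float v4[4])
{
  v4[0] = (float)r / 255.0f;
  v4[1] = (float)g / 255.0f;
  v4[2] = (float)b / 255.0f;
  v4[3] = (float)a / 255.0f;
}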
@@ -50,158 +50,162 @@ static struct GPUTexture *DRW_create_weight_colorramp_texture(void);
void DRW_globals_update(void)
{
- GlobalsUboStorage *gb = &G_draw.block;
-
- UI_GetThemeColor4fv(TH_WIRE, gb->colorWire);
- UI_GetThemeColor4fv(TH_WIRE_EDIT, gb->colorWireEdit);
- UI_GetThemeColor4fv(TH_ACTIVE, gb->colorActive);
- UI_GetThemeColor4fv(TH_SELECT, gb->colorSelect);
- UI_COLOR_RGBA_FROM_U8(0x88, 0xFF, 0xFF, 155, gb->colorLibrarySelect);
- UI_COLOR_RGBA_FROM_U8(0x55, 0xCC, 0xCC, 155, gb->colorLibrary);
- UI_GetThemeColor4fv(TH_TRANSFORM, gb->colorTransform);
- UI_GetThemeColor4fv(TH_LIGHT, gb->colorLight);
- UI_GetThemeColor4fv(TH_SPEAKER, gb->colorSpeaker);
- UI_GetThemeColor4fv(TH_CAMERA, gb->colorCamera);
- UI_GetThemeColor4fv(TH_EMPTY, gb->colorEmpty);
- UI_GetThemeColor4fv(TH_VERTEX, gb->colorVertex);
- UI_GetThemeColor4fv(TH_VERTEX_SELECT, gb->colorVertexSelect);
- UI_GetThemeColor4fv(TH_VERTEX_UNREFERENCED, gb->colorVertexUnreferenced);
- UI_COLOR_RGBA_FROM_U8(0xB0, 0x00, 0xB0, 0xFF, gb->colorVertexMissingData);
- UI_GetThemeColor4fv(TH_EDITMESH_ACTIVE, gb->colorEditMeshActive);
- UI_GetThemeColor4fv(TH_EDGE_SELECT, gb->colorEdgeSelect);
-
- UI_GetThemeColor4fv(TH_EDGE_SEAM, gb->colorEdgeSeam);
- UI_GetThemeColor4fv(TH_EDGE_SHARP, gb->colorEdgeSharp);
- UI_GetThemeColor4fv(TH_EDGE_CREASE, gb->colorEdgeCrease);
- UI_GetThemeColor4fv(TH_EDGE_BEVEL, gb->colorEdgeBWeight);
- UI_GetThemeColor4fv(TH_EDGE_FACESEL, gb->colorEdgeFaceSelect);
- UI_GetThemeColor4fv(TH_FACE, gb->colorFace);
- UI_GetThemeColor4fv(TH_FACE_SELECT, gb->colorFaceSelect);
- UI_GetThemeColor4fv(TH_NORMAL, gb->colorNormal);
- UI_GetThemeColor4fv(TH_VNORMAL, gb->colorVNormal);
- UI_GetThemeColor4fv(TH_LNORMAL, gb->colorLNormal);
- UI_GetThemeColor4fv(TH_FACE_DOT, gb->colorFaceDot);
- UI_GetThemeColor4fv(TH_BACK, gb->colorBackground);
-
- /* Custom median color to slightly affect the edit mesh colors. */
- interp_v4_v4v4(gb->colorEditMeshMiddle, gb->colorVertexSelect, gb->colorWireEdit, 0.35f);
- copy_v3_fl(gb->colorEditMeshMiddle, dot_v3v3(gb->colorEditMeshMiddle, (float[3]){0.3333f, 0.3333f, 0.3333f})); /* Desaturate */
-
- interp_v4_v4v4(gb->colorDupliSelect, gb->colorBackground, gb->colorSelect, 0.5f);
- /* Was 50% in 2.7x, since the lighter background made it easier to tell the color from black;
-  * with a darker background we need a more faded color. */
- interp_v4_v4v4(gb->colorDupli, gb->colorBackground, gb->colorWire, 0.3f);
+ GlobalsUboStorage *gb = &G_draw.block;
+
+ UI_GetThemeColor4fv(TH_WIRE, gb->colorWire);
+ UI_GetThemeColor4fv(TH_WIRE_EDIT, gb->colorWireEdit);
+ UI_GetThemeColor4fv(TH_ACTIVE, gb->colorActive);
+ UI_GetThemeColor4fv(TH_SELECT, gb->colorSelect);
+ UI_COLOR_RGBA_FROM_U8(0x88, 0xFF, 0xFF, 155, gb->colorLibrarySelect);
+ UI_COLOR_RGBA_FROM_U8(0x55, 0xCC, 0xCC, 155, gb->colorLibrary);
+ UI_GetThemeColor4fv(TH_TRANSFORM, gb->colorTransform);
+ UI_GetThemeColor4fv(TH_LIGHT, gb->colorLight);
+ UI_GetThemeColor4fv(TH_SPEAKER, gb->colorSpeaker);
+ UI_GetThemeColor4fv(TH_CAMERA, gb->colorCamera);
+ UI_GetThemeColor4fv(TH_EMPTY, gb->colorEmpty);
+ UI_GetThemeColor4fv(TH_VERTEX, gb->colorVertex);
+ UI_GetThemeColor4fv(TH_VERTEX_SELECT, gb->colorVertexSelect);
+ UI_GetThemeColor4fv(TH_VERTEX_UNREFERENCED, gb->colorVertexUnreferenced);
+ UI_COLOR_RGBA_FROM_U8(0xB0, 0x00, 0xB0, 0xFF, gb->colorVertexMissingData);
+ UI_GetThemeColor4fv(TH_EDITMESH_ACTIVE, gb->colorEditMeshActive);
+ UI_GetThemeColor4fv(TH_EDGE_SELECT, gb->colorEdgeSelect);
+
+ UI_GetThemeColor4fv(TH_EDGE_SEAM, gb->colorEdgeSeam);
+ UI_GetThemeColor4fv(TH_EDGE_SHARP, gb->colorEdgeSharp);
+ UI_GetThemeColor4fv(TH_EDGE_CREASE, gb->colorEdgeCrease);
+ UI_GetThemeColor4fv(TH_EDGE_BEVEL, gb->colorEdgeBWeight);
+ UI_GetThemeColor4fv(TH_EDGE_FACESEL, gb->colorEdgeFaceSelect);
+ UI_GetThemeColor4fv(TH_FACE, gb->colorFace);
+ UI_GetThemeColor4fv(TH_FACE_SELECT, gb->colorFaceSelect);
+ UI_GetThemeColor4fv(TH_NORMAL, gb->colorNormal);
+ UI_GetThemeColor4fv(TH_VNORMAL, gb->colorVNormal);
+ UI_GetThemeColor4fv(TH_LNORMAL, gb->colorLNormal);
+ UI_GetThemeColor4fv(TH_FACE_DOT, gb->colorFaceDot);
+ UI_GetThemeColor4fv(TH_BACK, gb->colorBackground);
+
+ /* Custom median color to slightly affect the edit mesh colors. */
+ interp_v4_v4v4(gb->colorEditMeshMiddle, gb->colorVertexSelect, gb->colorWireEdit, 0.35f);
+ copy_v3_fl(
+ gb->colorEditMeshMiddle,
+ dot_v3v3(gb->colorEditMeshMiddle, (float[3]){0.3333f, 0.3333f, 0.3333f})); /* Desaturate */
+
+ interp_v4_v4v4(gb->colorDupliSelect, gb->colorBackground, gb->colorSelect, 0.5f);
+  /* Was 50% in 2.7x, since the lighter background made it easier to tell the color from black;
+   * with a darker background we need a more faded color. */
+ interp_v4_v4v4(gb->colorDupli, gb->colorBackground, gb->colorWire, 0.3f);
#ifdef WITH_FREESTYLE
- UI_GetThemeColor4fv(TH_FREESTYLE_EDGE_MARK, gb->colorEdgeFreestyle);
- UI_GetThemeColor4fv(TH_FREESTYLE_FACE_MARK, gb->colorFaceFreestyle);
+ UI_GetThemeColor4fv(TH_FREESTYLE_EDGE_MARK, gb->colorEdgeFreestyle);
+ UI_GetThemeColor4fv(TH_FREESTYLE_FACE_MARK, gb->colorFaceFreestyle);
#else
- zero_v4(gb->colorEdgeFreestyle);
- zero_v4(gb->colorFaceFreestyle);
+ zero_v4(gb->colorEdgeFreestyle);
+ zero_v4(gb->colorFaceFreestyle);
#endif
- /* Curve */
- UI_GetThemeColor4fv(TH_HANDLE_FREE, gb->colorHandleFree);
- UI_GetThemeColor4fv(TH_HANDLE_AUTO, gb->colorHandleAuto);
- UI_GetThemeColor4fv(TH_HANDLE_VECT, gb->colorHandleVect);
- UI_GetThemeColor4fv(TH_HANDLE_ALIGN, gb->colorHandleAlign);
- UI_GetThemeColor4fv(TH_HANDLE_AUTOCLAMP, gb->colorHandleAutoclamp);
- UI_GetThemeColor4fv(TH_HANDLE_SEL_FREE, gb->colorHandleSelFree);
- UI_GetThemeColor4fv(TH_HANDLE_SEL_AUTO, gb->colorHandleSelAuto);
- UI_GetThemeColor4fv(TH_HANDLE_SEL_VECT, gb->colorHandleSelVect);
- UI_GetThemeColor4fv(TH_HANDLE_SEL_ALIGN, gb->colorHandleSelAlign);
- UI_GetThemeColor4fv(TH_HANDLE_SEL_AUTOCLAMP, gb->colorHandleSelAutoclamp);
- UI_GetThemeColor4fv(TH_NURB_ULINE, gb->colorNurbUline);
- UI_GetThemeColor4fv(TH_NURB_VLINE, gb->colorNurbVline);
- UI_GetThemeColor4fv(TH_NURB_SEL_ULINE, gb->colorNurbSelUline);
- UI_GetThemeColor4fv(TH_NURB_SEL_VLINE, gb->colorNurbSelVline);
- UI_GetThemeColor4fv(TH_ACTIVE_SPLINE, gb->colorActiveSpline);
-
- UI_GetThemeColor4fv(TH_BONE_POSE, gb->colorBonePose);
-
- UI_GetThemeColor4fv(TH_CFRAME, gb->colorCurrentFrame);
-
- /* Grid */
- UI_GetThemeColorShade4fv(TH_GRID, 10, gb->colorGrid);
- /* Emphasise division lines by making them lighter, instead of darker, when the background is darker than the grid. */
- UI_GetThemeColorShade4fv(
- TH_GRID,
- (gb->colorGrid[0] + gb->colorGrid[1] + gb->colorGrid[2] + 0.12f >
- gb->colorBackground[0] + gb->colorBackground[1] + gb->colorBackground[2]) ?
- 20 : -10, gb->colorGridEmphasise);
- /* Grid Axis */
- UI_GetThemeColorBlendShade4fv(TH_GRID, TH_AXIS_X, 0.5f, -10, gb->colorGridAxisX);
- UI_GetThemeColorBlendShade4fv(TH_GRID, TH_AXIS_Y, 0.5f, -10, gb->colorGridAxisY);
- UI_GetThemeColorBlendShade4fv(TH_GRID, TH_AXIS_Z, 0.5f, -10, gb->colorGridAxisZ);
-
- UI_GetThemeColorShadeAlpha4fv(TH_TRANSFORM, 0, -80, gb->colorDeselect);
- UI_GetThemeColorShadeAlpha4fv(TH_WIRE, 0, -30, gb->colorOutline);
- UI_GetThemeColorShadeAlpha4fv(TH_LIGHT, 0, 255, gb->colorLightNoAlpha);
-
- gb->sizeLightCenter = (U.obcenter_dia + 1.5f) * U.pixelsize;
- gb->sizeLightCircle = U.pixelsize * 9.0f;
- gb->sizeLightCircleShadow = gb->sizeLightCircle + U.pixelsize * 3.0f;
-
- /* M_SQRT2 to be at least the same size as the old square */
- gb->sizeVertex = U.pixelsize * (max_ff(1.0f, UI_GetThemeValuef(TH_VERTEX_SIZE) * (float)M_SQRT2 / 2.0f));
- gb->sizeFaceDot = U.pixelsize * UI_GetThemeValuef(TH_FACEDOT_SIZE);
- gb->sizeEdge = U.pixelsize * (1.0f / 2.0f); /* TODO Theme */
- gb->sizeEdgeFix = U.pixelsize * (0.5f + 2.0f * (2.0f * (gb->sizeEdge * (float)M_SQRT1_2)));
-
- /* Color management. */
- if (DRW_state_is_image_render()) {
- float *color = gb->UBO_FIRST_COLOR;
- do {
- /* TODO more accurate transform. */
- srgb_to_linearrgb_v4(color, color);
- color += 4;
- } while (color != gb->UBO_LAST_COLOR);
- }
-
- if (G_draw.block_ubo == NULL) {
- G_draw.block_ubo = DRW_uniformbuffer_create(sizeof(GlobalsUboStorage), gb);
- }
-
- DRW_uniformbuffer_update(G_draw.block_ubo, gb);
-
- if (!G_draw.ramp) {
- ColorBand ramp = {0};
- float *colors;
- int col_size;
-
- ramp.tot = 3;
- ramp.data[0].a = 1.0f;
- ramp.data[0].b = 1.0f;
- ramp.data[0].pos = 0.0f;
- ramp.data[1].a = 1.0f;
- ramp.data[1].g = 1.0f;
- ramp.data[1].pos = 0.5f;
- ramp.data[2].a = 1.0f;
- ramp.data[2].r = 1.0f;
- ramp.data[2].pos = 1.0f;
-
- BKE_colorband_evaluate_table_rgba(&ramp, &colors, &col_size);
-
- G_draw.ramp = GPU_texture_create_1d(col_size, GPU_RGBA8, colors, NULL);
-
- MEM_freeN(colors);
- }
-
- /* Weight Painting color ramp texture */
- bool user_weight_ramp = (U.flag & USER_CUSTOM_RANGE) != 0;
-
- if (weight_ramp_custom != user_weight_ramp ||
- (user_weight_ramp && memcmp(&weight_ramp_copy, &U.coba_weight, sizeof(ColorBand)) != 0))
- {
- DRW_TEXTURE_FREE_SAFE(G_draw.weight_ramp);
- }
-
- if (G_draw.weight_ramp == NULL) {
- weight_ramp_custom = user_weight_ramp;
- memcpy(&weight_ramp_copy, &U.coba_weight, sizeof(ColorBand));
-
- G_draw.weight_ramp = DRW_create_weight_colorramp_texture();
- }
+ /* Curve */
+ UI_GetThemeColor4fv(TH_HANDLE_FREE, gb->colorHandleFree);
+ UI_GetThemeColor4fv(TH_HANDLE_AUTO, gb->colorHandleAuto);
+ UI_GetThemeColor4fv(TH_HANDLE_VECT, gb->colorHandleVect);
+ UI_GetThemeColor4fv(TH_HANDLE_ALIGN, gb->colorHandleAlign);
+ UI_GetThemeColor4fv(TH_HANDLE_AUTOCLAMP, gb->colorHandleAutoclamp);
+ UI_GetThemeColor4fv(TH_HANDLE_SEL_FREE, gb->colorHandleSelFree);
+ UI_GetThemeColor4fv(TH_HANDLE_SEL_AUTO, gb->colorHandleSelAuto);
+ UI_GetThemeColor4fv(TH_HANDLE_SEL_VECT, gb->colorHandleSelVect);
+ UI_GetThemeColor4fv(TH_HANDLE_SEL_ALIGN, gb->colorHandleSelAlign);
+ UI_GetThemeColor4fv(TH_HANDLE_SEL_AUTOCLAMP, gb->colorHandleSelAutoclamp);
+ UI_GetThemeColor4fv(TH_NURB_ULINE, gb->colorNurbUline);
+ UI_GetThemeColor4fv(TH_NURB_VLINE, gb->colorNurbVline);
+ UI_GetThemeColor4fv(TH_NURB_SEL_ULINE, gb->colorNurbSelUline);
+ UI_GetThemeColor4fv(TH_NURB_SEL_VLINE, gb->colorNurbSelVline);
+ UI_GetThemeColor4fv(TH_ACTIVE_SPLINE, gb->colorActiveSpline);
+
+ UI_GetThemeColor4fv(TH_BONE_POSE, gb->colorBonePose);
+
+ UI_GetThemeColor4fv(TH_CFRAME, gb->colorCurrentFrame);
+
+ /* Grid */
+ UI_GetThemeColorShade4fv(TH_GRID, 10, gb->colorGrid);
+  /* Emphasise division lines by making them lighter, instead of darker, when the background is darker than the grid. */
+ UI_GetThemeColorShade4fv(
+ TH_GRID,
+ (gb->colorGrid[0] + gb->colorGrid[1] + gb->colorGrid[2] + 0.12f >
+ gb->colorBackground[0] + gb->colorBackground[1] + gb->colorBackground[2]) ?
+ 20 :
+ -10,
+ gb->colorGridEmphasise);
+ /* Grid Axis */
+ UI_GetThemeColorBlendShade4fv(TH_GRID, TH_AXIS_X, 0.5f, -10, gb->colorGridAxisX);
+ UI_GetThemeColorBlendShade4fv(TH_GRID, TH_AXIS_Y, 0.5f, -10, gb->colorGridAxisY);
+ UI_GetThemeColorBlendShade4fv(TH_GRID, TH_AXIS_Z, 0.5f, -10, gb->colorGridAxisZ);
+
+ UI_GetThemeColorShadeAlpha4fv(TH_TRANSFORM, 0, -80, gb->colorDeselect);
+ UI_GetThemeColorShadeAlpha4fv(TH_WIRE, 0, -30, gb->colorOutline);
+ UI_GetThemeColorShadeAlpha4fv(TH_LIGHT, 0, 255, gb->colorLightNoAlpha);
+
+ gb->sizeLightCenter = (U.obcenter_dia + 1.5f) * U.pixelsize;
+ gb->sizeLightCircle = U.pixelsize * 9.0f;
+ gb->sizeLightCircleShadow = gb->sizeLightCircle + U.pixelsize * 3.0f;
+
+  /* M_SQRT2 to be at least the same size as the old square */
+ gb->sizeVertex = U.pixelsize *
+ (max_ff(1.0f, UI_GetThemeValuef(TH_VERTEX_SIZE) * (float)M_SQRT2 / 2.0f));
+ gb->sizeFaceDot = U.pixelsize * UI_GetThemeValuef(TH_FACEDOT_SIZE);
+ gb->sizeEdge = U.pixelsize * (1.0f / 2.0f); /* TODO Theme */
+ gb->sizeEdgeFix = U.pixelsize * (0.5f + 2.0f * (2.0f * (gb->sizeEdge * (float)M_SQRT1_2)));
+
+ /* Color management. */
+ if (DRW_state_is_image_render()) {
+ float *color = gb->UBO_FIRST_COLOR;
+ do {
+ /* TODO more accurate transform. */
+ srgb_to_linearrgb_v4(color, color);
+ color += 4;
+ } while (color != gb->UBO_LAST_COLOR);
+ }
+
+ if (G_draw.block_ubo == NULL) {
+ G_draw.block_ubo = DRW_uniformbuffer_create(sizeof(GlobalsUboStorage), gb);
+ }
+
+ DRW_uniformbuffer_update(G_draw.block_ubo, gb);
+
+ if (!G_draw.ramp) {
+ ColorBand ramp = {0};
+ float *colors;
+ int col_size;
+
+ ramp.tot = 3;
+ ramp.data[0].a = 1.0f;
+ ramp.data[0].b = 1.0f;
+ ramp.data[0].pos = 0.0f;
+ ramp.data[1].a = 1.0f;
+ ramp.data[1].g = 1.0f;
+ ramp.data[1].pos = 0.5f;
+ ramp.data[2].a = 1.0f;
+ ramp.data[2].r = 1.0f;
+ ramp.data[2].pos = 1.0f;
+
+ BKE_colorband_evaluate_table_rgba(&ramp, &colors, &col_size);
+
+ G_draw.ramp = GPU_texture_create_1d(col_size, GPU_RGBA8, colors, NULL);
+
+ MEM_freeN(colors);
+ }
+
+ /* Weight Painting color ramp texture */
+ bool user_weight_ramp = (U.flag & USER_CUSTOM_RANGE) != 0;
+
+ if (weight_ramp_custom != user_weight_ramp ||
+ (user_weight_ramp && memcmp(&weight_ramp_copy, &U.coba_weight, sizeof(ColorBand)) != 0)) {
+ DRW_TEXTURE_FREE_SAFE(G_draw.weight_ramp);
+ }
+
+ if (G_draw.weight_ramp == NULL) {
+ weight_ramp_custom = user_weight_ramp;
+ memcpy(&weight_ramp_copy, &U.coba_weight, sizeof(ColorBand));
+
+ G_draw.weight_ramp = DRW_create_weight_colorramp_texture();
+ }
}
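Two of the color computations in DRW_globals_update() above are easy to miss in the reflow. The edit-mesh middle color is desaturated by replacing RGB with its average (the dot product with (1/3, 1/3, 1/3)), and the grid emphasis shade is chosen by comparing the summed channel brightness of grid and background, with a 0.12f bias toward lightening. Both as a standalone sketch:

/* Desaturate: same effect as dot_v3v3(rgb, {1/3, 1/3, 1/3}) + copy_v3_fl(). */
static void demo_desaturate(float rgb[3])
{
  const float avg = (rgb[0] + rgb[1] + rgb[2]) / 3.0f;
  rgb[0] = rgb[1] = rgb[2] = avg;
}

/* Returns the theme shade offset used for the emphasised grid lines. */
static int demo_grid_emphasis_shade(const float grid[3], const float bg[3])
{
  const float grid_sum = grid[0] + grid[1] + grid[2];
  const float bg_sum = bg[0] + bg[1] + bg[2];
  return (grid_sum + 0.12f > bg_sum) ? 20 : -10;
}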
/* ********************************* SHGROUP ************************************* */
@@ -238,737 +242,807 @@ extern char datatoc_object_mball_handles_vert_glsl[];
extern char datatoc_object_empty_axes_vert_glsl[];
typedef struct COMMON_Shaders {
- struct GPUShader *shape_outline;
- struct GPUShader *shape_solid;
- struct GPUShader *bone_axes;
- struct GPUShader *bone_envelope;
- struct GPUShader *bone_envelope_distance;
- struct GPUShader *bone_envelope_outline;
- struct GPUShader *bone_sphere;
- struct GPUShader *bone_sphere_outline;
- struct GPUShader *bone_stick;
- struct GPUShader *bone_dofs;
-
- struct GPUShader *mpath_line_sh;
- struct GPUShader *mpath_points_sh;
-
- struct GPUShader *volume_velocity_needle_sh;
- struct GPUShader *volume_velocity_sh;
- struct GPUShader *empty_axes_sh;
-
- struct GPUShader *mball_handles;
+ struct GPUShader *shape_outline;
+ struct GPUShader *shape_solid;
+ struct GPUShader *bone_axes;
+ struct GPUShader *bone_envelope;
+ struct GPUShader *bone_envelope_distance;
+ struct GPUShader *bone_envelope_outline;
+ struct GPUShader *bone_sphere;
+ struct GPUShader *bone_sphere_outline;
+ struct GPUShader *bone_stick;
+ struct GPUShader *bone_dofs;
+
+ struct GPUShader *mpath_line_sh;
+ struct GPUShader *mpath_points_sh;
+
+ struct GPUShader *volume_velocity_needle_sh;
+ struct GPUShader *volume_velocity_sh;
+ struct GPUShader *empty_axes_sh;
+
+ struct GPUShader *mball_handles;
} COMMON_Shaders;
static COMMON_Shaders g_shaders[GPU_SHADER_CFG_LEN] = {{NULL}};
static struct {
- struct GPUVertFormat *instance_screenspace;
- struct GPUVertFormat *instance_color;
- struct GPUVertFormat *instance_screen_aligned;
- struct GPUVertFormat *instance_scaled;
- struct GPUVertFormat *instance_sized;
- struct GPUVertFormat *instance_outline;
- struct GPUVertFormat *instance;
- struct GPUVertFormat *instance_camera;
- struct GPUVertFormat *instance_distance_lines;
- struct GPUVertFormat *instance_spot;
- struct GPUVertFormat *instance_bone;
- struct GPUVertFormat *instance_bone_dof;
- struct GPUVertFormat *instance_bone_stick;
- struct GPUVertFormat *instance_bone_outline;
- struct GPUVertFormat *instance_bone_envelope;
- struct GPUVertFormat *instance_bone_envelope_distance;
- struct GPUVertFormat *instance_bone_envelope_outline;
- struct GPUVertFormat *instance_mball_handles;
- struct GPUVertFormat *dynlines_color;
+ struct GPUVertFormat *instance_screenspace;
+ struct GPUVertFormat *instance_color;
+ struct GPUVertFormat *instance_screen_aligned;
+ struct GPUVertFormat *instance_scaled;
+ struct GPUVertFormat *instance_sized;
+ struct GPUVertFormat *instance_outline;
+ struct GPUVertFormat *instance;
+ struct GPUVertFormat *instance_camera;
+ struct GPUVertFormat *instance_distance_lines;
+ struct GPUVertFormat *instance_spot;
+ struct GPUVertFormat *instance_bone;
+ struct GPUVertFormat *instance_bone_dof;
+ struct GPUVertFormat *instance_bone_stick;
+ struct GPUVertFormat *instance_bone_outline;
+ struct GPUVertFormat *instance_bone_envelope;
+ struct GPUVertFormat *instance_bone_envelope_distance;
+ struct GPUVertFormat *instance_bone_envelope_outline;
+ struct GPUVertFormat *instance_mball_handles;
+ struct GPUVertFormat *dynlines_color;
} g_formats = {NULL};
void DRW_globals_free(void)
{
- struct GPUVertFormat **format = &g_formats.instance_screenspace;
- for (int i = 0; i < sizeof(g_formats) / sizeof(void *); ++i, ++format) {
- MEM_SAFE_FREE(*format);
- }
-
- for (int j = 0; j < GPU_SHADER_CFG_LEN; j++) {
- struct GPUShader **shader = &g_shaders[j].shape_outline;
- for (int i = 0; i < sizeof(g_shaders[j]) / sizeof(void *); ++i, ++shader) {
- DRW_SHADER_FREE_SAFE(*shader);
- }
- }
+ struct GPUVertFormat **format = &g_formats.instance_screenspace;
+ for (int i = 0; i < sizeof(g_formats) / sizeof(void *); ++i, ++format) {
+ MEM_SAFE_FREE(*format);
+ }
+
+ for (int j = 0; j < GPU_SHADER_CFG_LEN; j++) {
+ struct GPUShader **shader = &g_shaders[j].shape_outline;
+ for (int i = 0; i < sizeof(g_shaders[j]) / sizeof(void *); ++i, ++shader) {
+ DRW_SHADER_FREE_SAFE(*shader);
+ }
+ }
}
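DRW_globals_free() above walks two structs of same-typed pointers as if they were arrays, which works because every member is a pointer, so sizeof(struct) / sizeof(void *) yields the member count (this assumes no padding between pointer members, which holds on the platforms Blender targets). A minimal sketch of the idiom:

#include <stdlib.h>

typedef struct DemoFormats {
  void *instance_a;
  void *instance_b;
  void *instance_c;
} DemoFormats;

static void demo_free_all(DemoFormats *formats)
{
  void **ptr = &formats->instance_a;
  for (size_t i = 0; i < sizeof(*formats) / sizeof(void *); i++, ptr++) {
    free(*ptr); /* stands in for MEM_SAFE_FREE() */
    *ptr = NULL;
  }
}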
void DRW_shgroup_world_clip_planes_from_rv3d(DRWShadingGroup *shgrp, const RegionView3D *rv3d)
{
- int world_clip_planes_len = (rv3d->viewlock & RV3D_BOXCLIP) ? 4 : 6;
- DRW_shgroup_uniform_vec4(shgrp, "WorldClipPlanes", rv3d->clip[0], world_clip_planes_len);
- DRW_shgroup_state_enable(shgrp, DRW_STATE_CLIP_PLANES);
+ int world_clip_planes_len = (rv3d->viewlock & RV3D_BOXCLIP) ? 4 : 6;
+ DRW_shgroup_uniform_vec4(shgrp, "WorldClipPlanes", rv3d->clip[0], world_clip_planes_len);
+ DRW_shgroup_state_enable(shgrp, DRW_STATE_CLIP_PLANES);
}
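The plane count above reflects the two clipping modes of the 3D view: a box-clipped (quad-view locked) region needs only the 4 side planes, while general region clipping uploads all 6. Reduced to its decision, with an illustrative flag standing in for RV3D_BOXCLIP:

enum { DEMO_BOXCLIP = 1 << 0 }; /* illustrative stand-in for RV3D_BOXCLIP */

static int demo_clip_plane_count(int viewlock)
{
  return (viewlock & DEMO_BOXCLIP) ? 4 : 6;
}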
DRWShadingGroup *shgroup_dynlines_flat_color(DRWPass *pass, eGPUShaderConfig sh_cfg)
{
- GPUShader *sh = GPU_shader_get_builtin_shader_with_config(GPU_SHADER_3D_FLAT_COLOR, sh_cfg);
-
- DRW_shgroup_instance_format(g_formats.dynlines_color, {
- {"pos", DRW_ATTR_FLOAT, 3},
- {"color", DRW_ATTR_FLOAT, 4},
- });
-
- DRWShadingGroup *grp = DRW_shgroup_line_batch_create_with_format(sh, pass, g_formats.dynlines_color);
- if (sh_cfg == GPU_SHADER_CFG_CLIPPED) {
- DRW_shgroup_world_clip_planes_from_rv3d(grp, DRW_context_state_get()->rv3d);
- }
- return grp;
+ GPUShader *sh = GPU_shader_get_builtin_shader_with_config(GPU_SHADER_3D_FLAT_COLOR, sh_cfg);
+
+ DRW_shgroup_instance_format(g_formats.dynlines_color,
+ {
+ {"pos", DRW_ATTR_FLOAT, 3},
+ {"color", DRW_ATTR_FLOAT, 4},
+ });
+
+ DRWShadingGroup *grp = DRW_shgroup_line_batch_create_with_format(
+ sh, pass, g_formats.dynlines_color);
+ if (sh_cfg == GPU_SHADER_CFG_CLIPPED) {
+ DRW_shgroup_world_clip_planes_from_rv3d(grp, DRW_context_state_get()->rv3d);
+ }
+ return grp;
}
-DRWShadingGroup *shgroup_dynlines_dashed_uniform_color(DRWPass *pass, const float color[4], eGPUShaderConfig sh_cfg)
+DRWShadingGroup *shgroup_dynlines_dashed_uniform_color(DRWPass *pass,
+ const float color[4],
+ eGPUShaderConfig sh_cfg)
{
- GPUShader *sh = GPU_shader_get_builtin_shader_with_config(GPU_SHADER_3D_LINE_DASHED_UNIFORM_COLOR, sh_cfg);
-
- static float dash_width = 6.0f;
- static float dash_factor = 0.5f;
- DRWShadingGroup *grp = DRW_shgroup_line_batch_create(sh, pass);
- DRW_shgroup_uniform_vec4(grp, "color", color, 1);
- DRW_shgroup_uniform_vec2(grp, "viewport_size", DRW_viewport_size_get(), 1);
- DRW_shgroup_uniform_float(grp, "dash_width", &dash_width, 1);
- DRW_shgroup_uniform_float(grp, "dash_factor", &dash_factor, 1);
- DRW_shgroup_uniform_int_copy(grp, "colors_len", 0); /* "simple" mode */
- if (sh_cfg == GPU_SHADER_CFG_CLIPPED) {
- DRW_shgroup_world_clip_planes_from_rv3d(grp, DRW_context_state_get()->rv3d);
- }
- return grp;
+ GPUShader *sh = GPU_shader_get_builtin_shader_with_config(
+ GPU_SHADER_3D_LINE_DASHED_UNIFORM_COLOR, sh_cfg);
+
+ static float dash_width = 6.0f;
+ static float dash_factor = 0.5f;
+ DRWShadingGroup *grp = DRW_shgroup_line_batch_create(sh, pass);
+ DRW_shgroup_uniform_vec4(grp, "color", color, 1);
+ DRW_shgroup_uniform_vec2(grp, "viewport_size", DRW_viewport_size_get(), 1);
+ DRW_shgroup_uniform_float(grp, "dash_width", &dash_width, 1);
+ DRW_shgroup_uniform_float(grp, "dash_factor", &dash_factor, 1);
+ DRW_shgroup_uniform_int_copy(grp, "colors_len", 0); /* "simple" mode */
+ if (sh_cfg == GPU_SHADER_CFG_CLIPPED) {
+ DRW_shgroup_world_clip_planes_from_rv3d(grp, DRW_context_state_get()->rv3d);
+ }
+ return grp;
}
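The dash_width / dash_factor uniforms set above parameterize the dashed-line shader: a fragment at screen-space distance dist along the line is visible when it falls inside the first dash_factor fraction of each dash_width-pixel period. A hedged C restatement of that test (the real logic lives in the GLSL shader):

#include <math.h>

static int demo_dash_visible(float dist, float dash_width, float dash_factor)
{
  /* With dash_width = 6 and dash_factor = 0.5, the first 3 pixels of every
   * 6-pixel period are drawn. */
  return fmodf(dist, dash_width) / dash_width < dash_factor;
}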
-DRWShadingGroup *shgroup_dynpoints_uniform_color(
- DRWPass *pass, const float color[4], const float *size, eGPUShaderConfig sh_cfg)
+DRWShadingGroup *shgroup_dynpoints_uniform_color(DRWPass *pass,
+ const float color[4],
+ const float *size,
+ eGPUShaderConfig sh_cfg)
{
- GPUShader *sh = GPU_shader_get_builtin_shader_with_config(GPU_SHADER_3D_POINT_UNIFORM_SIZE_UNIFORM_COLOR_AA, sh_cfg);
-
- DRWShadingGroup *grp = DRW_shgroup_point_batch_create(sh, pass);
- DRW_shgroup_uniform_vec4(grp, "color", color, 1);
- DRW_shgroup_uniform_float(grp, "size", size, 1);
- DRW_shgroup_state_enable(grp, DRW_STATE_POINT);
- if (sh_cfg == GPU_SHADER_CFG_CLIPPED) {
- DRW_shgroup_world_clip_planes_from_rv3d(grp, DRW_context_state_get()->rv3d);
- }
- return grp;
+ GPUShader *sh = GPU_shader_get_builtin_shader_with_config(
+ GPU_SHADER_3D_POINT_UNIFORM_SIZE_UNIFORM_COLOR_AA, sh_cfg);
+
+ DRWShadingGroup *grp = DRW_shgroup_point_batch_create(sh, pass);
+ DRW_shgroup_uniform_vec4(grp, "color", color, 1);
+ DRW_shgroup_uniform_float(grp, "size", size, 1);
+ DRW_shgroup_state_enable(grp, DRW_STATE_POINT);
+ if (sh_cfg == GPU_SHADER_CFG_CLIPPED) {
+ DRW_shgroup_world_clip_planes_from_rv3d(grp, DRW_context_state_get()->rv3d);
+ }
+ return grp;
}
-DRWShadingGroup *shgroup_groundlines_uniform_color(DRWPass *pass, const float color[4], eGPUShaderConfig sh_cfg)
+DRWShadingGroup *shgroup_groundlines_uniform_color(DRWPass *pass,
+ const float color[4],
+ eGPUShaderConfig sh_cfg)
{
- GPUShader *sh = GPU_shader_get_builtin_shader_with_config(GPU_SHADER_3D_GROUNDLINE, sh_cfg);
-
- DRWShadingGroup *grp = DRW_shgroup_point_batch_create(sh, pass);
- DRW_shgroup_uniform_vec4(grp, "color", color, 1);
- if (sh_cfg == GPU_SHADER_CFG_CLIPPED) {
- DRW_shgroup_world_clip_planes_from_rv3d(grp, DRW_context_state_get()->rv3d);
- }
- return grp;
+ GPUShader *sh = GPU_shader_get_builtin_shader_with_config(GPU_SHADER_3D_GROUNDLINE, sh_cfg);
+
+ DRWShadingGroup *grp = DRW_shgroup_point_batch_create(sh, pass);
+ DRW_shgroup_uniform_vec4(grp, "color", color, 1);
+ if (sh_cfg == GPU_SHADER_CFG_CLIPPED) {
+ DRW_shgroup_world_clip_planes_from_rv3d(grp, DRW_context_state_get()->rv3d);
+ }
+ return grp;
}
-DRWShadingGroup *shgroup_groundpoints_uniform_color(DRWPass *pass, const float color[4], eGPUShaderConfig sh_cfg)
+DRWShadingGroup *shgroup_groundpoints_uniform_color(DRWPass *pass,
+ const float color[4],
+ eGPUShaderConfig sh_cfg)
{
- GPUShader *sh = GPU_shader_get_builtin_shader_with_config(GPU_SHADER_3D_GROUNDPOINT, sh_cfg);
-
- DRWShadingGroup *grp = DRW_shgroup_point_batch_create(sh, pass);
- DRW_shgroup_uniform_vec4(grp, "color", color, 1);
- DRW_shgroup_state_enable(grp, DRW_STATE_POINT);
- if (sh_cfg == GPU_SHADER_CFG_CLIPPED) {
- DRW_shgroup_world_clip_planes_from_rv3d(grp, DRW_context_state_get()->rv3d);
- }
- return grp;
+ GPUShader *sh = GPU_shader_get_builtin_shader_with_config(GPU_SHADER_3D_GROUNDPOINT, sh_cfg);
+
+ DRWShadingGroup *grp = DRW_shgroup_point_batch_create(sh, pass);
+ DRW_shgroup_uniform_vec4(grp, "color", color, 1);
+ DRW_shgroup_state_enable(grp, DRW_STATE_POINT);
+ if (sh_cfg == GPU_SHADER_CFG_CLIPPED) {
+ DRW_shgroup_world_clip_planes_from_rv3d(grp, DRW_context_state_get()->rv3d);
+ }
+ return grp;
}
-DRWShadingGroup *shgroup_instance_screenspace(
- DRWPass *pass, struct GPUBatch *geom, const float *size, eGPUShaderConfig sh_cfg)
+DRWShadingGroup *shgroup_instance_screenspace(DRWPass *pass,
+ struct GPUBatch *geom,
+ const float *size,
+ eGPUShaderConfig sh_cfg)
{
- GPUShader *sh = GPU_shader_get_builtin_shader_with_config(GPU_SHADER_3D_SCREENSPACE_VARIYING_COLOR, sh_cfg);
-
- DRW_shgroup_instance_format(g_formats.instance_screenspace, {
- {"world_pos", DRW_ATTR_FLOAT, 3},
- {"color", DRW_ATTR_FLOAT, 3},
- });
-
- DRWShadingGroup *grp = DRW_shgroup_instance_create(sh, pass, geom, g_formats.instance_screenspace);
- DRW_shgroup_uniform_float(grp, "size", size, 1);
- DRW_shgroup_uniform_float(grp, "pixel_size", DRW_viewport_pixelsize_get(), 1);
- DRW_shgroup_uniform_vec3(grp, "screen_vecs[0]", DRW_viewport_screenvecs_get(), 2);
- if (sh_cfg == GPU_SHADER_CFG_CLIPPED) {
- DRW_shgroup_world_clip_planes_from_rv3d(grp, DRW_context_state_get()->rv3d);
- }
- return grp;
+ GPUShader *sh = GPU_shader_get_builtin_shader_with_config(
+ GPU_SHADER_3D_SCREENSPACE_VARIYING_COLOR, sh_cfg);
+
+ DRW_shgroup_instance_format(g_formats.instance_screenspace,
+ {
+ {"world_pos", DRW_ATTR_FLOAT, 3},
+ {"color", DRW_ATTR_FLOAT, 3},
+ });
+
+ DRWShadingGroup *grp = DRW_shgroup_instance_create(
+ sh, pass, geom, g_formats.instance_screenspace);
+ DRW_shgroup_uniform_float(grp, "size", size, 1);
+ DRW_shgroup_uniform_float(grp, "pixel_size", DRW_viewport_pixelsize_get(), 1);
+ DRW_shgroup_uniform_vec3(grp, "screen_vecs[0]", DRW_viewport_screenvecs_get(), 2);
+ if (sh_cfg == GPU_SHADER_CFG_CLIPPED) {
+ DRW_shgroup_world_clip_planes_from_rv3d(grp, DRW_context_state_get()->rv3d);
+ }
+ return grp;
}
DRWShadingGroup *shgroup_instance_solid(DRWPass *pass, struct GPUBatch *geom)
{
- static float light[3] = {0.0f, 0.0f, 1.0f};
- GPUShader *sh = GPU_shader_get_builtin_shader(GPU_SHADER_3D_OBJECTSPACE_SIMPLE_LIGHTING_VARIYING_COLOR);
+ static float light[3] = {0.0f, 0.0f, 1.0f};
+ GPUShader *sh = GPU_shader_get_builtin_shader(
+ GPU_SHADER_3D_OBJECTSPACE_SIMPLE_LIGHTING_VARIYING_COLOR);
- DRW_shgroup_instance_format(g_formats.instance_color, {
- {"InstanceModelMatrix", DRW_ATTR_FLOAT, 16},
- {"color", DRW_ATTR_FLOAT, 4},
- });
+ DRW_shgroup_instance_format(g_formats.instance_color,
+ {
+ {"InstanceModelMatrix", DRW_ATTR_FLOAT, 16},
+ {"color", DRW_ATTR_FLOAT, 4},
+ });
- DRWShadingGroup *grp = DRW_shgroup_instance_create(sh, pass, geom, g_formats.instance_color);
- DRW_shgroup_uniform_vec3(grp, "light", light, 1);
+ DRWShadingGroup *grp = DRW_shgroup_instance_create(sh, pass, geom, g_formats.instance_color);
+ DRW_shgroup_uniform_vec3(grp, "light", light, 1);
- return grp;
+ return grp;
}
DRWShadingGroup *shgroup_instance_wire(DRWPass *pass, struct GPUBatch *geom)
{
- GPUShader *sh = GPU_shader_get_builtin_shader(GPU_SHADER_3D_OBJECTSPACE_VARIYING_COLOR);
+ GPUShader *sh = GPU_shader_get_builtin_shader(GPU_SHADER_3D_OBJECTSPACE_VARIYING_COLOR);
- DRW_shgroup_instance_format(g_formats.instance_color, {
- {"InstanceModelMatrix", DRW_ATTR_FLOAT, 16},
- {"color", DRW_ATTR_FLOAT, 4},
- });
+ DRW_shgroup_instance_format(g_formats.instance_color,
+ {
+ {"InstanceModelMatrix", DRW_ATTR_FLOAT, 16},
+ {"color", DRW_ATTR_FLOAT, 4},
+ });
- DRWShadingGroup *grp = DRW_shgroup_instance_create(sh, pass, geom, g_formats.instance_color);
+ DRWShadingGroup *grp = DRW_shgroup_instance_create(sh, pass, geom, g_formats.instance_color);
- return grp;
+ return grp;
}
-DRWShadingGroup *shgroup_instance_screen_aligned(
- DRWPass *pass, struct GPUBatch *geom, eGPUShaderConfig sh_cfg)
+DRWShadingGroup *shgroup_instance_screen_aligned(DRWPass *pass,
+ struct GPUBatch *geom,
+ eGPUShaderConfig sh_cfg)
{
- GPUShader *sh = GPU_shader_get_builtin_shader_with_config(GPU_SHADER_3D_INSTANCE_SCREEN_ALIGNED, sh_cfg);
-
- DRW_shgroup_instance_format(g_formats.instance_screen_aligned, {
- {"color", DRW_ATTR_FLOAT, 3},
- {"size", DRW_ATTR_FLOAT, 1},
- {"InstanceModelMatrix", DRW_ATTR_FLOAT, 16},
- });
-
- DRWShadingGroup *grp = DRW_shgroup_instance_create(sh, pass, geom, g_formats.instance_screen_aligned);
- DRW_shgroup_uniform_vec3(grp, "screen_vecs[0]", DRW_viewport_screenvecs_get(), 2);
- if (sh_cfg == GPU_SHADER_CFG_CLIPPED) {
- DRW_shgroup_world_clip_planes_from_rv3d(grp, DRW_context_state_get()->rv3d);
- }
- return grp;
+ GPUShader *sh = GPU_shader_get_builtin_shader_with_config(GPU_SHADER_3D_INSTANCE_SCREEN_ALIGNED,
+ sh_cfg);
+
+ DRW_shgroup_instance_format(g_formats.instance_screen_aligned,
+ {
+ {"color", DRW_ATTR_FLOAT, 3},
+ {"size", DRW_ATTR_FLOAT, 1},
+ {"InstanceModelMatrix", DRW_ATTR_FLOAT, 16},
+ });
+
+ DRWShadingGroup *grp = DRW_shgroup_instance_create(
+ sh, pass, geom, g_formats.instance_screen_aligned);
+ DRW_shgroup_uniform_vec3(grp, "screen_vecs[0]", DRW_viewport_screenvecs_get(), 2);
+ if (sh_cfg == GPU_SHADER_CFG_CLIPPED) {
+ DRW_shgroup_world_clip_planes_from_rv3d(grp, DRW_context_state_get()->rv3d);
+ }
+ return grp;
}
-DRWShadingGroup *shgroup_instance_scaled(DRWPass *pass, struct GPUBatch *geom, eGPUShaderConfig sh_cfg)
+DRWShadingGroup *shgroup_instance_scaled(DRWPass *pass,
+ struct GPUBatch *geom,
+ eGPUShaderConfig sh_cfg)
{
- GPUShader *sh_inst = GPU_shader_get_builtin_shader_with_config(GPU_SHADER_INSTANCE_VARIYING_COLOR_VARIYING_SCALE, sh_cfg);
-
- DRW_shgroup_instance_format(g_formats.instance_scaled, {
- {"color", DRW_ATTR_FLOAT, 3},
- {"size", DRW_ATTR_FLOAT, 3},
- {"InstanceModelMatrix", DRW_ATTR_FLOAT, 16},
- });
-
- DRWShadingGroup *grp = DRW_shgroup_instance_create(sh_inst, pass, geom, g_formats.instance_scaled);
- if (sh_cfg == GPU_SHADER_CFG_CLIPPED) {
- DRW_shgroup_world_clip_planes_from_rv3d(grp, DRW_context_state_get()->rv3d);
- }
- return grp;
+ GPUShader *sh_inst = GPU_shader_get_builtin_shader_with_config(
+ GPU_SHADER_INSTANCE_VARIYING_COLOR_VARIYING_SCALE, sh_cfg);
+
+ DRW_shgroup_instance_format(g_formats.instance_scaled,
+ {
+ {"color", DRW_ATTR_FLOAT, 3},
+ {"size", DRW_ATTR_FLOAT, 3},
+ {"InstanceModelMatrix", DRW_ATTR_FLOAT, 16},
+ });
+
+ DRWShadingGroup *grp = DRW_shgroup_instance_create(
+ sh_inst, pass, geom, g_formats.instance_scaled);
+ if (sh_cfg == GPU_SHADER_CFG_CLIPPED) {
+ DRW_shgroup_world_clip_planes_from_rv3d(grp, DRW_context_state_get()->rv3d);
+ }
+ return grp;
}
DRWShadingGroup *shgroup_instance(DRWPass *pass, struct GPUBatch *geom, eGPUShaderConfig sh_cfg)
{
- GPUShader *sh_inst = GPU_shader_get_builtin_shader_with_config(GPU_SHADER_INSTANCE_VARIYING_COLOR_VARIYING_SIZE, sh_cfg);
-
- DRW_shgroup_instance_format(g_formats.instance_sized, {
- {"color", DRW_ATTR_FLOAT, 4},
- {"size", DRW_ATTR_FLOAT, 1},
- {"InstanceModelMatrix", DRW_ATTR_FLOAT, 16},
- });
-
- DRWShadingGroup *grp = DRW_shgroup_instance_create(sh_inst, pass, geom, g_formats.instance_sized);
- DRW_shgroup_state_disable(grp, DRW_STATE_BLEND);
- if (sh_cfg == GPU_SHADER_CFG_CLIPPED) {
- DRW_shgroup_world_clip_planes_from_rv3d(grp, DRW_context_state_get()->rv3d);
- }
- return grp;
+ GPUShader *sh_inst = GPU_shader_get_builtin_shader_with_config(
+ GPU_SHADER_INSTANCE_VARIYING_COLOR_VARIYING_SIZE, sh_cfg);
+
+ DRW_shgroup_instance_format(g_formats.instance_sized,
+ {
+ {"color", DRW_ATTR_FLOAT, 4},
+ {"size", DRW_ATTR_FLOAT, 1},
+ {"InstanceModelMatrix", DRW_ATTR_FLOAT, 16},
+ });
+
+ DRWShadingGroup *grp = DRW_shgroup_instance_create(
+ sh_inst, pass, geom, g_formats.instance_sized);
+ DRW_shgroup_state_disable(grp, DRW_STATE_BLEND);
+ if (sh_cfg == GPU_SHADER_CFG_CLIPPED) {
+ DRW_shgroup_world_clip_planes_from_rv3d(grp, DRW_context_state_get()->rv3d);
+ }
+ return grp;
}
-DRWShadingGroup *shgroup_instance_alpha(DRWPass *pass, struct GPUBatch *geom, eGPUShaderConfig sh_cfg)
+DRWShadingGroup *shgroup_instance_alpha(DRWPass *pass,
+ struct GPUBatch *geom,
+ eGPUShaderConfig sh_cfg)
{
- GPUShader *sh_inst = GPU_shader_get_builtin_shader_with_config(GPU_SHADER_INSTANCE_VARIYING_COLOR_VARIYING_SIZE, sh_cfg);
-
- DRW_shgroup_instance_format(g_formats.instance_sized, {
- {"color", DRW_ATTR_FLOAT, 4},
- {"size", DRW_ATTR_FLOAT, 1},
- {"InstanceModelMatrix", DRW_ATTR_FLOAT, 16},
- });
-
- DRWShadingGroup *grp = DRW_shgroup_instance_create(sh_inst, pass, geom, g_formats.instance_sized);
- if (sh_cfg == GPU_SHADER_CFG_CLIPPED) {
- DRW_shgroup_world_clip_planes_from_rv3d(grp, DRW_context_state_get()->rv3d);
- }
- return grp;
+ GPUShader *sh_inst = GPU_shader_get_builtin_shader_with_config(
+ GPU_SHADER_INSTANCE_VARIYING_COLOR_VARIYING_SIZE, sh_cfg);
+
+ DRW_shgroup_instance_format(g_formats.instance_sized,
+ {
+ {"color", DRW_ATTR_FLOAT, 4},
+ {"size", DRW_ATTR_FLOAT, 1},
+ {"InstanceModelMatrix", DRW_ATTR_FLOAT, 16},
+ });
+
+ DRWShadingGroup *grp = DRW_shgroup_instance_create(
+ sh_inst, pass, geom, g_formats.instance_sized);
+ if (sh_cfg == GPU_SHADER_CFG_CLIPPED) {
+ DRW_shgroup_world_clip_planes_from_rv3d(grp, DRW_context_state_get()->rv3d);
+ }
+ return grp;
}
-DRWShadingGroup *shgroup_instance_empty_axes(DRWPass *pass, struct GPUBatch *geom, eGPUShaderConfig sh_cfg)
+DRWShadingGroup *shgroup_instance_empty_axes(DRWPass *pass,
+ struct GPUBatch *geom,
+ eGPUShaderConfig sh_cfg)
{
- COMMON_Shaders *sh_data = &g_shaders[sh_cfg];
- if (sh_data->empty_axes_sh == NULL) {
- const GPUShaderConfigData *sh_cfg_data = &GPU_shader_cfg_data[sh_cfg];
- sh_data->empty_axes_sh = GPU_shader_create_from_arrays({
- .vert = (const char *[]){sh_cfg_data->lib, datatoc_object_empty_axes_vert_glsl, NULL},
- .frag = (const char *[]){datatoc_gpu_shader_flat_color_frag_glsl, NULL},
- .defs = (const char *[]){sh_cfg_data->def, NULL},
- });
- }
-
- DRW_shgroup_instance_format(g_formats.instance_sized, {
- {"color", DRW_ATTR_FLOAT, 3},
- {"size", DRW_ATTR_FLOAT, 1},
- {"InstanceModelMatrix", DRW_ATTR_FLOAT, 16},
- });
-
- DRWShadingGroup *grp = DRW_shgroup_instance_create(sh_data->empty_axes_sh, pass, geom, g_formats.instance_sized);
- DRW_shgroup_uniform_vec3(grp, "screenVecs[0]", DRW_viewport_screenvecs_get(), 2);
- if (sh_cfg == GPU_SHADER_CFG_CLIPPED) {
- DRW_shgroup_world_clip_planes_from_rv3d(grp, DRW_context_state_get()->rv3d);
- }
- return grp;
+ COMMON_Shaders *sh_data = &g_shaders[sh_cfg];
+ if (sh_data->empty_axes_sh == NULL) {
+ const GPUShaderConfigData *sh_cfg_data = &GPU_shader_cfg_data[sh_cfg];
+ sh_data->empty_axes_sh = GPU_shader_create_from_arrays({
+ .vert = (const char *[]){sh_cfg_data->lib, datatoc_object_empty_axes_vert_glsl, NULL},
+ .frag = (const char *[]){datatoc_gpu_shader_flat_color_frag_glsl, NULL},
+ .defs = (const char *[]){sh_cfg_data->def, NULL},
+ });
+ }
+
+ DRW_shgroup_instance_format(g_formats.instance_sized,
+ {
+ {"color", DRW_ATTR_FLOAT, 3},
+ {"size", DRW_ATTR_FLOAT, 1},
+ {"InstanceModelMatrix", DRW_ATTR_FLOAT, 16},
+ });
+
+ DRWShadingGroup *grp = DRW_shgroup_instance_create(
+ sh_data->empty_axes_sh, pass, geom, g_formats.instance_sized);
+ DRW_shgroup_uniform_vec3(grp, "screenVecs[0]", DRW_viewport_screenvecs_get(), 2);
+ if (sh_cfg == GPU_SHADER_CFG_CLIPPED) {
+ DRW_shgroup_world_clip_planes_from_rv3d(grp, DRW_context_state_get()->rv3d);
+ }
+ return grp;
}
DRWShadingGroup *shgroup_instance_outline(DRWPass *pass, struct GPUBatch *geom, int *baseid)
{
- GPUShader *sh_inst = GPU_shader_get_builtin_shader(GPU_SHADER_INSTANCE_VARIYING_ID_VARIYING_SIZE);
+ GPUShader *sh_inst = GPU_shader_get_builtin_shader(
+ GPU_SHADER_INSTANCE_VARIYING_ID_VARIYING_SIZE);
- DRW_shgroup_instance_format(g_formats.instance_outline, {
- {"callId", DRW_ATTR_INT, 1},
- {"size", DRW_ATTR_FLOAT, 1},
- {"InstanceModelMatrix", DRW_ATTR_FLOAT, 16},
- });
+ DRW_shgroup_instance_format(g_formats.instance_outline,
+ {
+ {"callId", DRW_ATTR_INT, 1},
+ {"size", DRW_ATTR_FLOAT, 1},
+ {"InstanceModelMatrix", DRW_ATTR_FLOAT, 16},
+ });
- DRWShadingGroup *grp = DRW_shgroup_instance_create(sh_inst, pass, geom, g_formats.instance_outline);
- DRW_shgroup_uniform_int(grp, "baseId", baseid, 1);
+ DRWShadingGroup *grp = DRW_shgroup_instance_create(
+ sh_inst, pass, geom, g_formats.instance_outline);
+ DRW_shgroup_uniform_int(grp, "baseId", baseid, 1);
- return grp;
+ return grp;
}
-DRWShadingGroup *shgroup_camera_instance(DRWPass *pass, struct GPUBatch *geom, eGPUShaderConfig sh_cfg)
+DRWShadingGroup *shgroup_camera_instance(DRWPass *pass,
+ struct GPUBatch *geom,
+ eGPUShaderConfig sh_cfg)
{
- GPUShader *sh_inst = GPU_shader_get_builtin_shader_with_config(GPU_SHADER_CAMERA, sh_cfg);
-
- DRW_shgroup_instance_format(g_formats.instance_camera, {
- {"color", DRW_ATTR_FLOAT, 3},
- {"corners", DRW_ATTR_FLOAT, 8},
- {"depth", DRW_ATTR_FLOAT, 1},
- {"tria", DRW_ATTR_FLOAT, 4},
- {"InstanceModelMatrix", DRW_ATTR_FLOAT, 16},
- });
-
- DRWShadingGroup *grp = DRW_shgroup_instance_create(sh_inst, pass, geom, g_formats.instance_camera);
- if (sh_cfg == GPU_SHADER_CFG_CLIPPED) {
- DRW_shgroup_world_clip_planes_from_rv3d(grp, DRW_context_state_get()->rv3d);
- }
- return grp;
+ GPUShader *sh_inst = GPU_shader_get_builtin_shader_with_config(GPU_SHADER_CAMERA, sh_cfg);
+
+ DRW_shgroup_instance_format(g_formats.instance_camera,
+ {
+ {"color", DRW_ATTR_FLOAT, 3},
+ {"corners", DRW_ATTR_FLOAT, 8},
+ {"depth", DRW_ATTR_FLOAT, 1},
+ {"tria", DRW_ATTR_FLOAT, 4},
+ {"InstanceModelMatrix", DRW_ATTR_FLOAT, 16},
+ });
+
+ DRWShadingGroup *grp = DRW_shgroup_instance_create(
+ sh_inst, pass, geom, g_formats.instance_camera);
+ if (sh_cfg == GPU_SHADER_CFG_CLIPPED) {
+ DRW_shgroup_world_clip_planes_from_rv3d(grp, DRW_context_state_get()->rv3d);
+ }
+ return grp;
}
-DRWShadingGroup *shgroup_distance_lines_instance(DRWPass *pass, struct GPUBatch *geom, eGPUShaderConfig sh_cfg)
+DRWShadingGroup *shgroup_distance_lines_instance(DRWPass *pass,
+ struct GPUBatch *geom,
+ eGPUShaderConfig sh_cfg)
{
- GPUShader *sh_inst = GPU_shader_get_builtin_shader_with_config(GPU_SHADER_DISTANCE_LINES, sh_cfg);
- static float point_size = 4.0f;
-
- DRW_shgroup_instance_format(g_formats.instance_distance_lines, {
- {"color", DRW_ATTR_FLOAT, 3},
- {"start", DRW_ATTR_FLOAT, 1},
- {"end", DRW_ATTR_FLOAT, 1},
- {"InstanceModelMatrix", DRW_ATTR_FLOAT, 16},
- });
-
- DRWShadingGroup *grp = DRW_shgroup_instance_create(sh_inst, pass, geom, g_formats.instance_distance_lines);
- DRW_shgroup_uniform_float(grp, "size", &point_size, 1);
- if (sh_cfg == GPU_SHADER_CFG_CLIPPED) {
- DRW_shgroup_world_clip_planes_from_rv3d(grp, DRW_context_state_get()->rv3d);
- }
- return grp;
+ GPUShader *sh_inst = GPU_shader_get_builtin_shader_with_config(GPU_SHADER_DISTANCE_LINES,
+ sh_cfg);
+ static float point_size = 4.0f;
+
+ DRW_shgroup_instance_format(g_formats.instance_distance_lines,
+ {
+ {"color", DRW_ATTR_FLOAT, 3},
+ {"start", DRW_ATTR_FLOAT, 1},
+ {"end", DRW_ATTR_FLOAT, 1},
+ {"InstanceModelMatrix", DRW_ATTR_FLOAT, 16},
+ });
+
+ DRWShadingGroup *grp = DRW_shgroup_instance_create(
+ sh_inst, pass, geom, g_formats.instance_distance_lines);
+ DRW_shgroup_uniform_float(grp, "size", &point_size, 1);
+ if (sh_cfg == GPU_SHADER_CFG_CLIPPED) {
+ DRW_shgroup_world_clip_planes_from_rv3d(grp, DRW_context_state_get()->rv3d);
+ }
+ return grp;
}
-DRWShadingGroup *shgroup_spot_instance(DRWPass *pass, struct GPUBatch *geom, eGPUShaderConfig sh_cfg)
+DRWShadingGroup *shgroup_spot_instance(DRWPass *pass,
+ struct GPUBatch *geom,
+ eGPUShaderConfig sh_cfg)
{
- GPUShader *sh_inst = GPU_shader_get_builtin_shader_with_config(GPU_SHADER_INSTANCE_EDGES_VARIYING_COLOR, sh_cfg);
- static const int True = true;
- static const int False = false;
-
- DRW_shgroup_instance_format(g_formats.instance_spot, {
- {"color", DRW_ATTR_FLOAT, 3},
- {"InstanceModelMatrix", DRW_ATTR_FLOAT, 16},
- });
-
- DRWShadingGroup *grp = DRW_shgroup_instance_create(sh_inst, pass, geom, g_formats.instance_spot);
- DRW_shgroup_uniform_bool(grp, "drawFront", &False, 1);
- DRW_shgroup_uniform_bool(grp, "drawBack", &False, 1);
- DRW_shgroup_uniform_bool(grp, "drawSilhouette", &True, 1);
- if (sh_cfg == GPU_SHADER_CFG_CLIPPED) {
- DRW_shgroup_world_clip_planes_from_rv3d(grp, DRW_context_state_get()->rv3d);
- }
- return grp;
+ GPUShader *sh_inst = GPU_shader_get_builtin_shader_with_config(
+ GPU_SHADER_INSTANCE_EDGES_VARIYING_COLOR, sh_cfg);
+ static const int True = true;
+ static const int False = false;
+
+ DRW_shgroup_instance_format(g_formats.instance_spot,
+ {
+ {"color", DRW_ATTR_FLOAT, 3},
+ {"InstanceModelMatrix", DRW_ATTR_FLOAT, 16},
+ });
+
+ DRWShadingGroup *grp = DRW_shgroup_instance_create(sh_inst, pass, geom, g_formats.instance_spot);
+ DRW_shgroup_uniform_bool(grp, "drawFront", &False, 1);
+ DRW_shgroup_uniform_bool(grp, "drawBack", &False, 1);
+ DRW_shgroup_uniform_bool(grp, "drawSilhouette", &True, 1);
+ if (sh_cfg == GPU_SHADER_CFG_CLIPPED) {
+ DRW_shgroup_world_clip_planes_from_rv3d(grp, DRW_context_state_get()->rv3d);
+ }
+ return grp;
}
DRWShadingGroup *shgroup_instance_bone_axes(DRWPass *pass, eGPUShaderConfig sh_cfg)
{
- COMMON_Shaders *sh_data = &g_shaders[sh_cfg];
- if (sh_data->bone_axes == NULL) {
- const GPUShaderConfigData *sh_cfg_data = &GPU_shader_cfg_data[sh_cfg];
- sh_data->bone_axes = GPU_shader_create_from_arrays({
- .vert = (const char *[]){sh_cfg_data->lib, datatoc_armature_axes_vert_glsl, NULL},
- .frag = (const char *[]){datatoc_gpu_shader_flat_color_frag_glsl, NULL},
- .defs = (const char *[]){sh_cfg_data->def, NULL},
- });
- }
-
- DRW_shgroup_instance_format(g_formats.instance_color, {
- {"InstanceModelMatrix", DRW_ATTR_FLOAT, 16},
- {"color", DRW_ATTR_FLOAT, 4},
- });
-
- DRWShadingGroup *grp = DRW_shgroup_instance_create(
- sh_data->bone_axes,
- pass, DRW_cache_bone_arrows_get(),
- g_formats.instance_color);
- DRW_shgroup_uniform_vec3(grp, "screenVecs[0]", DRW_viewport_screenvecs_get(), 2);
- if (sh_cfg == GPU_SHADER_CFG_CLIPPED) {
- DRW_shgroup_world_clip_planes_from_rv3d(grp, DRW_context_state_get()->rv3d);
- }
- return grp;
+ COMMON_Shaders *sh_data = &g_shaders[sh_cfg];
+ if (sh_data->bone_axes == NULL) {
+ const GPUShaderConfigData *sh_cfg_data = &GPU_shader_cfg_data[sh_cfg];
+ sh_data->bone_axes = GPU_shader_create_from_arrays({
+ .vert = (const char *[]){sh_cfg_data->lib, datatoc_armature_axes_vert_glsl, NULL},
+ .frag = (const char *[]){datatoc_gpu_shader_flat_color_frag_glsl, NULL},
+ .defs = (const char *[]){sh_cfg_data->def, NULL},
+ });
+ }
+
+ DRW_shgroup_instance_format(g_formats.instance_color,
+ {
+ {"InstanceModelMatrix", DRW_ATTR_FLOAT, 16},
+ {"color", DRW_ATTR_FLOAT, 4},
+ });
+
+ DRWShadingGroup *grp = DRW_shgroup_instance_create(
+ sh_data->bone_axes, pass, DRW_cache_bone_arrows_get(), g_formats.instance_color);
+ DRW_shgroup_uniform_vec3(grp, "screenVecs[0]", DRW_viewport_screenvecs_get(), 2);
+ if (sh_cfg == GPU_SHADER_CFG_CLIPPED) {
+ DRW_shgroup_world_clip_planes_from_rv3d(grp, DRW_context_state_get()->rv3d);
+ }
+ return grp;
}
DRWShadingGroup *shgroup_instance_bone_envelope_outline(DRWPass *pass, eGPUShaderConfig sh_cfg)
{
- COMMON_Shaders *sh_data = &g_shaders[sh_cfg];
- if (sh_data->bone_envelope_outline == NULL) {
- const GPUShaderConfigData *sh_cfg_data = &GPU_shader_cfg_data[sh_cfg];
- sh_data->bone_envelope_outline = GPU_shader_create_from_arrays({
- .vert = (const char *[]){sh_cfg_data->lib, datatoc_armature_envelope_outline_vert_glsl, NULL},
- .frag = (const char *[]){datatoc_gpu_shader_flat_color_frag_glsl, NULL},
- .defs = (const char *[]){sh_cfg_data->def, NULL},
- });
- }
-
- DRW_shgroup_instance_format(g_formats.instance_bone_envelope_outline, {
- {"headSphere", DRW_ATTR_FLOAT, 4},
- {"tailSphere", DRW_ATTR_FLOAT, 4},
- {"outlineColorSize", DRW_ATTR_FLOAT, 4},
- {"xAxis", DRW_ATTR_FLOAT, 3},
- });
-
- DRWShadingGroup *grp = DRW_shgroup_instance_create(
- sh_data->bone_envelope_outline,
- pass, DRW_cache_bone_envelope_outline_get(),
- g_formats.instance_bone_envelope_outline);
- DRW_shgroup_uniform_vec2(grp, "viewportSize", DRW_viewport_size_get(), 1);
- if (sh_cfg == GPU_SHADER_CFG_CLIPPED) {
- DRW_shgroup_world_clip_planes_from_rv3d(grp, DRW_context_state_get()->rv3d);
- }
- return grp;
+ COMMON_Shaders *sh_data = &g_shaders[sh_cfg];
+ if (sh_data->bone_envelope_outline == NULL) {
+ const GPUShaderConfigData *sh_cfg_data = &GPU_shader_cfg_data[sh_cfg];
+ sh_data->bone_envelope_outline = GPU_shader_create_from_arrays({
+ .vert =
+ (const char *[]){sh_cfg_data->lib, datatoc_armature_envelope_outline_vert_glsl, NULL},
+ .frag = (const char *[]){datatoc_gpu_shader_flat_color_frag_glsl, NULL},
+ .defs = (const char *[]){sh_cfg_data->def, NULL},
+ });
+ }
+
+ DRW_shgroup_instance_format(g_formats.instance_bone_envelope_outline,
+ {
+ {"headSphere", DRW_ATTR_FLOAT, 4},
+ {"tailSphere", DRW_ATTR_FLOAT, 4},
+ {"outlineColorSize", DRW_ATTR_FLOAT, 4},
+ {"xAxis", DRW_ATTR_FLOAT, 3},
+ });
+
+ DRWShadingGroup *grp = DRW_shgroup_instance_create(sh_data->bone_envelope_outline,
+ pass,
+ DRW_cache_bone_envelope_outline_get(),
+ g_formats.instance_bone_envelope_outline);
+ DRW_shgroup_uniform_vec2(grp, "viewportSize", DRW_viewport_size_get(), 1);
+ if (sh_cfg == GPU_SHADER_CFG_CLIPPED) {
+ DRW_shgroup_world_clip_planes_from_rv3d(grp, DRW_context_state_get()->rv3d);
+ }
+ return grp;
}
DRWShadingGroup *shgroup_instance_bone_envelope_distance(DRWPass *pass, eGPUShaderConfig sh_cfg)
{
- COMMON_Shaders *sh_data = &g_shaders[sh_cfg];
- if (sh_data->bone_envelope_distance == NULL) {
- const GPUShaderConfigData *sh_cfg_data = &GPU_shader_cfg_data[sh_cfg];
- sh_data->bone_envelope_distance = GPU_shader_create_from_arrays({
- .vert = (const char *[]){sh_cfg_data->lib, datatoc_armature_envelope_solid_vert_glsl, NULL},
- .frag = (const char *[]){datatoc_armature_envelope_distance_frag_glsl, NULL},
- .defs = (const char *[]){sh_cfg_data->def, NULL},
- });
- }
-
- DRW_shgroup_instance_format(g_formats.instance_bone_envelope_distance, {
- {"headSphere", DRW_ATTR_FLOAT, 4},
- {"tailSphere", DRW_ATTR_FLOAT, 4},
- {"xAxis", DRW_ATTR_FLOAT, 3},
- });
-
- DRWShadingGroup *grp = DRW_shgroup_instance_create(
- sh_data->bone_envelope_distance,
- pass, DRW_cache_bone_envelope_solid_get(),
- g_formats.instance_bone_envelope_distance);
- if (sh_cfg == GPU_SHADER_CFG_CLIPPED) {
- DRW_shgroup_world_clip_planes_from_rv3d(grp, DRW_context_state_get()->rv3d);
- }
- return grp;
+ COMMON_Shaders *sh_data = &g_shaders[sh_cfg];
+ if (sh_data->bone_envelope_distance == NULL) {
+ const GPUShaderConfigData *sh_cfg_data = &GPU_shader_cfg_data[sh_cfg];
+ sh_data->bone_envelope_distance = GPU_shader_create_from_arrays({
+ .vert =
+ (const char *[]){sh_cfg_data->lib, datatoc_armature_envelope_solid_vert_glsl, NULL},
+ .frag = (const char *[]){datatoc_armature_envelope_distance_frag_glsl, NULL},
+ .defs = (const char *[]){sh_cfg_data->def, NULL},
+ });
+ }
+
+ DRW_shgroup_instance_format(g_formats.instance_bone_envelope_distance,
+ {
+ {"headSphere", DRW_ATTR_FLOAT, 4},
+ {"tailSphere", DRW_ATTR_FLOAT, 4},
+ {"xAxis", DRW_ATTR_FLOAT, 3},
+ });
+
+ DRWShadingGroup *grp = DRW_shgroup_instance_create(sh_data->bone_envelope_distance,
+ pass,
+ DRW_cache_bone_envelope_solid_get(),
+ g_formats.instance_bone_envelope_distance);
+ if (sh_cfg == GPU_SHADER_CFG_CLIPPED) {
+ DRW_shgroup_world_clip_planes_from_rv3d(grp, DRW_context_state_get()->rv3d);
+ }
+ return grp;
}
-DRWShadingGroup *shgroup_instance_bone_envelope_solid(DRWPass *pass, bool transp, eGPUShaderConfig sh_cfg)
+DRWShadingGroup *shgroup_instance_bone_envelope_solid(DRWPass *pass,
+ bool transp,
+ eGPUShaderConfig sh_cfg)
{
- COMMON_Shaders *sh_data = &g_shaders[sh_cfg];
- if (sh_data->bone_envelope == NULL) {
- const GPUShaderConfigData *sh_cfg_data = &GPU_shader_cfg_data[sh_cfg];
- sh_data->bone_envelope = GPU_shader_create_from_arrays({
- .vert = (const char *[]){sh_cfg_data->lib, datatoc_armature_envelope_solid_vert_glsl, NULL},
- .frag = (const char *[]){datatoc_armature_envelope_solid_frag_glsl, NULL},
- .defs = (const char *[]){sh_cfg_data->def, NULL},
- });
- }
-
- DRW_shgroup_instance_format(g_formats.instance_bone_envelope, {
- {"headSphere", DRW_ATTR_FLOAT, 4},
- {"tailSphere", DRW_ATTR_FLOAT, 4},
- {"boneColor", DRW_ATTR_FLOAT, 3},
- {"stateColor", DRW_ATTR_FLOAT, 3},
- {"xAxis", DRW_ATTR_FLOAT, 3},
- });
-
- DRWShadingGroup *grp = DRW_shgroup_instance_create(
- sh_data->bone_envelope,
- pass, DRW_cache_bone_envelope_solid_get(),
- g_formats.instance_bone_envelope);
- DRW_shgroup_uniform_float_copy(grp, "alpha", transp ? 0.6f : 1.0f);
- if (sh_cfg == GPU_SHADER_CFG_CLIPPED) {
- DRW_shgroup_world_clip_planes_from_rv3d(grp, DRW_context_state_get()->rv3d);
- }
- return grp;
+ COMMON_Shaders *sh_data = &g_shaders[sh_cfg];
+ if (sh_data->bone_envelope == NULL) {
+ const GPUShaderConfigData *sh_cfg_data = &GPU_shader_cfg_data[sh_cfg];
+ sh_data->bone_envelope = GPU_shader_create_from_arrays({
+ .vert =
+ (const char *[]){sh_cfg_data->lib, datatoc_armature_envelope_solid_vert_glsl, NULL},
+ .frag = (const char *[]){datatoc_armature_envelope_solid_frag_glsl, NULL},
+ .defs = (const char *[]){sh_cfg_data->def, NULL},
+ });
+ }
+
+ DRW_shgroup_instance_format(g_formats.instance_bone_envelope,
+ {
+ {"headSphere", DRW_ATTR_FLOAT, 4},
+ {"tailSphere", DRW_ATTR_FLOAT, 4},
+ {"boneColor", DRW_ATTR_FLOAT, 3},
+ {"stateColor", DRW_ATTR_FLOAT, 3},
+ {"xAxis", DRW_ATTR_FLOAT, 3},
+ });
+
+ DRWShadingGroup *grp = DRW_shgroup_instance_create(sh_data->bone_envelope,
+ pass,
+ DRW_cache_bone_envelope_solid_get(),
+ g_formats.instance_bone_envelope);
+ DRW_shgroup_uniform_float_copy(grp, "alpha", transp ? 0.6f : 1.0f);
+ if (sh_cfg == GPU_SHADER_CFG_CLIPPED) {
+ DRW_shgroup_world_clip_planes_from_rv3d(grp, DRW_context_state_get()->rv3d);
+ }
+ return grp;
}
DRWShadingGroup *shgroup_instance_mball_handles(DRWPass *pass, eGPUShaderConfig sh_cfg)
{
- COMMON_Shaders *sh_data = &g_shaders[sh_cfg];
- if (sh_data->mball_handles == NULL) {
- const GPUShaderConfigData *sh_cfg_data = &GPU_shader_cfg_data[sh_cfg];
- sh_data->mball_handles = GPU_shader_create_from_arrays({
- .vert = (const char *[]){sh_cfg_data->lib, datatoc_object_mball_handles_vert_glsl, NULL},
- .frag = (const char *[]){datatoc_gpu_shader_flat_color_frag_glsl, NULL},
- .defs = (const char *[]){sh_cfg_data->def, NULL},
- });
- }
-
- DRW_shgroup_instance_format(g_formats.instance_mball_handles, {
- {"ScaleTranslationMatrix", DRW_ATTR_FLOAT, 12},
- {"radius", DRW_ATTR_FLOAT, 1},
- {"color", DRW_ATTR_FLOAT, 3},
- });
-
- DRWShadingGroup *grp = DRW_shgroup_instance_create(
- sh_data->mball_handles, pass,
- DRW_cache_screenspace_circle_get(),
- g_formats.instance_mball_handles);
- DRW_shgroup_uniform_vec3(grp, "screen_vecs[0]", DRW_viewport_screenvecs_get(), 2);
- if (sh_cfg == GPU_SHADER_CFG_CLIPPED) {
- DRW_shgroup_world_clip_planes_from_rv3d(grp, DRW_context_state_get()->rv3d);
- }
- return grp;
+ COMMON_Shaders *sh_data = &g_shaders[sh_cfg];
+ if (sh_data->mball_handles == NULL) {
+ const GPUShaderConfigData *sh_cfg_data = &GPU_shader_cfg_data[sh_cfg];
+ sh_data->mball_handles = GPU_shader_create_from_arrays({
+ .vert = (const char *[]){sh_cfg_data->lib, datatoc_object_mball_handles_vert_glsl, NULL},
+ .frag = (const char *[]){datatoc_gpu_shader_flat_color_frag_glsl, NULL},
+ .defs = (const char *[]){sh_cfg_data->def, NULL},
+ });
+ }
+
+ DRW_shgroup_instance_format(g_formats.instance_mball_handles,
+ {
+ {"ScaleTranslationMatrix", DRW_ATTR_FLOAT, 12},
+ {"radius", DRW_ATTR_FLOAT, 1},
+ {"color", DRW_ATTR_FLOAT, 3},
+ });
+
+ DRWShadingGroup *grp = DRW_shgroup_instance_create(sh_data->mball_handles,
+ pass,
+ DRW_cache_screenspace_circle_get(),
+ g_formats.instance_mball_handles);
+ DRW_shgroup_uniform_vec3(grp, "screen_vecs[0]", DRW_viewport_screenvecs_get(), 2);
+ if (sh_cfg == GPU_SHADER_CFG_CLIPPED) {
+ DRW_shgroup_world_clip_planes_from_rv3d(grp, DRW_context_state_get()->rv3d);
+ }
+ return grp;
}
/* Only works with batches that have adjacency info. */
-DRWShadingGroup *shgroup_instance_bone_shape_outline(
- DRWPass *pass, struct GPUBatch *geom, eGPUShaderConfig sh_cfg)
+DRWShadingGroup *shgroup_instance_bone_shape_outline(DRWPass *pass,
+ struct GPUBatch *geom,
+ eGPUShaderConfig sh_cfg)
{
- COMMON_Shaders *sh_data = &g_shaders[sh_cfg];
- if (sh_data->shape_outline == NULL) {
- const GPUShaderConfigData *sh_cfg_data = &GPU_shader_cfg_data[sh_cfg];
- sh_data->shape_outline = GPU_shader_create_from_arrays({
- .vert = (const char *[]){sh_cfg_data->lib, datatoc_armature_shape_outline_vert_glsl, NULL},
- .geom = (const char *[]){sh_cfg_data->lib, datatoc_armature_shape_outline_geom_glsl, NULL},
- .frag = (const char *[]){datatoc_gpu_shader_flat_color_frag_glsl, NULL},
- .defs = (const char *[]){sh_cfg_data->def, NULL},
- });
- }
-
- DRW_shgroup_instance_format(g_formats.instance_bone_outline, {
- {"InstanceModelMatrix", DRW_ATTR_FLOAT, 16},
- {"outlineColorSize", DRW_ATTR_FLOAT, 4},
- });
-
- DRWShadingGroup *grp = DRW_shgroup_instance_create(
- sh_data->shape_outline,
- pass, geom, g_formats.instance_bone_outline);
- DRW_shgroup_uniform_vec2(grp, "viewportSize", DRW_viewport_size_get(), 1);
- if (sh_cfg == GPU_SHADER_CFG_CLIPPED) {
- DRW_shgroup_world_clip_planes_from_rv3d(grp, DRW_context_state_get()->rv3d);
- }
- return grp;
+ COMMON_Shaders *sh_data = &g_shaders[sh_cfg];
+ if (sh_data->shape_outline == NULL) {
+ const GPUShaderConfigData *sh_cfg_data = &GPU_shader_cfg_data[sh_cfg];
+ sh_data->shape_outline = GPU_shader_create_from_arrays({
+ .vert = (const char *[]){sh_cfg_data->lib, datatoc_armature_shape_outline_vert_glsl, NULL},
+ .geom = (const char *[]){sh_cfg_data->lib, datatoc_armature_shape_outline_geom_glsl, NULL},
+ .frag = (const char *[]){datatoc_gpu_shader_flat_color_frag_glsl, NULL},
+ .defs = (const char *[]){sh_cfg_data->def, NULL},
+ });
+ }
+
+ DRW_shgroup_instance_format(g_formats.instance_bone_outline,
+ {
+ {"InstanceModelMatrix", DRW_ATTR_FLOAT, 16},
+ {"outlineColorSize", DRW_ATTR_FLOAT, 4},
+ });
+
+ DRWShadingGroup *grp = DRW_shgroup_instance_create(
+ sh_data->shape_outline, pass, geom, g_formats.instance_bone_outline);
+ DRW_shgroup_uniform_vec2(grp, "viewportSize", DRW_viewport_size_get(), 1);
+ if (sh_cfg == GPU_SHADER_CFG_CLIPPED) {
+ DRW_shgroup_world_clip_planes_from_rv3d(grp, DRW_context_state_get()->rv3d);
+ }
+ return grp;
}
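As the comment above notes, this helper requires adjacency info: the shape-outline geometry shader reads neighboring triangles to decide which edges form the silhouette. A hedged sketch of building a compatible batch (index-buffer construction elided; the primitive type and batch call follow Blender's GPU module as used elsewhere, so treat them as assumptions):

/* Assumed context: `vbo` holds positions, `ibo_adj` was filled with
 * triangle-adjacency indices. GPU_PRIM_TRIS_ADJ is what makes the geometry
 * stage's adjacency inputs valid. */
struct GPUBatch *geom_adj = GPU_batch_create(GPU_PRIM_TRIS_ADJ, vbo, ibo_adj);
DRWShadingGroup *grp = shgroup_instance_bone_shape_outline(pass, geom_adj, sh_cfg);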
-DRWShadingGroup *shgroup_instance_bone_shape_solid(
- DRWPass *pass, struct GPUBatch *geom, bool transp, eGPUShaderConfig sh_cfg)
+DRWShadingGroup *shgroup_instance_bone_shape_solid(DRWPass *pass,
+ struct GPUBatch *geom,
+ bool transp,
+ eGPUShaderConfig sh_cfg)
{
- COMMON_Shaders *sh_data = &g_shaders[sh_cfg];
- if (sh_data->shape_solid == NULL) {
- const GPUShaderConfigData *sh_cfg_data = &GPU_shader_cfg_data[sh_cfg];
- sh_data->shape_solid = GPU_shader_create_from_arrays({
- .vert = (const char *[]){sh_cfg_data->lib, datatoc_armature_shape_solid_vert_glsl, NULL},
- .frag = (const char *[]){datatoc_armature_shape_solid_frag_glsl, NULL},
- .defs = (const char *[]){sh_cfg_data->def, NULL},
- });
- }
-
- DRW_shgroup_instance_format(g_formats.instance_bone, {
- {"InstanceModelMatrix", DRW_ATTR_FLOAT, 16},
- {"boneColor", DRW_ATTR_FLOAT, 3},
- {"stateColor", DRW_ATTR_FLOAT, 3},
- });
-
- DRWShadingGroup *grp = DRW_shgroup_instance_create(
- sh_data->shape_solid,
- pass, geom, g_formats.instance_bone);
- DRW_shgroup_uniform_float_copy(grp, "alpha", transp ? 0.6f : 1.0f);
- if (sh_cfg == GPU_SHADER_CFG_CLIPPED) {
- DRW_shgroup_world_clip_planes_from_rv3d(grp, DRW_context_state_get()->rv3d);
- }
- return grp;
+ COMMON_Shaders *sh_data = &g_shaders[sh_cfg];
+ if (sh_data->shape_solid == NULL) {
+ const GPUShaderConfigData *sh_cfg_data = &GPU_shader_cfg_data[sh_cfg];
+ sh_data->shape_solid = GPU_shader_create_from_arrays({
+ .vert = (const char *[]){sh_cfg_data->lib, datatoc_armature_shape_solid_vert_glsl, NULL},
+ .frag = (const char *[]){datatoc_armature_shape_solid_frag_glsl, NULL},
+ .defs = (const char *[]){sh_cfg_data->def, NULL},
+ });
+ }
+
+ DRW_shgroup_instance_format(g_formats.instance_bone,
+ {
+ {"InstanceModelMatrix", DRW_ATTR_FLOAT, 16},
+ {"boneColor", DRW_ATTR_FLOAT, 3},
+ {"stateColor", DRW_ATTR_FLOAT, 3},
+ });
+
+ DRWShadingGroup *grp = DRW_shgroup_instance_create(
+ sh_data->shape_solid, pass, geom, g_formats.instance_bone);
+ DRW_shgroup_uniform_float_copy(grp, "alpha", transp ? 0.6f : 1.0f);
+ if (sh_cfg == GPU_SHADER_CFG_CLIPPED) {
+ DRW_shgroup_world_clip_planes_from_rv3d(grp, DRW_context_state_get()->rv3d);
+ }
+ return grp;
}
-DRWShadingGroup *shgroup_instance_bone_sphere_solid(DRWPass *pass, bool transp, eGPUShaderConfig sh_cfg)
+DRWShadingGroup *shgroup_instance_bone_sphere_solid(DRWPass *pass,
+ bool transp,
+ eGPUShaderConfig sh_cfg)
{
- COMMON_Shaders *sh_data = &g_shaders[sh_cfg];
- if (sh_data->bone_sphere == NULL) {
- const GPUShaderConfigData *sh_cfg_data = &GPU_shader_cfg_data[sh_cfg];
- sh_data->bone_sphere = GPU_shader_create_from_arrays({
- .vert = (const char *[]){sh_cfg_data->lib, datatoc_armature_sphere_solid_vert_glsl, NULL},
- .frag = (const char *[]){datatoc_armature_sphere_solid_frag_glsl, NULL},
- .defs = (const char *[]){sh_cfg_data->def, NULL},
- });
- }
-
- DRW_shgroup_instance_format(g_formats.instance_bone, {
- {"InstanceModelMatrix", DRW_ATTR_FLOAT, 16},
- {"boneColor", DRW_ATTR_FLOAT, 3},
- {"stateColor", DRW_ATTR_FLOAT, 3},
- });
-
- DRWShadingGroup *grp = DRW_shgroup_instance_create(
- sh_data->bone_sphere,
- pass, DRW_cache_bone_point_get(), g_formats.instance_bone);
- /* More transparent than the shape to be less distracting. */
- DRW_shgroup_uniform_float_copy(grp, "alpha", transp ? 0.4f : 1.0f);
- if (sh_cfg == GPU_SHADER_CFG_CLIPPED) {
- DRW_shgroup_world_clip_planes_from_rv3d(grp, DRW_context_state_get()->rv3d);
- }
- return grp;
+ COMMON_Shaders *sh_data = &g_shaders[sh_cfg];
+ if (sh_data->bone_sphere == NULL) {
+ const GPUShaderConfigData *sh_cfg_data = &GPU_shader_cfg_data[sh_cfg];
+ sh_data->bone_sphere = GPU_shader_create_from_arrays({
+ .vert = (const char *[]){sh_cfg_data->lib, datatoc_armature_sphere_solid_vert_glsl, NULL},
+ .frag = (const char *[]){datatoc_armature_sphere_solid_frag_glsl, NULL},
+ .defs = (const char *[]){sh_cfg_data->def, NULL},
+ });
+ }
+
+ DRW_shgroup_instance_format(g_formats.instance_bone,
+ {
+ {"InstanceModelMatrix", DRW_ATTR_FLOAT, 16},
+ {"boneColor", DRW_ATTR_FLOAT, 3},
+ {"stateColor", DRW_ATTR_FLOAT, 3},
+ });
+
+ DRWShadingGroup *grp = DRW_shgroup_instance_create(
+ sh_data->bone_sphere, pass, DRW_cache_bone_point_get(), g_formats.instance_bone);
+ /* More transparent than the shape to be less distracting. */
+ DRW_shgroup_uniform_float_copy(grp, "alpha", transp ? 0.4f : 1.0f);
+ if (sh_cfg == GPU_SHADER_CFG_CLIPPED) {
+ DRW_shgroup_world_clip_planes_from_rv3d(grp, DRW_context_state_get()->rv3d);
+ }
+ return grp;
}
DRWShadingGroup *shgroup_instance_bone_sphere_outline(DRWPass *pass, eGPUShaderConfig sh_cfg)
{
- COMMON_Shaders *sh_data = &g_shaders[sh_cfg];
- if (sh_data->bone_sphere_outline == NULL) {
- const GPUShaderConfigData *sh_cfg_data = &GPU_shader_cfg_data[sh_cfg];
- sh_data->bone_sphere_outline = GPU_shader_create_from_arrays({
- .vert = (const char *[]){sh_cfg_data->lib, datatoc_armature_sphere_outline_vert_glsl, NULL},
- .frag = (const char *[]){datatoc_gpu_shader_flat_color_frag_glsl, NULL},
- .defs = (const char *[]){sh_cfg_data->def, NULL},
- });
- }
-
- DRW_shgroup_instance_format(g_formats.instance_bone_outline, {
- {"InstanceModelMatrix", DRW_ATTR_FLOAT, 16},
- {"outlineColorSize", DRW_ATTR_FLOAT, 4},
- });
-
- DRWShadingGroup *grp = DRW_shgroup_instance_create(
- sh_data->bone_sphere_outline,
- pass, DRW_cache_bone_point_wire_outline_get(),
- g_formats.instance_bone_outline);
- DRW_shgroup_uniform_vec2(grp, "viewportSize", DRW_viewport_size_get(), 1);
- if (sh_cfg == GPU_SHADER_CFG_CLIPPED) {
- DRW_shgroup_world_clip_planes_from_rv3d(grp, DRW_context_state_get()->rv3d);
- }
- return grp;
+ COMMON_Shaders *sh_data = &g_shaders[sh_cfg];
+ if (sh_data->bone_sphere_outline == NULL) {
+ const GPUShaderConfigData *sh_cfg_data = &GPU_shader_cfg_data[sh_cfg];
+ sh_data->bone_sphere_outline = GPU_shader_create_from_arrays({
+ .vert =
+ (const char *[]){sh_cfg_data->lib, datatoc_armature_sphere_outline_vert_glsl, NULL},
+ .frag = (const char *[]){datatoc_gpu_shader_flat_color_frag_glsl, NULL},
+ .defs = (const char *[]){sh_cfg_data->def, NULL},
+ });
+ }
+
+ DRW_shgroup_instance_format(g_formats.instance_bone_outline,
+ {
+ {"InstanceModelMatrix", DRW_ATTR_FLOAT, 16},
+ {"outlineColorSize", DRW_ATTR_FLOAT, 4},
+ });
+
+ DRWShadingGroup *grp = DRW_shgroup_instance_create(sh_data->bone_sphere_outline,
+ pass,
+ DRW_cache_bone_point_wire_outline_get(),
+ g_formats.instance_bone_outline);
+ DRW_shgroup_uniform_vec2(grp, "viewportSize", DRW_viewport_size_get(), 1);
+ if (sh_cfg == GPU_SHADER_CFG_CLIPPED) {
+ DRW_shgroup_world_clip_planes_from_rv3d(grp, DRW_context_state_get()->rv3d);
+ }
+ return grp;
}
DRWShadingGroup *shgroup_instance_bone_stick(DRWPass *pass, eGPUShaderConfig sh_cfg)
{
- COMMON_Shaders *sh_data = &g_shaders[sh_cfg];
- if (sh_data->bone_stick == NULL) {
- const GPUShaderConfigData *sh_cfg_data = &GPU_shader_cfg_data[sh_cfg];
- sh_data->bone_stick = GPU_shader_create_from_arrays({
- .vert = (const char *[]){sh_cfg_data->lib, datatoc_armature_stick_vert_glsl, NULL},
- .frag = (const char *[]){datatoc_armature_stick_frag_glsl, NULL},
- .defs = (const char *[]){sh_cfg_data->def, NULL},
- });
- }
-
- DRW_shgroup_instance_format(g_formats.instance_bone_stick, {
- {"boneStart", DRW_ATTR_FLOAT, 3},
- {"boneEnd", DRW_ATTR_FLOAT, 3},
- {"wireColor", DRW_ATTR_FLOAT, 4}, /* TODO: port these to uchar color */
- {"boneColor", DRW_ATTR_FLOAT, 4},
- {"headColor", DRW_ATTR_FLOAT, 4},
- {"tailColor", DRW_ATTR_FLOAT, 4},
- });
-
- DRWShadingGroup *grp = DRW_shgroup_instance_create(
- sh_data->bone_stick,
- pass, DRW_cache_bone_stick_get(),
- g_formats.instance_bone_stick);
- DRW_shgroup_uniform_vec2(grp, "viewportSize", DRW_viewport_size_get(), 1);
- DRW_shgroup_uniform_float_copy(grp, "stickSize", 5.0f * U.pixelsize);
- if (sh_cfg == GPU_SHADER_CFG_CLIPPED) {
- DRW_shgroup_world_clip_planes_from_rv3d(grp, DRW_context_state_get()->rv3d);
- }
- return grp;
+ COMMON_Shaders *sh_data = &g_shaders[sh_cfg];
+ if (sh_data->bone_stick == NULL) {
+ const GPUShaderConfigData *sh_cfg_data = &GPU_shader_cfg_data[sh_cfg];
+ sh_data->bone_stick = GPU_shader_create_from_arrays({
+ .vert = (const char *[]){sh_cfg_data->lib, datatoc_armature_stick_vert_glsl, NULL},
+ .frag = (const char *[]){datatoc_armature_stick_frag_glsl, NULL},
+ .defs = (const char *[]){sh_cfg_data->def, NULL},
+ });
+ }
+
+ DRW_shgroup_instance_format(
+ g_formats.instance_bone_stick,
+ {
+ {"boneStart", DRW_ATTR_FLOAT, 3},
+ {"boneEnd", DRW_ATTR_FLOAT, 3},
+ {"wireColor", DRW_ATTR_FLOAT, 4}, /* TODO: port these to uchar color */
+ {"boneColor", DRW_ATTR_FLOAT, 4},
+ {"headColor", DRW_ATTR_FLOAT, 4},
+ {"tailColor", DRW_ATTR_FLOAT, 4},
+ });
+
+ DRWShadingGroup *grp = DRW_shgroup_instance_create(
+ sh_data->bone_stick, pass, DRW_cache_bone_stick_get(), g_formats.instance_bone_stick);
+ DRW_shgroup_uniform_vec2(grp, "viewportSize", DRW_viewport_size_get(), 1);
+ DRW_shgroup_uniform_float_copy(grp, "stickSize", 5.0f * U.pixelsize);
+ if (sh_cfg == GPU_SHADER_CFG_CLIPPED) {
+ DRW_shgroup_world_clip_planes_from_rv3d(grp, DRW_context_state_get()->rv3d);
+ }
+ return grp;
}
struct DRWShadingGroup *shgroup_instance_bone_dof(struct DRWPass *pass, struct GPUBatch *geom)
{
- COMMON_Shaders *sh_data = &g_shaders[GPU_SHADER_CFG_DEFAULT];
- if (sh_data->bone_dofs == NULL) {
- sh_data->bone_dofs = DRW_shader_create(
- datatoc_armature_dof_vert_glsl, NULL,
- datatoc_gpu_shader_flat_color_frag_glsl, NULL);
- }
-
- DRW_shgroup_instance_format(g_formats.instance_bone_dof, {
- {"InstanceModelMatrix", DRW_ATTR_FLOAT, 16},
- {"color", DRW_ATTR_FLOAT, 4},
- {"amin", DRW_ATTR_FLOAT, 2},
- {"amax", DRW_ATTR_FLOAT, 2},
- });
-
- DRWShadingGroup *grp = DRW_shgroup_instance_create(
- sh_data->bone_dofs,
- pass, geom,
- g_formats.instance_bone_dof);
-
- return grp;
+ COMMON_Shaders *sh_data = &g_shaders[GPU_SHADER_CFG_DEFAULT];
+ if (sh_data->bone_dofs == NULL) {
+ sh_data->bone_dofs = DRW_shader_create(
+ datatoc_armature_dof_vert_glsl, NULL, datatoc_gpu_shader_flat_color_frag_glsl, NULL);
+ }
+
+ DRW_shgroup_instance_format(g_formats.instance_bone_dof,
+ {
+ {"InstanceModelMatrix", DRW_ATTR_FLOAT, 16},
+ {"color", DRW_ATTR_FLOAT, 4},
+ {"amin", DRW_ATTR_FLOAT, 2},
+ {"amax", DRW_ATTR_FLOAT, 2},
+ });
+
+ DRWShadingGroup *grp = DRW_shgroup_instance_create(
+ sh_data->bone_dofs, pass, geom, g_formats.instance_bone_dof);
+
+ return grp;
}
struct GPUShader *mpath_line_shader_get(void)
{
- COMMON_Shaders *sh_data = &g_shaders[GPU_SHADER_CFG_DEFAULT];
- if (sh_data->mpath_line_sh == NULL) {
- sh_data->mpath_line_sh = DRW_shader_create_with_lib(
- datatoc_animviz_mpath_lines_vert_glsl,
- datatoc_animviz_mpath_lines_geom_glsl,
- datatoc_gpu_shader_3D_smooth_color_frag_glsl,
- datatoc_common_globals_lib_glsl,
- NULL);
- }
- return sh_data->mpath_line_sh;
+ COMMON_Shaders *sh_data = &g_shaders[GPU_SHADER_CFG_DEFAULT];
+ if (sh_data->mpath_line_sh == NULL) {
+ sh_data->mpath_line_sh = DRW_shader_create_with_lib(
+ datatoc_animviz_mpath_lines_vert_glsl,
+ datatoc_animviz_mpath_lines_geom_glsl,
+ datatoc_gpu_shader_3D_smooth_color_frag_glsl,
+ datatoc_common_globals_lib_glsl,
+ NULL);
+ }
+ return sh_data->mpath_line_sh;
}
-
struct GPUShader *mpath_points_shader_get(void)
{
- COMMON_Shaders *sh_data = &g_shaders[GPU_SHADER_CFG_DEFAULT];
- if (sh_data->mpath_points_sh == NULL) {
- sh_data->mpath_points_sh = DRW_shader_create_with_lib(
- datatoc_animviz_mpath_points_vert_glsl,
- NULL,
- datatoc_gpu_shader_point_varying_color_frag_glsl,
- datatoc_common_globals_lib_glsl,
- NULL);
- }
- return sh_data->mpath_points_sh;
+ COMMON_Shaders *sh_data = &g_shaders[GPU_SHADER_CFG_DEFAULT];
+ if (sh_data->mpath_points_sh == NULL) {
+ sh_data->mpath_points_sh = DRW_shader_create_with_lib(
+ datatoc_animviz_mpath_points_vert_glsl,
+ NULL,
+ datatoc_gpu_shader_point_varying_color_frag_glsl,
+ datatoc_common_globals_lib_glsl,
+ NULL);
+ }
+ return sh_data->mpath_points_sh;
}
struct GPUShader *volume_velocity_shader_get(bool use_needle)
{
- COMMON_Shaders *sh_data = &g_shaders[GPU_SHADER_CFG_DEFAULT];
- if (use_needle) {
- if (sh_data->volume_velocity_needle_sh == NULL) {
- sh_data->volume_velocity_needle_sh = DRW_shader_create(
- datatoc_volume_velocity_vert_glsl, NULL,
- datatoc_gpu_shader_flat_color_frag_glsl, "#define USE_NEEDLE");
- }
- return sh_data->volume_velocity_needle_sh;
- }
- else {
- if (sh_data->volume_velocity_sh == NULL) {
- sh_data->volume_velocity_sh = DRW_shader_create(
- datatoc_volume_velocity_vert_glsl, NULL,
- datatoc_gpu_shader_flat_color_frag_glsl, NULL);
- }
- return sh_data->volume_velocity_sh;
- }
+ COMMON_Shaders *sh_data = &g_shaders[GPU_SHADER_CFG_DEFAULT];
+ if (use_needle) {
+ if (sh_data->volume_velocity_needle_sh == NULL) {
+ sh_data->volume_velocity_needle_sh = DRW_shader_create(
+ datatoc_volume_velocity_vert_glsl,
+ NULL,
+ datatoc_gpu_shader_flat_color_frag_glsl,
+ "#define USE_NEEDLE");
+ }
+ return sh_data->volume_velocity_needle_sh;
+ }
+ else {
+ if (sh_data->volume_velocity_sh == NULL) {
+ sh_data->volume_velocity_sh = DRW_shader_create(
+ datatoc_volume_velocity_vert_glsl, NULL, datatoc_gpu_shader_flat_color_frag_glsl, NULL);
+ }
+ return sh_data->volume_velocity_sh;
+ }
}
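volume_velocity_shader_get() shows the compile-time-variant idiom used throughout this file: one GLSL source, two cached programs that differ only in the `defines` string passed as the last argument of DRW_shader_create(). A sketch of the same idiom with a hypothetical define:

/* Hedged sketch: USE_FANCY and my_variant_shader_get are hypothetical names;
 * the sources reuse the datatoc strings referenced above. */
static GPUShader *my_variant_shader_get(bool use_fancy)
{
  static GPUShader *sh_plain = NULL, *sh_fancy = NULL;
  GPUShader **sh = use_fancy ? &sh_fancy : &sh_plain;
  if (*sh == NULL) {
    *sh = DRW_shader_create(datatoc_volume_velocity_vert_glsl,
                            NULL,
                            datatoc_gpu_shader_flat_color_frag_glsl,
                            use_fancy ? "#define USE_FANCY" : NULL);
  }
  return *sh;
}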
/* ******************************************** COLOR UTILS *********************************************** */
@@ -980,158 +1054,207 @@ struct GPUShader *volume_velocity_shader_get(bool use_needle)
*/
int DRW_object_wire_theme_get(Object *ob, ViewLayer *view_layer, float **r_color)
{
- const DRWContextState *draw_ctx = DRW_context_state_get();
- const bool is_edit = (draw_ctx->object_mode & OB_MODE_EDIT) && (ob->mode & OB_MODE_EDIT);
- const bool active = (view_layer->basact && view_layer->basact->object == ob);
- /* Confusing logic here: there are two methods of setting the color,
- * 'colortab[colindex]' and 'theme_id'; colindex overrides theme_id.
- *
- * Note: no theme yet for 'colindex'. */
- int theme_id = is_edit ? TH_WIRE_EDIT : TH_WIRE;
-
- if (is_edit) {
- /* fallback to TH_WIRE */
- }
- else if (((G.moving & G_TRANSFORM_OBJ) != 0) &&
- ((ob->base_flag & BASE_SELECTED) != 0))
- {
- theme_id = TH_TRANSFORM;
- }
- else {
- /* Sets the 'theme_id' or fallback to wire */
- if ((ob->base_flag & BASE_SELECTED) != 0) {
- theme_id = (active) ? TH_ACTIVE : TH_SELECT;
- }
- else {
- if (ob->type == OB_LAMP) { theme_id = TH_LIGHT; }
- else if (ob->type == OB_SPEAKER) { theme_id = TH_SPEAKER; }
- else if (ob->type == OB_CAMERA) { theme_id = TH_CAMERA; }
- else if (ob->type == OB_EMPTY) { theme_id = TH_EMPTY; }
- else if (ob->type == OB_LIGHTPROBE) { theme_id = TH_EMPTY; } /* TODO add lightprobe color */
- /* fallback to TH_WIRE */
- }
- }
-
- if (r_color != NULL) {
- if (UNLIKELY(ob->base_flag & BASE_FROM_SET)) {
- *r_color = G_draw.block.colorDupli;
- }
- else if (UNLIKELY(ob->base_flag & BASE_FROM_DUPLI)) {
- switch (theme_id) {
- case TH_ACTIVE:
- case TH_SELECT: *r_color = G_draw.block.colorDupliSelect; break;
- case TH_TRANSFORM: *r_color = G_draw.block.colorTransform; break;
- default: *r_color = G_draw.block.colorDupli; break;
- }
- }
- else {
- switch (theme_id) {
- case TH_WIRE_EDIT: *r_color = G_draw.block.colorWireEdit; break;
- case TH_ACTIVE: *r_color = G_draw.block.colorActive; break;
- case TH_SELECT: *r_color = G_draw.block.colorSelect; break;
- case TH_TRANSFORM: *r_color = G_draw.block.colorTransform; break;
- case TH_SPEAKER: *r_color = G_draw.block.colorSpeaker; break;
- case TH_CAMERA: *r_color = G_draw.block.colorCamera; break;
- case TH_EMPTY: *r_color = G_draw.block.colorEmpty; break;
- case TH_LIGHT: *r_color = G_draw.block.colorLight; break;
- default: *r_color = G_draw.block.colorWire; break;
- }
- }
- }
-
- return theme_id;
+ const DRWContextState *draw_ctx = DRW_context_state_get();
+ const bool is_edit = (draw_ctx->object_mode & OB_MODE_EDIT) && (ob->mode & OB_MODE_EDIT);
+ const bool active = (view_layer->basact && view_layer->basact->object == ob);
+ /* Confusing logic here: there are two methods of setting the color,
+ * 'colortab[colindex]' and 'theme_id'; colindex overrides theme_id.
+ *
+ * Note: no theme yet for 'colindex'. */
+ int theme_id = is_edit ? TH_WIRE_EDIT : TH_WIRE;
+
+ if (is_edit) {
+ /* fallback to TH_WIRE */
+ }
+ else if (((G.moving & G_TRANSFORM_OBJ) != 0) && ((ob->base_flag & BASE_SELECTED) != 0)) {
+ theme_id = TH_TRANSFORM;
+ }
+ else {
+ /* Sets the 'theme_id' or fallback to wire */
+ if ((ob->base_flag & BASE_SELECTED) != 0) {
+ theme_id = (active) ? TH_ACTIVE : TH_SELECT;
+ }
+ else {
+ if (ob->type == OB_LAMP) {
+ theme_id = TH_LIGHT;
+ }
+ else if (ob->type == OB_SPEAKER) {
+ theme_id = TH_SPEAKER;
+ }
+ else if (ob->type == OB_CAMERA) {
+ theme_id = TH_CAMERA;
+ }
+ else if (ob->type == OB_EMPTY) {
+ theme_id = TH_EMPTY;
+ }
+ else if (ob->type == OB_LIGHTPROBE) {
+ theme_id = TH_EMPTY;
+ } /* TODO add lightprobe color */
+ /* fallback to TH_WIRE */
+ }
+ }
+
+ if (r_color != NULL) {
+ if (UNLIKELY(ob->base_flag & BASE_FROM_SET)) {
+ *r_color = G_draw.block.colorDupli;
+ }
+ else if (UNLIKELY(ob->base_flag & BASE_FROM_DUPLI)) {
+ switch (theme_id) {
+ case TH_ACTIVE:
+ case TH_SELECT:
+ *r_color = G_draw.block.colorDupliSelect;
+ break;
+ case TH_TRANSFORM:
+ *r_color = G_draw.block.colorTransform;
+ break;
+ default:
+ *r_color = G_draw.block.colorDupli;
+ break;
+ }
+ }
+ else {
+ switch (theme_id) {
+ case TH_WIRE_EDIT:
+ *r_color = G_draw.block.colorWireEdit;
+ break;
+ case TH_ACTIVE:
+ *r_color = G_draw.block.colorActive;
+ break;
+ case TH_SELECT:
+ *r_color = G_draw.block.colorSelect;
+ break;
+ case TH_TRANSFORM:
+ *r_color = G_draw.block.colorTransform;
+ break;
+ case TH_SPEAKER:
+ *r_color = G_draw.block.colorSpeaker;
+ break;
+ case TH_CAMERA:
+ *r_color = G_draw.block.colorCamera;
+ break;
+ case TH_EMPTY:
+ *r_color = G_draw.block.colorEmpty;
+ break;
+ case TH_LIGHT:
+ *r_color = G_draw.block.colorLight;
+ break;
+ default:
+ *r_color = G_draw.block.colorWire;
+ break;
+ }
+ }
+ }
+
+ return theme_id;
}
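The returned pointer aliases a color inside G_draw.block (the theme UBO mirror declared in draw_common.h below), so callers must treat it as read-only. A short usage sketch, assuming `ob` and `view_layer` come from the draw context:

/* Hedged sketch: the pointer is owned by the draw manager; copy it, never
 * free or write through it. */
float *color = NULL;
const int theme_id = DRW_object_wire_theme_get(ob, view_layer, &color);
float wire_color[4];
copy_v4_v4(wire_color, color);
UNUSED_VARS(theme_id);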
/* XXX This is very stupid; better to find something more general. */
float *DRW_color_background_blend_get(int theme_id)
{
- static float colors[11][4];
- float *ret;
-
- switch (theme_id) {
- case TH_WIRE_EDIT: ret = colors[0]; break;
- case TH_ACTIVE: ret = colors[1]; break;
- case TH_SELECT: ret = colors[2]; break;
- case TH_TRANSFORM: ret = colors[5]; break;
- case TH_SPEAKER: ret = colors[6]; break;
- case TH_CAMERA: ret = colors[7]; break;
- case TH_EMPTY: ret = colors[8]; break;
- case TH_LIGHT: ret = colors[9]; break;
- default: ret = colors[10]; break;
- }
-
- UI_GetThemeColorBlendShade4fv(theme_id, TH_BACK, 0.5, 0, ret);
-
- return ret;
+ static float colors[11][4];
+ float *ret;
+
+ switch (theme_id) {
+ case TH_WIRE_EDIT:
+ ret = colors[0];
+ break;
+ case TH_ACTIVE:
+ ret = colors[1];
+ break;
+ case TH_SELECT:
+ ret = colors[2];
+ break;
+ case TH_TRANSFORM:
+ ret = colors[5];
+ break;
+ case TH_SPEAKER:
+ ret = colors[6];
+ break;
+ case TH_CAMERA:
+ ret = colors[7];
+ break;
+ case TH_EMPTY:
+ ret = colors[8];
+ break;
+ case TH_LIGHT:
+ ret = colors[9];
+ break;
+ default:
+ ret = colors[10];
+ break;
+ }
+
+ UI_GetThemeColorBlendShade4fv(theme_id, TH_BACK, 0.5, 0, ret);
+
+ return ret;
}
-
bool DRW_object_is_flat(Object *ob, int *axis)
{
- float dim[3];
-
- if (!ELEM(ob->type, OB_MESH, OB_CURVE, OB_SURF, OB_FONT, OB_MBALL)) {
- /* Non-mesh objects cannot be considered flat. */
- return false;
- }
-
- BKE_object_dimensions_get(ob, dim);
- if (dim[0] == 0.0f) {
- *axis = 0;
- return true;
- }
- else if (dim[1] == 0.0f) {
- *axis = 1;
- return true;
- }
- else if (dim[2] == 0.0f) {
- *axis = 2;
- return true;
- }
- return false;
+ float dim[3];
+
+ if (!ELEM(ob->type, OB_MESH, OB_CURVE, OB_SURF, OB_FONT, OB_MBALL)) {
+ /* Non-mesh objects cannot be considered flat. */
+ return false;
+ }
+
+ BKE_object_dimensions_get(ob, dim);
+ if (dim[0] == 0.0f) {
+ *axis = 0;
+ return true;
+ }
+ else if (dim[1] == 0.0f) {
+ *axis = 1;
+ return true;
+ }
+ else if (dim[2] == 0.0f) {
+ *axis = 2;
+ return true;
+ }
+ return false;
}
bool DRW_object_axis_orthogonal_to_view(Object *ob, int axis)
{
- float ob_rot[3][3], invviewmat[4][4];
- DRW_viewport_matrix_get(invviewmat, DRW_MAT_VIEWINV);
- BKE_object_rot_to_mat3(ob, ob_rot, true);
- float dot = dot_v3v3(ob_rot[axis], invviewmat[2]);
- if (fabsf(dot) < 1e-3) {
- return true;
- }
-
- return false;
+ float ob_rot[3][3], invviewmat[4][4];
+ DRW_viewport_matrix_get(invviewmat, DRW_MAT_VIEWINV);
+ BKE_object_rot_to_mat3(ob, ob_rot, true);
+ float dot = dot_v3v3(ob_rot[axis], invviewmat[2]);
+ if (fabsf(dot) < 1e-3) {
+ return true;
+ }
+
+ return false;
}
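Taken together, the two predicates above detect a degenerate display case: an object with zero extent along some local axis whose flat plane contains the view direction, i.e. it is seen exactly edge-on. A hedged combination sketch (the helper name is hypothetical):

/* True when `ob` is flat and its flat axis is orthogonal to the view
 * direction, so the object projects to a line. */
static bool object_is_flat_and_edge_on(Object *ob)
{
  int axis;
  return DRW_object_is_flat(ob, &axis) && DRW_object_axis_orthogonal_to_view(ob, axis);
}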
static void DRW_evaluate_weight_to_color(const float weight, float result[4])
{
- if (U.flag & USER_CUSTOM_RANGE) {
- BKE_colorband_evaluate(&U.coba_weight, weight, result);
- }
- else {
- /* Use gamma correction to even out the color bands:
- * increasing widens yellow/cyan vs red/green/blue.
- * Gamma 1.0 produces the original 2.79 color ramp. */
- const float gamma = 1.5f;
- float hsv[3] = {(2.0f / 3.0f) * (1.0f - weight), 1.0f, pow(0.5f + 0.5f * weight, gamma)};
-
- hsv_to_rgb_v(hsv, result);
-
- for (int i = 0; i < 3; i++) {
- result[i] = pow(result[i], 1.0f / gamma);
- }
- }
+ if (U.flag & USER_CUSTOM_RANGE) {
+ BKE_colorband_evaluate(&U.coba_weight, weight, result);
+ }
+ else {
+ /* Use gamma correction to even out the color bands:
+ * increasing widens yellow/cyan vs red/green/blue.
+ * Gamma 1.0 produces the original 2.79 color ramp. */
+ const float gamma = 1.5f;
+ float hsv[3] = {(2.0f / 3.0f) * (1.0f - weight), 1.0f, pow(0.5f + 0.5f * weight, gamma)};
+
+ hsv_to_rgb_v(hsv, result);
+
+ for (int i = 0; i < 3; i++) {
+ result[i] = pow(result[i], 1.0f / gamma);
+ }
+ }
}
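A quick endpoint check of the default (non-custom-range) branch: weight 1.0 gives hsv (0, 1, 1), i.e. pure red, while weight 0.0 gives hue 2/3 with value pow(0.5, 1.5), which the inverse-gamma loop maps back to roughly (0, 0, 0.5), a half-bright blue. Since the function is static, this sketch belongs in the same file:

float c_lo[4], c_hi[4];
DRW_evaluate_weight_to_color(0.0f, c_lo); /* ~(0.0, 0.0, 0.5); alpha untouched */
DRW_evaluate_weight_to_color(1.0f, c_hi); /* exactly (1.0, 0.0, 0.0) */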
static GPUTexture *DRW_create_weight_colorramp_texture(void)
{
- char error[256];
- float pixels[256][4];
- for (int i = 0 ; i < 256 ; i ++) {
- DRW_evaluate_weight_to_color(i / 255.0f, pixels[i]);
- pixels[i][3] = 1.0f;
- }
-
- return GPU_texture_create_1d(256, GPU_RGBA8, pixels[0], error);
+ char error[256];
+ float pixels[256][4];
+ for (int i = 0; i < 256; i++) {
+ DRW_evaluate_weight_to_color(i / 255.0f, pixels[i]);
+ pixels[i][3] = 1.0f;
+ }
+
+ return GPU_texture_create_1d(256, GPU_RGBA8, pixels[0], error);
}
diff --git a/source/blender/draw/intern/draw_common.h b/source/blender/draw/intern/draw_common.h
index f6ebfcab788..db8f8d46e85 100644
--- a/source/blender/draw/intern/draw_common.h
+++ b/source/blender/draw/intern/draw_common.h
@@ -40,81 +40,81 @@ struct ViewLayer;
/* Keep in sync with: common_globals_lib.glsl (globalsBlock) */
/* NOTE! Also keep all colors as vec4 and between UBO_FIRST_COLOR and UBO_LAST_COLOR */
typedef struct GlobalsUboStorage {
- /* UBOs data needs to be 16 byte aligned (size of vec4) */
- float colorWire[4];
- float colorWireEdit[4];
- float colorActive[4];
- float colorSelect[4];
- float colorDupliSelect[4];
- float colorDupli[4];
- float colorLibrarySelect[4];
- float colorLibrary[4];
- float colorTransform[4];
- float colorLight[4];
- float colorSpeaker[4];
- float colorCamera[4];
- float colorEmpty[4];
- float colorVertex[4];
- float colorVertexSelect[4];
- float colorVertexUnreferenced[4];
- float colorVertexMissingData[4];
- float colorEditMeshActive[4];
- float colorEdgeSelect[4];
- float colorEdgeSeam[4];
- float colorEdgeSharp[4];
- float colorEdgeCrease[4];
- float colorEdgeBWeight[4];
- float colorEdgeFaceSelect[4];
- float colorEdgeFreestyle[4];
- float colorFace[4];
- float colorFaceSelect[4];
- float colorFaceFreestyle[4];
- float colorNormal[4];
- float colorVNormal[4];
- float colorLNormal[4];
- float colorFaceDot[4];
-
- float colorDeselect[4];
- float colorOutline[4];
- float colorLightNoAlpha[4];
-
- float colorBackground[4];
- float colorEditMeshMiddle[4];
-
- float colorHandleFree[4];
- float colorHandleAuto[4];
- float colorHandleVect[4];
- float colorHandleAlign[4];
- float colorHandleAutoclamp[4];
- float colorHandleSelFree[4];
- float colorHandleSelAuto[4];
- float colorHandleSelVect[4];
- float colorHandleSelAlign[4];
- float colorHandleSelAutoclamp[4];
- float colorNurbUline[4];
- float colorNurbVline[4];
- float colorNurbSelUline[4];
- float colorNurbSelVline[4];
- float colorActiveSpline[4];
-
- float colorBonePose[4];
-
- float colorCurrentFrame[4];
-
- float colorGrid[4];
- float colorGridEmphasise[4];
- float colorGridAxisX[4];
- float colorGridAxisY[4];
- float colorGridAxisZ[4];
-
- /* NOTE! Put all colors before UBO_LAST_COLOR */
-
- /* Pack individual floats at the end of the buffer to avoid alignment errors */
- float sizeLightCenter, sizeLightCircle, sizeLightCircleShadow;
- float sizeVertex, sizeEdge, sizeEdgeFix, sizeFaceDot;
- float gridDistance, gridResolution, gridSubdivisions, gridScale;
-
- float pad_globalsBlock;
+ /* UBOs data needs to be 16 byte aligned (size of vec4) */
+ float colorWire[4];
+ float colorWireEdit[4];
+ float colorActive[4];
+ float colorSelect[4];
+ float colorDupliSelect[4];
+ float colorDupli[4];
+ float colorLibrarySelect[4];
+ float colorLibrary[4];
+ float colorTransform[4];
+ float colorLight[4];
+ float colorSpeaker[4];
+ float colorCamera[4];
+ float colorEmpty[4];
+ float colorVertex[4];
+ float colorVertexSelect[4];
+ float colorVertexUnreferenced[4];
+ float colorVertexMissingData[4];
+ float colorEditMeshActive[4];
+ float colorEdgeSelect[4];
+ float colorEdgeSeam[4];
+ float colorEdgeSharp[4];
+ float colorEdgeCrease[4];
+ float colorEdgeBWeight[4];
+ float colorEdgeFaceSelect[4];
+ float colorEdgeFreestyle[4];
+ float colorFace[4];
+ float colorFaceSelect[4];
+ float colorFaceFreestyle[4];
+ float colorNormal[4];
+ float colorVNormal[4];
+ float colorLNormal[4];
+ float colorFaceDot[4];
+
+ float colorDeselect[4];
+ float colorOutline[4];
+ float colorLightNoAlpha[4];
+
+ float colorBackground[4];
+ float colorEditMeshMiddle[4];
+
+ float colorHandleFree[4];
+ float colorHandleAuto[4];
+ float colorHandleVect[4];
+ float colorHandleAlign[4];
+ float colorHandleAutoclamp[4];
+ float colorHandleSelFree[4];
+ float colorHandleSelAuto[4];
+ float colorHandleSelVect[4];
+ float colorHandleSelAlign[4];
+ float colorHandleSelAutoclamp[4];
+ float colorNurbUline[4];
+ float colorNurbVline[4];
+ float colorNurbSelUline[4];
+ float colorNurbSelVline[4];
+ float colorActiveSpline[4];
+
+ float colorBonePose[4];
+
+ float colorCurrentFrame[4];
+
+ float colorGrid[4];
+ float colorGridEmphasise[4];
+ float colorGridAxisX[4];
+ float colorGridAxisY[4];
+ float colorGridAxisZ[4];
+
+ /* NOTE! Put all colors before UBO_LAST_COLOR */
+
+ /* Pack individual floats at the end of the buffer to avoid alignment errors */
+ float sizeLightCenter, sizeLightCircle, sizeLightCircleShadow;
+ float sizeVertex, sizeEdge, sizeEdgeFix, sizeFaceDot;
+ float gridDistance, gridResolution, gridSubdivisions, gridScale;
+
+ float pad_globalsBlock;
} GlobalsUboStorage;
/* Keep in sync with globalsBlock in shaders */
BLI_STATIC_ASSERT_ALIGN(GlobalsUboStorage, 16)
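The layout rules in the comments (vec4 colors first, loose floats packed at the end, plus an explicit pad member) exist so this C struct matches the std140 layout of globalsBlock byte for byte; the assert turns a size mismatch into a compile error instead of silent UBO corruption. Roughly what it expands to (a sketch only; the actual macro lives in BLI_utildefines.h):

/* Sketch of the check, not the real macro body. */
BLI_STATIC_ASSERT((sizeof(GlobalsUboStorage) % 16) == 0,
                  "GlobalsUboStorage size must be a multiple of vec4")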
@@ -122,34 +122,78 @@ BLI_STATIC_ASSERT_ALIGN(GlobalsUboStorage, 16)
void DRW_globals_update(void);
void DRW_globals_free(void);
-void DRW_shgroup_world_clip_planes_from_rv3d(struct DRWShadingGroup *shgrp, const RegionView3D *rv3d);
+void DRW_shgroup_world_clip_planes_from_rv3d(struct DRWShadingGroup *shgrp,
+ const RegionView3D *rv3d);
struct DRWShadingGroup *shgroup_dynlines_flat_color(struct DRWPass *pass, eGPUShaderConfig sh_cfg);
-struct DRWShadingGroup *shgroup_dynlines_dashed_uniform_color(struct DRWPass *pass, const float color[4], eGPUShaderConfig sh_cfg);
-struct DRWShadingGroup *shgroup_dynpoints_uniform_color(struct DRWPass *pass, const float color[4], const float *size, eGPUShaderConfig sh_cfg);
-struct DRWShadingGroup *shgroup_groundlines_uniform_color(struct DRWPass *pass, const float color[4], eGPUShaderConfig sh_cfg);
-struct DRWShadingGroup *shgroup_groundpoints_uniform_color(struct DRWPass *pass, const float color[4], eGPUShaderConfig sh_cfg);
-struct DRWShadingGroup *shgroup_instance_screenspace(struct DRWPass *pass, struct GPUBatch *geom, const float *size, eGPUShaderConfig sh_cfg);
+struct DRWShadingGroup *shgroup_dynlines_dashed_uniform_color(struct DRWPass *pass,
+ const float color[4],
+ eGPUShaderConfig sh_cfg);
+struct DRWShadingGroup *shgroup_dynpoints_uniform_color(struct DRWPass *pass,
+ const float color[4],
+ const float *size,
+ eGPUShaderConfig sh_cfg);
+struct DRWShadingGroup *shgroup_groundlines_uniform_color(struct DRWPass *pass,
+ const float color[4],
+ eGPUShaderConfig sh_cfg);
+struct DRWShadingGroup *shgroup_groundpoints_uniform_color(struct DRWPass *pass,
+ const float color[4],
+ eGPUShaderConfig sh_cfg);
+struct DRWShadingGroup *shgroup_instance_screenspace(struct DRWPass *pass,
+ struct GPUBatch *geom,
+ const float *size,
+ eGPUShaderConfig sh_cfg);
struct DRWShadingGroup *shgroup_instance_solid(struct DRWPass *pass, struct GPUBatch *geom);
struct DRWShadingGroup *shgroup_instance_wire(struct DRWPass *pass, struct GPUBatch *geom);
-struct DRWShadingGroup *shgroup_instance_screen_aligned(struct DRWPass *pass, struct GPUBatch *geom, eGPUShaderConfig sh_cfg);
-struct DRWShadingGroup *shgroup_instance_empty_axes(struct DRWPass *pass, struct GPUBatch *geom, eGPUShaderConfig sh_cfg);
-struct DRWShadingGroup *shgroup_instance_scaled(struct DRWPass *pass, struct GPUBatch *geom, eGPUShaderConfig sh_cfg);
-struct DRWShadingGroup *shgroup_instance(struct DRWPass *pass, struct GPUBatch *geom, eGPUShaderConfig sh_cfg);
-struct DRWShadingGroup *shgroup_instance_alpha(struct DRWPass *pass, struct GPUBatch *geom, eGPUShaderConfig sh_cfg);
-struct DRWShadingGroup *shgroup_instance_outline(struct DRWPass *pass, struct GPUBatch *geom, int *baseid);
-struct DRWShadingGroup *shgroup_camera_instance(struct DRWPass *pass, struct GPUBatch *geom, eGPUShaderConfig sh_cfg);
-struct DRWShadingGroup *shgroup_distance_lines_instance(struct DRWPass *pass, struct GPUBatch *geom, eGPUShaderConfig sh_cfg);
-struct DRWShadingGroup *shgroup_spot_instance(struct DRWPass *pass, struct GPUBatch *geom, eGPUShaderConfig sh_cfg);
-struct DRWShadingGroup *shgroup_instance_mball_handles(struct DRWPass *pass, eGPUShaderConfig sh_cfg);
+struct DRWShadingGroup *shgroup_instance_screen_aligned(struct DRWPass *pass,
+ struct GPUBatch *geom,
+ eGPUShaderConfig sh_cfg);
+struct DRWShadingGroup *shgroup_instance_empty_axes(struct DRWPass *pass,
+ struct GPUBatch *geom,
+ eGPUShaderConfig sh_cfg);
+struct DRWShadingGroup *shgroup_instance_scaled(struct DRWPass *pass,
+ struct GPUBatch *geom,
+ eGPUShaderConfig sh_cfg);
+struct DRWShadingGroup *shgroup_instance(struct DRWPass *pass,
+ struct GPUBatch *geom,
+ eGPUShaderConfig sh_cfg);
+struct DRWShadingGroup *shgroup_instance_alpha(struct DRWPass *pass,
+ struct GPUBatch *geom,
+ eGPUShaderConfig sh_cfg);
+struct DRWShadingGroup *shgroup_instance_outline(struct DRWPass *pass,
+ struct GPUBatch *geom,
+ int *baseid);
+struct DRWShadingGroup *shgroup_camera_instance(struct DRWPass *pass,
+ struct GPUBatch *geom,
+ eGPUShaderConfig sh_cfg);
+struct DRWShadingGroup *shgroup_distance_lines_instance(struct DRWPass *pass,
+ struct GPUBatch *geom,
+ eGPUShaderConfig sh_cfg);
+struct DRWShadingGroup *shgroup_spot_instance(struct DRWPass *pass,
+ struct GPUBatch *geom,
+ eGPUShaderConfig sh_cfg);
+struct DRWShadingGroup *shgroup_instance_mball_handles(struct DRWPass *pass,
+ eGPUShaderConfig sh_cfg);
struct DRWShadingGroup *shgroup_instance_bone_axes(struct DRWPass *pass, eGPUShaderConfig sh_cfg);
-struct DRWShadingGroup *shgroup_instance_bone_envelope_distance(struct DRWPass *pass, eGPUShaderConfig sh_cfg);
-struct DRWShadingGroup *shgroup_instance_bone_envelope_outline(struct DRWPass *pass, eGPUShaderConfig sh_cfg);
-struct DRWShadingGroup *shgroup_instance_bone_envelope_solid(struct DRWPass *pass, bool transp, eGPUShaderConfig sh_cfg);
-struct DRWShadingGroup *shgroup_instance_bone_shape_outline(struct DRWPass *pass, struct GPUBatch *geom, eGPUShaderConfig sh_cfg);
-struct DRWShadingGroup *shgroup_instance_bone_shape_solid(struct DRWPass *pass, struct GPUBatch *geom, bool transp, eGPUShaderConfig sh_cfg);
-struct DRWShadingGroup *shgroup_instance_bone_sphere_outline(struct DRWPass *pass, eGPUShaderConfig sh_cfg);
-struct DRWShadingGroup *shgroup_instance_bone_sphere_solid(struct DRWPass *pass, bool transp, eGPUShaderConfig sh_cfg);
+struct DRWShadingGroup *shgroup_instance_bone_envelope_distance(struct DRWPass *pass,
+ eGPUShaderConfig sh_cfg);
+struct DRWShadingGroup *shgroup_instance_bone_envelope_outline(struct DRWPass *pass,
+ eGPUShaderConfig sh_cfg);
+struct DRWShadingGroup *shgroup_instance_bone_envelope_solid(struct DRWPass *pass,
+ bool transp,
+ eGPUShaderConfig sh_cfg);
+struct DRWShadingGroup *shgroup_instance_bone_shape_outline(struct DRWPass *pass,
+ struct GPUBatch *geom,
+ eGPUShaderConfig sh_cfg);
+struct DRWShadingGroup *shgroup_instance_bone_shape_solid(struct DRWPass *pass,
+ struct GPUBatch *geom,
+ bool transp,
+ eGPUShaderConfig sh_cfg);
+struct DRWShadingGroup *shgroup_instance_bone_sphere_outline(struct DRWPass *pass,
+ eGPUShaderConfig sh_cfg);
+struct DRWShadingGroup *shgroup_instance_bone_sphere_solid(struct DRWPass *pass,
+ bool transp,
+ eGPUShaderConfig sh_cfg);
struct DRWShadingGroup *shgroup_instance_bone_stick(struct DRWPass *pass, eGPUShaderConfig sh_cfg);
struct DRWShadingGroup *shgroup_instance_bone_dof(struct DRWPass *pass, struct GPUBatch *geom);
@@ -158,8 +202,7 @@ struct GPUShader *mpath_points_shader_get(void);
struct GPUShader *volume_velocity_shader_get(bool use_needle);
-int DRW_object_wire_theme_get(
- struct Object *ob, struct ViewLayer *view_layer, float **r_color);
+int DRW_object_wire_theme_get(struct Object *ob, struct ViewLayer *view_layer, float **r_color);
float *DRW_color_background_blend_get(int theme_id);
bool DRW_object_is_flat(Object *ob, int *axis);
@@ -167,15 +210,18 @@ bool DRW_object_axis_orthogonal_to_view(Object *ob, int axis);
/* draw_armature.c */
typedef struct DRWArmaturePasses {
- struct DRWPass *bone_solid;
- struct DRWPass *bone_outline;
- struct DRWPass *bone_wire;
- struct DRWPass *bone_envelope;
- struct DRWPass *bone_axes;
- struct DRWPass *relationship_lines;
+ struct DRWPass *bone_solid;
+ struct DRWPass *bone_outline;
+ struct DRWPass *bone_wire;
+ struct DRWPass *bone_envelope;
+ struct DRWPass *bone_axes;
+ struct DRWPass *relationship_lines;
} DRWArmaturePasses;
-void DRW_shgroup_armature_object(struct Object *ob, struct ViewLayer *view_layer, struct DRWArmaturePasses passes, bool transp);
+void DRW_shgroup_armature_object(struct Object *ob,
+ struct ViewLayer *view_layer,
+ struct DRWArmaturePasses passes,
+ bool transp);
void DRW_shgroup_armature_pose(struct Object *ob, struct DRWArmaturePasses passes, bool transp);
void DRW_shgroup_armature_edit(struct Object *ob, struct DRWArmaturePasses passes, bool transp);
@@ -183,37 +229,38 @@ void DRW_shgroup_armature_edit(struct Object *ob, struct DRWArmaturePasses passe
/* This creates a shading group for displaying hair.
 * The draw call is already added by this function; only add additional uniforms. */
-struct DRWShadingGroup *DRW_shgroup_hair_create(
- struct Object *object, struct ParticleSystem *psys, struct ModifierData *md,
- struct DRWPass *hair_pass,
- struct GPUShader *shader);
-
-struct DRWShadingGroup *DRW_shgroup_material_hair_create(
- struct Object *object, struct ParticleSystem *psys, struct ModifierData *md,
- struct DRWPass *hair_pass,
- struct GPUMaterial *material);
+struct DRWShadingGroup *DRW_shgroup_hair_create(struct Object *object,
+ struct ParticleSystem *psys,
+ struct ModifierData *md,
+ struct DRWPass *hair_pass,
+ struct GPUShader *shader);
+
+struct DRWShadingGroup *DRW_shgroup_material_hair_create(struct Object *object,
+ struct ParticleSystem *psys,
+ struct ModifierData *md,
+ struct DRWPass *hair_pass,
+ struct GPUMaterial *material);
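Per the comment above these declarations, the hair helpers differ from the shgroup_instance_* constructors: the draw call is already registered, so callers only attach uniforms. A hedged sketch (assumed context: `ob`, `psys`, `md`, `hair_pass` and `sh` come from the engine's cache population step; the uniform name is illustrative):

/* Hedged usage sketch; only extra uniforms are added after creation. */
struct DRWShadingGroup *grp = DRW_shgroup_hair_create(ob, psys, md, hair_pass, sh);
DRW_shgroup_uniform_vec2(grp, "viewportSize", DRW_viewport_size_get(), 1);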
void DRW_hair_init(void);
void DRW_hair_update(void);
void DRW_hair_free(void);
/* pose_mode.c */
-bool DRW_pose_mode_armature(
- struct Object *ob, struct Object *active_ob);
+bool DRW_pose_mode_armature(struct Object *ob, struct Object *active_ob);
/* draw_common.c */
struct DRW_Global {
- /** If needed, contains all global/theme colors.
- * Add required theme colors/values to DRW_globals_update() and update the UBO.
- * Not needed for constant colors. */
- GlobalsUboStorage block;
- /** Define "globalsBlock" uniform for 'block'. */
- struct GPUUniformBuffer *block_ubo;
+ /** If needed, contains all global/theme colors.
+ * Add required theme colors/values to DRW_globals_update() and update the UBO.
+ * Not needed for constant colors. */
+ GlobalsUboStorage block;
+ /** Define "globalsBlock" uniform for 'block'. */
+ struct GPUUniformBuffer *block_ubo;
- struct GPUTexture *ramp;
- struct GPUTexture *weight_ramp;
+ struct GPUTexture *ramp;
+ struct GPUTexture *weight_ramp;
- struct GPUUniformBuffer *view_ubo;
+ struct GPUUniformBuffer *view_ubo;
};
extern struct DRW_Global G_draw;
diff --git a/source/blender/draw/intern/draw_debug.c b/source/blender/draw/intern/draw_debug.c
index 8c810f1a792..9681ffd9d3d 100644
--- a/source/blender/draw/intern/draw_debug.c
+++ b/source/blender/draw/intern/draw_debug.c
@@ -42,184 +42,185 @@ static float g_modelmat[4][4];
void DRW_debug_modelmat_reset(void)
{
- unit_m4(g_modelmat);
+ unit_m4(g_modelmat);
}
void DRW_debug_modelmat(const float modelmat[4][4])
{
- copy_m4_m4(g_modelmat, modelmat);
+ copy_m4_m4(g_modelmat, modelmat);
}
void DRW_debug_line_v3v3(const float v1[3], const float v2[3], const float color[4])
{
- DRWDebugLine *line = MEM_mallocN(sizeof(DRWDebugLine), "DRWDebugLine");
- mul_v3_m4v3(line->pos[0], g_modelmat, v1);
- mul_v3_m4v3(line->pos[1], g_modelmat, v2);
- copy_v4_v4(line->color, color);
- BLI_LINKS_PREPEND(DST.debug.lines, line);
+ DRWDebugLine *line = MEM_mallocN(sizeof(DRWDebugLine), "DRWDebugLine");
+ mul_v3_m4v3(line->pos[0], g_modelmat, v1);
+ mul_v3_m4v3(line->pos[1], g_modelmat, v2);
+ copy_v4_v4(line->color, color);
+ BLI_LINKS_PREPEND(DST.debug.lines, line);
}
void DRW_debug_polygon_v3(const float (*v)[3], const int vert_len, const float color[4])
{
- BLI_assert(vert_len > 1);
+ BLI_assert(vert_len > 1);
- for (int i = 0; i < vert_len; ++i) {
- DRW_debug_line_v3v3(v[i], v[(i + 1) % vert_len], color);
- }
+ for (int i = 0; i < vert_len; ++i) {
+ DRW_debug_line_v3v3(v[i], v[(i + 1) % vert_len], color);
+ }
}
/* NOTE: g_modelmat is still applied on top. */
void DRW_debug_m4(const float m[4][4])
{
- float v0[3] = {0.0f, 0.0f, 0.0f};
- float v1[3] = {1.0f, 0.0f, 0.0f};
- float v2[3] = {0.0f, 1.0f, 0.0f};
- float v3[3] = {0.0f, 0.0f, 1.0f};
-
- mul_m4_v3(m, v0);
- mul_m4_v3(m, v1);
- mul_m4_v3(m, v2);
- mul_m4_v3(m, v3);
-
- DRW_debug_line_v3v3(v0, v1, (float[4]){1.0f, 0.0f, 0.0f, 1.0f});
- DRW_debug_line_v3v3(v0, v2, (float[4]){0.0f, 1.0f, 0.0f, 1.0f});
- DRW_debug_line_v3v3(v0, v3, (float[4]){0.0f, 0.0f, 1.0f, 1.0f});
+ float v0[3] = {0.0f, 0.0f, 0.0f};
+ float v1[3] = {1.0f, 0.0f, 0.0f};
+ float v2[3] = {0.0f, 1.0f, 0.0f};
+ float v3[3] = {0.0f, 0.0f, 1.0f};
+
+ mul_m4_v3(m, v0);
+ mul_m4_v3(m, v1);
+ mul_m4_v3(m, v2);
+ mul_m4_v3(m, v3);
+
+ DRW_debug_line_v3v3(v0, v1, (float[4]){1.0f, 0.0f, 0.0f, 1.0f});
+ DRW_debug_line_v3v3(v0, v2, (float[4]){0.0f, 1.0f, 0.0f, 1.0f});
+ DRW_debug_line_v3v3(v0, v3, (float[4]){0.0f, 0.0f, 1.0f, 1.0f});
}
void DRW_debug_bbox(const BoundBox *bbox, const float color[4])
{
- DRW_debug_line_v3v3(bbox->vec[0], bbox->vec[1], color);
- DRW_debug_line_v3v3(bbox->vec[1], bbox->vec[2], color);
- DRW_debug_line_v3v3(bbox->vec[2], bbox->vec[3], color);
- DRW_debug_line_v3v3(bbox->vec[3], bbox->vec[0], color);
-
- DRW_debug_line_v3v3(bbox->vec[4], bbox->vec[5], color);
- DRW_debug_line_v3v3(bbox->vec[5], bbox->vec[6], color);
- DRW_debug_line_v3v3(bbox->vec[6], bbox->vec[7], color);
- DRW_debug_line_v3v3(bbox->vec[7], bbox->vec[4], color);
-
- DRW_debug_line_v3v3(bbox->vec[0], bbox->vec[4], color);
- DRW_debug_line_v3v3(bbox->vec[1], bbox->vec[5], color);
- DRW_debug_line_v3v3(bbox->vec[2], bbox->vec[6], color);
- DRW_debug_line_v3v3(bbox->vec[3], bbox->vec[7], color);
+ DRW_debug_line_v3v3(bbox->vec[0], bbox->vec[1], color);
+ DRW_debug_line_v3v3(bbox->vec[1], bbox->vec[2], color);
+ DRW_debug_line_v3v3(bbox->vec[2], bbox->vec[3], color);
+ DRW_debug_line_v3v3(bbox->vec[3], bbox->vec[0], color);
+
+ DRW_debug_line_v3v3(bbox->vec[4], bbox->vec[5], color);
+ DRW_debug_line_v3v3(bbox->vec[5], bbox->vec[6], color);
+ DRW_debug_line_v3v3(bbox->vec[6], bbox->vec[7], color);
+ DRW_debug_line_v3v3(bbox->vec[7], bbox->vec[4], color);
+
+ DRW_debug_line_v3v3(bbox->vec[0], bbox->vec[4], color);
+ DRW_debug_line_v3v3(bbox->vec[1], bbox->vec[5], color);
+ DRW_debug_line_v3v3(bbox->vec[2], bbox->vec[6], color);
+ DRW_debug_line_v3v3(bbox->vec[3], bbox->vec[7], color);
}
void DRW_debug_m4_as_bbox(const float m[4][4], const float color[4], const bool invert)
{
- BoundBox bb;
- const float min[3] = {-1.0f, -1.0f, -1.0f}, max[3] = {1.0f, 1.0f, 1.0f};
- float project_matrix[4][4];
- if (invert) {
- invert_m4_m4(project_matrix, m);
- }
- else {
- copy_m4_m4(project_matrix, m);
- }
-
- BKE_boundbox_init_from_minmax(&bb, min, max);
- for (int i = 0; i < 8; ++i) {
- mul_project_m4_v3(project_matrix, bb.vec[i]);
- }
- DRW_debug_bbox(&bb, color);
+ BoundBox bb;
+ const float min[3] = {-1.0f, -1.0f, -1.0f}, max[3] = {1.0f, 1.0f, 1.0f};
+ float project_matrix[4][4];
+ if (invert) {
+ invert_m4_m4(project_matrix, m);
+ }
+ else {
+ copy_m4_m4(project_matrix, m);
+ }
+
+ BKE_boundbox_init_from_minmax(&bb, min, max);
+ for (int i = 0; i < 8; ++i) {
+ mul_project_m4_v3(project_matrix, bb.vec[i]);
+ }
+ DRW_debug_bbox(&bb, color);
}
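A typical use of DRW_debug_m4_as_bbox() is visualizing a view frustum: with invert = true, the unit NDC cube is projected back to world space through the inverted matrix. A sketch, assuming `persmat` is a view-projection matrix (e.g. rv3d->persmat):

const float white[4] = {1.0f, 1.0f, 1.0f, 1.0f};
DRW_debug_m4_as_bbox(persmat, white, true); /* NDC cube -> world-space frustum. */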
void DRW_debug_sphere(const float center[3], const float radius, const float color[4])
{
- float size_mat[4][4];
- DRWDebugSphere *sphere = MEM_mallocN(sizeof(DRWDebugSphere), "DRWDebugSphere");
- /* Bake all transforms into a single matrix. */
- scale_m4_fl(size_mat, radius);
- copy_m4_m4(sphere->mat, g_modelmat);
- translate_m4(sphere->mat, center[0], center[1], center[2]);
- mul_m4_m4m4(sphere->mat, sphere->mat, size_mat);
-
- copy_v4_v4(sphere->color, color);
- BLI_LINKS_PREPEND(DST.debug.spheres, sphere);
+ float size_mat[4][4];
+ DRWDebugSphere *sphere = MEM_mallocN(sizeof(DRWDebugSphere), "DRWDebugSphere");
+ /* Bake all transforms into a single matrix. */
+ scale_m4_fl(size_mat, radius);
+ copy_m4_m4(sphere->mat, g_modelmat);
+ translate_m4(sphere->mat, center[0], center[1], center[2]);
+ mul_m4_m4m4(sphere->mat, sphere->mat, size_mat);
+
+ copy_v4_v4(sphere->color, color);
+ BLI_LINKS_PREPEND(DST.debug.spheres, sphere);
}
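Since center and radius are baked into one instance matrix, a sphere call is a one-liner. Sketch with the same assumed Object pointer:

/* Radius 0.5 sphere at the object origin (obmat[3] is the translation column). */
DRW_debug_sphere(ob->obmat[3], 0.5f, (float[4]){0.0f, 1.0f, 0.0f, 1.0f});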
/* --------- Render --------- */
static void drw_debug_draw_lines(void)
{
- int count = BLI_linklist_count((LinkNode *)DST.debug.lines);
+ int count = BLI_linklist_count((LinkNode *)DST.debug.lines);
- if (count == 0) {
- return;
- }
+ if (count == 0) {
+ return;
+ }
- GPUVertFormat *vert_format = immVertexFormat();
- uint pos = GPU_vertformat_attr_add(vert_format, "pos", GPU_COMP_F32, 3, GPU_FETCH_FLOAT);
- uint col = GPU_vertformat_attr_add(vert_format, "color", GPU_COMP_F32, 4, GPU_FETCH_FLOAT);
+ GPUVertFormat *vert_format = immVertexFormat();
+ uint pos = GPU_vertformat_attr_add(vert_format, "pos", GPU_COMP_F32, 3, GPU_FETCH_FLOAT);
+ uint col = GPU_vertformat_attr_add(vert_format, "color", GPU_COMP_F32, 4, GPU_FETCH_FLOAT);
- immBindBuiltinProgram(GPU_SHADER_3D_FLAT_COLOR);
+ immBindBuiltinProgram(GPU_SHADER_3D_FLAT_COLOR);
- immBegin(GPU_PRIM_LINES, count * 2);
+ immBegin(GPU_PRIM_LINES, count * 2);
- while (DST.debug.lines) {
- void *next = DST.debug.lines->next;
+ while (DST.debug.lines) {
+ void *next = DST.debug.lines->next;
- immAttr4fv(col, DST.debug.lines->color);
- immVertex3fv(pos, DST.debug.lines->pos[0]);
+ immAttr4fv(col, DST.debug.lines->color);
+ immVertex3fv(pos, DST.debug.lines->pos[0]);
- immAttr4fv(col, DST.debug.lines->color);
- immVertex3fv(pos, DST.debug.lines->pos[1]);
+ immAttr4fv(col, DST.debug.lines->color);
+ immVertex3fv(pos, DST.debug.lines->pos[1]);
- MEM_freeN(DST.debug.lines);
- DST.debug.lines = next;
- }
- immEnd();
+ MEM_freeN(DST.debug.lines);
+ DST.debug.lines = next;
+ }
+ immEnd();
- immUnbindProgram();
+ immUnbindProgram();
}
static void drw_debug_draw_spheres(void)
{
- int count = BLI_linklist_count((LinkNode *)DST.debug.spheres);
+ int count = BLI_linklist_count((LinkNode *)DST.debug.spheres);
- if (count == 0) {
- return;
- }
+ if (count == 0) {
+ return;
+ }
- float one = 1.0f;
- GPUVertFormat vert_format = {0};
- uint mat = GPU_vertformat_attr_add(&vert_format, "InstanceModelMatrix", GPU_COMP_F32, 16, GPU_FETCH_FLOAT);
- uint col = GPU_vertformat_attr_add(&vert_format, "color", GPU_COMP_F32, 3, GPU_FETCH_FLOAT);
- uint siz = GPU_vertformat_attr_add(&vert_format, "size", GPU_COMP_F32, 1, GPU_FETCH_FLOAT);
+ float one = 1.0f;
+ GPUVertFormat vert_format = {0};
+ uint mat = GPU_vertformat_attr_add(
+ &vert_format, "InstanceModelMatrix", GPU_COMP_F32, 16, GPU_FETCH_FLOAT);
+ uint col = GPU_vertformat_attr_add(&vert_format, "color", GPU_COMP_F32, 3, GPU_FETCH_FLOAT);
+ uint siz = GPU_vertformat_attr_add(&vert_format, "size", GPU_COMP_F32, 1, GPU_FETCH_FLOAT);
- GPUVertBuf *inst_vbo = GPU_vertbuf_create_with_format(&vert_format);
+ GPUVertBuf *inst_vbo = GPU_vertbuf_create_with_format(&vert_format);
- GPU_vertbuf_data_alloc(inst_vbo, count);
+ GPU_vertbuf_data_alloc(inst_vbo, count);
- int v = 0;
- while (DST.debug.spheres) {
- void *next = DST.debug.spheres->next;
+ int v = 0;
+ while (DST.debug.spheres) {
+ void *next = DST.debug.spheres->next;
- GPU_vertbuf_attr_set(inst_vbo, mat, v, DST.debug.spheres->mat[0]);
- GPU_vertbuf_attr_set(inst_vbo, col, v, DST.debug.spheres->color);
- GPU_vertbuf_attr_set(inst_vbo, siz, v, &one);
- v++;
+ GPU_vertbuf_attr_set(inst_vbo, mat, v, DST.debug.spheres->mat[0]);
+ GPU_vertbuf_attr_set(inst_vbo, col, v, DST.debug.spheres->color);
+ GPU_vertbuf_attr_set(inst_vbo, siz, v, &one);
+ v++;
- MEM_freeN(DST.debug.spheres);
- DST.debug.spheres = next;
- }
+ MEM_freeN(DST.debug.spheres);
+ DST.debug.spheres = next;
+ }
- GPUBatch *empty_sphere = DRW_cache_empty_sphere_get();
+ GPUBatch *empty_sphere = DRW_cache_empty_sphere_get();
- GPUBatch *draw_batch = GPU_batch_create(GPU_PRIM_LINES, empty_sphere->verts[0], NULL);
- GPU_batch_instbuf_set(draw_batch, inst_vbo, true);
- GPU_batch_program_set_builtin(draw_batch, GPU_SHADER_INSTANCE_VARIYING_COLOR_VARIYING_SIZE);
+ GPUBatch *draw_batch = GPU_batch_create(GPU_PRIM_LINES, empty_sphere->verts[0], NULL);
+ GPU_batch_instbuf_set(draw_batch, inst_vbo, true);
+ GPU_batch_program_set_builtin(draw_batch, GPU_SHADER_INSTANCE_VARIYING_COLOR_VARIYING_SIZE);
- GPU_batch_draw(draw_batch);
- GPU_batch_discard(draw_batch);
+ GPU_batch_draw(draw_batch);
+ GPU_batch_discard(draw_batch);
}
void drw_debug_draw(void)
{
- drw_debug_draw_lines();
- drw_debug_draw_spheres();
+ drw_debug_draw_lines();
+ drw_debug_draw_spheres();
}
void drw_debug_init(void)
{
- DRW_debug_modelmat_reset();
+ DRW_debug_modelmat_reset();
}
diff --git a/source/blender/draw/intern/draw_hair.c b/source/blender/draw/intern/draw_hair.c
index 6c1e44ac8e7..b3081a44dfb 100644
--- a/source/blender/draw/intern/draw_hair.c
+++ b/source/blender/draw/intern/draw_hair.c
@@ -34,7 +34,6 @@
#include "BKE_anim.h"
-
#include "GPU_batch.h"
#include "GPU_shader.h"
@@ -45,16 +44,16 @@
#endif
typedef enum ParticleRefineShader {
- PART_REFINE_CATMULL_ROM = 0,
- PART_REFINE_MAX_SHADER,
+ PART_REFINE_CATMULL_ROM = 0,
+ PART_REFINE_MAX_SHADER,
} ParticleRefineShader;
#ifndef USE_TRANSFORM_FEEDBACK
typedef struct ParticleRefineCall {
- struct ParticleRefineCall *next;
- GPUVertBuf *vbo;
- DRWShadingGroup *shgrp;
- uint vert_len;
+ struct ParticleRefineCall *next;
+ GPUVertBuf *vbo;
+ DRWShadingGroup *shgrp;
+ uint vert_len;
} ParticleRefineCall;
static ParticleRefineCall *g_tf_calls = NULL;
@@ -72,248 +71,258 @@ extern char datatoc_gpu_shader_3D_smooth_color_frag_glsl[];
static GPUShader *hair_refine_shader_get(ParticleRefineShader sh)
{
- if (g_refine_shaders[sh]) {
- return g_refine_shaders[sh];
- }
+ if (g_refine_shaders[sh]) {
+ return g_refine_shaders[sh];
+ }
- char *vert_with_lib = BLI_string_joinN(datatoc_common_hair_lib_glsl, datatoc_common_hair_refine_vert_glsl);
+ char *vert_with_lib = BLI_string_joinN(datatoc_common_hair_lib_glsl,
+ datatoc_common_hair_refine_vert_glsl);
#ifdef USE_TRANSFORM_FEEDBACK
- const char *var_names[1] = {"finalColor"};
- g_refine_shaders[sh] = DRW_shader_create_with_transform_feedback(vert_with_lib, NULL, "#define HAIR_PHASE_SUBDIV\n",
- GPU_SHADER_TFB_POINTS, var_names, 1);
+ const char *var_names[1] = {"finalColor"};
+ g_refine_shaders[sh] = DRW_shader_create_with_transform_feedback(
+ vert_with_lib, NULL, "#define HAIR_PHASE_SUBDIV\n", GPU_SHADER_TFB_POINTS, var_names, 1);
#else
- g_refine_shaders[sh] = DRW_shader_create(
- vert_with_lib, NULL,
- datatoc_gpu_shader_3D_smooth_color_frag_glsl,
- "#define HAIR_PHASE_SUBDIV\n"
- "#define TF_WORKAROUND\n");
+ g_refine_shaders[sh] = DRW_shader_create(vert_with_lib,
+ NULL,
+ datatoc_gpu_shader_3D_smooth_color_frag_glsl,
+ "#define HAIR_PHASE_SUBDIV\n"
+ "#define TF_WORKAROUND\n");
#endif
- MEM_freeN(vert_with_lib);
+ MEM_freeN(vert_with_lib);
- return g_refine_shaders[sh];
+ return g_refine_shaders[sh];
}
void DRW_hair_init(void)
{
#ifdef USE_TRANSFORM_FEEDBACK
- g_tf_pass = DRW_pass_create("Update Hair Pass", DRW_STATE_TRANS_FEEDBACK);
+ g_tf_pass = DRW_pass_create("Update Hair Pass", DRW_STATE_TRANS_FEEDBACK);
#else
- g_tf_pass = DRW_pass_create("Update Hair Pass", DRW_STATE_WRITE_COLOR | DRW_STATE_POINT);
+ g_tf_pass = DRW_pass_create("Update Hair Pass", DRW_STATE_WRITE_COLOR | DRW_STATE_POINT);
#endif
}
typedef struct DRWHairInstanceData {
- DrawData dd;
+ DrawData dd;
- float mat[4][4];
+ float mat[4][4];
} DRWHairInstanceData;
-static DRWShadingGroup *drw_shgroup_create_hair_procedural_ex(
- Object *object, ParticleSystem *psys, ModifierData *md,
- DRWPass *hair_pass,
- struct GPUMaterial *gpu_mat, GPUShader *gpu_shader)
+static DRWShadingGroup *drw_shgroup_create_hair_procedural_ex(Object *object,
+ ParticleSystem *psys,
+ ModifierData *md,
+ DRWPass *hair_pass,
+ struct GPUMaterial *gpu_mat,
+ GPUShader *gpu_shader)
{
- /* TODO(fclem): Pass the scene as parameter */
- const DRWContextState *draw_ctx = DRW_context_state_get();
- Scene *scene = draw_ctx->scene;
- static float unit_mat[4][4] = {
- {1, 0, 0, 0},
- {0, 1, 0, 0},
- {0, 0, 1, 0},
- {0, 0, 0, 1},
- };
- float (*dupli_mat)[4];
- Object *dupli_parent = DRW_object_get_dupli_parent(object);
- DupliObject *dupli_object = DRW_object_get_dupli(object);
-
- int subdiv = scene->r.hair_subdiv;
- int thickness_res = (scene->r.hair_type == SCE_HAIR_SHAPE_STRAND) ? 1 : 2;
-
- ParticleHairCache *hair_cache;
- ParticleSettings *part = psys->part;
- bool need_ft_update = particles_ensure_procedural_data(object, psys, md, &hair_cache, subdiv, thickness_res);
-
- DRWShadingGroup *shgrp;
- if (gpu_mat) {
- shgrp = DRW_shgroup_material_create(gpu_mat, hair_pass);
- }
- else if (gpu_shader) {
- shgrp = DRW_shgroup_create(gpu_shader, hair_pass);
- }
- else {
- shgrp = NULL;
- BLI_assert(0);
- }
-
- /* TODO optimize this. Only bind the ones GPUMaterial needs. */
- for (int i = 0; i < hair_cache->num_uv_layers; ++i) {
- for (int n = 0; n < MAX_LAYER_NAME_CT && hair_cache->uv_layer_names[i][n][0] != '\0'; ++n) {
- DRW_shgroup_uniform_texture(shgrp, hair_cache->uv_layer_names[i][n], hair_cache->uv_tex[i]);
- }
- }
- for (int i = 0; i < hair_cache->num_col_layers; ++i) {
- for (int n = 0; n < MAX_LAYER_NAME_CT && hair_cache->col_layer_names[i][n][0] != '\0'; ++n) {
- DRW_shgroup_uniform_texture(shgrp, hair_cache->col_layer_names[i][n], hair_cache->col_tex[i]);
- }
- }
-
- if ((dupli_parent != NULL) && (dupli_object != NULL)) {
- DRWHairInstanceData *hair_inst_data = (DRWHairInstanceData *)DRW_drawdata_ensure(
- &object->id, (DrawEngineType *)&drw_shgroup_create_hair_procedural_ex,
- sizeof(DRWHairInstanceData), NULL, NULL);
- dupli_mat = hair_inst_data->mat;
- if (dupli_object->type & OB_DUPLICOLLECTION) {
- copy_m4_m4(dupli_mat, dupli_parent->obmat);
- }
- else {
- copy_m4_m4(dupli_mat, dupli_object->ob->obmat);
- invert_m4(dupli_mat);
- mul_m4_m4m4(dupli_mat, object->obmat, dupli_mat);
- }
- }
- else {
- dupli_mat = unit_mat;
- }
-
- DRW_shgroup_uniform_texture(shgrp, "hairPointBuffer", hair_cache->final[subdiv].proc_tex);
- DRW_shgroup_uniform_int(shgrp, "hairStrandsRes", &hair_cache->final[subdiv].strands_res, 1);
- DRW_shgroup_uniform_int_copy(shgrp, "hairThicknessRes", thickness_res);
- DRW_shgroup_uniform_float(shgrp, "hairRadShape", &part->shape, 1);
- DRW_shgroup_uniform_mat4(shgrp, "hairDupliMatrix", dupli_mat);
- DRW_shgroup_uniform_float_copy(shgrp, "hairRadRoot", part->rad_root * part->rad_scale * 0.5f);
- DRW_shgroup_uniform_float_copy(shgrp, "hairRadTip", part->rad_tip * part->rad_scale * 0.5f);
- DRW_shgroup_uniform_bool_copy(shgrp, "hairCloseTip", (part->shape_flag & PART_SHAPE_CLOSE_TIP) != 0);
- /* TODO(fclem): Until we have a better way to cull the hair and render with orco, bypass culling test. */
- DRW_shgroup_call_object_add_no_cull(shgrp, hair_cache->final[subdiv].proc_hairs[thickness_res - 1], object);
-
- /* Transform Feedback subdiv. */
- if (need_ft_update) {
- int final_points_len = hair_cache->final[subdiv].strands_res * hair_cache->strands_len;
- GPUShader *tf_shader = hair_refine_shader_get(PART_REFINE_CATMULL_ROM);
+ /* TODO(fclem): Pass the scene as parameter */
+ const DRWContextState *draw_ctx = DRW_context_state_get();
+ Scene *scene = draw_ctx->scene;
+ static float unit_mat[4][4] = {
+ {1, 0, 0, 0},
+ {0, 1, 0, 0},
+ {0, 0, 1, 0},
+ {0, 0, 0, 1},
+ };
+ float(*dupli_mat)[4];
+ Object *dupli_parent = DRW_object_get_dupli_parent(object);
+ DupliObject *dupli_object = DRW_object_get_dupli(object);
+
+ int subdiv = scene->r.hair_subdiv;
+ int thickness_res = (scene->r.hair_type == SCE_HAIR_SHAPE_STRAND) ? 1 : 2;
+
+ ParticleHairCache *hair_cache;
+ ParticleSettings *part = psys->part;
+ bool need_ft_update = particles_ensure_procedural_data(
+ object, psys, md, &hair_cache, subdiv, thickness_res);
+
+ DRWShadingGroup *shgrp;
+ if (gpu_mat) {
+ shgrp = DRW_shgroup_material_create(gpu_mat, hair_pass);
+ }
+ else if (gpu_shader) {
+ shgrp = DRW_shgroup_create(gpu_shader, hair_pass);
+ }
+ else {
+ shgrp = NULL;
+ BLI_assert(0);
+ }
+
+ /* TODO optimize this. Only bind the ones GPUMaterial needs. */
+ for (int i = 0; i < hair_cache->num_uv_layers; ++i) {
+ for (int n = 0; n < MAX_LAYER_NAME_CT && hair_cache->uv_layer_names[i][n][0] != '\0'; ++n) {
+ DRW_shgroup_uniform_texture(shgrp, hair_cache->uv_layer_names[i][n], hair_cache->uv_tex[i]);
+ }
+ }
+ for (int i = 0; i < hair_cache->num_col_layers; ++i) {
+ for (int n = 0; n < MAX_LAYER_NAME_CT && hair_cache->col_layer_names[i][n][0] != '\0'; ++n) {
+ DRW_shgroup_uniform_texture(
+ shgrp, hair_cache->col_layer_names[i][n], hair_cache->col_tex[i]);
+ }
+ }
+
+ if ((dupli_parent != NULL) && (dupli_object != NULL)) {
+ DRWHairInstanceData *hair_inst_data = (DRWHairInstanceData *)DRW_drawdata_ensure(
+ &object->id,
+ (DrawEngineType *)&drw_shgroup_create_hair_procedural_ex,
+ sizeof(DRWHairInstanceData),
+ NULL,
+ NULL);
+ dupli_mat = hair_inst_data->mat;
+ if (dupli_object->type & OB_DUPLICOLLECTION) {
+ copy_m4_m4(dupli_mat, dupli_parent->obmat);
+ }
+ else {
+ copy_m4_m4(dupli_mat, dupli_object->ob->obmat);
+ invert_m4(dupli_mat);
+ mul_m4_m4m4(dupli_mat, object->obmat, dupli_mat);
+ }
+ }
+ else {
+ dupli_mat = unit_mat;
+ }
+
+ DRW_shgroup_uniform_texture(shgrp, "hairPointBuffer", hair_cache->final[subdiv].proc_tex);
+ DRW_shgroup_uniform_int(shgrp, "hairStrandsRes", &hair_cache->final[subdiv].strands_res, 1);
+ DRW_shgroup_uniform_int_copy(shgrp, "hairThicknessRes", thickness_res);
+ DRW_shgroup_uniform_float(shgrp, "hairRadShape", &part->shape, 1);
+ DRW_shgroup_uniform_mat4(shgrp, "hairDupliMatrix", dupli_mat);
+ DRW_shgroup_uniform_float_copy(shgrp, "hairRadRoot", part->rad_root * part->rad_scale * 0.5f);
+ DRW_shgroup_uniform_float_copy(shgrp, "hairRadTip", part->rad_tip * part->rad_scale * 0.5f);
+ DRW_shgroup_uniform_bool_copy(
+ shgrp, "hairCloseTip", (part->shape_flag & PART_SHAPE_CLOSE_TIP) != 0);
+ /* TODO(fclem): Until we have a better way to cull the hair and render with orco, bypass culling test. */
+ DRW_shgroup_call_object_add_no_cull(
+ shgrp, hair_cache->final[subdiv].proc_hairs[thickness_res - 1], object);
+
+ /* Transform Feedback subdiv. */
+ if (need_ft_update) {
+ int final_points_len = hair_cache->final[subdiv].strands_res * hair_cache->strands_len;
+ GPUShader *tf_shader = hair_refine_shader_get(PART_REFINE_CATMULL_ROM);
#ifdef USE_TRANSFORM_FEEDBACK
- DRWShadingGroup *tf_shgrp = DRW_shgroup_transform_feedback_create(tf_shader, g_tf_pass,
- hair_cache->final[subdiv].proc_buf);
+ DRWShadingGroup *tf_shgrp = DRW_shgroup_transform_feedback_create(
+ tf_shader, g_tf_pass, hair_cache->final[subdiv].proc_buf);
#else
- DRWShadingGroup *tf_shgrp = DRW_shgroup_create(tf_shader, g_tf_pass);
-
- ParticleRefineCall *pr_call = MEM_mallocN(sizeof(*pr_call), __func__);
- pr_call->next = g_tf_calls;
- pr_call->vbo = hair_cache->final[subdiv].proc_buf;
- pr_call->shgrp = tf_shgrp;
- pr_call->vert_len = final_points_len;
- g_tf_calls = pr_call;
- DRW_shgroup_uniform_int(tf_shgrp, "targetHeight", &g_tf_target_height, 1);
- DRW_shgroup_uniform_int(tf_shgrp, "targetWidth", &g_tf_target_width, 1);
- DRW_shgroup_uniform_int(tf_shgrp, "idOffset", &g_tf_id_offset, 1);
+ DRWShadingGroup *tf_shgrp = DRW_shgroup_create(tf_shader, g_tf_pass);
+
+ ParticleRefineCall *pr_call = MEM_mallocN(sizeof(*pr_call), __func__);
+ pr_call->next = g_tf_calls;
+ pr_call->vbo = hair_cache->final[subdiv].proc_buf;
+ pr_call->shgrp = tf_shgrp;
+ pr_call->vert_len = final_points_len;
+ g_tf_calls = pr_call;
+ DRW_shgroup_uniform_int(tf_shgrp, "targetHeight", &g_tf_target_height, 1);
+ DRW_shgroup_uniform_int(tf_shgrp, "targetWidth", &g_tf_target_width, 1);
+ DRW_shgroup_uniform_int(tf_shgrp, "idOffset", &g_tf_id_offset, 1);
#endif
- DRW_shgroup_uniform_texture(tf_shgrp, "hairPointBuffer", hair_cache->point_tex);
- DRW_shgroup_uniform_texture(tf_shgrp, "hairStrandBuffer", hair_cache->strand_tex);
- DRW_shgroup_uniform_texture(tf_shgrp, "hairStrandSegBuffer", hair_cache->strand_seg_tex);
- DRW_shgroup_uniform_int(tf_shgrp, "hairStrandsRes", &hair_cache->final[subdiv].strands_res, 1);
- DRW_shgroup_call_procedural_points_add(tf_shgrp, final_points_len, NULL);
- }
+ DRW_shgroup_uniform_texture(tf_shgrp, "hairPointBuffer", hair_cache->point_tex);
+ DRW_shgroup_uniform_texture(tf_shgrp, "hairStrandBuffer", hair_cache->strand_tex);
+ DRW_shgroup_uniform_texture(tf_shgrp, "hairStrandSegBuffer", hair_cache->strand_seg_tex);
+ DRW_shgroup_uniform_int(tf_shgrp, "hairStrandsRes", &hair_cache->final[subdiv].strands_res, 1);
+ DRW_shgroup_call_procedural_points_add(tf_shgrp, final_points_len, NULL);
+ }
- return shgrp;
+ return shgrp;
}
DRWShadingGroup *DRW_shgroup_hair_create(
- Object *object, ParticleSystem *psys, ModifierData *md,
- DRWPass *hair_pass,
- GPUShader *shader)
+ Object *object, ParticleSystem *psys, ModifierData *md, DRWPass *hair_pass, GPUShader *shader)
{
- return drw_shgroup_create_hair_procedural_ex(object, psys, md, hair_pass, NULL, shader);
+ return drw_shgroup_create_hair_procedural_ex(object, psys, md, hair_pass, NULL, shader);
}
-DRWShadingGroup *DRW_shgroup_material_hair_create(
- Object *object, ParticleSystem *psys, ModifierData *md,
- DRWPass *hair_pass,
- struct GPUMaterial *material)
+DRWShadingGroup *DRW_shgroup_material_hair_create(Object *object,
+ ParticleSystem *psys,
+ ModifierData *md,
+ DRWPass *hair_pass,
+ struct GPUMaterial *material)
{
- return drw_shgroup_create_hair_procedural_ex(object, psys, md, hair_pass, material, NULL);
+ return drw_shgroup_create_hair_procedural_ex(object, psys, md, hair_pass, material, NULL);
}
void DRW_hair_update(void)
{
#ifndef USE_TRANSFORM_FEEDBACK
- /**
- * Workaround for transform feedback not working on macOS.
- * On some systems it crashes (see T58489) and on others it renders garbage (see T60171).
- *
- * So instead of using transform feedback we render to a texture,
- * read back the result to system memory and re-upload it as VBO data.
- * It is really not ideal performance-wise, but it is the simplest
- * and the most local workaround that still uses the power of the GPU.
- */
-
- if (g_tf_calls == NULL) {
- return;
- }
-
- /* Search ideal buffer size. */
- uint max_size = 0;
- for (ParticleRefineCall *pr_call = g_tf_calls; pr_call; pr_call = pr_call->next) {
- max_size = max_ii(max_size, pr_call->vert_len);
- }
-
- /* Create target Texture / Framebuffer */
- /* Don't use max size as it can be really heavy and fail.
- * Do chunks of maximum 2048 * 2048 hair points. */
- int width = 2048;
- int height = min_ii(width, 1 + max_size / width);
- GPUTexture *tex = DRW_texture_pool_query_2d(width, height, GPU_RGBA32F, (void *)DRW_hair_update);
- g_tf_target_height = height;
- g_tf_target_width = width;
-
- GPUFrameBuffer *fb = NULL;
- GPU_framebuffer_ensure_config(&fb, {
- GPU_ATTACHMENT_NONE,
- GPU_ATTACHMENT_TEXTURE(tex),
- });
-
- float *data = MEM_mallocN(sizeof(float) * 4 * width * height, "tf fallback buffer");
-
- GPU_framebuffer_bind(fb);
- while (g_tf_calls != NULL) {
- ParticleRefineCall *pr_call = g_tf_calls;
- g_tf_calls = g_tf_calls->next;
-
- g_tf_id_offset = 0;
- while (pr_call->vert_len > 0) {
- int max_read_px_len = min_ii(width * height, pr_call->vert_len);
-
- DRW_draw_pass_subset(g_tf_pass, pr_call->shgrp, pr_call->shgrp);
- /* Readback result to main memory. */
- GPU_framebuffer_read_color(fb, 0, 0, width, height, 4, 0, data);
- /* Upload back to VBO. */
- GPU_vertbuf_use(pr_call->vbo);
- glBufferSubData(GL_ARRAY_BUFFER,
- sizeof(float) * 4 * g_tf_id_offset,
- sizeof(float) * 4 * max_read_px_len,
- data);
-
- g_tf_id_offset += max_read_px_len;
- pr_call->vert_len -= max_read_px_len;
- }
-
- MEM_freeN(pr_call);
- }
-
- MEM_freeN(data);
- GPU_framebuffer_free(fb);
+ /**
+ * Workaround for transform feedback not working on macOS.
+ * On some systems it crashes (see T58489) and on others it renders garbage (see T60171).
+ *
+ * So instead of using transform feedback we render to a texture,
+ * read back the result to system memory and re-upload it as VBO data.
+ * It is really not ideal performance-wise, but it is the simplest
+ * and the most local workaround that still uses the power of the GPU.
+ */
+
+ if (g_tf_calls == NULL) {
+ return;
+ }
+
+ /* Search ideal buffer size. */
+ uint max_size = 0;
+ for (ParticleRefineCall *pr_call = g_tf_calls; pr_call; pr_call = pr_call->next) {
+ max_size = max_ii(max_size, pr_call->vert_len);
+ }
+
+ /* Create target Texture / Framebuffer */
+ /* Don't use max size as it can be really heavy and fail.
+ * Do chunks of maximum 2048 * 2048 hair points. */
+ int width = 2048;
+ int height = min_ii(width, 1 + max_size / width);
+ GPUTexture *tex = DRW_texture_pool_query_2d(width, height, GPU_RGBA32F, (void *)DRW_hair_update);
+ g_tf_target_height = height;
+ g_tf_target_width = width;
+
+ GPUFrameBuffer *fb = NULL;
+ GPU_framebuffer_ensure_config(&fb,
+ {
+ GPU_ATTACHMENT_NONE,
+ GPU_ATTACHMENT_TEXTURE(tex),
+ });
+
+ float *data = MEM_mallocN(sizeof(float) * 4 * width * height, "tf fallback buffer");
+
+ GPU_framebuffer_bind(fb);
+ while (g_tf_calls != NULL) {
+ ParticleRefineCall *pr_call = g_tf_calls;
+ g_tf_calls = g_tf_calls->next;
+
+ g_tf_id_offset = 0;
+ while (pr_call->vert_len > 0) {
+ int max_read_px_len = min_ii(width * height, pr_call->vert_len);
+
+ DRW_draw_pass_subset(g_tf_pass, pr_call->shgrp, pr_call->shgrp);
+ /* Readback result to main memory. */
+ GPU_framebuffer_read_color(fb, 0, 0, width, height, 4, 0, data);
+ /* Upload back to VBO. */
+ GPU_vertbuf_use(pr_call->vbo);
+ glBufferSubData(GL_ARRAY_BUFFER,
+ sizeof(float) * 4 * g_tf_id_offset,
+ sizeof(float) * 4 * max_read_px_len,
+ data);
+
+ g_tf_id_offset += max_read_px_len;
+ pr_call->vert_len -= max_read_px_len;
+ }
+
+ MEM_freeN(pr_call);
+ }
+
+ MEM_freeN(data);
+ GPU_framebuffer_free(fb);
#else
- /* TODO(fclem): replace by compute shader. */
- /* Just render using transform feedback. */
- DRW_draw_pass(g_tf_pass);
+ /* TODO(fclem): replace by compute shader. */
+ /* Just render using transform feedback. */
+ DRW_draw_pass(g_tf_pass);
#endif
}
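To make the chunking in the fallback path concrete (illustrative numbers, not from the patch): refining 1,000,000 points with width = 2048 gives height = min_ii(2048, 1 + 1000000 / 2048) = 489, i.e. a 2048 x 489 RGBA32F target, and the whole result fits in a single read-back chunk since 2048 * 489 >= 1000000:

uint max_size = 1000000;                            /* Example total point count. */
int width = 2048;
int height = min_ii(width, 1 + max_size / width);   /* 489 */
size_t bytes = sizeof(float) * 4 * width * height;  /* ~15.3 MiB staging buffer. */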
void DRW_hair_free(void)
{
- for (int i = 0; i < PART_REFINE_MAX_SHADER; ++i) {
- DRW_SHADER_FREE_SAFE(g_refine_shaders[i]);
- }
+ for (int i = 0; i < PART_REFINE_MAX_SHADER; ++i) {
+ DRW_SHADER_FREE_SAFE(g_refine_shaders[i]);
+ }
}
diff --git a/source/blender/draw/intern/draw_hair_private.h b/source/blender/draw/intern/draw_hair_private.h
index 8f7cb1fe949..72c89832d3d 100644
--- a/source/blender/draw/intern/draw_hair_private.h
+++ b/source/blender/draw/intern/draw_hair_private.h
@@ -24,10 +24,10 @@
#ifndef __DRAW_HAIR_PRIVATE_H__
#define __DRAW_HAIR_PRIVATE_H__
-#define MAX_LAYER_NAME_CT 3 /* u0123456789, u, a0123456789 */
-#define MAX_LAYER_NAME_LEN DECIMAL_DIGITS_BOUND(uint) + 2
-#define MAX_THICKRES 2 /* see eHairType */
-#define MAX_HAIR_SUBDIV 4 /* see hair_subdiv rna */
+#define MAX_LAYER_NAME_CT 3 /* u0123456789, u, a0123456789 */
+#define MAX_LAYER_NAME_LEN DECIMAL_DIGITS_BOUND(uint) + 2
+#define MAX_THICKRES 2 /* see eHairType */
+#define MAX_HAIR_SUBDIV 4 /* see hair_subdiv rna */
struct ModifierData;
struct Object;
@@ -35,56 +35,55 @@ struct ParticleHairCache;
struct ParticleSystem;
typedef struct ParticleHairFinalCache {
- /* Output of the subdivision stage: vertex buffer sized to the subdiv level. */
- GPUVertBuf *proc_buf;
- GPUTexture *proc_tex;
+ /* Output of the subdivision stage: vertex buffer sized to the subdiv level. */
+ GPUVertBuf *proc_buf;
+ GPUTexture *proc_tex;
- /* Just contains a huge index buffer used to draw the final hair. */
- GPUBatch *proc_hairs[MAX_THICKRES];
+ /* Just contains a huge index buffer used to draw the final hair. */
+ GPUBatch *proc_hairs[MAX_THICKRES];
- int strands_res; /* points per hair, at least 2 */
+ int strands_res; /* points per hair, at least 2 */
} ParticleHairFinalCache;
typedef struct ParticleHairCache {
- GPUVertBuf *pos;
- GPUIndexBuf *indices;
- GPUBatch *hairs;
+ GPUVertBuf *pos;
+ GPUIndexBuf *indices;
+ GPUBatch *hairs;
- /* Hair Procedural display: Interpolation is done on the GPU. */
- GPUVertBuf *proc_point_buf; /* Input control points */
- GPUTexture *point_tex;
+ /* Hair Procedural display: Interpolation is done on the GPU. */
+ GPUVertBuf *proc_point_buf; /* Input control points */
+ GPUTexture *point_tex;
- /** Info on control-point strands (segment count and base index). */
- GPUVertBuf *proc_strand_buf;
- GPUTexture *strand_tex;
+ /** Info on control-point strands (segment count and base index). */
+ GPUVertBuf *proc_strand_buf;
+ GPUTexture *strand_tex;
- GPUVertBuf *proc_strand_seg_buf;
- GPUTexture *strand_seg_tex;
+ GPUVertBuf *proc_strand_seg_buf;
+ GPUTexture *strand_seg_tex;
- GPUVertBuf *proc_uv_buf[MAX_MTFACE];
- GPUTexture *uv_tex[MAX_MTFACE];
- char uv_layer_names[MAX_MTFACE][MAX_LAYER_NAME_CT][MAX_LAYER_NAME_LEN];
+ GPUVertBuf *proc_uv_buf[MAX_MTFACE];
+ GPUTexture *uv_tex[MAX_MTFACE];
+ char uv_layer_names[MAX_MTFACE][MAX_LAYER_NAME_CT][MAX_LAYER_NAME_LEN];
- GPUVertBuf *proc_col_buf[MAX_MCOL];
- GPUTexture *col_tex[MAX_MCOL];
- char col_layer_names[MAX_MCOL][MAX_LAYER_NAME_CT][MAX_LAYER_NAME_LEN];
+ GPUVertBuf *proc_col_buf[MAX_MCOL];
+ GPUTexture *col_tex[MAX_MCOL];
+ char col_layer_names[MAX_MCOL][MAX_LAYER_NAME_CT][MAX_LAYER_NAME_LEN];
- int num_uv_layers;
- int num_col_layers;
+ int num_uv_layers;
+ int num_col_layers;
- ParticleHairFinalCache final[MAX_HAIR_SUBDIV];
+ ParticleHairFinalCache final[MAX_HAIR_SUBDIV];
- int strands_len;
- int elems_len;
- int point_len;
+ int strands_len;
+ int elems_len;
+ int point_len;
} ParticleHairCache;
-bool particles_ensure_procedural_data(
- struct Object *object,
- struct ParticleSystem *psys,
- struct ModifierData *md,
- struct ParticleHairCache **r_hair_cache,
- int subdiv,
- int thickness_res);
+bool particles_ensure_procedural_data(struct Object *object,
+ struct ParticleSystem *psys,
+ struct ModifierData *md,
+ struct ParticleHairCache **r_hair_cache,
+ int subdiv,
+ int thickness_res);
#endif /* __DRAW_HAIR_PRIVATE_H__ */
diff --git a/source/blender/draw/intern/draw_instance_data.c b/source/blender/draw/intern/draw_instance_data.c
index 718518643e0..27e03a3495a 100644
--- a/source/blender/draw/intern/draw_instance_data.c
+++ b/source/blender/draw/intern/draw_instance_data.c
@@ -41,44 +41,44 @@
#define BUFFER_VERTS_CHUNK 32
typedef struct DRWBatchingBuffer {
- struct DRWShadingGroup *shgroup; /* Link back to the owning shGroup. Also tells if it's used */
- GPUVertFormat *format; /* Identifier. */
- GPUVertBuf *vert; /* GPUVertBuf contained in the GPUBatch. */
- GPUBatch *batch; /* GPUBatch containing the GPUVertBuf. */
+ struct DRWShadingGroup *shgroup; /* Link back to the owning shGroup. Also tells if it's used */
+ GPUVertFormat *format; /* Identifier. */
+ GPUVertBuf *vert; /* GPUVertBuf contained in the GPUBatch. */
+ GPUBatch *batch; /* GPUBatch containing the GPUVertBuf. */
} DRWBatchingBuffer;
typedef struct DRWInstancingBuffer {
- struct DRWShadingGroup *shgroup; /* Link back to the owning shGroup. Also tells if it's used */
- GPUVertFormat *format; /* Identifier. */
- GPUBatch *instance; /* Identifier. */
- GPUVertBuf *vert; /* GPUVertBuf contained in the GPUBatch. */
- GPUBatch *batch; /* GPUBatch containing the GPUVertBuf. */
+ struct DRWShadingGroup *shgroup; /* Link back to the owning shGroup. Also tells if it's used */
+ GPUVertFormat *format; /* Identifier. */
+ GPUBatch *instance; /* Identifier. */
+ GPUVertBuf *vert; /* GPUVertBuf contained in the GPUBatch. */
+ GPUBatch *batch; /* GPUBatch containing the GPUVertBuf. */
} DRWInstancingBuffer;
typedef struct DRWInstanceChunk {
- size_t cursor; /* Offset to the next instance data. */
- size_t alloc_size; /* Number of DRWBatchingBuffer/DRWInstancingBuffer alloc'd in bbufs/ibufs. */
- union {
- DRWBatchingBuffer *bbufs;
- DRWInstancingBuffer *ibufs;
- };
+ size_t cursor; /* Offset to the next instance data. */
+ size_t alloc_size; /* Number of DRWBatchingBuffer/DRWInstancingBuffer alloc'd in bbufs/ibufs. */
+ union {
+ DRWBatchingBuffer *bbufs;
+ DRWInstancingBuffer *ibufs;
+ };
} DRWInstanceChunk;
struct DRWInstanceData {
- struct DRWInstanceData *next;
- bool used; /* If this data is used or not. */
- size_t data_size; /* Size of one instance data. */
- BLI_mempool *mempool;
+ struct DRWInstanceData *next;
+ bool used; /* If this data is used or not. */
+ size_t data_size; /* Size of one instance data. */
+ BLI_mempool *mempool;
};
struct DRWInstanceDataList {
- struct DRWInstanceDataList *next, *prev;
- /* Linked lists for all possible data pool sizes */
- DRWInstanceData *idata_head[MAX_INSTANCE_DATA_SIZE];
- DRWInstanceData *idata_tail[MAX_INSTANCE_DATA_SIZE];
+ struct DRWInstanceDataList *next, *prev;
+ /* Linked lists for all possible data pool sizes */
+ DRWInstanceData *idata_head[MAX_INSTANCE_DATA_SIZE];
+ DRWInstanceData *idata_tail[MAX_INSTANCE_DATA_SIZE];
- DRWInstanceChunk instancing;
- DRWInstanceChunk batching;
+ DRWInstanceChunk instancing;
+ DRWInstanceChunk batching;
};
static ListBase g_idatalists = {NULL, NULL};
@@ -97,184 +97,192 @@ static ListBase g_idatalists = {NULL, NULL};
*/
static void instance_batch_free(GPUBatch *batch, void *UNUSED(user_data))
{
- if (batch->verts[0] == NULL) {
- /** XXX This is a false positive case.
- * The batch has been requested but not initialized yet,
- * and there is a chance that it might become initialized.
- */
- return;
- }
- /* Free all batches that have the same key before they are reused. */
- /* TODO: Make it thread safe! Batch freeing can happen from another thread. */
- /* XXX we need to iterate over all idatalists unless we make some smart
- * data structure to store the locations to update. */
- for (DRWInstanceDataList *idatalist = g_idatalists.first; idatalist; idatalist = idatalist->next) {
- DRWInstancingBuffer *ibuf = idatalist->instancing.ibufs;
- for (int i = 0; i < idatalist->instancing.alloc_size; i++, ibuf++) {
- if (ibuf->instance == batch) {
- BLI_assert(ibuf->shgroup == NULL); /* Make sure it has no other users. */
- GPU_VERTBUF_DISCARD_SAFE(ibuf->vert);
- GPU_BATCH_DISCARD_SAFE(ibuf->batch);
- /* Tag as not allocated. */
- ibuf->format = NULL;
- }
- }
- }
+ if (batch->verts[0] == NULL) {
+ /** XXX This is a false positive case.
+ * The batch has been requested but not initialized yet,
+ * and there is a chance that it might become initialized.
+ */
+ return;
+ }
+ /* Free all batches that have the same key before they are reused. */
+ /* TODO: Make it thread safe! Batch freeing can happen from another thread. */
+ /* XXX we need to iterate over all idatalists unless we make some smart
+ * data structure to store the locations to update. */
+ for (DRWInstanceDataList *idatalist = g_idatalists.first; idatalist;
+ idatalist = idatalist->next) {
+ DRWInstancingBuffer *ibuf = idatalist->instancing.ibufs;
+ for (int i = 0; i < idatalist->instancing.alloc_size; i++, ibuf++) {
+ if (ibuf->instance == batch) {
+ BLI_assert(ibuf->shgroup == NULL); /* Make sure it has no other users. */
+ GPU_VERTBUF_DISCARD_SAFE(ibuf->vert);
+ GPU_BATCH_DISCARD_SAFE(ibuf->batch);
+ /* Tag as not allocated. */
+ ibuf->format = NULL;
+ }
+ }
+ }
}
-void DRW_batching_buffer_request(
- DRWInstanceDataList *idatalist, GPUVertFormat *format, GPUPrimType type, struct DRWShadingGroup *shgroup,
- GPUBatch **r_batch, GPUVertBuf **r_vert)
+void DRW_batching_buffer_request(DRWInstanceDataList *idatalist,
+ GPUVertFormat *format,
+ GPUPrimType type,
+ struct DRWShadingGroup *shgroup,
+ GPUBatch **r_batch,
+ GPUVertBuf **r_vert)
{
- DRWInstanceChunk *chunk = &idatalist->batching;
- DRWBatchingBuffer *bbuf = idatalist->batching.bbufs;
- BLI_assert(format);
- /* Search for an unused batch. */
- for (int i = 0; i < idatalist->batching.alloc_size; i++, bbuf++) {
- if (bbuf->shgroup == NULL) {
- if (bbuf->format == format) {
- bbuf->shgroup = shgroup;
- *r_batch = bbuf->batch;
- *r_vert = bbuf->vert;
- return;
- }
- }
- }
- int new_id = 0; /* Find insertion point. */
- for (; new_id < chunk->alloc_size; ++new_id) {
- if (chunk->bbufs[new_id].format == NULL) {
- break;
- }
- }
- /* If there is no batch left. Allocate more. */
- if (new_id == chunk->alloc_size) {
- new_id = chunk->alloc_size;
- chunk->alloc_size += BUFFER_CHUNK_SIZE;
- chunk->bbufs = MEM_reallocN(chunk->bbufs, chunk->alloc_size * sizeof(DRWBatchingBuffer));
- memset(chunk->bbufs + new_id, 0, sizeof(DRWBatchingBuffer) * BUFFER_CHUNK_SIZE);
- }
- /* Create the batch. */
- bbuf = chunk->bbufs + new_id;
- bbuf->vert = *r_vert = GPU_vertbuf_create_with_format_ex(format, GPU_USAGE_DYNAMIC);
- bbuf->batch = *r_batch = GPU_batch_create_ex(type, bbuf->vert, NULL, 0);
- bbuf->format = format;
- bbuf->shgroup = shgroup;
- GPU_vertbuf_data_alloc(*r_vert, BUFFER_VERTS_CHUNK);
+ DRWInstanceChunk *chunk = &idatalist->batching;
+ DRWBatchingBuffer *bbuf = idatalist->batching.bbufs;
+ BLI_assert(format);
+ /* Search for an unused batch. */
+ for (int i = 0; i < idatalist->batching.alloc_size; i++, bbuf++) {
+ if (bbuf->shgroup == NULL) {
+ if (bbuf->format == format) {
+ bbuf->shgroup = shgroup;
+ *r_batch = bbuf->batch;
+ *r_vert = bbuf->vert;
+ return;
+ }
+ }
+ }
+ int new_id = 0; /* Find insertion point. */
+ for (; new_id < chunk->alloc_size; ++new_id) {
+ if (chunk->bbufs[new_id].format == NULL) {
+ break;
+ }
+ }
+ /* If there is no batch left. Allocate more. */
+ if (new_id == chunk->alloc_size) {
+ new_id = chunk->alloc_size;
+ chunk->alloc_size += BUFFER_CHUNK_SIZE;
+ chunk->bbufs = MEM_reallocN(chunk->bbufs, chunk->alloc_size * sizeof(DRWBatchingBuffer));
+ memset(chunk->bbufs + new_id, 0, sizeof(DRWBatchingBuffer) * BUFFER_CHUNK_SIZE);
+ }
+ /* Create the batch. */
+ bbuf = chunk->bbufs + new_id;
+ bbuf->vert = *r_vert = GPU_vertbuf_create_with_format_ex(format, GPU_USAGE_DYNAMIC);
+ bbuf->batch = *r_batch = GPU_batch_create_ex(type, bbuf->vert, NULL, 0);
+ bbuf->format = format;
+ bbuf->shgroup = shgroup;
+ GPU_vertbuf_data_alloc(*r_vert, BUFFER_VERTS_CHUNK);
}
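Callers use this with a static format, so the format's pointer identity can serve as the cache key. A sketch assuming `idatalist` and `shgroup` are in scope (the attr_len field name follows the 2.8 GPUVertFormat):

static GPUVertFormat format = {0};
if (format.attr_len == 0) {
  GPU_vertformat_attr_add(&format, "pos", GPU_COMP_F32, 3, GPU_FETCH_FLOAT);
}
GPUBatch *batch;
GPUVertBuf *vbo;
DRW_batching_buffer_request(idatalist, &format, GPU_PRIM_LINES, shgroup, &batch, &vbo);
/* Then fill `vbo` per vertex, e.g. with GPU_vertbuf_attr_set(). */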
-void DRW_instancing_buffer_request(
- DRWInstanceDataList *idatalist, GPUVertFormat *format, GPUBatch *instance, struct DRWShadingGroup *shgroup,
- GPUBatch **r_batch, GPUVertBuf **r_vert)
+void DRW_instancing_buffer_request(DRWInstanceDataList *idatalist,
+ GPUVertFormat *format,
+ GPUBatch *instance,
+ struct DRWShadingGroup *shgroup,
+ GPUBatch **r_batch,
+ GPUVertBuf **r_vert)
{
- DRWInstanceChunk *chunk = &idatalist->instancing;
- DRWInstancingBuffer *ibuf = idatalist->instancing.ibufs;
- BLI_assert(format);
- /* Search for an unused batch. */
- for (int i = 0; i < idatalist->instancing.alloc_size; i++, ibuf++) {
- if (ibuf->shgroup == NULL) {
- if (ibuf->format == format) {
- if (ibuf->instance == instance) {
- ibuf->shgroup = shgroup;
- *r_batch = ibuf->batch;
- *r_vert = ibuf->vert;
- return;
- }
- }
- }
- }
- int new_id = 0; /* Find insertion point. */
- for (; new_id < chunk->alloc_size; ++new_id) {
- if (chunk->ibufs[new_id].format == NULL) {
- break;
- }
- }
- /* If there is no batch left. Allocate more. */
- if (new_id == chunk->alloc_size) {
- new_id = chunk->alloc_size;
- chunk->alloc_size += BUFFER_CHUNK_SIZE;
- chunk->ibufs = MEM_reallocN(chunk->ibufs, chunk->alloc_size * sizeof(DRWInstancingBuffer));
- memset(chunk->ibufs + new_id, 0, sizeof(DRWInstancingBuffer) * BUFFER_CHUNK_SIZE);
- }
- /* Create the batch. */
- ibuf = chunk->ibufs + new_id;
- ibuf->vert = *r_vert = GPU_vertbuf_create_with_format_ex(format, GPU_USAGE_DYNAMIC);
- ibuf->batch = *r_batch = MEM_callocN(sizeof(GPUBatch), "GPUBatch");
- ibuf->format = format;
- ibuf->shgroup = shgroup;
- ibuf->instance = instance;
- GPU_vertbuf_data_alloc(*r_vert, BUFFER_VERTS_CHUNK);
- /* Make sure to free this ibuf if the instance batch gets free. */
- GPU_batch_callback_free_set(instance, &instance_batch_free, NULL);
+ DRWInstanceChunk *chunk = &idatalist->instancing;
+ DRWInstancingBuffer *ibuf = idatalist->instancing.ibufs;
+ BLI_assert(format);
+ /* Search for an unused batch. */
+ for (int i = 0; i < idatalist->instancing.alloc_size; i++, ibuf++) {
+ if (ibuf->shgroup == NULL) {
+ if (ibuf->format == format) {
+ if (ibuf->instance == instance) {
+ ibuf->shgroup = shgroup;
+ *r_batch = ibuf->batch;
+ *r_vert = ibuf->vert;
+ return;
+ }
+ }
+ }
+ }
+ int new_id = 0; /* Find insertion point. */
+ for (; new_id < chunk->alloc_size; ++new_id) {
+ if (chunk->ibufs[new_id].format == NULL) {
+ break;
+ }
+ }
+ /* If there is no batch left. Allocate more. */
+ if (new_id == chunk->alloc_size) {
+ new_id = chunk->alloc_size;
+ chunk->alloc_size += BUFFER_CHUNK_SIZE;
+ chunk->ibufs = MEM_reallocN(chunk->ibufs, chunk->alloc_size * sizeof(DRWInstancingBuffer));
+ memset(chunk->ibufs + new_id, 0, sizeof(DRWInstancingBuffer) * BUFFER_CHUNK_SIZE);
+ }
+ /* Create the batch. */
+ ibuf = chunk->ibufs + new_id;
+ ibuf->vert = *r_vert = GPU_vertbuf_create_with_format_ex(format, GPU_USAGE_DYNAMIC);
+ ibuf->batch = *r_batch = MEM_callocN(sizeof(GPUBatch), "GPUBatch");
+ ibuf->format = format;
+ ibuf->shgroup = shgroup;
+ ibuf->instance = instance;
+ GPU_vertbuf_data_alloc(*r_vert, BUFFER_VERTS_CHUNK);
+ /* Make sure to free this ibuf if the instance batch gets free. */
+ GPU_batch_callback_free_set(instance, &instance_batch_free, NULL);
}
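The instancing variant is keyed on both the format and the instanced geometry; the free callback registered on the last line is what lets instance_batch_free() above invalidate every cached copy when the source batch is discarded. A sketch under the same assumptions, with DRW_cache_sphere_get() as an example geometry source:

GPUBatch *inst_batch;
GPUVertBuf *inst_vbo;
DRW_instancing_buffer_request(
    idatalist, &format, DRW_cache_sphere_get(), shgroup, &inst_batch, &inst_vbo);
/* Each vertex written to `inst_vbo` becomes one instance of the sphere. */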
void DRW_instance_buffer_finish(DRWInstanceDataList *idatalist)
{
- size_t realloc_size = 1; /* Avoid 0 size realloc. */
- /* Resize down buffers in use and send data to GPU & free unused buffers. */
- DRWInstanceChunk *batching = &idatalist->batching;
- DRWBatchingBuffer *bbuf = batching->bbufs;
- for (int i = 0; i < batching->alloc_size; i++, bbuf++) {
- if (bbuf->shgroup != NULL) {
- realloc_size = i + 1;
- uint vert_len = DRW_shgroup_get_instance_count(bbuf->shgroup);
- vert_len += (vert_len == 0) ? 1 : 0; /* Do not realloc to 0 size buffer */
- if (vert_len + BUFFER_VERTS_CHUNK <= bbuf->vert->vertex_len) {
- uint size = vert_len + BUFFER_VERTS_CHUNK - 1;
- size = size - size % BUFFER_VERTS_CHUNK;
- GPU_vertbuf_data_resize(bbuf->vert, size);
- }
- GPU_vertbuf_use(bbuf->vert); /* Send data. */
- bbuf->shgroup = NULL; /* Set as non used for the next round. */
- }
- else {
- GPU_VERTBUF_DISCARD_SAFE(bbuf->vert);
- GPU_BATCH_DISCARD_SAFE(bbuf->batch);
- bbuf->format = NULL; /* Tag as not allocated. */
- }
- }
- /* Rounding up to nearest chunk size. */
- realloc_size += BUFFER_CHUNK_SIZE - 1;
- realloc_size -= realloc_size % BUFFER_CHUNK_SIZE;
- /* Resize down if necessary. */
- if (realloc_size < batching->alloc_size) {
- batching->alloc_size = realloc_size;
- batching->ibufs = MEM_reallocN(batching->ibufs, realloc_size * sizeof(DRWBatchingBuffer));
- }
-
- realloc_size = 1;
- /* Resize down buffers in use and send data to GPU & free unused buffers. */
- DRWInstanceChunk *instancing = &idatalist->instancing;
- DRWInstancingBuffer *ibuf = instancing->ibufs;
- for (int i = 0; i < instancing->alloc_size; i++, ibuf++) {
- if (ibuf->shgroup != NULL) {
- realloc_size = i + 1;
- uint vert_len = DRW_shgroup_get_instance_count(ibuf->shgroup);
- vert_len += (vert_len == 0) ? 1 : 0; /* Do not realloc to 0 size buffer */
- if (vert_len + BUFFER_VERTS_CHUNK <= ibuf->vert->vertex_len) {
- uint size = vert_len + BUFFER_VERTS_CHUNK - 1;
- size = size - size % BUFFER_VERTS_CHUNK;
- GPU_vertbuf_data_resize(ibuf->vert, size);
- }
- GPU_vertbuf_use(ibuf->vert); /* Send data. */
- /* Set up the batch now that we are sure ibuf->instance is set up. */
- GPU_batch_copy(ibuf->batch, ibuf->instance);
- GPU_batch_instbuf_set(ibuf->batch, ibuf->vert, false);
- ibuf->shgroup = NULL; /* Set as non used for the next round. */
- }
- else {
- GPU_VERTBUF_DISCARD_SAFE(ibuf->vert);
- GPU_BATCH_DISCARD_SAFE(ibuf->batch);
- ibuf->format = NULL; /* Tag as not allocated. */
- }
- }
- /* Rounding up to nearest chunk size. */
- realloc_size += BUFFER_CHUNK_SIZE - 1;
- realloc_size -= realloc_size % BUFFER_CHUNK_SIZE;
- /* Resize down if necessary. */
- if (realloc_size < instancing->alloc_size) {
- instancing->alloc_size = realloc_size;
- instancing->ibufs = MEM_reallocN(instancing->ibufs, realloc_size * sizeof(DRWInstancingBuffer));
- }
+ size_t realloc_size = 1; /* Avoid 0 size realloc. */
+ /* Resize down buffers in use and send data to GPU & free unused buffers. */
+ DRWInstanceChunk *batching = &idatalist->batching;
+ DRWBatchingBuffer *bbuf = batching->bbufs;
+ for (int i = 0; i < batching->alloc_size; i++, bbuf++) {
+ if (bbuf->shgroup != NULL) {
+ realloc_size = i + 1;
+ uint vert_len = DRW_shgroup_get_instance_count(bbuf->shgroup);
+ vert_len += (vert_len == 0) ? 1 : 0; /* Do not realloc to 0 size buffer */
+ if (vert_len + BUFFER_VERTS_CHUNK <= bbuf->vert->vertex_len) {
+ uint size = vert_len + BUFFER_VERTS_CHUNK - 1;
+ size = size - size % BUFFER_VERTS_CHUNK;
+ GPU_vertbuf_data_resize(bbuf->vert, size);
+ }
+ GPU_vertbuf_use(bbuf->vert); /* Send data. */
+ bbuf->shgroup = NULL; /* Set as non used for the next round. */
+ }
+ else {
+ GPU_VERTBUF_DISCARD_SAFE(bbuf->vert);
+ GPU_BATCH_DISCARD_SAFE(bbuf->batch);
+ bbuf->format = NULL; /* Tag as not allocated. */
+ }
+ }
+ /* Rounding up to nearest chunk size. */
+ realloc_size += BUFFER_CHUNK_SIZE - 1;
+ realloc_size -= realloc_size % BUFFER_CHUNK_SIZE;
+ /* Resize down if necessary. */
+ if (realloc_size < batching->alloc_size) {
+ batching->alloc_size = realloc_size;
+ batching->ibufs = MEM_reallocN(batching->ibufs, realloc_size * sizeof(DRWBatchingBuffer));
+ }
+
+ realloc_size = 1;
+ /* Resize down buffers in use and send data to GPU & free unused buffers. */
+ DRWInstanceChunk *instancing = &idatalist->instancing;
+ DRWInstancingBuffer *ibuf = instancing->ibufs;
+ for (int i = 0; i < instancing->alloc_size; i++, ibuf++) {
+ if (ibuf->shgroup != NULL) {
+ realloc_size = i + 1;
+ uint vert_len = DRW_shgroup_get_instance_count(ibuf->shgroup);
+ vert_len += (vert_len == 0) ? 1 : 0; /* Do not realloc to 0 size buffer */
+ if (vert_len + BUFFER_VERTS_CHUNK <= ibuf->vert->vertex_len) {
+ uint size = vert_len + BUFFER_VERTS_CHUNK - 1;
+ size = size - size % BUFFER_VERTS_CHUNK;
+ GPU_vertbuf_data_resize(ibuf->vert, size);
+ }
+ GPU_vertbuf_use(ibuf->vert); /* Send data. */
+ /* Set up the batch now that we are sure ibuf->instance is set up. */
+ GPU_batch_copy(ibuf->batch, ibuf->instance);
+ GPU_batch_instbuf_set(ibuf->batch, ibuf->vert, false);
+ ibuf->shgroup = NULL; /* Set as non used for the next round. */
+ }
+ else {
+ GPU_VERTBUF_DISCARD_SAFE(ibuf->vert);
+ GPU_BATCH_DISCARD_SAFE(ibuf->batch);
+ ibuf->format = NULL; /* Tag as not allocated. */
+ }
+ }
+ /* Rounding up to nearest chunk size. */
+ realloc_size += BUFFER_CHUNK_SIZE - 1;
+ realloc_size -= realloc_size % BUFFER_CHUNK_SIZE;
+ /* Resize down if necessary. */
+ if (realloc_size < instancing->alloc_size) {
+ instancing->alloc_size = realloc_size;
+ instancing->ibufs = MEM_reallocN(instancing->ibufs,
+ realloc_size * sizeof(DRWInstancingBuffer));
+ }
}
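A worked example of the shrink logic (illustrative values): with BUFFER_VERTS_CHUNK = 32, a buffer holding 128 vertices whose shgroup now needs vert_len = 70 satisfies 70 + 32 <= 128, so it is resized down:

uint vert_len = 70;                             /* Current demand. */
uint size = vert_len + BUFFER_VERTS_CHUNK - 1;  /* 101 */
size = size - size % BUFFER_VERTS_CHUNK;        /* 96: smallest chunk multiple >= 70. */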
/** \} */
@@ -285,29 +293,29 @@ void DRW_instance_buffer_finish(DRWInstanceDataList *idatalist)
static DRWInstanceData *drw_instance_data_create(DRWInstanceDataList *idatalist, uint attr_size)
{
- DRWInstanceData *idata = MEM_callocN(sizeof(DRWInstanceData), "DRWInstanceData");
- idata->next = NULL;
- idata->used = true;
- idata->data_size = attr_size;
- idata->mempool = BLI_mempool_create(sizeof(float) * idata->data_size, 0, 16, 0);
-
- BLI_assert(attr_size > 0);
-
- /* Push to linked list. */
- if (idatalist->idata_head[attr_size - 1] == NULL) {
- idatalist->idata_head[attr_size - 1] = idata;
- }
- else {
- idatalist->idata_tail[attr_size - 1]->next = idata;
- }
- idatalist->idata_tail[attr_size - 1] = idata;
-
- return idata;
+ DRWInstanceData *idata = MEM_callocN(sizeof(DRWInstanceData), "DRWInstanceData");
+ idata->next = NULL;
+ idata->used = true;
+ idata->data_size = attr_size;
+ idata->mempool = BLI_mempool_create(sizeof(float) * idata->data_size, 0, 16, 0);
+
+ BLI_assert(attr_size > 0);
+
+ /* Push to linked list. */
+ if (idatalist->idata_head[attr_size - 1] == NULL) {
+ idatalist->idata_head[attr_size - 1] = idata;
+ }
+ else {
+ idatalist->idata_tail[attr_size - 1]->next = idata;
+ }
+ idatalist->idata_tail[attr_size - 1] = idata;
+
+ return idata;
}
static void DRW_instance_data_free(DRWInstanceData *idata)
{
- BLI_mempool_destroy(idata->mempool);
+ BLI_mempool_destroy(idata->mempool);
}
/**
@@ -315,24 +323,24 @@ static void DRW_instance_data_free(DRWInstanceData *idata)
*/
void *DRW_instance_data_next(DRWInstanceData *idata)
{
- return BLI_mempool_alloc(idata->mempool);
+ return BLI_mempool_alloc(idata->mempool);
}
DRWInstanceData *DRW_instance_data_request(DRWInstanceDataList *idatalist, uint attr_size)
{
- BLI_assert(attr_size > 0 && attr_size <= MAX_INSTANCE_DATA_SIZE);
+ BLI_assert(attr_size > 0 && attr_size <= MAX_INSTANCE_DATA_SIZE);
- DRWInstanceData *idata = idatalist->idata_head[attr_size - 1];
+ DRWInstanceData *idata = idatalist->idata_head[attr_size - 1];
- /* Search for an unused data chunk. */
- for (; idata; idata = idata->next) {
- if (idata->used == false) {
- idata->used = true;
- return idata;
- }
- }
+ /* Search for an unused data chunk. */
+ for (; idata; idata = idata->next) {
+ if (idata->used == false) {
+ idata->used = true;
+ return idata;
+ }
+ }
- return drw_instance_data_create(idatalist, attr_size);
+ return drw_instance_data_create(idatalist, attr_size);
}
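A typical request is one 4x4 matrix per instance, i.e. 16 floats (well under MAX_INSTANCE_DATA_SIZE). Sketch assuming `idatalist` is in scope:

DRWInstanceData *idata = DRW_instance_data_request(idatalist, 16);
float *mat = DRW_instance_data_next(idata); /* Valid until the list is reset. */
unit_m4((float(*)[4])mat);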
/** \} */
@@ -343,98 +351,100 @@ DRWInstanceData *DRW_instance_data_request(DRWInstanceDataList *idatalist, uint
DRWInstanceDataList *DRW_instance_data_list_create(void)
{
- DRWInstanceDataList *idatalist = MEM_callocN(sizeof(DRWInstanceDataList), "DRWInstanceDataList");
- idatalist->batching.bbufs = MEM_callocN(sizeof(DRWBatchingBuffer) * BUFFER_CHUNK_SIZE, "DRWBatchingBuffers");
- idatalist->batching.alloc_size = BUFFER_CHUNK_SIZE;
- idatalist->instancing.ibufs = MEM_callocN(sizeof(DRWInstancingBuffer) * BUFFER_CHUNK_SIZE, "DRWInstancingBuffers");
- idatalist->instancing.alloc_size = BUFFER_CHUNK_SIZE;
+ DRWInstanceDataList *idatalist = MEM_callocN(sizeof(DRWInstanceDataList), "DRWInstanceDataList");
+ idatalist->batching.bbufs = MEM_callocN(sizeof(DRWBatchingBuffer) * BUFFER_CHUNK_SIZE,
+ "DRWBatchingBuffers");
+ idatalist->batching.alloc_size = BUFFER_CHUNK_SIZE;
+ idatalist->instancing.ibufs = MEM_callocN(sizeof(DRWInstancingBuffer) * BUFFER_CHUNK_SIZE,
+ "DRWInstancingBuffers");
+ idatalist->instancing.alloc_size = BUFFER_CHUNK_SIZE;
- BLI_addtail(&g_idatalists, idatalist);
+ BLI_addtail(&g_idatalists, idatalist);
- return idatalist;
+ return idatalist;
}
void DRW_instance_data_list_free(DRWInstanceDataList *idatalist)
{
- DRWInstanceData *idata, *next_idata;
-
- for (int i = 0; i < MAX_INSTANCE_DATA_SIZE; ++i) {
- for (idata = idatalist->idata_head[i]; idata; idata = next_idata) {
- next_idata = idata->next;
- DRW_instance_data_free(idata);
- MEM_freeN(idata);
- }
- idatalist->idata_head[i] = NULL;
- idatalist->idata_tail[i] = NULL;
- }
-
- DRWBatchingBuffer *bbuf = idatalist->batching.bbufs;
- for (int i = 0; i < idatalist->batching.alloc_size; i++, bbuf++) {
- GPU_VERTBUF_DISCARD_SAFE(bbuf->vert);
- GPU_BATCH_DISCARD_SAFE(bbuf->batch);
- }
- MEM_freeN(idatalist->batching.bbufs);
-
- DRWInstancingBuffer *ibuf = idatalist->instancing.ibufs;
- for (int i = 0; i < idatalist->instancing.alloc_size; i++, ibuf++) {
- GPU_VERTBUF_DISCARD_SAFE(ibuf->vert);
- GPU_BATCH_DISCARD_SAFE(ibuf->batch);
- }
- MEM_freeN(idatalist->instancing.ibufs);
-
- BLI_remlink(&g_idatalists, idatalist);
+ DRWInstanceData *idata, *next_idata;
+
+ for (int i = 0; i < MAX_INSTANCE_DATA_SIZE; ++i) {
+ for (idata = idatalist->idata_head[i]; idata; idata = next_idata) {
+ next_idata = idata->next;
+ DRW_instance_data_free(idata);
+ MEM_freeN(idata);
+ }
+ idatalist->idata_head[i] = NULL;
+ idatalist->idata_tail[i] = NULL;
+ }
+
+ DRWBatchingBuffer *bbuf = idatalist->batching.bbufs;
+ for (int i = 0; i < idatalist->batching.alloc_size; i++, bbuf++) {
+ GPU_VERTBUF_DISCARD_SAFE(bbuf->vert);
+ GPU_BATCH_DISCARD_SAFE(bbuf->batch);
+ }
+ MEM_freeN(idatalist->batching.bbufs);
+
+ DRWInstancingBuffer *ibuf = idatalist->instancing.ibufs;
+ for (int i = 0; i < idatalist->instancing.alloc_size; i++, ibuf++) {
+ GPU_VERTBUF_DISCARD_SAFE(ibuf->vert);
+ GPU_BATCH_DISCARD_SAFE(ibuf->batch);
+ }
+ MEM_freeN(idatalist->instancing.ibufs);
+
+ BLI_remlink(&g_idatalists, idatalist);
}
void DRW_instance_data_list_reset(DRWInstanceDataList *idatalist)
{
- DRWInstanceData *idata;
+ DRWInstanceData *idata;
- for (int i = 0; i < MAX_INSTANCE_DATA_SIZE; ++i) {
- for (idata = idatalist->idata_head[i]; idata; idata = idata->next) {
- idata->used = false;
- }
- }
+ for (int i = 0; i < MAX_INSTANCE_DATA_SIZE; ++i) {
+ for (idata = idatalist->idata_head[i]; idata; idata = idata->next) {
+ idata->used = false;
+ }
+ }
}
void DRW_instance_data_list_free_unused(DRWInstanceDataList *idatalist)
{
- DRWInstanceData *idata, *next_idata;
-
- /* Remove unused data blocks and sanitize each list. */
- for (int i = 0; i < MAX_INSTANCE_DATA_SIZE; ++i) {
- idatalist->idata_tail[i] = NULL;
- for (idata = idatalist->idata_head[i]; idata; idata = next_idata) {
- next_idata = idata->next;
- if (idata->used == false) {
- if (idatalist->idata_head[i] == idata) {
- idatalist->idata_head[i] = next_idata;
- }
- else {
- /* idatalist->idata_tail[i] is guaranteed not to be null in this case. */
- idatalist->idata_tail[i]->next = next_idata;
- }
- DRW_instance_data_free(idata);
- MEM_freeN(idata);
- }
- else {
- if (idatalist->idata_tail[i] != NULL) {
- idatalist->idata_tail[i]->next = idata;
- }
- idatalist->idata_tail[i] = idata;
- }
- }
- }
+ DRWInstanceData *idata, *next_idata;
+
+ /* Remove unused data blocks and sanitize each list. */
+ for (int i = 0; i < MAX_INSTANCE_DATA_SIZE; ++i) {
+ idatalist->idata_tail[i] = NULL;
+ for (idata = idatalist->idata_head[i]; idata; idata = next_idata) {
+ next_idata = idata->next;
+ if (idata->used == false) {
+ if (idatalist->idata_head[i] == idata) {
+ idatalist->idata_head[i] = next_idata;
+ }
+ else {
+ /* idatalist->idata_tail[i] is guaranteed not to be null in this case. */
+ idatalist->idata_tail[i]->next = next_idata;
+ }
+ DRW_instance_data_free(idata);
+ MEM_freeN(idata);
+ }
+ else {
+ if (idatalist->idata_tail[i] != NULL) {
+ idatalist->idata_tail[i]->next = idata;
+ }
+ idatalist->idata_tail[i] = idata;
+ }
+ }
+ }
}
void DRW_instance_data_list_resize(DRWInstanceDataList *idatalist)
{
- DRWInstanceData *idata;
+ DRWInstanceData *idata;
- for (int i = 0; i < MAX_INSTANCE_DATA_SIZE; ++i) {
- for (idata = idatalist->idata_head[i]; idata; idata = idata->next) {
- BLI_mempool_clear_ex(idata->mempool, BLI_mempool_len(idata->mempool));
- }
- }
+ for (int i = 0; i < MAX_INSTANCE_DATA_SIZE; ++i) {
+ for (idata = idatalist->idata_head[i]; idata; idata = idata->next) {
+ BLI_mempool_clear_ex(idata->mempool, BLI_mempool_len(idata->mempool));
+ }
+ }
}
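Taken together, these calls imply a per-frame lifecycle; a sketch of the intended order (real function names, placement illustrative):

/* Frame start: mark every pool unused. */
DRW_instance_data_list_reset(idatalist);
/* During the frame: DRW_instance_data_request() + DRW_instance_data_next(). */
/* Frame end: drop pools no engine touched, recycle memory of the kept ones. */
DRW_instance_data_list_free_unused(idatalist);
DRW_instance_data_list_resize(idatalist);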
/** \} */
diff --git a/source/blender/draw/intern/draw_instance_data.h b/source/blender/draw/intern/draw_instance_data.h
index b6f3988dcef..ea5c6ac7bb2 100644
--- a/source/blender/draw/intern/draw_instance_data.h
+++ b/source/blender/draw/intern/draw_instance_data.h
@@ -36,15 +36,20 @@ typedef struct DRWInstanceDataList DRWInstanceDataList;
struct DRWShadingGroup;
void *DRW_instance_data_next(DRWInstanceData *idata);
-DRWInstanceData *DRW_instance_data_request(
- DRWInstanceDataList *idatalist, uint attr_size);
-
-void DRW_batching_buffer_request(
- DRWInstanceDataList *idatalist, GPUVertFormat *format, GPUPrimType type, struct DRWShadingGroup *shgroup,
- GPUBatch **r_batch, GPUVertBuf **r_vert);
-void DRW_instancing_buffer_request(
- DRWInstanceDataList *idatalist, GPUVertFormat *format, GPUBatch *instance, struct DRWShadingGroup *shgroup,
- GPUBatch **r_batch, GPUVertBuf **r_vert);
+DRWInstanceData *DRW_instance_data_request(DRWInstanceDataList *idatalist, uint attr_size);
+
+void DRW_batching_buffer_request(DRWInstanceDataList *idatalist,
+ GPUVertFormat *format,
+ GPUPrimType type,
+ struct DRWShadingGroup *shgroup,
+ GPUBatch **r_batch,
+ GPUVertBuf **r_vert);
+void DRW_instancing_buffer_request(DRWInstanceDataList *idatalist,
+ GPUVertFormat *format,
+ GPUBatch *instance,
+ struct DRWShadingGroup *shgroup,
+ GPUBatch **r_batch,
+ GPUVertBuf **r_vert);
/* Upload all instance data to the GPU as soon as possible. */
void DRW_instance_buffer_finish(DRWInstanceDataList *idatalist);
diff --git a/source/blender/draw/intern/draw_manager.c b/source/blender/draw/intern/draw_manager.c
index 699ac8d5b0b..9e078fd2774 100644
--- a/source/blender/draw/intern/draw_manager.c
+++ b/source/blender/draw/intern/draw_manager.c
@@ -99,18 +99,18 @@ static ListBase DRW_engines = {NULL, NULL};
static void drw_state_prepare_clean_for_draw(DRWManager *dst)
{
- memset(dst, 0x0, offsetof(DRWManager, gl_context));
+ memset(dst, 0x0, offsetof(DRWManager, gl_context));
- /* Maybe not the best place for this. */
- if (!DST.uniform_names.buffer) {
- DST.uniform_names.buffer = MEM_callocN(DRW_UNIFORM_BUFFER_NAME, "Name Buffer");
- DST.uniform_names.buffer_len = DRW_UNIFORM_BUFFER_NAME;
- }
- else if (DST.uniform_names.buffer_len > DRW_UNIFORM_BUFFER_NAME) {
- DST.uniform_names.buffer = MEM_reallocN(DST.uniform_names.buffer, DRW_UNIFORM_BUFFER_NAME);
- DST.uniform_names.buffer_len = DRW_UNIFORM_BUFFER_NAME;
- }
- DST.uniform_names.buffer_ofs = 0;
+ /* Maybe not the best place for this. */
+ if (!DST.uniform_names.buffer) {
+ DST.uniform_names.buffer = MEM_callocN(DRW_UNIFORM_BUFFER_NAME, "Name Buffer");
+ DST.uniform_names.buffer_len = DRW_UNIFORM_BUFFER_NAME;
+ }
+ else if (DST.uniform_names.buffer_len > DRW_UNIFORM_BUFFER_NAME) {
+ DST.uniform_names.buffer = MEM_reallocN(DST.uniform_names.buffer, DRW_UNIFORM_BUFFER_NAME);
+ DST.uniform_names.buffer_len = DRW_UNIFORM_BUFFER_NAME;
+ }
+ DST.uniform_names.buffer_ofs = 0;
}
/* This function is used to reset draw manager to a state
@@ -120,7 +120,7 @@ static void drw_state_prepare_clean_for_draw(DRWManager *dst)
#ifdef DEBUG
static void drw_state_ensure_not_reused(DRWManager *dst)
{
- memset(dst, 0xff, offsetof(DRWManager, gl_context));
+ memset(dst, 0xff, offsetof(DRWManager, gl_context));
}
#endif
@@ -128,50 +128,49 @@ static void drw_state_ensure_not_reused(DRWManager *dst)
void DRW_draw_callbacks_pre_scene(void)
{
- RegionView3D *rv3d = DST.draw_ctx.rv3d;
+ RegionView3D *rv3d = DST.draw_ctx.rv3d;
- GPU_matrix_projection_set(rv3d->winmat);
- GPU_matrix_set(rv3d->viewmat);
+ GPU_matrix_projection_set(rv3d->winmat);
+ GPU_matrix_set(rv3d->viewmat);
}
void DRW_draw_callbacks_post_scene(void)
{
- RegionView3D *rv3d = DST.draw_ctx.rv3d;
+ RegionView3D *rv3d = DST.draw_ctx.rv3d;
- GPU_matrix_projection_set(rv3d->winmat);
- GPU_matrix_set(rv3d->viewmat);
+ GPU_matrix_projection_set(rv3d->winmat);
+ GPU_matrix_set(rv3d->viewmat);
}
struct DRWTextStore *DRW_text_cache_ensure(void)
{
- BLI_assert(DST.text_store_p);
- if (*DST.text_store_p == NULL) {
- *DST.text_store_p = DRW_text_cache_create();
- }
- return *DST.text_store_p;
+ BLI_assert(DST.text_store_p);
+ if (*DST.text_store_p == NULL) {
+ *DST.text_store_p = DRW_text_cache_create();
+ }
+ return *DST.text_store_p;
}
-
/* -------------------------------------------------------------------- */
/** \name Settings
* \{ */
bool DRW_object_is_renderable(const Object *ob)
{
- BLI_assert((ob->base_flag & BASE_VISIBLE) != 0);
+ BLI_assert((ob->base_flag & BASE_VISIBLE) != 0);
- if (ob->type == OB_MESH) {
- if ((ob == DST.draw_ctx.object_edit) || BKE_object_is_in_editmode(ob)) {
- View3D *v3d = DST.draw_ctx.v3d;
- const int mask = (V3D_OVERLAY_EDIT_OCCLUDE_WIRE | V3D_OVERLAY_EDIT_WEIGHT);
+ if (ob->type == OB_MESH) {
+ if ((ob == DST.draw_ctx.object_edit) || BKE_object_is_in_editmode(ob)) {
+ View3D *v3d = DST.draw_ctx.v3d;
+ const int mask = (V3D_OVERLAY_EDIT_OCCLUDE_WIRE | V3D_OVERLAY_EDIT_WEIGHT);
- if (v3d && v3d->overlay.edit_flag & mask) {
- return false;
- }
- }
- }
+ if (v3d && v3d->overlay.edit_flag & mask) {
+ return false;
+ }
+ }
+ }
- return true;
+ return true;
}
/**
@@ -180,87 +179,80 @@ bool DRW_object_is_renderable(const Object *ob)
*/
int DRW_object_visibility_in_active_context(const Object *ob)
{
- const eEvaluationMode mode = DRW_state_is_scene_render() ?
- DAG_EVAL_RENDER :
- DAG_EVAL_VIEWPORT;
- return BKE_object_visibility(ob, mode);
+ const eEvaluationMode mode = DRW_state_is_scene_render() ? DAG_EVAL_RENDER : DAG_EVAL_VIEWPORT;
+ return BKE_object_visibility(ob, mode);
}
bool DRW_object_is_flat_normal(const Object *ob)
{
- if (ob->type == OB_MESH) {
- const Mesh *me = ob->data;
- if (me->mpoly && me->mpoly[0].flag & ME_SMOOTH) {
- return false;
- }
- }
- return true;
+ if (ob->type == OB_MESH) {
+ const Mesh *me = ob->data;
+ if (me->mpoly && me->mpoly[0].flag & ME_SMOOTH) {
+ return false;
+ }
+ }
+ return true;
}
bool DRW_object_use_hide_faces(const struct Object *ob)
{
- if (ob->type == OB_MESH) {
- const Mesh *me = ob->data;
-
- switch (ob->mode) {
- case OB_MODE_TEXTURE_PAINT:
- return (me->editflag & ME_EDIT_PAINT_FACE_SEL) != 0;
- case OB_MODE_VERTEX_PAINT:
- case OB_MODE_WEIGHT_PAINT:
- return (me->editflag & (ME_EDIT_PAINT_FACE_SEL | ME_EDIT_PAINT_VERT_SEL)) != 0;
- }
- }
-
- return false;
-}
-
-bool DRW_object_is_visible_psys_in_active_context(
- const Object *object,
- const ParticleSystem *psys)
-{
- const bool for_render = DRW_state_is_image_render();
- /* NOTE: psys_check_enabled is using object and particle system for only
- * reading, but is using some other functions which are more generic and
- * which are hard to make const-pointer. */
- if (!psys_check_enabled((Object *)object, (ParticleSystem *)psys, for_render)) {
- return false;
- }
- const DRWContextState *draw_ctx = DRW_context_state_get();
- const Scene *scene = draw_ctx->scene;
- if (object == draw_ctx->object_edit) {
- return false;
- }
- const ParticleSettings *part = psys->part;
- const ParticleEditSettings *pset = &scene->toolsettings->particle;
- if (object->mode == OB_MODE_PARTICLE_EDIT) {
- if (psys_in_edit_mode(draw_ctx->depsgraph, psys)) {
- if ((pset->flag & PE_DRAW_PART) == 0) {
- return false;
- }
- if ((part->childtype == 0) &&
- (psys->flag & PSYS_HAIR_DYNAMICS &&
- psys->pointcache->flag & PTCACHE_BAKED) == 0)
- {
- return false;
- }
- }
- }
- return true;
+ if (ob->type == OB_MESH) {
+ const Mesh *me = ob->data;
+
+ switch (ob->mode) {
+ case OB_MODE_TEXTURE_PAINT:
+ return (me->editflag & ME_EDIT_PAINT_FACE_SEL) != 0;
+ case OB_MODE_VERTEX_PAINT:
+ case OB_MODE_WEIGHT_PAINT:
+ return (me->editflag & (ME_EDIT_PAINT_FACE_SEL | ME_EDIT_PAINT_VERT_SEL)) != 0;
+ }
+ }
+
+ return false;
+}
+
+bool DRW_object_is_visible_psys_in_active_context(const Object *object, const ParticleSystem *psys)
+{
+ const bool for_render = DRW_state_is_image_render();
+  /* NOTE: psys_check_enabled uses the object and particle system read-only,
+   * but calls other, more generic functions that are hard to make
+   * const-pointer. */
+ if (!psys_check_enabled((Object *)object, (ParticleSystem *)psys, for_render)) {
+ return false;
+ }
+ const DRWContextState *draw_ctx = DRW_context_state_get();
+ const Scene *scene = draw_ctx->scene;
+ if (object == draw_ctx->object_edit) {
+ return false;
+ }
+ const ParticleSettings *part = psys->part;
+ const ParticleEditSettings *pset = &scene->toolsettings->particle;
+ if (object->mode == OB_MODE_PARTICLE_EDIT) {
+ if (psys_in_edit_mode(draw_ctx->depsgraph, psys)) {
+ if ((pset->flag & PE_DRAW_PART) == 0) {
+ return false;
+ }
+ if ((part->childtype == 0) &&
+ (psys->flag & PSYS_HAIR_DYNAMICS && psys->pointcache->flag & PTCACHE_BAKED) == 0) {
+ return false;
+ }
+ }
+ }
+ return true;
}
struct Object *DRW_object_get_dupli_parent(const Object *UNUSED(ob))
{
- return DST.dupli_parent;
+ return DST.dupli_parent;
}
struct DupliObject *DRW_object_get_dupli(const Object *UNUSED(ob))
{
- return DST.dupli_source;
+ return DST.dupli_source;
}
/** \} */
-
/* -------------------------------------------------------------------- */
/** \name Color Management
* \{ */
@@ -268,110 +260,109 @@ struct DupliObject *DRW_object_get_dupli(const Object *UNUSED(ob))
/* Use color management profile to draw texture to framebuffer */
void DRW_transform_to_display(GPUTexture *tex, bool use_view_transform, bool use_render_settings)
{
- drw_state_set(DRW_STATE_WRITE_COLOR);
-
- GPUVertFormat *vert_format = immVertexFormat();
- uint pos = GPU_vertformat_attr_add(vert_format, "pos", GPU_COMP_F32, 2, GPU_FETCH_FLOAT);
- uint texco = GPU_vertformat_attr_add(vert_format, "texCoord", GPU_COMP_F32, 2, GPU_FETCH_FLOAT);
-
- const float dither = 1.0f;
-
- bool use_ocio = false;
-
- /* View transform is already applied for offscreen, don't apply again, see: T52046 */
- if (!(DST.options.is_image_render && !DST.options.is_scene_render)) {
- Scene *scene = DST.draw_ctx.scene;
- ColorManagedDisplaySettings *display_settings = &scene->display_settings;
- ColorManagedViewSettings view_settings;
- if (use_render_settings) {
- /* Use full render settings, for renders with scene lighting. */
- view_settings = scene->view_settings;
- }
- else if (use_view_transform) {
- /* Use only view transform + look and nothing else for lookdev without
- * scene lighting, as exposure depends on scene light intensity. */
- BKE_color_managed_view_settings_init_render(&view_settings, display_settings, NULL);
- STRNCPY(view_settings.view_transform, scene->view_settings.view_transform);
- STRNCPY(view_settings.look, scene->view_settings.look);
- }
- else {
- /* For workbench use only default view transform in configuration,
- * using no scene settings. */
- BKE_color_managed_view_settings_init_render(&view_settings, display_settings, NULL);
- }
-
- use_ocio = IMB_colormanagement_setup_glsl_draw_from_space(
- &view_settings, display_settings, NULL, dither, false);
- }
-
- if (!use_ocio) {
- /* View transform is already applied for offscreen, don't apply again, see: T52046 */
- if (DST.options.is_image_render && !DST.options.is_scene_render) {
- immBindBuiltinProgram(GPU_SHADER_2D_IMAGE_COLOR);
- immUniformColor4f(1.0f, 1.0f, 1.0f, 1.0f);
- }
- else {
- immBindBuiltinProgram(GPU_SHADER_2D_IMAGE_LINEAR_TO_SRGB);
- }
- immUniform1i("image", 0);
- }
-
- GPU_texture_bind(tex, 0); /* OCIO texture bind point is 0 */
-
- float mat[4][4];
- unit_m4(mat);
- immUniformMatrix4fv("ModelViewProjectionMatrix", mat);
-
- /* Full screen triangle */
- immBegin(GPU_PRIM_TRIS, 3);
- immAttr2f(texco, 0.0f, 0.0f);
- immVertex2f(pos, -1.0f, -1.0f);
-
- immAttr2f(texco, 2.0f, 0.0f);
- immVertex2f(pos, 3.0f, -1.0f);
-
- immAttr2f(texco, 0.0f, 2.0f);
- immVertex2f(pos, -1.0f, 3.0f);
- immEnd();
-
- GPU_texture_unbind(tex);
-
- if (use_ocio) {
- IMB_colormanagement_finish_glsl_draw();
- }
- else {
- immUnbindProgram();
- }
+ drw_state_set(DRW_STATE_WRITE_COLOR);
+
+ GPUVertFormat *vert_format = immVertexFormat();
+ uint pos = GPU_vertformat_attr_add(vert_format, "pos", GPU_COMP_F32, 2, GPU_FETCH_FLOAT);
+ uint texco = GPU_vertformat_attr_add(vert_format, "texCoord", GPU_COMP_F32, 2, GPU_FETCH_FLOAT);
+
+ const float dither = 1.0f;
+
+ bool use_ocio = false;
+
+ /* View transform is already applied for offscreen, don't apply again, see: T52046 */
+ if (!(DST.options.is_image_render && !DST.options.is_scene_render)) {
+ Scene *scene = DST.draw_ctx.scene;
+ ColorManagedDisplaySettings *display_settings = &scene->display_settings;
+ ColorManagedViewSettings view_settings;
+ if (use_render_settings) {
+ /* Use full render settings, for renders with scene lighting. */
+ view_settings = scene->view_settings;
+ }
+ else if (use_view_transform) {
+ /* Use only view transform + look and nothing else for lookdev without
+ * scene lighting, as exposure depends on scene light intensity. */
+ BKE_color_managed_view_settings_init_render(&view_settings, display_settings, NULL);
+ STRNCPY(view_settings.view_transform, scene->view_settings.view_transform);
+ STRNCPY(view_settings.look, scene->view_settings.look);
+ }
+ else {
+      /* For workbench, use only the default view transform from the
+       * configuration, with no scene settings. */
+ BKE_color_managed_view_settings_init_render(&view_settings, display_settings, NULL);
+ }
+
+ use_ocio = IMB_colormanagement_setup_glsl_draw_from_space(
+ &view_settings, display_settings, NULL, dither, false);
+ }
+
+ if (!use_ocio) {
+ /* View transform is already applied for offscreen, don't apply again, see: T52046 */
+ if (DST.options.is_image_render && !DST.options.is_scene_render) {
+ immBindBuiltinProgram(GPU_SHADER_2D_IMAGE_COLOR);
+ immUniformColor4f(1.0f, 1.0f, 1.0f, 1.0f);
+ }
+ else {
+ immBindBuiltinProgram(GPU_SHADER_2D_IMAGE_LINEAR_TO_SRGB);
+ }
+ immUniform1i("image", 0);
+ }
+
+ GPU_texture_bind(tex, 0); /* OCIO texture bind point is 0 */
+
+ float mat[4][4];
+ unit_m4(mat);
+ immUniformMatrix4fv("ModelViewProjectionMatrix", mat);
+
+ /* Full screen triangle */
+ immBegin(GPU_PRIM_TRIS, 3);
+ immAttr2f(texco, 0.0f, 0.0f);
+ immVertex2f(pos, -1.0f, -1.0f);
+
+ immAttr2f(texco, 2.0f, 0.0f);
+ immVertex2f(pos, 3.0f, -1.0f);
+
+ immAttr2f(texco, 0.0f, 2.0f);
+ immVertex2f(pos, -1.0f, 3.0f);
+ immEnd();
+
+ GPU_texture_unbind(tex);
+
+ if (use_ocio) {
+ IMB_colormanagement_finish_glsl_draw();
+ }
+ else {
+ immUnbindProgram();
+ }
}
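The three vertices emitted above form the classic single fullscreen triangle: positions (-1,-1), (3,-1), (-1,3) enclose the entire [-1,1] NDC square, and doubling the texture coordinates to (0,0), (2,0), (0,2) keeps the interpolated UVs at exactly 0..1 over the visible region. A standalone check of that interpolation along the bottom edge (a sketch, not code from the tree):

#include <assert.h>
#include <math.h>

/* Along the bottom edge, positions -1..3 map linearly to u = 0..2, so the
 * on-screen span -1..1 lands exactly on u = 0..1. */
static float u_at(float ndc_x)
{
  return (ndc_x - -1.0f) / (3.0f - -1.0f) * 2.0f;
}

int main(void)
{
  assert(fabsf(u_at(-1.0f) - 0.0f) < 1e-6f); /* left screen edge */
  assert(fabsf(u_at(+1.0f) - 1.0f) < 1e-6f); /* right screen edge */
  assert(fabsf(u_at(+3.0f) - 2.0f) < 1e-6f); /* clipped far corner */
  return 0;
}

A single oversized triangle avoids the diagonal seam of a two-triangle quad, which is why it is a common idiom for post-processing passes like this one.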
/* Draw texture to framebuffer without any color transforms */
void DRW_transform_none(GPUTexture *tex)
{
- drw_state_set(DRW_STATE_WRITE_COLOR);
+ drw_state_set(DRW_STATE_WRITE_COLOR);
- /* Draw as texture for final render (without immediate mode). */
- GPUBatch *geom = DRW_cache_fullscreen_quad_get();
- GPU_batch_program_set_builtin(geom, GPU_SHADER_2D_IMAGE_COLOR);
+ /* Draw as texture for final render (without immediate mode). */
+ GPUBatch *geom = DRW_cache_fullscreen_quad_get();
+ GPU_batch_program_set_builtin(geom, GPU_SHADER_2D_IMAGE_COLOR);
- GPU_texture_bind(tex, 0);
+ GPU_texture_bind(tex, 0);
- const float white[4] = {1.0f, 1.0f, 1.0f, 1.0f};
- GPU_batch_uniform_4fv(geom, "color", white);
+ const float white[4] = {1.0f, 1.0f, 1.0f, 1.0f};
+ GPU_batch_uniform_4fv(geom, "color", white);
- float mat[4][4];
- unit_m4(mat);
- GPU_batch_uniform_mat4(geom, "ModelViewProjectionMatrix", mat);
+ float mat[4][4];
+ unit_m4(mat);
+ GPU_batch_uniform_mat4(geom, "ModelViewProjectionMatrix", mat);
- GPU_batch_program_use_begin(geom);
- GPU_batch_draw_range_ex(geom, 0, 0, false);
- GPU_batch_program_use_end(geom);
+ GPU_batch_program_use_begin(geom);
+ GPU_batch_draw_range_ex(geom, 0, 0, false);
+ GPU_batch_program_use_end(geom);
- GPU_texture_unbind(tex);
+ GPU_texture_unbind(tex);
}
/** \} */
-
/* -------------------------------------------------------------------- */
/** \name Multisample Resolve
* \{ */
@@ -383,64 +374,80 @@ void DRW_transform_none(GPUTexture *tex)
*/
void DRW_multisamples_resolve(GPUTexture *src_depth, GPUTexture *src_color, bool use_depth)
{
- DRWState state = DRW_STATE_WRITE_COLOR | DRW_STATE_BLEND_PREMUL;
-
- if (use_depth) {
- state |= DRW_STATE_WRITE_DEPTH | DRW_STATE_DEPTH_LESS_EQUAL;
- }
- drw_state_set(state);
-
- int samples = GPU_texture_samples(src_depth);
-
- BLI_assert(samples > 0);
- BLI_assert(GPU_texture_samples(src_color) == samples);
-
- GPUBatch *geom = DRW_cache_fullscreen_quad_get();
-
- int builtin;
- if (use_depth) {
- switch (samples) {
- case 2: builtin = GPU_SHADER_2D_IMAGE_MULTISAMPLE_2_DEPTH_TEST; break;
- case 4: builtin = GPU_SHADER_2D_IMAGE_MULTISAMPLE_4_DEPTH_TEST; break;
- case 8: builtin = GPU_SHADER_2D_IMAGE_MULTISAMPLE_8_DEPTH_TEST; break;
- case 16: builtin = GPU_SHADER_2D_IMAGE_MULTISAMPLE_16_DEPTH_TEST; break;
- default:
- BLI_assert("Mulisample count unsupported by blit shader.");
- builtin = GPU_SHADER_2D_IMAGE_MULTISAMPLE_2_DEPTH_TEST;
- break;
- }
- }
- else {
- switch (samples) {
- case 2: builtin = GPU_SHADER_2D_IMAGE_MULTISAMPLE_2; break;
- case 4: builtin = GPU_SHADER_2D_IMAGE_MULTISAMPLE_4; break;
- case 8: builtin = GPU_SHADER_2D_IMAGE_MULTISAMPLE_8; break;
- case 16: builtin = GPU_SHADER_2D_IMAGE_MULTISAMPLE_16; break;
- default:
- BLI_assert("Mulisample count unsupported by blit shader.");
- builtin = GPU_SHADER_2D_IMAGE_MULTISAMPLE_2;
- break;
- }
- }
-
- GPU_batch_program_set_builtin(geom, builtin);
-
- if (use_depth) {
- GPU_texture_bind(src_depth, 0);
- GPU_batch_uniform_1i(geom, "depthMulti", 0);
- }
-
- GPU_texture_bind(src_color, 1);
- GPU_batch_uniform_1i(geom, "colorMulti", 1);
-
- float mat[4][4];
- unit_m4(mat);
- GPU_batch_uniform_mat4(geom, "ModelViewProjectionMatrix", mat);
-
- /* avoid gpuMatrix calls */
- GPU_batch_program_use_begin(geom);
- GPU_batch_draw_range_ex(geom, 0, 0, false);
- GPU_batch_program_use_end(geom);
+ DRWState state = DRW_STATE_WRITE_COLOR | DRW_STATE_BLEND_PREMUL;
+
+ if (use_depth) {
+ state |= DRW_STATE_WRITE_DEPTH | DRW_STATE_DEPTH_LESS_EQUAL;
+ }
+ drw_state_set(state);
+
+ int samples = GPU_texture_samples(src_depth);
+
+ BLI_assert(samples > 0);
+ BLI_assert(GPU_texture_samples(src_color) == samples);
+
+ GPUBatch *geom = DRW_cache_fullscreen_quad_get();
+
+ int builtin;
+ if (use_depth) {
+ switch (samples) {
+ case 2:
+ builtin = GPU_SHADER_2D_IMAGE_MULTISAMPLE_2_DEPTH_TEST;
+ break;
+ case 4:
+ builtin = GPU_SHADER_2D_IMAGE_MULTISAMPLE_4_DEPTH_TEST;
+ break;
+ case 8:
+ builtin = GPU_SHADER_2D_IMAGE_MULTISAMPLE_8_DEPTH_TEST;
+ break;
+ case 16:
+ builtin = GPU_SHADER_2D_IMAGE_MULTISAMPLE_16_DEPTH_TEST;
+ break;
+ default:
+        BLI_assert(!"Multisample count unsupported by blit shader.");
+ builtin = GPU_SHADER_2D_IMAGE_MULTISAMPLE_2_DEPTH_TEST;
+ break;
+ }
+ }
+ else {
+ switch (samples) {
+ case 2:
+ builtin = GPU_SHADER_2D_IMAGE_MULTISAMPLE_2;
+ break;
+ case 4:
+ builtin = GPU_SHADER_2D_IMAGE_MULTISAMPLE_4;
+ break;
+ case 8:
+ builtin = GPU_SHADER_2D_IMAGE_MULTISAMPLE_8;
+ break;
+ case 16:
+ builtin = GPU_SHADER_2D_IMAGE_MULTISAMPLE_16;
+ break;
+ default:
+        BLI_assert(!"Multisample count unsupported by blit shader.");
+ builtin = GPU_SHADER_2D_IMAGE_MULTISAMPLE_2;
+ break;
+ }
+ }
+
+ GPU_batch_program_set_builtin(geom, builtin);
+
+ if (use_depth) {
+ GPU_texture_bind(src_depth, 0);
+ GPU_batch_uniform_1i(geom, "depthMulti", 0);
+ }
+
+ GPU_texture_bind(src_color, 1);
+ GPU_batch_uniform_1i(geom, "colorMulti", 1);
+
+ float mat[4][4];
+ unit_m4(mat);
+ GPU_batch_uniform_mat4(geom, "ModelViewProjectionMatrix", mat);
+
+ /* avoid gpuMatrix calls */
+ GPU_batch_program_use_begin(geom);
+ GPU_batch_draw_range_ex(geom, 0, 0, false);
+ GPU_batch_program_use_end(geom);
}
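Both switch blocks above encode the same mapping: a power-of-two sample count (2, 4, 8, 16) selects one of four builtin shader variants. A hedged sketch of the equivalent table form, reusing the enum names and types from the surrounding file (the helper itself is hypothetical):

/* Hypothetical compaction of the switches above. Sample counts 2/4/8/16
 * become indices 0..3 via integer log2; other counts are caught by the
 * asserts in the code above. */
static int multisample_resolve_builtin(int samples, bool use_depth)
{
  static const int with_depth[4] = {
      GPU_SHADER_2D_IMAGE_MULTISAMPLE_2_DEPTH_TEST,
      GPU_SHADER_2D_IMAGE_MULTISAMPLE_4_DEPTH_TEST,
      GPU_SHADER_2D_IMAGE_MULTISAMPLE_8_DEPTH_TEST,
      GPU_SHADER_2D_IMAGE_MULTISAMPLE_16_DEPTH_TEST,
  };
  static const int without_depth[4] = {
      GPU_SHADER_2D_IMAGE_MULTISAMPLE_2,
      GPU_SHADER_2D_IMAGE_MULTISAMPLE_4,
      GPU_SHADER_2D_IMAGE_MULTISAMPLE_8,
      GPU_SHADER_2D_IMAGE_MULTISAMPLE_16,
  };
  int index = 0;
  while (index < 3 && (2 << index) < samples) {
    index++; /* 2 -> 0, 4 -> 1, 8 -> 2, 16 -> 3 */
  }
  return use_depth ? with_depth[index] : without_depth[index];
}

A table keeps the depth and color variants in lockstep, so adding a new sample count only touches two array entries.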
/** \} */
@@ -451,111 +458,110 @@ void DRW_multisamples_resolve(GPUTexture *src_depth, GPUTexture *src_color, bool
void *drw_viewport_engine_data_ensure(void *engine_type)
{
- void *data = GPU_viewport_engine_data_get(DST.viewport, engine_type);
+ void *data = GPU_viewport_engine_data_get(DST.viewport, engine_type);
- if (data == NULL) {
- data = GPU_viewport_engine_data_create(DST.viewport, engine_type);
- }
- return data;
+ if (data == NULL) {
+ data = GPU_viewport_engine_data_create(DST.viewport, engine_type);
+ }
+ return data;
}
void DRW_engine_viewport_data_size_get(
- const void *engine_type_v,
- int *r_fbl_len, int *r_txl_len, int *r_psl_len, int *r_stl_len)
+ const void *engine_type_v, int *r_fbl_len, int *r_txl_len, int *r_psl_len, int *r_stl_len)
{
- const DrawEngineType *engine_type = engine_type_v;
+ const DrawEngineType *engine_type = engine_type_v;
- if (r_fbl_len) {
- *r_fbl_len = engine_type->vedata_size->fbl_len;
- }
- if (r_txl_len) {
- *r_txl_len = engine_type->vedata_size->txl_len;
- }
- if (r_psl_len) {
- *r_psl_len = engine_type->vedata_size->psl_len;
- }
- if (r_stl_len) {
- *r_stl_len = engine_type->vedata_size->stl_len;
- }
+ if (r_fbl_len) {
+ *r_fbl_len = engine_type->vedata_size->fbl_len;
+ }
+ if (r_txl_len) {
+ *r_txl_len = engine_type->vedata_size->txl_len;
+ }
+ if (r_psl_len) {
+ *r_psl_len = engine_type->vedata_size->psl_len;
+ }
+ if (r_stl_len) {
+ *r_stl_len = engine_type->vedata_size->stl_len;
+ }
}
/* WARNING: only use for custom pipeline. 99% of the time, you don't want to use this. */
void DRW_render_viewport_size_set(int size[2])
{
- DST.size[0] = size[0];
- DST.size[1] = size[1];
+ DST.size[0] = size[0];
+ DST.size[1] = size[1];
}
const float *DRW_viewport_size_get(void)
{
- return DST.size;
+ return DST.size;
}
const float *DRW_viewport_invert_size_get(void)
{
- return DST.inv_size;
+ return DST.inv_size;
}
const float *DRW_viewport_screenvecs_get(void)
{
- return &DST.screenvecs[0][0];
+ return &DST.screenvecs[0][0];
}
const float *DRW_viewport_pixelsize_get(void)
{
- return &DST.pixsize;
+ return &DST.pixsize;
}
static void drw_viewport_cache_resize(void)
{
- /* Release the memiter before clearing the mempools that references them */
- GPU_viewport_cache_release(DST.viewport);
+  /* Release the memiter before clearing the mempools that reference them */
+ GPU_viewport_cache_release(DST.viewport);
- if (DST.vmempool != NULL) {
- BLI_mempool_clear_ex(DST.vmempool->calls, BLI_mempool_len(DST.vmempool->calls));
- BLI_mempool_clear_ex(DST.vmempool->states, BLI_mempool_len(DST.vmempool->states));
- BLI_mempool_clear_ex(DST.vmempool->shgroups, BLI_mempool_len(DST.vmempool->shgroups));
- BLI_mempool_clear_ex(DST.vmempool->uniforms, BLI_mempool_len(DST.vmempool->uniforms));
- BLI_mempool_clear_ex(DST.vmempool->passes, BLI_mempool_len(DST.vmempool->passes));
- }
+ if (DST.vmempool != NULL) {
+ BLI_mempool_clear_ex(DST.vmempool->calls, BLI_mempool_len(DST.vmempool->calls));
+ BLI_mempool_clear_ex(DST.vmempool->states, BLI_mempool_len(DST.vmempool->states));
+ BLI_mempool_clear_ex(DST.vmempool->shgroups, BLI_mempool_len(DST.vmempool->shgroups));
+ BLI_mempool_clear_ex(DST.vmempool->uniforms, BLI_mempool_len(DST.vmempool->uniforms));
+ BLI_mempool_clear_ex(DST.vmempool->passes, BLI_mempool_len(DST.vmempool->passes));
+ }
- DRW_instance_data_list_free_unused(DST.idatalist);
- DRW_instance_data_list_resize(DST.idatalist);
+ DRW_instance_data_list_free_unused(DST.idatalist);
+ DRW_instance_data_list_resize(DST.idatalist);
}
/* Not a viewport variable, we could split this out. */
static void drw_context_state_init(void)
{
- if (DST.draw_ctx.obact) {
- DST.draw_ctx.object_mode = DST.draw_ctx.obact->mode;
- }
- else {
- DST.draw_ctx.object_mode = OB_MODE_OBJECT;
- }
-
- /* Edit object. */
- if (DST.draw_ctx.object_mode & OB_MODE_EDIT) {
- DST.draw_ctx.object_edit = DST.draw_ctx.obact;
- }
- else {
- DST.draw_ctx.object_edit = NULL;
- }
-
- /* Pose object. */
- if (DST.draw_ctx.object_mode & OB_MODE_POSE) {
- DST.draw_ctx.object_pose = DST.draw_ctx.obact;
- }
- else if (DST.draw_ctx.object_mode & OB_MODE_WEIGHT_PAINT) {
- DST.draw_ctx.object_pose = BKE_object_pose_armature_get(DST.draw_ctx.obact);
- }
- else {
- DST.draw_ctx.object_pose = NULL;
- }
-
- DST.draw_ctx.sh_cfg = GPU_SHADER_CFG_DEFAULT;
- if (DST.draw_ctx.rv3d && DST.draw_ctx.rv3d->rflag & RV3D_CLIPPING) {
- DST.draw_ctx.sh_cfg = GPU_SHADER_CFG_CLIPPED;
- }
+ if (DST.draw_ctx.obact) {
+ DST.draw_ctx.object_mode = DST.draw_ctx.obact->mode;
+ }
+ else {
+ DST.draw_ctx.object_mode = OB_MODE_OBJECT;
+ }
+
+ /* Edit object. */
+ if (DST.draw_ctx.object_mode & OB_MODE_EDIT) {
+ DST.draw_ctx.object_edit = DST.draw_ctx.obact;
+ }
+ else {
+ DST.draw_ctx.object_edit = NULL;
+ }
+
+ /* Pose object. */
+ if (DST.draw_ctx.object_mode & OB_MODE_POSE) {
+ DST.draw_ctx.object_pose = DST.draw_ctx.obact;
+ }
+ else if (DST.draw_ctx.object_mode & OB_MODE_WEIGHT_PAINT) {
+ DST.draw_ctx.object_pose = BKE_object_pose_armature_get(DST.draw_ctx.obact);
+ }
+ else {
+ DST.draw_ctx.object_pose = NULL;
+ }
+
+ DST.draw_ctx.sh_cfg = GPU_SHADER_CFG_DEFAULT;
+ if (DST.draw_ctx.rv3d && DST.draw_ctx.rv3d->rflag & RV3D_CLIPPING) {
+ DST.draw_ctx.sh_cfg = GPU_SHADER_CFG_CLIPPED;
+ }
}
/* It also stores viewport variable to an immutable place: DST
@@ -564,245 +570,248 @@ static void drw_context_state_init(void)
* if this value change per viewport */
static void drw_viewport_var_init(void)
{
- RegionView3D *rv3d = DST.draw_ctx.rv3d;
- /* Refresh DST.size */
- if (DST.viewport) {
- int size[2];
- GPU_viewport_size_get(DST.viewport, size);
- DST.size[0] = size[0];
- DST.size[1] = size[1];
- DST.inv_size[0] = 1.0f / size[0];
- DST.inv_size[1] = 1.0f / size[1];
-
- DefaultFramebufferList *fbl = (DefaultFramebufferList *)GPU_viewport_framebuffer_list_get(DST.viewport);
- DST.default_framebuffer = fbl->default_fb;
-
- DST.vmempool = GPU_viewport_mempool_get(DST.viewport);
-
- if (DST.vmempool->calls == NULL) {
- DST.vmempool->calls = BLI_mempool_create(sizeof(DRWCall), 0, 512, 0);
- }
- if (DST.vmempool->states == NULL) {
- DST.vmempool->states = BLI_mempool_create(sizeof(DRWCallState), 0, 512, BLI_MEMPOOL_ALLOW_ITER);
- }
- if (DST.vmempool->shgroups == NULL) {
- DST.vmempool->shgroups = BLI_mempool_create(sizeof(DRWShadingGroup), 0, 256, 0);
- }
- if (DST.vmempool->uniforms == NULL) {
- DST.vmempool->uniforms = BLI_mempool_create(sizeof(DRWUniform), 0, 512, 0);
- }
- if (DST.vmempool->passes == NULL) {
- DST.vmempool->passes = BLI_mempool_create(sizeof(DRWPass), 0, 64, 0);
- }
-
- DST.idatalist = GPU_viewport_instance_data_list_get(DST.viewport);
- DRW_instance_data_list_reset(DST.idatalist);
- }
- else {
- DST.size[0] = 0;
- DST.size[1] = 0;
-
- DST.inv_size[0] = 0;
- DST.inv_size[1] = 0;
-
- DST.default_framebuffer = NULL;
- DST.vmempool = NULL;
- }
-
- if (rv3d != NULL) {
- /* Refresh DST.screenvecs */
- copy_v3_v3(DST.screenvecs[0], rv3d->viewinv[0]);
- copy_v3_v3(DST.screenvecs[1], rv3d->viewinv[1]);
- normalize_v3(DST.screenvecs[0]);
- normalize_v3(DST.screenvecs[1]);
-
- /* Refresh DST.pixelsize */
- DST.pixsize = rv3d->pixsize;
-
- copy_m4_m4(DST.original_mat.mat[DRW_MAT_PERS], rv3d->persmat);
- copy_m4_m4(DST.original_mat.mat[DRW_MAT_PERSINV], rv3d->persinv);
- copy_m4_m4(DST.original_mat.mat[DRW_MAT_VIEW], rv3d->viewmat);
- copy_m4_m4(DST.original_mat.mat[DRW_MAT_VIEWINV], rv3d->viewinv);
- copy_m4_m4(DST.original_mat.mat[DRW_MAT_WIN], rv3d->winmat);
- invert_m4_m4(DST.original_mat.mat[DRW_MAT_WININV], rv3d->winmat);
-
- memcpy(DST.view_data.matstate.mat, DST.original_mat.mat, sizeof(DST.original_mat.mat));
-
- copy_v4_v4(DST.view_data.viewcamtexcofac, rv3d->viewcamtexcofac);
- }
- else {
- copy_v4_fl4(DST.view_data.viewcamtexcofac, 1.0f, 1.0f, 0.0f, 0.0f);
- }
-
- /* Reset facing */
- DST.frontface = GL_CCW;
- DST.backface = GL_CW;
- glFrontFace(DST.frontface);
-
- if (DST.draw_ctx.object_edit) {
- ED_view3d_init_mats_rv3d(DST.draw_ctx.object_edit, rv3d);
- }
-
- /* Alloc array of texture reference. */
- memset(&DST.RST, 0x0, sizeof(DST.RST));
-
- if (G_draw.view_ubo == NULL) {
- G_draw.view_ubo = DRW_uniformbuffer_create(sizeof(ViewUboStorage), NULL);
- }
-
- DST.override_mat = 0;
- DST.dirty_mat = true;
- DST.state_cache_id = 1;
-
- DST.clipping.updated = false;
-
- memset(DST.object_instance_data, 0x0, sizeof(DST.object_instance_data));
+ RegionView3D *rv3d = DST.draw_ctx.rv3d;
+ /* Refresh DST.size */
+ if (DST.viewport) {
+ int size[2];
+ GPU_viewport_size_get(DST.viewport, size);
+ DST.size[0] = size[0];
+ DST.size[1] = size[1];
+ DST.inv_size[0] = 1.0f / size[0];
+ DST.inv_size[1] = 1.0f / size[1];
+
+ DefaultFramebufferList *fbl = (DefaultFramebufferList *)GPU_viewport_framebuffer_list_get(
+ DST.viewport);
+ DST.default_framebuffer = fbl->default_fb;
+
+ DST.vmempool = GPU_viewport_mempool_get(DST.viewport);
+
+ if (DST.vmempool->calls == NULL) {
+ DST.vmempool->calls = BLI_mempool_create(sizeof(DRWCall), 0, 512, 0);
+ }
+ if (DST.vmempool->states == NULL) {
+ DST.vmempool->states = BLI_mempool_create(
+ sizeof(DRWCallState), 0, 512, BLI_MEMPOOL_ALLOW_ITER);
+ }
+ if (DST.vmempool->shgroups == NULL) {
+ DST.vmempool->shgroups = BLI_mempool_create(sizeof(DRWShadingGroup), 0, 256, 0);
+ }
+ if (DST.vmempool->uniforms == NULL) {
+ DST.vmempool->uniforms = BLI_mempool_create(sizeof(DRWUniform), 0, 512, 0);
+ }
+ if (DST.vmempool->passes == NULL) {
+ DST.vmempool->passes = BLI_mempool_create(sizeof(DRWPass), 0, 64, 0);
+ }
+
+ DST.idatalist = GPU_viewport_instance_data_list_get(DST.viewport);
+ DRW_instance_data_list_reset(DST.idatalist);
+ }
+ else {
+ DST.size[0] = 0;
+ DST.size[1] = 0;
+
+ DST.inv_size[0] = 0;
+ DST.inv_size[1] = 0;
+
+ DST.default_framebuffer = NULL;
+ DST.vmempool = NULL;
+ }
+
+ if (rv3d != NULL) {
+ /* Refresh DST.screenvecs */
+ copy_v3_v3(DST.screenvecs[0], rv3d->viewinv[0]);
+ copy_v3_v3(DST.screenvecs[1], rv3d->viewinv[1]);
+ normalize_v3(DST.screenvecs[0]);
+ normalize_v3(DST.screenvecs[1]);
+
+ /* Refresh DST.pixelsize */
+ DST.pixsize = rv3d->pixsize;
+
+ copy_m4_m4(DST.original_mat.mat[DRW_MAT_PERS], rv3d->persmat);
+ copy_m4_m4(DST.original_mat.mat[DRW_MAT_PERSINV], rv3d->persinv);
+ copy_m4_m4(DST.original_mat.mat[DRW_MAT_VIEW], rv3d->viewmat);
+ copy_m4_m4(DST.original_mat.mat[DRW_MAT_VIEWINV], rv3d->viewinv);
+ copy_m4_m4(DST.original_mat.mat[DRW_MAT_WIN], rv3d->winmat);
+ invert_m4_m4(DST.original_mat.mat[DRW_MAT_WININV], rv3d->winmat);
+
+ memcpy(DST.view_data.matstate.mat, DST.original_mat.mat, sizeof(DST.original_mat.mat));
+
+ copy_v4_v4(DST.view_data.viewcamtexcofac, rv3d->viewcamtexcofac);
+ }
+ else {
+ copy_v4_fl4(DST.view_data.viewcamtexcofac, 1.0f, 1.0f, 0.0f, 0.0f);
+ }
+
+ /* Reset facing */
+ DST.frontface = GL_CCW;
+ DST.backface = GL_CW;
+ glFrontFace(DST.frontface);
+
+ if (DST.draw_ctx.object_edit) {
+ ED_view3d_init_mats_rv3d(DST.draw_ctx.object_edit, rv3d);
+ }
+
+  /* Alloc array of texture references. */
+ memset(&DST.RST, 0x0, sizeof(DST.RST));
+
+ if (G_draw.view_ubo == NULL) {
+ G_draw.view_ubo = DRW_uniformbuffer_create(sizeof(ViewUboStorage), NULL);
+ }
+
+ DST.override_mat = 0;
+ DST.dirty_mat = true;
+ DST.state_cache_id = 1;
+
+ DST.clipping.updated = false;
+
+ memset(DST.object_instance_data, 0x0, sizeof(DST.object_instance_data));
}
void DRW_viewport_matrix_get(float mat[4][4], DRWViewportMatrixType type)
{
- BLI_assert(type >= 0 && type < DRW_MAT_COUNT);
- /* Can't use this in render mode. */
- BLI_assert(((DST.override_mat & (1 << type)) != 0) || DST.draw_ctx.rv3d != NULL);
+ BLI_assert(type >= 0 && type < DRW_MAT_COUNT);
+ /* Can't use this in render mode. */
+ BLI_assert(((DST.override_mat & (1 << type)) != 0) || DST.draw_ctx.rv3d != NULL);
- copy_m4_m4(mat, DST.view_data.matstate.mat[type]);
+ copy_m4_m4(mat, DST.view_data.matstate.mat[type]);
}
void DRW_viewport_matrix_get_all(DRWMatrixState *state)
{
- memcpy(state, DST.view_data.matstate.mat, sizeof(DRWMatrixState));
+ memcpy(state, DST.view_data.matstate.mat, sizeof(DRWMatrixState));
}
void DRW_viewport_matrix_override_set(const float mat[4][4], DRWViewportMatrixType type)
{
- BLI_assert(type < DRW_MAT_COUNT);
- copy_m4_m4(DST.view_data.matstate.mat[type], mat);
- DST.override_mat |= (1 << type);
- DST.dirty_mat = true;
- DST.clipping.updated = false;
+ BLI_assert(type < DRW_MAT_COUNT);
+ copy_m4_m4(DST.view_data.matstate.mat[type], mat);
+ DST.override_mat |= (1 << type);
+ DST.dirty_mat = true;
+ DST.clipping.updated = false;
}
void DRW_viewport_matrix_override_unset(DRWViewportMatrixType type)
{
- BLI_assert(type < DRW_MAT_COUNT);
- copy_m4_m4(DST.view_data.matstate.mat[type], DST.original_mat.mat[type]);
- DST.override_mat &= ~(1 << type);
- DST.dirty_mat = true;
- DST.clipping.updated = false;
+ BLI_assert(type < DRW_MAT_COUNT);
+ copy_m4_m4(DST.view_data.matstate.mat[type], DST.original_mat.mat[type]);
+ DST.override_mat &= ~(1 << type);
+ DST.dirty_mat = true;
+ DST.clipping.updated = false;
}
void DRW_viewport_matrix_override_set_all(DRWMatrixState *state)
{
- memcpy(DST.view_data.matstate.mat, state, sizeof(DRWMatrixState));
- DST.override_mat = 0xFFFFFF;
- DST.dirty_mat = true;
- DST.clipping.updated = false;
+ memcpy(DST.view_data.matstate.mat, state, sizeof(DRWMatrixState));
+ DST.override_mat = 0xFFFFFF;
+ DST.dirty_mat = true;
+ DST.clipping.updated = false;
}
void DRW_viewport_matrix_override_unset_all(void)
{
- memcpy(DST.view_data.matstate.mat, DST.original_mat.mat, sizeof(DRWMatrixState));
- DST.override_mat = 0;
- DST.dirty_mat = true;
- DST.clipping.updated = false;
+ memcpy(DST.view_data.matstate.mat, DST.original_mat.mat, sizeof(DRWMatrixState));
+ DST.override_mat = 0;
+ DST.dirty_mat = true;
+ DST.clipping.updated = false;
}
bool DRW_viewport_is_persp_get(void)
{
- RegionView3D *rv3d = DST.draw_ctx.rv3d;
- if (rv3d) {
- return rv3d->is_persp;
- }
- else {
- return DST.view_data.matstate.mat[DRW_MAT_WIN][3][3] == 0.0f;
- }
+ RegionView3D *rv3d = DST.draw_ctx.rv3d;
+ if (rv3d) {
+ return rv3d->is_persp;
+ }
+ else {
+ return DST.view_data.matstate.mat[DRW_MAT_WIN][3][3] == 0.0f;
+ }
}
float DRW_viewport_near_distance_get(void)
{
- float projmat[4][4];
- DRW_viewport_matrix_get(projmat, DRW_MAT_WIN);
+ float projmat[4][4];
+ DRW_viewport_matrix_get(projmat, DRW_MAT_WIN);
- if (DRW_viewport_is_persp_get()) {
- return -projmat[3][2] / (projmat[2][2] - 1.0f);
- }
- else {
- return -(projmat[3][2] + 1.0f) / projmat[2][2];
- }
+ if (DRW_viewport_is_persp_get()) {
+ return -projmat[3][2] / (projmat[2][2] - 1.0f);
+ }
+ else {
+ return -(projmat[3][2] + 1.0f) / projmat[2][2];
+ }
}
float DRW_viewport_far_distance_get(void)
{
- float projmat[4][4];
- DRW_viewport_matrix_get(projmat, DRW_MAT_WIN);
+ float projmat[4][4];
+ DRW_viewport_matrix_get(projmat, DRW_MAT_WIN);
- if (DRW_viewport_is_persp_get()) {
- return -projmat[3][2] / (projmat[2][2] + 1.0f);
- }
- else {
- return -(projmat[3][2] - 1.0f) / projmat[2][2];
- }
+ if (DRW_viewport_is_persp_get()) {
+ return -projmat[3][2] / (projmat[2][2] + 1.0f);
+ }
+ else {
+ return -(projmat[3][2] - 1.0f) / projmat[2][2];
+ }
}
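Both helpers read the clip planes straight out of the projection matrix. Assuming the textbook OpenGL conventions (an assumption for this note, not a quote of Blender's matrix code), a perspective matrix stores P[2][2] = -(f + n) / (f - n) and P[3][2] = -2fn / (f - n) in [column][row] indexing, and the expressions above reduce algebraically:

  -P[3][2] / (P[2][2] - 1) = (2fn / (f - n)) / (-2f / (f - n)) = -n
  -P[3][2] / (P[2][2] + 1) = (2fn / (f - n)) / (-2n / (f - n)) = -f

i.e. the view-space depths of the near and far planes (view space looks down -z, so the final sign depends on the convention of the matrix actually stored in DRW_MAT_WIN). The orthographic branches reduce the same way with P[2][2] = -2 / (f - n) and P[3][2] = -(f + n) / (f - n).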
DefaultFramebufferList *DRW_viewport_framebuffer_list_get(void)
{
- return GPU_viewport_framebuffer_list_get(DST.viewport);
+ return GPU_viewport_framebuffer_list_get(DST.viewport);
}
DefaultTextureList *DRW_viewport_texture_list_get(void)
{
- return GPU_viewport_texture_list_get(DST.viewport);
+ return GPU_viewport_texture_list_get(DST.viewport);
}
void DRW_viewport_request_redraw(void)
{
- GPU_viewport_tag_update(DST.viewport);
+ GPU_viewport_tag_update(DST.viewport);
}
/** \} */
-
/* -------------------------------------------------------------------- */
/** \name ViewLayers (DRW_scenelayer)
* \{ */
void *DRW_view_layer_engine_data_get(DrawEngineType *engine_type)
{
- for (ViewLayerEngineData *sled = DST.draw_ctx.view_layer->drawdata.first; sled; sled = sled->next) {
- if (sled->engine_type == engine_type) {
- return sled->storage;
- }
- }
- return NULL;
+ for (ViewLayerEngineData *sled = DST.draw_ctx.view_layer->drawdata.first; sled;
+ sled = sled->next) {
+ if (sled->engine_type == engine_type) {
+ return sled->storage;
+ }
+ }
+ return NULL;
}
-void **DRW_view_layer_engine_data_ensure_ex(
- ViewLayer *view_layer, DrawEngineType *engine_type, void (*callback)(void *storage))
+void **DRW_view_layer_engine_data_ensure_ex(ViewLayer *view_layer,
+ DrawEngineType *engine_type,
+ void (*callback)(void *storage))
{
- ViewLayerEngineData *sled;
+ ViewLayerEngineData *sled;
- for (sled = view_layer->drawdata.first; sled; sled = sled->next) {
- if (sled->engine_type == engine_type) {
- return &sled->storage;
- }
- }
+ for (sled = view_layer->drawdata.first; sled; sled = sled->next) {
+ if (sled->engine_type == engine_type) {
+ return &sled->storage;
+ }
+ }
- sled = MEM_callocN(sizeof(ViewLayerEngineData), "ViewLayerEngineData");
- sled->engine_type = engine_type;
- sled->free = callback;
- BLI_addtail(&view_layer->drawdata, sled);
+ sled = MEM_callocN(sizeof(ViewLayerEngineData), "ViewLayerEngineData");
+ sled->engine_type = engine_type;
+ sled->free = callback;
+ BLI_addtail(&view_layer->drawdata, sled);
- return &sled->storage;
+ return &sled->storage;
}
-void **DRW_view_layer_engine_data_ensure(DrawEngineType *engine_type, void (*callback)(void *storage))
+void **DRW_view_layer_engine_data_ensure(DrawEngineType *engine_type,
+ void (*callback)(void *storage))
{
- return DRW_view_layer_engine_data_ensure_ex(DST.draw_ctx.view_layer, engine_type, callback);
+ return DRW_view_layer_engine_data_ensure_ex(DST.draw_ctx.view_layer, engine_type, callback);
}
/** \} */
-
/* -------------------------------------------------------------------- */
/** \name Draw Data (DRW_drawdata)
* \{ */
@@ -812,36 +821,36 @@ void **DRW_view_layer_engine_data_ensure(DrawEngineType *engine_type, void (*cal
* should have the same arrangement in their structs.
*/
typedef struct IdDdtTemplate {
- ID id;
- struct AnimData *adt;
- DrawDataList drawdata;
+ ID id;
+ struct AnimData *adt;
+ DrawDataList drawdata;
} IdDdtTemplate;
/* Check if ID can have DrawData */
static bool id_type_can_have_drawdata(const short id_type)
{
- /* Only some ID-blocks have this info for now */
- /* TODO: finish adding this for the other blocktypes */
- switch (id_type) {
- /* has DrawData */
- case ID_OB:
- case ID_WO:
- return true;
+ /* Only some ID-blocks have this info for now */
+ /* TODO: finish adding this for the other blocktypes */
+ switch (id_type) {
+ /* has DrawData */
+ case ID_OB:
+ case ID_WO:
+ return true;
- /* no DrawData */
- default:
- return false;
- }
+ /* no DrawData */
+ default:
+ return false;
+ }
}
static bool id_can_have_drawdata(const ID *id)
{
- /* sanity check */
- if (id == NULL) {
- return false;
- }
+ /* sanity check */
+ if (id == NULL) {
+ return false;
+ }
- return id_type_can_have_drawdata(GS(id->name));
+ return id_type_can_have_drawdata(GS(id->name));
}
/* Get DrawData from the given ID-block. In order for this to work, we assume that
@@ -849,324 +858,322 @@ static bool id_can_have_drawdata(const ID *id)
*/
DrawDataList *DRW_drawdatalist_from_id(ID *id)
{
- /* only some ID-blocks have this info for now, so we cast the
- * types that do to be of type IdDdtTemplate, and extract the
- * DrawData that way
- */
- if (id_can_have_drawdata(id)) {
- IdDdtTemplate *idt = (IdDdtTemplate *)id;
- return &idt->drawdata;
- }
- else {
- return NULL;
- }
+ /* only some ID-blocks have this info for now, so we cast the
+ * types that do to be of type IdDdtTemplate, and extract the
+ * DrawData that way
+ */
+ if (id_can_have_drawdata(id)) {
+ IdDdtTemplate *idt = (IdDdtTemplate *)id;
+ return &idt->drawdata;
+ }
+ else {
+ return NULL;
+ }
}
DrawData *DRW_drawdata_get(ID *id, DrawEngineType *engine_type)
{
- DrawDataList *drawdata = DRW_drawdatalist_from_id(id);
-
- if (drawdata == NULL) {
- return NULL;
- }
-
- LISTBASE_FOREACH(DrawData *, dd, drawdata) {
- if (dd->engine_type == engine_type) {
- return dd;
- }
- }
- return NULL;
-}
-
-DrawData *DRW_drawdata_ensure(
- ID *id,
- DrawEngineType *engine_type,
- size_t size,
- DrawDataInitCb init_cb,
- DrawDataFreeCb free_cb)
-{
- BLI_assert(size >= sizeof(DrawData));
- BLI_assert(id_can_have_drawdata(id));
- /* Try to re-use existing data. */
- DrawData *dd = DRW_drawdata_get(id, engine_type);
- if (dd != NULL) {
- return dd;
- }
-
- DrawDataList *drawdata = DRW_drawdatalist_from_id(id);
-
- /* Allocate new data. */
- if ((GS(id->name) == ID_OB) && (((Object *)id)->base_flag & BASE_FROM_DUPLI) != 0) {
- /* NOTE: data is not persistent in this case. It is reset each redraw. */
- BLI_assert(free_cb == NULL); /* No callback allowed. */
- /* Round to sizeof(float) for DRW_instance_data_request(). */
- const size_t t = sizeof(float) - 1;
- size = (size + t) & ~t;
- size_t fsize = size / sizeof(float);
- BLI_assert(fsize < MAX_INSTANCE_DATA_SIZE);
- if (DST.object_instance_data[fsize] == NULL) {
- DST.object_instance_data[fsize] = DRW_instance_data_request(DST.idatalist, fsize);
- }
- dd = (DrawData *)DRW_instance_data_next(DST.object_instance_data[fsize]);
- memset(dd, 0, size);
- }
- else {
- dd = MEM_callocN(size, "DrawData");
- }
- dd->engine_type = engine_type;
- dd->free = free_cb;
- /* Perform user-side initialization, if needed. */
- if (init_cb != NULL) {
- init_cb(dd);
- }
- /* Register in the list. */
- BLI_addtail((ListBase *)drawdata, dd);
- return dd;
+ DrawDataList *drawdata = DRW_drawdatalist_from_id(id);
+
+ if (drawdata == NULL) {
+ return NULL;
+ }
+
+ LISTBASE_FOREACH (DrawData *, dd, drawdata) {
+ if (dd->engine_type == engine_type) {
+ return dd;
+ }
+ }
+ return NULL;
+}
+
+DrawData *DRW_drawdata_ensure(ID *id,
+ DrawEngineType *engine_type,
+ size_t size,
+ DrawDataInitCb init_cb,
+ DrawDataFreeCb free_cb)
+{
+ BLI_assert(size >= sizeof(DrawData));
+ BLI_assert(id_can_have_drawdata(id));
+ /* Try to re-use existing data. */
+ DrawData *dd = DRW_drawdata_get(id, engine_type);
+ if (dd != NULL) {
+ return dd;
+ }
+
+ DrawDataList *drawdata = DRW_drawdatalist_from_id(id);
+
+ /* Allocate new data. */
+ if ((GS(id->name) == ID_OB) && (((Object *)id)->base_flag & BASE_FROM_DUPLI) != 0) {
+ /* NOTE: data is not persistent in this case. It is reset each redraw. */
+ BLI_assert(free_cb == NULL); /* No callback allowed. */
+ /* Round to sizeof(float) for DRW_instance_data_request(). */
+ const size_t t = sizeof(float) - 1;
+ size = (size + t) & ~t;
+ size_t fsize = size / sizeof(float);
+ BLI_assert(fsize < MAX_INSTANCE_DATA_SIZE);
+ if (DST.object_instance_data[fsize] == NULL) {
+ DST.object_instance_data[fsize] = DRW_instance_data_request(DST.idatalist, fsize);
+ }
+ dd = (DrawData *)DRW_instance_data_next(DST.object_instance_data[fsize]);
+ memset(dd, 0, size);
+ }
+ else {
+ dd = MEM_callocN(size, "DrawData");
+ }
+ dd->engine_type = engine_type;
+ dd->free = free_cb;
+ /* Perform user-side initialization, if needed. */
+ if (init_cb != NULL) {
+ init_cb(dd);
+ }
+ /* Register in the list. */
+ BLI_addtail((ListBase *)drawdata, dd);
+ return dd;
}
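The dupli branch above rounds size up to a multiple of sizeof(float) with the mask trick size = (size + t) & ~t, where t = sizeof(float) - 1; the trick is valid exactly because sizeof(float) is a power of two on the supported platforms. A standalone check of the identity (sketch only):

#include <assert.h>
#include <stddef.h>

/* Round size up to the next multiple of align; align must be a power of
 * two, just as the DRW_drawdata_ensure() code above assumes. */
static size_t round_up_pow2(size_t size, size_t align)
{
  const size_t t = align - 1;
  return (size + t) & ~t;
}

int main(void)
{
  assert(round_up_pow2(0, 4) == 0);
  assert(round_up_pow2(1, 4) == 4);
  assert(round_up_pow2(4, 4) == 4); /* already aligned: unchanged */
  assert(round_up_pow2(5, 4) == 8);
  assert(round_up_pow2(13, sizeof(float)) == 16); /* assuming 4-byte float */
  return 0;
}

The rounding matters because the instance-data pool is addressed in float-sized slots: size / sizeof(float) must not truncate.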
void DRW_drawdata_free(ID *id)
{
- DrawDataList *drawdata = DRW_drawdatalist_from_id(id);
+ DrawDataList *drawdata = DRW_drawdatalist_from_id(id);
- if (drawdata == NULL) {
- return;
- }
+ if (drawdata == NULL) {
+ return;
+ }
- LISTBASE_FOREACH(DrawData *, dd, drawdata) {
- if (dd->free != NULL) {
- dd->free(dd);
- }
- }
+ LISTBASE_FOREACH (DrawData *, dd, drawdata) {
+ if (dd->free != NULL) {
+ dd->free(dd);
+ }
+ }
- BLI_freelistN((ListBase *)drawdata);
+ BLI_freelistN((ListBase *)drawdata);
}
/* Unlink (but don't free) the drawdata from the DrawDataList if the ID is an OB from dupli. */
static void drw_drawdata_unlink_dupli(ID *id)
{
- if ((GS(id->name) == ID_OB) && (((Object *)id)->base_flag & BASE_FROM_DUPLI) != 0) {
- DrawDataList *drawdata = DRW_drawdatalist_from_id(id);
+ if ((GS(id->name) == ID_OB) && (((Object *)id)->base_flag & BASE_FROM_DUPLI) != 0) {
+ DrawDataList *drawdata = DRW_drawdatalist_from_id(id);
- if (drawdata == NULL) {
- return;
- }
+ if (drawdata == NULL) {
+ return;
+ }
- BLI_listbase_clear((ListBase *)drawdata);
- }
+ BLI_listbase_clear((ListBase *)drawdata);
+ }
}
/** \} */
-
/* -------------------------------------------------------------------- */
/** \name Rendering (DRW_engines)
* \{ */
static void drw_engines_init(void)
{
- for (LinkData *link = DST.enabled_engines.first; link; link = link->next) {
- DrawEngineType *engine = link->data;
- ViewportEngineData *data = drw_viewport_engine_data_ensure(engine);
- PROFILE_START(stime);
+ for (LinkData *link = DST.enabled_engines.first; link; link = link->next) {
+ DrawEngineType *engine = link->data;
+ ViewportEngineData *data = drw_viewport_engine_data_ensure(engine);
+ PROFILE_START(stime);
- if (engine->engine_init) {
- engine->engine_init(data);
- }
+ if (engine->engine_init) {
+ engine->engine_init(data);
+ }
- PROFILE_END_UPDATE(data->init_time, stime);
- }
+ PROFILE_END_UPDATE(data->init_time, stime);
+ }
}
static void drw_engines_cache_init(void)
{
- for (LinkData *link = DST.enabled_engines.first; link; link = link->next) {
- DrawEngineType *engine = link->data;
- ViewportEngineData *data = drw_viewport_engine_data_ensure(engine);
+ for (LinkData *link = DST.enabled_engines.first; link; link = link->next) {
+ DrawEngineType *engine = link->data;
+ ViewportEngineData *data = drw_viewport_engine_data_ensure(engine);
- if (data->text_draw_cache) {
- DRW_text_cache_destroy(data->text_draw_cache);
- data->text_draw_cache = NULL;
- }
- if (DST.text_store_p == NULL) {
- DST.text_store_p = &data->text_draw_cache;
- }
+ if (data->text_draw_cache) {
+ DRW_text_cache_destroy(data->text_draw_cache);
+ data->text_draw_cache = NULL;
+ }
+ if (DST.text_store_p == NULL) {
+ DST.text_store_p = &data->text_draw_cache;
+ }
- if (engine->cache_init) {
- engine->cache_init(data);
- }
- }
+ if (engine->cache_init) {
+ engine->cache_init(data);
+ }
+ }
}
static void drw_engines_world_update(Scene *scene)
{
- if (scene->world == NULL) {
- return;
- }
+ if (scene->world == NULL) {
+ return;
+ }
- for (LinkData *link = DST.enabled_engines.first; link; link = link->next) {
- DrawEngineType *engine = link->data;
- ViewportEngineData *data = drw_viewport_engine_data_ensure(engine);
+ for (LinkData *link = DST.enabled_engines.first; link; link = link->next) {
+ DrawEngineType *engine = link->data;
+ ViewportEngineData *data = drw_viewport_engine_data_ensure(engine);
- if (engine->id_update) {
- engine->id_update(data, &scene->world->id);
- }
- }
+ if (engine->id_update) {
+ engine->id_update(data, &scene->world->id);
+ }
+ }
}
static void drw_engines_cache_populate(Object *ob)
{
- DST.ob_state = NULL;
+ DST.ob_state = NULL;
- /* HACK: DrawData is copied by COW from the duplicated object.
- * This is valid for IDs that cannot be instantiated but this
- * is not what we want in this case so we clear the pointer
- * ourselves here. */
- drw_drawdata_unlink_dupli((ID *)ob);
+ /* HACK: DrawData is copied by COW from the duplicated object.
+   * This is valid for IDs that cannot be instantiated, but it is not what
+   * we want in this case, so we clear the pointer ourselves here. */
+ drw_drawdata_unlink_dupli((ID *)ob);
- for (LinkData *link = DST.enabled_engines.first; link; link = link->next) {
- DrawEngineType *engine = link->data;
- ViewportEngineData *data = drw_viewport_engine_data_ensure(engine);
+ for (LinkData *link = DST.enabled_engines.first; link; link = link->next) {
+ DrawEngineType *engine = link->data;
+ ViewportEngineData *data = drw_viewport_engine_data_ensure(engine);
- if (engine->id_update) {
- engine->id_update(data, &ob->id);
- }
+ if (engine->id_update) {
+ engine->id_update(data, &ob->id);
+ }
- if (engine->cache_populate) {
- engine->cache_populate(data, ob);
- }
- }
+ if (engine->cache_populate) {
+ engine->cache_populate(data, ob);
+ }
+ }
- /* TODO: in the future it would be nice to generate once for all viewports.
- * But we need threaded DRW manager first. */
- drw_batch_cache_generate_requested(ob);
+ /* TODO: in the future it would be nice to generate once for all viewports.
+   * But we need a threaded DRW manager first. */
+ drw_batch_cache_generate_requested(ob);
- /* ... and clearing it here too because theses draw data are
- * from a mempool and must not be free individually by depsgraph. */
- drw_drawdata_unlink_dupli((ID *)ob);
+  /* ... and clearing it here too because these draw data are
+   * from a mempool and must not be freed individually by depsgraph. */
+ drw_drawdata_unlink_dupli((ID *)ob);
}
static void drw_engines_cache_finish(void)
{
- for (LinkData *link = DST.enabled_engines.first; link; link = link->next) {
- DrawEngineType *engine = link->data;
- ViewportEngineData *data = drw_viewport_engine_data_ensure(engine);
+ for (LinkData *link = DST.enabled_engines.first; link; link = link->next) {
+ DrawEngineType *engine = link->data;
+ ViewportEngineData *data = drw_viewport_engine_data_ensure(engine);
- if (engine->cache_finish) {
- engine->cache_finish(data);
- }
- }
+ if (engine->cache_finish) {
+ engine->cache_finish(data);
+ }
+ }
}
static void drw_engines_draw_background(void)
{
- for (LinkData *link = DST.enabled_engines.first; link; link = link->next) {
- DrawEngineType *engine = link->data;
- ViewportEngineData *data = drw_viewport_engine_data_ensure(engine);
+ for (LinkData *link = DST.enabled_engines.first; link; link = link->next) {
+ DrawEngineType *engine = link->data;
+ ViewportEngineData *data = drw_viewport_engine_data_ensure(engine);
- if (engine->draw_background) {
- PROFILE_START(stime);
+ if (engine->draw_background) {
+ PROFILE_START(stime);
- DRW_stats_group_start(engine->idname);
- engine->draw_background(data);
- DRW_stats_group_end();
+ DRW_stats_group_start(engine->idname);
+ engine->draw_background(data);
+ DRW_stats_group_end();
- PROFILE_END_UPDATE(data->background_time, stime);
- return;
- }
- }
+ PROFILE_END_UPDATE(data->background_time, stime);
+ return;
+ }
+ }
- /* No draw_background found, doing default background */
- if (DRW_state_draw_background()) {
- DRW_draw_background();
- }
+  /* No draw_background found, fall back to the default background */
+ if (DRW_state_draw_background()) {
+ DRW_draw_background();
+ }
}
static void drw_engines_draw_scene(void)
{
- for (LinkData *link = DST.enabled_engines.first; link; link = link->next) {
- DrawEngineType *engine = link->data;
- ViewportEngineData *data = drw_viewport_engine_data_ensure(engine);
- PROFILE_START(stime);
+ for (LinkData *link = DST.enabled_engines.first; link; link = link->next) {
+ DrawEngineType *engine = link->data;
+ ViewportEngineData *data = drw_viewport_engine_data_ensure(engine);
+ PROFILE_START(stime);
- if (engine->draw_scene) {
- DRW_stats_group_start(engine->idname);
- engine->draw_scene(data);
- /* Restore for next engine */
- if (DRW_state_is_fbo()) {
- GPU_framebuffer_bind(DST.default_framebuffer);
- }
- DRW_stats_group_end();
- }
+ if (engine->draw_scene) {
+ DRW_stats_group_start(engine->idname);
+ engine->draw_scene(data);
+ /* Restore for next engine */
+ if (DRW_state_is_fbo()) {
+ GPU_framebuffer_bind(DST.default_framebuffer);
+ }
+ DRW_stats_group_end();
+ }
- PROFILE_END_UPDATE(data->render_time, stime);
- }
- /* Reset state after drawing */
- DRW_state_reset();
+ PROFILE_END_UPDATE(data->render_time, stime);
+ }
+ /* Reset state after drawing */
+ DRW_state_reset();
}
static void drw_engines_draw_text(void)
{
- for (LinkData *link = DST.enabled_engines.first; link; link = link->next) {
- DrawEngineType *engine = link->data;
- ViewportEngineData *data = drw_viewport_engine_data_ensure(engine);
- PROFILE_START(stime);
+ for (LinkData *link = DST.enabled_engines.first; link; link = link->next) {
+ DrawEngineType *engine = link->data;
+ ViewportEngineData *data = drw_viewport_engine_data_ensure(engine);
+ PROFILE_START(stime);
- if (data->text_draw_cache) {
- DRW_text_cache_draw(data->text_draw_cache, DST.draw_ctx.ar);
- }
+ if (data->text_draw_cache) {
+ DRW_text_cache_draw(data->text_draw_cache, DST.draw_ctx.ar);
+ }
- PROFILE_END_UPDATE(data->render_time, stime);
- }
+ PROFILE_END_UPDATE(data->render_time, stime);
+ }
}
/* Draw render engine info. */
void DRW_draw_region_engine_info(int xoffset, int yoffset)
{
- for (LinkData *link = DST.enabled_engines.first; link; link = link->next) {
- DrawEngineType *engine = link->data;
- ViewportEngineData *data = drw_viewport_engine_data_ensure(engine);
+ for (LinkData *link = DST.enabled_engines.first; link; link = link->next) {
+ DrawEngineType *engine = link->data;
+ ViewportEngineData *data = drw_viewport_engine_data_ensure(engine);
- if (data->info[0] != '\0') {
- char *chr_current = data->info;
- char *chr_start = chr_current;
- int line_len = 0;
+ if (data->info[0] != '\0') {
+ char *chr_current = data->info;
+ char *chr_start = chr_current;
+ int line_len = 0;
- const int font_id = BLF_default();
- UI_FontThemeColor(font_id, TH_TEXT_HI);
+ const int font_id = BLF_default();
+ UI_FontThemeColor(font_id, TH_TEXT_HI);
- BLF_enable(font_id, BLF_SHADOW);
- BLF_shadow(font_id, 5, (const float[4]){0.0f, 0.0f, 0.0f, 1.0f});
- BLF_shadow_offset(font_id, 1, -1);
+ BLF_enable(font_id, BLF_SHADOW);
+ BLF_shadow(font_id, 5, (const float[4]){0.0f, 0.0f, 0.0f, 1.0f});
+ BLF_shadow_offset(font_id, 1, -1);
- while (*chr_current++ != '\0') {
- line_len++;
- if (*chr_current == '\n') {
- char info[GPU_INFO_SIZE];
- BLI_strncpy(info, chr_start, line_len + 1);
- yoffset -= U.widget_unit;
- BLF_draw_default(xoffset, yoffset, 0.0f, info, sizeof(info));
+ while (*chr_current++ != '\0') {
+ line_len++;
+ if (*chr_current == '\n') {
+ char info[GPU_INFO_SIZE];
+ BLI_strncpy(info, chr_start, line_len + 1);
+ yoffset -= U.widget_unit;
+ BLF_draw_default(xoffset, yoffset, 0.0f, info, sizeof(info));
- /* Re-start counting. */
- chr_start = chr_current + 1;
- line_len = -1;
- }
- }
+ /* Re-start counting. */
+ chr_start = chr_current + 1;
+ line_len = -1;
+ }
+ }
- char info[GPU_INFO_SIZE];
- BLI_strncpy(info, chr_start, line_len + 1);
- yoffset -= U.widget_unit;
- BLF_draw_default(xoffset, yoffset, 0.0f, info, sizeof(info));
+ char info[GPU_INFO_SIZE];
+ BLI_strncpy(info, chr_start, line_len + 1);
+ yoffset -= U.widget_unit;
+ BLF_draw_default(xoffset, yoffset, 0.0f, info, sizeof(info));
- BLF_disable(font_id, BLF_SHADOW);
- }
- }
+ BLF_disable(font_id, BLF_SHADOW);
+ }
+ }
}
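The loop above splits data->info on '\n' by counting characters into line_len and copying each line out before drawing it. An equivalent formulation with strchr() makes the per-line bounds explicit (a sketch with printf() standing in for BLF_draw_default(); not the code in the tree):

#include <stdio.h>
#include <string.h>

static void draw_info_lines(const char *info)
{
  const char *start = info;
  for (;;) {
    if (*start == '\0') {
      break; /* nothing left; also skips a trailing newline */
    }
    const char *end = strchr(start, '\n');
    const size_t len = end ? (size_t)(end - start) : strlen(start);
    printf("%.*s\n", (int)len, start); /* one BLF_draw_default() call */
    if (end == NULL) {
      break;
    }
    start = end + 1;
  }
}

Either way, the invariant the BLI_strncpy(info, chr_start, line_len + 1) calls above maintain is the same: the copy length always leaves room for the terminating NUL.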
static void use_drw_engine(DrawEngineType *engine)
{
- LinkData *ld = MEM_callocN(sizeof(LinkData), "enabled engine link data");
- ld->data = engine;
- BLI_addtail(&DST.enabled_engines, ld);
+ LinkData *ld = MEM_callocN(sizeof(LinkData), "enabled engine link data");
+ ld->data = engine;
+ BLI_addtail(&DST.enabled_engines, ld);
}
/**
@@ -1174,172 +1181,174 @@ static void use_drw_engine(DrawEngineType *engine)
*/
static void drw_engines_enable_external(void)
{
- use_drw_engine(DRW_engine_viewport_external_type.draw_engine);
+ use_drw_engine(DRW_engine_viewport_external_type.draw_engine);
}
/* TODO revisit this when proper layering is implemented */
/* Gather all draw engines needed and store them in DST.enabled_engines
 * That also defines the rendering order of the engines */
-static void drw_engines_enable_from_engine(RenderEngineType *engine_type, int drawtype, bool use_xray)
-{
- switch (drawtype) {
- case OB_WIRE:
- use_drw_engine(&draw_engine_workbench_transparent);
- break;
-
- case OB_SOLID:
- if (use_xray) {
- use_drw_engine(&draw_engine_workbench_transparent);
- }
- else {
- use_drw_engine(&draw_engine_workbench_solid);
- }
- break;
-
- case OB_MATERIAL:
- case OB_RENDER:
- default:
- /* TODO layers */
- if (engine_type->draw_engine != NULL) {
- use_drw_engine(engine_type->draw_engine);
- }
-
- if ((engine_type->flag & RE_INTERNAL) == 0) {
- drw_engines_enable_external();
- }
- break;
- }
+static void drw_engines_enable_from_engine(RenderEngineType *engine_type,
+ int drawtype,
+ bool use_xray)
+{
+ switch (drawtype) {
+ case OB_WIRE:
+ use_drw_engine(&draw_engine_workbench_transparent);
+ break;
+
+ case OB_SOLID:
+ if (use_xray) {
+ use_drw_engine(&draw_engine_workbench_transparent);
+ }
+ else {
+ use_drw_engine(&draw_engine_workbench_solid);
+ }
+ break;
+
+ case OB_MATERIAL:
+ case OB_RENDER:
+ default:
+ /* TODO layers */
+ if (engine_type->draw_engine != NULL) {
+ use_drw_engine(engine_type->draw_engine);
+ }
+
+ if ((engine_type->flag & RE_INTERNAL) == 0) {
+ drw_engines_enable_external();
+ }
+ break;
+ }
}
static void drw_engines_enable_from_object_mode(void)
{
- use_drw_engine(&draw_engine_object_type);
- /* TODO(fclem) remove this, it does not belong to it's own engine. */
- use_drw_engine(&draw_engine_motion_path_type);
+ use_drw_engine(&draw_engine_object_type);
+  /* TODO(fclem) remove this, it does not belong in its own engine. */
+ use_drw_engine(&draw_engine_motion_path_type);
}
static void drw_engines_enable_from_paint_mode(int mode)
{
- switch (mode) {
- case CTX_MODE_SCULPT:
- use_drw_engine(&draw_engine_sculpt_type);
- break;
- case CTX_MODE_PAINT_WEIGHT:
- case CTX_MODE_PAINT_VERTEX:
- use_drw_engine(&draw_engine_paint_vertex_type);
- break;
- case CTX_MODE_PAINT_TEXTURE:
- use_drw_engine(&draw_engine_paint_texture_type);
- break;
- default:
- break;
- }
+ switch (mode) {
+ case CTX_MODE_SCULPT:
+ use_drw_engine(&draw_engine_sculpt_type);
+ break;
+ case CTX_MODE_PAINT_WEIGHT:
+ case CTX_MODE_PAINT_VERTEX:
+ use_drw_engine(&draw_engine_paint_vertex_type);
+ break;
+ case CTX_MODE_PAINT_TEXTURE:
+ use_drw_engine(&draw_engine_paint_texture_type);
+ break;
+ default:
+ break;
+ }
}
static void drw_engines_enable_from_mode(int mode)
{
- switch (mode) {
- case CTX_MODE_EDIT_MESH:
- use_drw_engine(&draw_engine_edit_mesh_type);
- break;
- case CTX_MODE_EDIT_SURFACE:
- case CTX_MODE_EDIT_CURVE:
- use_drw_engine(&draw_engine_edit_curve_type);
- break;
- case CTX_MODE_EDIT_TEXT:
- use_drw_engine(&draw_engine_edit_text_type);
- break;
- case CTX_MODE_EDIT_ARMATURE:
- use_drw_engine(&draw_engine_edit_armature_type);
- break;
- case CTX_MODE_EDIT_METABALL:
- use_drw_engine(&draw_engine_edit_metaball_type);
- break;
- case CTX_MODE_EDIT_LATTICE:
- use_drw_engine(&draw_engine_edit_lattice_type);
- break;
- case CTX_MODE_PARTICLE:
- use_drw_engine(&draw_engine_particle_type);
- break;
- case CTX_MODE_POSE:
- case CTX_MODE_PAINT_WEIGHT:
- /* The pose engine clears the depth of the default framebuffer
- * to draw an object with `OB_DRAWXRAY`.
- * (different of workbench that has its own framebuffer).
- * So make sure you call its `draw_scene` after all the other engines. */
- use_drw_engine(&draw_engine_pose_type);
- break;
- case CTX_MODE_SCULPT:
- case CTX_MODE_PAINT_VERTEX:
- case CTX_MODE_PAINT_TEXTURE:
- case CTX_MODE_OBJECT:
- case CTX_MODE_PAINT_GPENCIL:
- case CTX_MODE_EDIT_GPENCIL:
- case CTX_MODE_SCULPT_GPENCIL:
- case CTX_MODE_WEIGHT_GPENCIL:
- break;
- default:
- BLI_assert(!"Draw mode invalid");
- break;
- }
+ switch (mode) {
+ case CTX_MODE_EDIT_MESH:
+ use_drw_engine(&draw_engine_edit_mesh_type);
+ break;
+ case CTX_MODE_EDIT_SURFACE:
+ case CTX_MODE_EDIT_CURVE:
+ use_drw_engine(&draw_engine_edit_curve_type);
+ break;
+ case CTX_MODE_EDIT_TEXT:
+ use_drw_engine(&draw_engine_edit_text_type);
+ break;
+ case CTX_MODE_EDIT_ARMATURE:
+ use_drw_engine(&draw_engine_edit_armature_type);
+ break;
+ case CTX_MODE_EDIT_METABALL:
+ use_drw_engine(&draw_engine_edit_metaball_type);
+ break;
+ case CTX_MODE_EDIT_LATTICE:
+ use_drw_engine(&draw_engine_edit_lattice_type);
+ break;
+ case CTX_MODE_PARTICLE:
+ use_drw_engine(&draw_engine_particle_type);
+ break;
+ case CTX_MODE_POSE:
+ case CTX_MODE_PAINT_WEIGHT:
+ /* The pose engine clears the depth of the default framebuffer
+ * to draw an object with `OB_DRAWXRAY`.
+       * (unlike workbench, which has its own framebuffer).
+ * So make sure you call its `draw_scene` after all the other engines. */
+ use_drw_engine(&draw_engine_pose_type);
+ break;
+ case CTX_MODE_SCULPT:
+ case CTX_MODE_PAINT_VERTEX:
+ case CTX_MODE_PAINT_TEXTURE:
+ case CTX_MODE_OBJECT:
+ case CTX_MODE_PAINT_GPENCIL:
+ case CTX_MODE_EDIT_GPENCIL:
+ case CTX_MODE_SCULPT_GPENCIL:
+ case CTX_MODE_WEIGHT_GPENCIL:
+ break;
+ default:
+ BLI_assert(!"Draw mode invalid");
+ break;
+ }
}
static void drw_engines_enable_from_overlays(int UNUSED(overlay_flag))
{
- use_drw_engine(&draw_engine_overlay_type);
+ use_drw_engine(&draw_engine_overlay_type);
}
/**
* Use for select and depth-drawing.
*/
static void drw_engines_enable_basic(void)
{
- use_drw_engine(DRW_engine_viewport_basic_type.draw_engine);
+ use_drw_engine(DRW_engine_viewport_basic_type.draw_engine);
}
static void drw_engines_enable(ViewLayer *view_layer, RenderEngineType *engine_type)
{
- Object *obact = OBACT(view_layer);
- const enum eContextObjectMode mode = CTX_data_mode_enum_ex(
- DST.draw_ctx.object_edit, obact, DST.draw_ctx.object_mode);
- View3D *v3d = DST.draw_ctx.v3d;
- const int drawtype = v3d->shading.type;
- const bool use_xray = XRAY_ENABLED(v3d);
-
- drw_engines_enable_from_engine(engine_type, drawtype, use_xray);
- /* grease pencil */
- use_drw_engine(&draw_engine_gpencil_type);
-
- if (DRW_state_draw_support()) {
- /* Draw paint modes first so that they are drawn below the wireframes. */
- drw_engines_enable_from_paint_mode(mode);
- drw_engines_enable_from_overlays(v3d->overlay.flag);
- drw_engines_enable_from_object_mode();
- drw_engines_enable_from_mode(mode);
- }
- else {
- /* Force enable overlays engine for wireframe mode */
- if (v3d->shading.type == OB_WIRE) {
- drw_engines_enable_from_overlays(v3d->overlay.flag);
- }
- }
+ Object *obact = OBACT(view_layer);
+ const enum eContextObjectMode mode = CTX_data_mode_enum_ex(
+ DST.draw_ctx.object_edit, obact, DST.draw_ctx.object_mode);
+ View3D *v3d = DST.draw_ctx.v3d;
+ const int drawtype = v3d->shading.type;
+ const bool use_xray = XRAY_ENABLED(v3d);
+
+ drw_engines_enable_from_engine(engine_type, drawtype, use_xray);
+ /* grease pencil */
+ use_drw_engine(&draw_engine_gpencil_type);
+
+ if (DRW_state_draw_support()) {
+ /* Draw paint modes first so that they are drawn below the wireframes. */
+ drw_engines_enable_from_paint_mode(mode);
+ drw_engines_enable_from_overlays(v3d->overlay.flag);
+ drw_engines_enable_from_object_mode();
+ drw_engines_enable_from_mode(mode);
+ }
+ else {
+ /* Force enable overlays engine for wireframe mode */
+ if (v3d->shading.type == OB_WIRE) {
+ drw_engines_enable_from_overlays(v3d->overlay.flag);
+ }
+ }
}
static void drw_engines_disable(void)
{
- BLI_freelistN(&DST.enabled_engines);
+ BLI_freelistN(&DST.enabled_engines);
}
static uint DRW_engines_get_hash(void)
{
- uint hash = 0;
- /* The cache depends on enabled engines */
- /* FIXME : if collision occurs ... segfault */
- for (LinkData *link = DST.enabled_engines.first; link; link = link->next) {
- DrawEngineType *engine = link->data;
- hash += BLI_ghashutil_strhash_p(engine->idname);
- }
+ uint hash = 0;
+  /* The cache depends on the enabled engines. */
+  /* FIXME: if a hash collision occurs ... segfault. */
+ for (LinkData *link = DST.enabled_engines.first; link; link = link->next) {
+ DrawEngineType *engine = link->data;
+ hash += BLI_ghashutil_strhash_p(engine->idname);
+ }
- return hash;
+ return hash;
}
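
The FIXME above flags a real hazard: the per-viewport cache is keyed by an additive combination of per-engine name hashes, and a sum discards which names produced it, so two different engine sets can map to the same key. A minimal standalone sketch of the failure mode; `toy_strhash` is a hypothetical stand-in for BLI_ghashutil_strhash_p, not Blender API:

#include <stdio.h>

/* djb2-style toy hash, standing in for BLI_ghashutil_strhash_p. */
static unsigned int toy_strhash(const char *s)
{
  unsigned int h = 5381;
  while (*s) {
    h = h * 33 + (unsigned char)*s++;
  }
  return h;
}

int main(void)
{
  /* The sum is order-independent, and unrelated sets can also collide:
   * nothing ties the final key back to the exact set of engine names. */
  unsigned int ab = toy_strhash("EngineA") + toy_strhash("EngineB");
  unsigned int ba = toy_strhash("EngineB") + toy_strhash("EngineA");
  printf("ab=%u ba=%u equal=%d\n", ab, ba, ab == ba);
  return 0;
}
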
/* -------------------------------------------------------------------- */
@@ -1348,54 +1357,59 @@ static uint DRW_engines_get_hash(void)
void DRW_notify_view_update(const DRWUpdateContext *update_ctx)
{
- RenderEngineType *engine_type = update_ctx->engine_type;
- ARegion *ar = update_ctx->ar;
- View3D *v3d = update_ctx->v3d;
- RegionView3D *rv3d = ar->regiondata;
- Depsgraph *depsgraph = update_ctx->depsgraph;
- Scene *scene = update_ctx->scene;
- ViewLayer *view_layer = update_ctx->view_layer;
-
- /* Separate update for each stereo view. */
- for (int view = 0; view < 2; view++) {
- GPUViewport *viewport = WM_draw_region_get_viewport(ar, view);
- if (!viewport) {
- continue;
- }
-
- /* XXX Really nasty locking. But else this could
- * be executed by the material previews thread
- * while rendering a viewport. */
- BLI_ticket_mutex_lock(DST.gl_context_mutex);
-
- /* Reset before using it. */
- drw_state_prepare_clean_for_draw(&DST);
-
- DST.viewport = viewport;
- DST.draw_ctx = (DRWContextState){
- .ar = ar, .rv3d = rv3d, .v3d = v3d,
- .scene = scene, .view_layer = view_layer, .obact = OBACT(view_layer),
- .engine_type = engine_type,
- .depsgraph = depsgraph, .object_mode = OB_MODE_OBJECT,
- };
-
- drw_engines_enable(view_layer, engine_type);
-
- for (LinkData *link = DST.enabled_engines.first; link; link = link->next) {
- DrawEngineType *draw_engine = link->data;
- ViewportEngineData *data = drw_viewport_engine_data_ensure(draw_engine);
-
- if (draw_engine->view_update) {
- draw_engine->view_update(data);
- }
- }
-
- DST.viewport = NULL;
-
- drw_engines_disable();
-
- BLI_ticket_mutex_unlock(DST.gl_context_mutex);
- }
+ RenderEngineType *engine_type = update_ctx->engine_type;
+ ARegion *ar = update_ctx->ar;
+ View3D *v3d = update_ctx->v3d;
+ RegionView3D *rv3d = ar->regiondata;
+ Depsgraph *depsgraph = update_ctx->depsgraph;
+ Scene *scene = update_ctx->scene;
+ ViewLayer *view_layer = update_ctx->view_layer;
+
+ /* Separate update for each stereo view. */
+ for (int view = 0; view < 2; view++) {
+ GPUViewport *viewport = WM_draw_region_get_viewport(ar, view);
+ if (!viewport) {
+ continue;
+ }
+
+    /* XXX Really nasty locking. Otherwise this could be executed by
+     * the material previews thread while rendering a viewport. */
+ BLI_ticket_mutex_lock(DST.gl_context_mutex);
+
+ /* Reset before using it. */
+ drw_state_prepare_clean_for_draw(&DST);
+
+ DST.viewport = viewport;
+ DST.draw_ctx = (DRWContextState){
+ .ar = ar,
+ .rv3d = rv3d,
+ .v3d = v3d,
+ .scene = scene,
+ .view_layer = view_layer,
+ .obact = OBACT(view_layer),
+ .engine_type = engine_type,
+ .depsgraph = depsgraph,
+ .object_mode = OB_MODE_OBJECT,
+ };
+
+ drw_engines_enable(view_layer, engine_type);
+
+ for (LinkData *link = DST.enabled_engines.first; link; link = link->next) {
+ DrawEngineType *draw_engine = link->data;
+ ViewportEngineData *data = drw_viewport_engine_data_ensure(draw_engine);
+
+ if (draw_engine->view_update) {
+ draw_engine->view_update(data);
+ }
+ }
+
+ DST.viewport = NULL;
+
+ drw_engines_disable();
+
+ BLI_ticket_mutex_unlock(DST.gl_context_mutex);
+ }
}
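
The loop above hands each enabled engine its per-viewport data and invokes the optional `view_update` hook. A minimal sketch of the engine side, assuming the hook takes only the `vedata` pointer as in the call above; `MyEngineViewportData` and its flag are hypothetical:

#include <stdbool.h>

/* Hypothetical per-viewport engine storage; real engines define their own. */
typedef struct MyEngineViewportData {
  bool need_full_rebuild;
} MyEngineViewportData;

/* Assigned to the engine's view_update member; called once per stereo view
 * when the depsgraph notifies that the view changed. */
static void my_engine_view_update(void *vedata)
{
  MyEngineViewportData *data = vedata;
  /* Only tag here and defer the actual rebuild to the next cache fill,
   * since this may run outside the normal draw loop (note the locking
   * comment above). */
  data->need_full_rebuild = true;
}
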
/** \} */
@@ -1409,1173 +1423,1168 @@ void DRW_notify_view_update(const DRWUpdateContext *update_ctx)
* for each relevant engine / mode engine. */
void DRW_draw_view(const bContext *C)
{
- Depsgraph *depsgraph = CTX_data_depsgraph(C);
- ARegion *ar = CTX_wm_region(C);
- View3D *v3d = CTX_wm_view3d(C);
- Scene *scene = DEG_get_evaluated_scene(depsgraph);
- RenderEngineType *engine_type = ED_view3d_engine_type(scene, v3d->shading.type);
- GPUViewport *viewport = WM_draw_region_get_bound_viewport(ar);
+ Depsgraph *depsgraph = CTX_data_depsgraph(C);
+ ARegion *ar = CTX_wm_region(C);
+ View3D *v3d = CTX_wm_view3d(C);
+ Scene *scene = DEG_get_evaluated_scene(depsgraph);
+ RenderEngineType *engine_type = ED_view3d_engine_type(scene, v3d->shading.type);
+ GPUViewport *viewport = WM_draw_region_get_bound_viewport(ar);
- /* Reset before using it. */
- drw_state_prepare_clean_for_draw(&DST);
- DST.options.draw_text = (
- (v3d->flag2 & V3D_HIDE_OVERLAYS) == 0 &&
- (v3d->overlay.flag & V3D_OVERLAY_HIDE_TEXT) != 0);
- DRW_draw_render_loop_ex(depsgraph, engine_type, ar, v3d, viewport, C);
+ /* Reset before using it. */
+ drw_state_prepare_clean_for_draw(&DST);
+ DST.options.draw_text = ((v3d->flag2 & V3D_HIDE_OVERLAYS) == 0 &&
+ (v3d->overlay.flag & V3D_OVERLAY_HIDE_TEXT) != 0);
+ DRW_draw_render_loop_ex(depsgraph, engine_type, ar, v3d, viewport, C);
}
/**
* Used for both regular and off-screen drawing.
* Need to reset DST before calling this function
*/
-void DRW_draw_render_loop_ex(
- struct Depsgraph *depsgraph,
- RenderEngineType *engine_type,
- ARegion *ar, View3D *v3d,
- GPUViewport *viewport,
- const bContext *evil_C)
-{
-
- Scene *scene = DEG_get_evaluated_scene(depsgraph);
- ViewLayer *view_layer = DEG_get_evaluated_view_layer(depsgraph);
- RegionView3D *rv3d = ar->regiondata;
- const bool do_annotations = (
- ((v3d->flag2 & V3D_SHOW_ANNOTATION) != 0) &&
- ((v3d->flag2 & V3D_HIDE_OVERLAYS) == 0));
- const bool do_camera_frame = !DST.options.is_image_render;
-
- DST.draw_ctx.evil_C = evil_C;
- DST.viewport = viewport;
-
- /* Setup viewport */
- GPU_viewport_engines_data_validate(DST.viewport, DRW_engines_get_hash());
-
- DST.draw_ctx = (DRWContextState){
- .ar = ar, .rv3d = rv3d, .v3d = v3d,
- .scene = scene, .view_layer = view_layer, .obact = OBACT(view_layer),
- .engine_type = engine_type,
- .depsgraph = depsgraph,
-
- /* reuse if caller sets */
- .evil_C = DST.draw_ctx.evil_C,
- };
- drw_context_state_init();
- drw_viewport_var_init();
-
- /* Get list of enabled engines */
- drw_engines_enable(view_layer, engine_type);
-
- /* Update ubos */
- DRW_globals_update();
-
- drw_debug_init();
- DRW_hair_init();
-
- /* No framebuffer allowed before drawing. */
- BLI_assert(GPU_framebuffer_active_get() == NULL);
-
- /* Init engines */
- drw_engines_init();
-
- /* Cache filling */
- {
- PROFILE_START(stime);
- drw_engines_cache_init();
- drw_engines_world_update(scene);
-
- const int object_type_exclude_viewport = v3d->object_type_exclude_viewport;
- DEG_OBJECT_ITER_BEGIN(depsgraph, ob,
- DEG_ITER_OBJECT_FLAG_LINKED_DIRECTLY |
- DEG_ITER_OBJECT_FLAG_LINKED_VIA_SET |
- DEG_ITER_OBJECT_FLAG_VISIBLE |
- DEG_ITER_OBJECT_FLAG_DUPLI)
- {
- if ((object_type_exclude_viewport & (1 << ob->type)) != 0) {
- continue;
- }
- if (v3d->localvd && ((v3d->local_view_uuid & ob->base_local_view_bits) == 0)) {
- continue;
- }
- DST.dupli_parent = data_.dupli_parent;
- DST.dupli_source = data_.dupli_object_current;
- drw_engines_cache_populate(ob);
- }
- DEG_OBJECT_ITER_END;
-
- drw_engines_cache_finish();
-
- DRW_render_instance_buffer_finish();
+void DRW_draw_render_loop_ex(struct Depsgraph *depsgraph,
+ RenderEngineType *engine_type,
+ ARegion *ar,
+ View3D *v3d,
+ GPUViewport *viewport,
+ const bContext *evil_C)
+{
+
+ Scene *scene = DEG_get_evaluated_scene(depsgraph);
+ ViewLayer *view_layer = DEG_get_evaluated_view_layer(depsgraph);
+ RegionView3D *rv3d = ar->regiondata;
+ const bool do_annotations = (((v3d->flag2 & V3D_SHOW_ANNOTATION) != 0) &&
+ ((v3d->flag2 & V3D_HIDE_OVERLAYS) == 0));
+ const bool do_camera_frame = !DST.options.is_image_render;
+
+ DST.draw_ctx.evil_C = evil_C;
+ DST.viewport = viewport;
+
+ /* Setup viewport */
+ GPU_viewport_engines_data_validate(DST.viewport, DRW_engines_get_hash());
+
+ DST.draw_ctx = (DRWContextState){
+ .ar = ar,
+ .rv3d = rv3d,
+ .v3d = v3d,
+ .scene = scene,
+ .view_layer = view_layer,
+ .obact = OBACT(view_layer),
+ .engine_type = engine_type,
+ .depsgraph = depsgraph,
+
+ /* reuse if caller sets */
+ .evil_C = DST.draw_ctx.evil_C,
+ };
+ drw_context_state_init();
+ drw_viewport_var_init();
+
+ /* Get list of enabled engines */
+ drw_engines_enable(view_layer, engine_type);
+
+ /* Update ubos */
+ DRW_globals_update();
+
+ drw_debug_init();
+ DRW_hair_init();
+
+ /* No framebuffer allowed before drawing. */
+ BLI_assert(GPU_framebuffer_active_get() == NULL);
+
+ /* Init engines */
+ drw_engines_init();
+
+ /* Cache filling */
+ {
+ PROFILE_START(stime);
+ drw_engines_cache_init();
+ drw_engines_world_update(scene);
+
+ const int object_type_exclude_viewport = v3d->object_type_exclude_viewport;
+ DEG_OBJECT_ITER_BEGIN (depsgraph,
+ ob,
+ DEG_ITER_OBJECT_FLAG_LINKED_DIRECTLY |
+ DEG_ITER_OBJECT_FLAG_LINKED_VIA_SET | DEG_ITER_OBJECT_FLAG_VISIBLE |
+ DEG_ITER_OBJECT_FLAG_DUPLI) {
+ if ((object_type_exclude_viewport & (1 << ob->type)) != 0) {
+ continue;
+ }
+ if (v3d->localvd && ((v3d->local_view_uuid & ob->base_local_view_bits) == 0)) {
+ continue;
+ }
+ DST.dupli_parent = data_.dupli_parent;
+ DST.dupli_source = data_.dupli_object_current;
+ drw_engines_cache_populate(ob);
+ }
+ DEG_OBJECT_ITER_END;
+
+ drw_engines_cache_finish();
+
+ DRW_render_instance_buffer_finish();
#ifdef USE_PROFILE
- double *cache_time = GPU_viewport_cache_time_get(DST.viewport);
- PROFILE_END_UPDATE(*cache_time, stime);
+ double *cache_time = GPU_viewport_cache_time_get(DST.viewport);
+ PROFILE_END_UPDATE(*cache_time, stime);
#endif
- }
+ }
- DRW_stats_begin();
+ DRW_stats_begin();
- GPU_framebuffer_bind(DST.default_framebuffer);
+ GPU_framebuffer_bind(DST.default_framebuffer);
- /* Start Drawing */
- DRW_state_reset();
+ /* Start Drawing */
+ DRW_state_reset();
- DRW_hair_update();
+ DRW_hair_update();
- drw_engines_draw_background();
+ drw_engines_draw_background();
- /* WIP, single image drawn over the camera view (replace) */
- bool do_bg_image = false;
- if (rv3d->persp == RV3D_CAMOB) {
- Object *cam_ob = v3d->camera;
- if (cam_ob && cam_ob->type == OB_CAMERA) {
- Camera *cam = cam_ob->data;
- if (!BLI_listbase_is_empty(&cam->bg_images)) {
- do_bg_image = true;
- }
- }
- }
+ /* WIP, single image drawn over the camera view (replace) */
+ bool do_bg_image = false;
+ if (rv3d->persp == RV3D_CAMOB) {
+ Object *cam_ob = v3d->camera;
+ if (cam_ob && cam_ob->type == OB_CAMERA) {
+ Camera *cam = cam_ob->data;
+ if (!BLI_listbase_is_empty(&cam->bg_images)) {
+ do_bg_image = true;
+ }
+ }
+ }
- GPU_framebuffer_bind(DST.default_framebuffer);
+ GPU_framebuffer_bind(DST.default_framebuffer);
- if (do_bg_image) {
- ED_view3d_draw_bgpic_test(scene, depsgraph, ar, v3d, false, do_camera_frame);
- }
+ if (do_bg_image) {
+ ED_view3d_draw_bgpic_test(scene, depsgraph, ar, v3d, false, do_camera_frame);
+ }
- DRW_draw_callbacks_pre_scene();
- if (DST.draw_ctx.evil_C) {
- ED_region_draw_cb_draw(DST.draw_ctx.evil_C, DST.draw_ctx.ar, REGION_DRAW_PRE_VIEW);
- }
+ DRW_draw_callbacks_pre_scene();
+ if (DST.draw_ctx.evil_C) {
+ ED_region_draw_cb_draw(DST.draw_ctx.evil_C, DST.draw_ctx.ar, REGION_DRAW_PRE_VIEW);
+ }
- drw_engines_draw_scene();
+ drw_engines_draw_scene();
#ifdef __APPLE__
- /* Fix 3D view being "laggy" on macos. (See T56996) */
- GPU_flush();
+ /* Fix 3D view being "laggy" on macos. (See T56996) */
+  /* Fix 3D view being "laggy" on macOS. (See T56996) */
#endif
- /* annotations - temporary drawing buffer (3d space) */
- /* XXX: Or should we use a proper draw/overlay engine for this case? */
- if (do_annotations) {
- GPU_depth_test(false);
- /* XXX: as scene->gpd is not copied for COW yet */
- ED_annotation_draw_view3d(DEG_get_input_scene(depsgraph), depsgraph, v3d, ar, true);
- GPU_depth_test(true);
- }
-
- DRW_draw_callbacks_post_scene();
- if (DST.draw_ctx.evil_C) {
- DRW_state_reset();
- ED_region_draw_cb_draw(DST.draw_ctx.evil_C, DST.draw_ctx.ar, REGION_DRAW_POST_VIEW);
- /* Callback can be nasty and do whatever they want with the state.
- * Don't trust them! */
- DRW_state_reset();
- }
-
- DRW_state_reset();
-
- drw_debug_draw();
-
- GPU_depth_test(false);
- drw_engines_draw_text();
- GPU_depth_test(true);
-
- if (DST.draw_ctx.evil_C) {
- /* needed so gizmo isn't obscured */
- if ((v3d->gizmo_flag & V3D_GIZMO_HIDE) == 0) {
- glDisable(GL_DEPTH_TEST);
- DRW_draw_gizmo_3d();
- }
-
- DRW_draw_region_info();
-
- /* annotations - temporary drawing buffer (screenspace) */
- /* XXX: Or should we use a proper draw/overlay engine for this case? */
- if (((v3d->flag2 & V3D_HIDE_OVERLAYS) == 0) &&
- (do_annotations))
- {
- GPU_depth_test(false);
- /* XXX: as scene->gpd is not copied for COW yet */
- ED_annotation_draw_view3d(DEG_get_input_scene(depsgraph), depsgraph, v3d, ar, false);
- GPU_depth_test(true);
- }
-
- if ((v3d->gizmo_flag & V3D_GIZMO_HIDE) == 0) {
- /* Draw 2D after region info so we can draw on top of the camera passepartout overlay.
- * 'DRW_draw_region_info' sets the projection in pixel-space. */
- GPU_depth_test(false);
- DRW_draw_gizmo_2d();
- GPU_depth_test(true);
- }
- }
-
- DRW_stats_reset();
-
- if (do_bg_image) {
- ED_view3d_draw_bgpic_test(scene, depsgraph, ar, v3d, true, do_camera_frame);
- }
-
- if (G.debug_value > 20 && G.debug_value < 30) {
- GPU_depth_test(false);
- rcti rect; /* local coordinate visible rect inside region, to accommodate overlapping ui */
- ED_region_visible_rect(DST.draw_ctx.ar, &rect);
- DRW_stats_draw(&rect);
- GPU_depth_test(true);
- }
-
- if (WM_draw_region_get_bound_viewport(ar)) {
- /* Don't unbind the framebuffer yet in this case and let
- * GPU_viewport_unbind do it, so that we can still do further
- * drawing of action zones on top. */
- }
- else {
- GPU_framebuffer_restore();
- }
-
- DRW_state_reset();
- drw_engines_disable();
-
- drw_viewport_cache_resize();
+ /* annotations - temporary drawing buffer (3d space) */
+ /* XXX: Or should we use a proper draw/overlay engine for this case? */
+ if (do_annotations) {
+ GPU_depth_test(false);
+ /* XXX: as scene->gpd is not copied for COW yet */
+ ED_annotation_draw_view3d(DEG_get_input_scene(depsgraph), depsgraph, v3d, ar, true);
+ GPU_depth_test(true);
+ }
+
+ DRW_draw_callbacks_post_scene();
+ if (DST.draw_ctx.evil_C) {
+ DRW_state_reset();
+ ED_region_draw_cb_draw(DST.draw_ctx.evil_C, DST.draw_ctx.ar, REGION_DRAW_POST_VIEW);
+    /* Callbacks can be nasty and do whatever they want with the state.
+ * Don't trust them! */
+ DRW_state_reset();
+ }
+
+ DRW_state_reset();
+
+ drw_debug_draw();
+
+ GPU_depth_test(false);
+ drw_engines_draw_text();
+ GPU_depth_test(true);
+
+ if (DST.draw_ctx.evil_C) {
+ /* needed so gizmo isn't obscured */
+ if ((v3d->gizmo_flag & V3D_GIZMO_HIDE) == 0) {
+ glDisable(GL_DEPTH_TEST);
+ DRW_draw_gizmo_3d();
+ }
+
+ DRW_draw_region_info();
+
+ /* annotations - temporary drawing buffer (screenspace) */
+ /* XXX: Or should we use a proper draw/overlay engine for this case? */
+ if (((v3d->flag2 & V3D_HIDE_OVERLAYS) == 0) && (do_annotations)) {
+ GPU_depth_test(false);
+ /* XXX: as scene->gpd is not copied for COW yet */
+ ED_annotation_draw_view3d(DEG_get_input_scene(depsgraph), depsgraph, v3d, ar, false);
+ GPU_depth_test(true);
+ }
+
+ if ((v3d->gizmo_flag & V3D_GIZMO_HIDE) == 0) {
+ /* Draw 2D after region info so we can draw on top of the camera passepartout overlay.
+ * 'DRW_draw_region_info' sets the projection in pixel-space. */
+ GPU_depth_test(false);
+ DRW_draw_gizmo_2d();
+ GPU_depth_test(true);
+ }
+ }
+
+ DRW_stats_reset();
+
+ if (do_bg_image) {
+ ED_view3d_draw_bgpic_test(scene, depsgraph, ar, v3d, true, do_camera_frame);
+ }
+
+ if (G.debug_value > 20 && G.debug_value < 30) {
+ GPU_depth_test(false);
+ rcti rect; /* local coordinate visible rect inside region, to accommodate overlapping ui */
+ ED_region_visible_rect(DST.draw_ctx.ar, &rect);
+ DRW_stats_draw(&rect);
+ GPU_depth_test(true);
+ }
+
+ if (WM_draw_region_get_bound_viewport(ar)) {
+ /* Don't unbind the framebuffer yet in this case and let
+ * GPU_viewport_unbind do it, so that we can still do further
+ * drawing of action zones on top. */
+ }
+ else {
+ GPU_framebuffer_restore();
+ }
+
+ DRW_state_reset();
+ drw_engines_disable();
+
+ drw_viewport_cache_resize();
#ifdef DEBUG
- /* Avoid accidental reuse. */
- drw_state_ensure_not_reused(&DST);
+ /* Avoid accidental reuse. */
+ drw_state_ensure_not_reused(&DST);
#endif
}
-void DRW_draw_render_loop(
- struct Depsgraph *depsgraph,
- ARegion *ar, View3D *v3d,
- GPUViewport *viewport)
+void DRW_draw_render_loop(struct Depsgraph *depsgraph,
+ ARegion *ar,
+ View3D *v3d,
+ GPUViewport *viewport)
{
- /* Reset before using it. */
- drw_state_prepare_clean_for_draw(&DST);
+ /* Reset before using it. */
+ drw_state_prepare_clean_for_draw(&DST);
- Scene *scene = DEG_get_evaluated_scene(depsgraph);
- RenderEngineType *engine_type = ED_view3d_engine_type(scene, v3d->shading.type);
+ Scene *scene = DEG_get_evaluated_scene(depsgraph);
+ RenderEngineType *engine_type = ED_view3d_engine_type(scene, v3d->shading.type);
- DRW_draw_render_loop_ex(depsgraph, engine_type, ar, v3d, viewport, NULL);
+ DRW_draw_render_loop_ex(depsgraph, engine_type, ar, v3d, viewport, NULL);
}
/* @viewport CAN be NULL, in which case we create one. */
-void DRW_draw_render_loop_offscreen(
- struct Depsgraph *depsgraph, RenderEngineType *engine_type,
- ARegion *ar, View3D *v3d,
- const bool draw_background,
- const bool do_color_management,
- GPUOffScreen *ofs,
- GPUViewport *viewport)
-{
- /* Create temporary viewport if needed. */
- GPUViewport *render_viewport = viewport;
- if (viewport == NULL) {
- render_viewport = GPU_viewport_create_from_offscreen(ofs);
- }
-
- GPU_framebuffer_restore();
-
- /* Reset before using it. */
- drw_state_prepare_clean_for_draw(&DST);
- /* WATCH: Force color management to output CManaged byte buffer by
- * forcing is_image_render to false. */
- DST.options.is_image_render = !do_color_management;
- DST.options.draw_background = draw_background;
- DRW_draw_render_loop_ex(depsgraph, engine_type, ar, v3d, render_viewport, NULL);
-
- /* Free temporary viewport. */
- if (viewport == NULL) {
- /* don't free data owned by 'ofs' */
- GPU_viewport_clear_from_offscreen(render_viewport);
- GPU_viewport_free(render_viewport);
- }
-
- /* we need to re-bind (annoying!) */
- GPU_offscreen_bind(ofs, false);
+void DRW_draw_render_loop_offscreen(struct Depsgraph *depsgraph,
+ RenderEngineType *engine_type,
+ ARegion *ar,
+ View3D *v3d,
+ const bool draw_background,
+ const bool do_color_management,
+ GPUOffScreen *ofs,
+ GPUViewport *viewport)
+{
+ /* Create temporary viewport if needed. */
+ GPUViewport *render_viewport = viewport;
+ if (viewport == NULL) {
+ render_viewport = GPU_viewport_create_from_offscreen(ofs);
+ }
+
+ GPU_framebuffer_restore();
+
+ /* Reset before using it. */
+ drw_state_prepare_clean_for_draw(&DST);
+  /* WATCH: Force color management to output a color-managed byte buffer by
+ * forcing is_image_render to false. */
+ DST.options.is_image_render = !do_color_management;
+ DST.options.draw_background = draw_background;
+ DRW_draw_render_loop_ex(depsgraph, engine_type, ar, v3d, render_viewport, NULL);
+
+ /* Free temporary viewport. */
+ if (viewport == NULL) {
+ /* don't free data owned by 'ofs' */
+ GPU_viewport_clear_from_offscreen(render_viewport);
+ GPU_viewport_free(render_viewport);
+ }
+
+ /* we need to re-bind (annoying!) */
+ GPU_offscreen_bind(ofs, false);
}
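
A hedged usage sketch for the off-screen entry point above: passing a NULL viewport makes the function create a temporary one from `ofs` and free it again, and `do_color_management` feeds the WATCH note in the body. The wrapper name and all caller-owned objects here are assumptions:

/* Assumes the caller owns a valid GPUOffScreen and draw context. */
static void my_offscreen_snapshot(struct Depsgraph *depsgraph,
                                  RenderEngineType *engine_type,
                                  ARegion *ar,
                                  View3D *v3d,
                                  GPUOffScreen *ofs)
{
  DRW_draw_render_loop_offscreen(depsgraph,
                                 engine_type,
                                 ar,
                                 v3d,
                                 true,  /* draw_background */
                                 true,  /* do_color_management */
                                 ofs,
                                 NULL); /* viewport: create a temporary one */
}
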
/* Helper to check if grease pencil objects exist for rendering. */
bool DRW_render_check_grease_pencil(Depsgraph *depsgraph)
{
- DEG_OBJECT_ITER_FOR_RENDER_ENGINE_BEGIN(depsgraph, ob)
- {
- if (ob->type == OB_GPENCIL) {
- if (DRW_object_visibility_in_active_context(ob) & OB_VISIBLE_SELF) {
- return true;
- }
- }
- }
- DEG_OBJECT_ITER_FOR_RENDER_ENGINE_END;
+ DEG_OBJECT_ITER_FOR_RENDER_ENGINE_BEGIN (depsgraph, ob) {
+ if (ob->type == OB_GPENCIL) {
+ if (DRW_object_visibility_in_active_context(ob) & OB_VISIBLE_SELF) {
+ return true;
+ }
+ }
+ }
+ DEG_OBJECT_ITER_FOR_RENDER_ENGINE_END;
- return false;
+ return false;
}
-static void DRW_render_gpencil_to_image(RenderEngine *engine, struct RenderLayer *render_layer, const rcti *rect)
+static void DRW_render_gpencil_to_image(RenderEngine *engine,
+ struct RenderLayer *render_layer,
+ const rcti *rect)
{
- if (draw_engine_gpencil_type.render_to_image) {
- ViewportEngineData *gpdata = drw_viewport_engine_data_ensure(&draw_engine_gpencil_type);
- draw_engine_gpencil_type.render_to_image(gpdata, engine, render_layer, rect);
- }
+ if (draw_engine_gpencil_type.render_to_image) {
+ ViewportEngineData *gpdata = drw_viewport_engine_data_ensure(&draw_engine_gpencil_type);
+ draw_engine_gpencil_type.render_to_image(gpdata, engine, render_layer, rect);
+ }
}
void DRW_render_gpencil(struct RenderEngine *engine, struct Depsgraph *depsgraph)
{
- /* This function is only valid for Cycles
- * Eevee done all work in the Eevee render directly.
- * Maybe it can be done equal for both engines?
- */
- if (STREQ(engine->type->name, "Eevee")) {
- return;
- }
-
- /* Early out if there are no grease pencil objects, especially important
- * to avoid failing in in background renders without OpenGL context. */
- if (!DRW_render_check_grease_pencil(depsgraph)) {
- return;
- }
-
- Scene *scene = DEG_get_evaluated_scene(depsgraph);
- ViewLayer *view_layer = DEG_get_evaluated_view_layer(depsgraph);
- RenderEngineType *engine_type = engine->type;
- RenderData *r = &scene->r;
- Render *render = engine->re;
- /* Changing Context */
- if (G.background && DST.gl_context == NULL) {
- WM_init_opengl(G_MAIN);
- }
-
- void *re_gl_context = RE_gl_context_get(render);
- void *re_gpu_context = NULL;
-
- /* Changing Context */
- if (re_gl_context != NULL) {
- DRW_opengl_render_context_enable(re_gl_context);
- /* We need to query gpu context after a gl context has been bound. */
- re_gpu_context = RE_gpu_context_get(render);
- DRW_gawain_render_context_enable(re_gpu_context);
- }
- else {
- DRW_opengl_context_enable();
- }
-
- /* Reset before using it. */
- drw_state_prepare_clean_for_draw(&DST);
- DST.options.is_image_render = true;
- DST.options.is_scene_render = true;
- DST.options.draw_background = scene->r.alphamode == R_ADDSKY;
- DST.buffer_finish_called = true;
-
- DST.draw_ctx = (DRWContextState) {
- .scene = scene, .view_layer = view_layer,
- .engine_type = engine_type,
- .depsgraph = depsgraph, .object_mode = OB_MODE_OBJECT,
- };
- drw_context_state_init();
-
- DST.viewport = GPU_viewport_create();
- const int size[2] = { (r->size * r->xsch) / 100, (r->size * r->ysch) / 100 };
- GPU_viewport_size_set(DST.viewport, size);
-
- drw_viewport_var_init();
-
- /* Main rendering. */
- rctf view_rect;
- rcti render_rect;
- RE_GetViewPlane(render, &view_rect, &render_rect);
- if (BLI_rcti_is_empty(&render_rect)) {
- BLI_rcti_init(&render_rect, 0, size[0], 0, size[1]);
- }
-
- RenderResult *render_result = RE_engine_get_result(engine);
- RenderLayer *render_layer = render_result->layers.first;
-
- DRW_render_gpencil_to_image(engine, render_layer, &render_rect);
-
- /* Force cache to reset. */
- drw_viewport_cache_resize();
- GPU_viewport_free(DST.viewport);
- DRW_state_reset();
-
- glDisable(GL_DEPTH_TEST);
-
- /* Restore Drawing area. */
- GPU_framebuffer_restore();
-
- /* Changing Context */
- /* GPXX Review this context */
- DRW_opengl_context_disable();
-
- DST.buffer_finish_called = false;
+  /* This function is only valid for Cycles.
+   * Eevee does all the work directly in its own render pass.
+   * Maybe it can be done the same way for both engines?
+ */
+ if (STREQ(engine->type->name, "Eevee")) {
+ return;
+ }
+
+ /* Early out if there are no grease pencil objects, especially important
+   * to avoid failing in background renders without an OpenGL context. */
+ if (!DRW_render_check_grease_pencil(depsgraph)) {
+ return;
+ }
+
+ Scene *scene = DEG_get_evaluated_scene(depsgraph);
+ ViewLayer *view_layer = DEG_get_evaluated_view_layer(depsgraph);
+ RenderEngineType *engine_type = engine->type;
+ RenderData *r = &scene->r;
+ Render *render = engine->re;
+ /* Changing Context */
+ if (G.background && DST.gl_context == NULL) {
+ WM_init_opengl(G_MAIN);
+ }
+
+ void *re_gl_context = RE_gl_context_get(render);
+ void *re_gpu_context = NULL;
+
+ /* Changing Context */
+ if (re_gl_context != NULL) {
+ DRW_opengl_render_context_enable(re_gl_context);
+ /* We need to query gpu context after a gl context has been bound. */
+ re_gpu_context = RE_gpu_context_get(render);
+ DRW_gawain_render_context_enable(re_gpu_context);
+ }
+ else {
+ DRW_opengl_context_enable();
+ }
+
+ /* Reset before using it. */
+ drw_state_prepare_clean_for_draw(&DST);
+ DST.options.is_image_render = true;
+ DST.options.is_scene_render = true;
+ DST.options.draw_background = scene->r.alphamode == R_ADDSKY;
+ DST.buffer_finish_called = true;
+
+ DST.draw_ctx = (DRWContextState){
+ .scene = scene,
+ .view_layer = view_layer,
+ .engine_type = engine_type,
+ .depsgraph = depsgraph,
+ .object_mode = OB_MODE_OBJECT,
+ };
+ drw_context_state_init();
+
+ DST.viewport = GPU_viewport_create();
+ const int size[2] = {(r->size * r->xsch) / 100, (r->size * r->ysch) / 100};
+ GPU_viewport_size_set(DST.viewport, size);
+
+ drw_viewport_var_init();
+
+ /* Main rendering. */
+ rctf view_rect;
+ rcti render_rect;
+ RE_GetViewPlane(render, &view_rect, &render_rect);
+ if (BLI_rcti_is_empty(&render_rect)) {
+ BLI_rcti_init(&render_rect, 0, size[0], 0, size[1]);
+ }
+
+ RenderResult *render_result = RE_engine_get_result(engine);
+ RenderLayer *render_layer = render_result->layers.first;
+
+ DRW_render_gpencil_to_image(engine, render_layer, &render_rect);
+
+ /* Force cache to reset. */
+ drw_viewport_cache_resize();
+ GPU_viewport_free(DST.viewport);
+ DRW_state_reset();
+
+ glDisable(GL_DEPTH_TEST);
+
+ /* Restore Drawing area. */
+ GPU_framebuffer_restore();
+
+ /* Changing Context */
+ /* GPXX Review this context */
+ DRW_opengl_context_disable();
+
+ DST.buffer_finish_called = false;
}
void DRW_render_to_image(RenderEngine *engine, struct Depsgraph *depsgraph)
{
- Scene *scene = DEG_get_evaluated_scene(depsgraph);
- ViewLayer *view_layer = DEG_get_evaluated_view_layer(depsgraph);
- RenderEngineType *engine_type = engine->type;
- DrawEngineType *draw_engine_type = engine_type->draw_engine;
- Render *render = engine->re;
-
- if (G.background && DST.gl_context == NULL) {
- WM_init_opengl(G_MAIN);
- }
-
- void *re_gl_context = RE_gl_context_get(render);
- void *re_gpu_context = NULL;
-
- /* Changing Context */
- if (re_gl_context != NULL) {
- DRW_opengl_render_context_enable(re_gl_context);
- /* We need to query gpu context after a gl context has been bound. */
- re_gpu_context = RE_gpu_context_get(render);
- DRW_gawain_render_context_enable(re_gpu_context);
- }
- else {
- DRW_opengl_context_enable();
- }
-
- /* IMPORTANT: We dont support immediate mode in render mode!
- * This shall remain in effect until immediate mode supports
- * multiple threads. */
-
- /* Reset before using it. */
- drw_state_prepare_clean_for_draw(&DST);
- DST.options.is_image_render = true;
- DST.options.is_scene_render = true;
- DST.options.draw_background = scene->r.alphamode == R_ADDSKY;
-
- DST.draw_ctx = (DRWContextState){
- .scene = scene, .view_layer = view_layer,
- .engine_type = engine_type,
- .depsgraph = depsgraph, .object_mode = OB_MODE_OBJECT,
- };
- drw_context_state_init();
-
- DST.viewport = GPU_viewport_create();
- const int size[2] = {engine->resolution_x, engine->resolution_y};
- GPU_viewport_size_set(DST.viewport, size);
-
- drw_viewport_var_init();
-
- ViewportEngineData *data = drw_viewport_engine_data_ensure(draw_engine_type);
-
- /* set default viewport */
- glViewport(0, 0, size[0], size[1]);
-
- /* Main rendering. */
- rctf view_rect;
- rcti render_rect;
- RE_GetViewPlane(render, &view_rect, &render_rect);
- if (BLI_rcti_is_empty(&render_rect)) {
- BLI_rcti_init(&render_rect, 0, size[0], 0, size[1]);
- }
-
- /* Reset state before drawing */
- DRW_state_reset();
-
- /* Init render result. */
- RenderResult *render_result = RE_engine_begin_result(
- engine,
- 0,
- 0,
- (int)size[0],
- (int)size[1],
- view_layer->name,
- /* RR_ALL_VIEWS */ NULL);
-
- RenderLayer *render_layer = render_result->layers.first;
- for (RenderView *render_view = render_result->views.first;
- render_view != NULL;
- render_view = render_view->next)
- {
- RE_SetActiveRenderView(render, render_view->name);
- engine_type->draw_engine->render_to_image(data, engine, render_layer, &render_rect);
- /* grease pencil: render result is merged in the previous render result. */
- if (DRW_render_check_grease_pencil(depsgraph)) {
- DRW_state_reset();
- DRW_render_gpencil_to_image(engine, render_layer, &render_rect);
- }
- DST.buffer_finish_called = false;
- }
-
- RE_engine_end_result(engine, render_result, false, false, false);
-
- /* Force cache to reset. */
- drw_viewport_cache_resize();
-
- GPU_viewport_free(DST.viewport);
- GPU_framebuffer_restore();
+ Scene *scene = DEG_get_evaluated_scene(depsgraph);
+ ViewLayer *view_layer = DEG_get_evaluated_view_layer(depsgraph);
+ RenderEngineType *engine_type = engine->type;
+ DrawEngineType *draw_engine_type = engine_type->draw_engine;
+ Render *render = engine->re;
+
+ if (G.background && DST.gl_context == NULL) {
+ WM_init_opengl(G_MAIN);
+ }
+
+ void *re_gl_context = RE_gl_context_get(render);
+ void *re_gpu_context = NULL;
+
+ /* Changing Context */
+ if (re_gl_context != NULL) {
+ DRW_opengl_render_context_enable(re_gl_context);
+ /* We need to query gpu context after a gl context has been bound. */
+ re_gpu_context = RE_gpu_context_get(render);
+ DRW_gawain_render_context_enable(re_gpu_context);
+ }
+ else {
+ DRW_opengl_context_enable();
+ }
+
+  /* IMPORTANT: We don't support immediate mode in render mode!
+ * This shall remain in effect until immediate mode supports
+ * multiple threads. */
+
+ /* Reset before using it. */
+ drw_state_prepare_clean_for_draw(&DST);
+ DST.options.is_image_render = true;
+ DST.options.is_scene_render = true;
+ DST.options.draw_background = scene->r.alphamode == R_ADDSKY;
+
+ DST.draw_ctx = (DRWContextState){
+ .scene = scene,
+ .view_layer = view_layer,
+ .engine_type = engine_type,
+ .depsgraph = depsgraph,
+ .object_mode = OB_MODE_OBJECT,
+ };
+ drw_context_state_init();
+
+ DST.viewport = GPU_viewport_create();
+ const int size[2] = {engine->resolution_x, engine->resolution_y};
+ GPU_viewport_size_set(DST.viewport, size);
+
+ drw_viewport_var_init();
+
+ ViewportEngineData *data = drw_viewport_engine_data_ensure(draw_engine_type);
+
+ /* set default viewport */
+ glViewport(0, 0, size[0], size[1]);
+
+ /* Main rendering. */
+ rctf view_rect;
+ rcti render_rect;
+ RE_GetViewPlane(render, &view_rect, &render_rect);
+ if (BLI_rcti_is_empty(&render_rect)) {
+ BLI_rcti_init(&render_rect, 0, size[0], 0, size[1]);
+ }
+
+ /* Reset state before drawing */
+ DRW_state_reset();
+
+ /* Init render result. */
+ RenderResult *render_result = RE_engine_begin_result(engine,
+ 0,
+ 0,
+ (int)size[0],
+ (int)size[1],
+ view_layer->name,
+ /* RR_ALL_VIEWS */ NULL);
+
+ RenderLayer *render_layer = render_result->layers.first;
+ for (RenderView *render_view = render_result->views.first; render_view != NULL;
+ render_view = render_view->next) {
+ RE_SetActiveRenderView(render, render_view->name);
+ engine_type->draw_engine->render_to_image(data, engine, render_layer, &render_rect);
+    /* grease pencil: render result is merged into the previous render result. */
+ if (DRW_render_check_grease_pencil(depsgraph)) {
+ DRW_state_reset();
+ DRW_render_gpencil_to_image(engine, render_layer, &render_rect);
+ }
+ DST.buffer_finish_called = false;
+ }
+
+ RE_engine_end_result(engine, render_result, false, false, false);
+
+ /* Force cache to reset. */
+ drw_viewport_cache_resize();
+
+ GPU_viewport_free(DST.viewport);
+ GPU_framebuffer_restore();
#ifdef DEBUG
- /* Avoid accidental reuse. */
- drw_state_ensure_not_reused(&DST);
+ /* Avoid accidental reuse. */
+ drw_state_ensure_not_reused(&DST);
#endif
- /* Reset state after drawing */
- DRW_state_reset();
+ /* Reset state after drawing */
+ DRW_state_reset();
- /* Changing Context */
- if (re_gl_context != NULL) {
- DRW_gawain_render_context_disable(re_gpu_context);
- DRW_opengl_render_context_disable(re_gl_context);
- }
- else {
- DRW_opengl_context_disable();
- }
+ /* Changing Context */
+ if (re_gl_context != NULL) {
+ DRW_gawain_render_context_disable(re_gpu_context);
+ DRW_opengl_render_context_disable(re_gl_context);
+ }
+ else {
+ DRW_opengl_context_disable();
+ }
}
void DRW_render_object_iter(
- void *vedata, RenderEngine *engine, struct Depsgraph *depsgraph,
- void (*callback)(void *vedata, Object *ob, RenderEngine *engine, struct Depsgraph *depsgraph))
-{
- const DRWContextState *draw_ctx = DRW_context_state_get();
-
- DRW_hair_init();
-
- const int object_type_exclude_viewport = draw_ctx->v3d ? draw_ctx->v3d->object_type_exclude_viewport : 0;
- DEG_OBJECT_ITER_BEGIN(depsgraph, ob,
- DEG_ITER_OBJECT_FLAG_LINKED_DIRECTLY |
- DEG_ITER_OBJECT_FLAG_LINKED_VIA_SET |
- DEG_ITER_OBJECT_FLAG_VISIBLE |
- DEG_ITER_OBJECT_FLAG_DUPLI)
- {
- if ((object_type_exclude_viewport & (1 << ob->type)) == 0) {
- DST.dupli_parent = data_.dupli_parent;
- DST.dupli_source = data_.dupli_object_current;
- DST.ob_state = NULL;
- callback(vedata, ob, engine, depsgraph);
-
- drw_batch_cache_generate_requested(ob);
- }
- }
- DEG_OBJECT_ITER_END;
+ void *vedata,
+ RenderEngine *engine,
+ struct Depsgraph *depsgraph,
+ void (*callback)(void *vedata, Object *ob, RenderEngine *engine, struct Depsgraph *depsgraph))
+{
+ const DRWContextState *draw_ctx = DRW_context_state_get();
+
+ DRW_hair_init();
+
+ const int object_type_exclude_viewport = draw_ctx->v3d ?
+ draw_ctx->v3d->object_type_exclude_viewport :
+ 0;
+ DEG_OBJECT_ITER_BEGIN (depsgraph,
+ ob,
+ DEG_ITER_OBJECT_FLAG_LINKED_DIRECTLY |
+ DEG_ITER_OBJECT_FLAG_LINKED_VIA_SET | DEG_ITER_OBJECT_FLAG_VISIBLE |
+ DEG_ITER_OBJECT_FLAG_DUPLI) {
+ if ((object_type_exclude_viewport & (1 << ob->type)) == 0) {
+ DST.dupli_parent = data_.dupli_parent;
+ DST.dupli_source = data_.dupli_object_current;
+ DST.ob_state = NULL;
+ callback(vedata, ob, engine, depsgraph);
+
+ drw_batch_cache_generate_requested(ob);
+ }
+ }
+ DEG_OBJECT_ITER_END;
}
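
A hedged sketch of a callback matching the signature above; the iterator has already applied visibility and per-viewport type exclusion before each call, and DST.dupli_* are set for the current instance. `my_engine_sync_mesh` and the callback name are hypothetical engine internals:

/* Hypothetical engine internal, declared only to make the sketch complete. */
static void my_engine_sync_mesh(void *vedata, Object *ob);

static void my_engine_render_object(void *vedata,
                                    Object *ob,
                                    RenderEngine *UNUSED(engine),
                                    struct Depsgraph *UNUSED(depsgraph))
{
  if (ob->type == OB_MESH) {
    my_engine_sync_mesh(vedata, ob);
  }
}

/* Caller side, from an engine's render_to_image implementation:
 *   DRW_render_object_iter(vedata, engine, depsgraph, my_engine_render_object); */
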
/* Assume a valid gl context is bound (and that the gl_context_mutex has been acquired).
 * This function only sets up DST and executes the given function.
 * Warning: as with DRW_render_to_image, you cannot use the default lists (dfbl & dtxl). */
-void DRW_custom_pipeline(
- DrawEngineType *draw_engine_type,
- struct Depsgraph *depsgraph,
- void (*callback)(void *vedata, void *user_data),
- void *user_data)
+void DRW_custom_pipeline(DrawEngineType *draw_engine_type,
+ struct Depsgraph *depsgraph,
+ void (*callback)(void *vedata, void *user_data),
+ void *user_data)
{
- Scene *scene = DEG_get_evaluated_scene(depsgraph);
- ViewLayer *view_layer = DEG_get_evaluated_view_layer(depsgraph);
+ Scene *scene = DEG_get_evaluated_scene(depsgraph);
+ ViewLayer *view_layer = DEG_get_evaluated_view_layer(depsgraph);
- /* Reset before using it. */
- drw_state_prepare_clean_for_draw(&DST);
- DST.options.is_image_render = true;
- DST.options.is_scene_render = true;
- DST.options.draw_background = false;
+ /* Reset before using it. */
+ drw_state_prepare_clean_for_draw(&DST);
+ DST.options.is_image_render = true;
+ DST.options.is_scene_render = true;
+ DST.options.draw_background = false;
- DST.draw_ctx = (DRWContextState){
- .scene = scene,
- .view_layer = view_layer,
- .engine_type = NULL,
- .depsgraph = depsgraph,
- .object_mode = OB_MODE_OBJECT,
- };
- drw_context_state_init();
+ DST.draw_ctx = (DRWContextState){
+ .scene = scene,
+ .view_layer = view_layer,
+ .engine_type = NULL,
+ .depsgraph = depsgraph,
+ .object_mode = OB_MODE_OBJECT,
+ };
+ drw_context_state_init();
- DST.viewport = GPU_viewport_create();
- const int size[2] = {1, 1};
- GPU_viewport_size_set(DST.viewport, size);
+ DST.viewport = GPU_viewport_create();
+ const int size[2] = {1, 1};
+ GPU_viewport_size_set(DST.viewport, size);
- drw_viewport_var_init();
+ drw_viewport_var_init();
- DRW_hair_init();
+ DRW_hair_init();
- ViewportEngineData *data = drw_viewport_engine_data_ensure(draw_engine_type);
+ ViewportEngineData *data = drw_viewport_engine_data_ensure(draw_engine_type);
- /* Execute the callback */
- callback(data, user_data);
- DST.buffer_finish_called = false;
+ /* Execute the callback */
+ callback(data, user_data);
+ DST.buffer_finish_called = false;
- GPU_viewport_free(DST.viewport);
- GPU_framebuffer_restore();
+ GPU_viewport_free(DST.viewport);
+ GPU_framebuffer_restore();
- /* The use of custom pipeline in other thread using the same
- * resources as the main thread (viewport) may lead to data
- * races and undefined behavior on certain drivers. Using
- * GPU_finish to sync seems to fix the issue. (see T62997) */
- GPU_finish();
+  /* Using a custom pipeline in another thread that shares resources with
+   * the main thread (viewport) may lead to data races and undefined
+   * behavior on certain drivers. Using GPU_finish to sync seems to fix
+   * the issue (see T62997). */
+ GPU_finish();
#ifdef DEBUG
- /* Avoid accidental reuse. */
- drw_state_ensure_not_reused(&DST);
+ /* Avoid accidental reuse. */
+ drw_state_ensure_not_reused(&DST);
#endif
}
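
A hedged usage sketch for DRW_custom_pipeline: the callback receives the engine's viewport data first and the opaque user pointer second, exactly as wired above. `my_probe_engine_type` and `MyProbeJob` are hypothetical names:

typedef struct MyProbeJob {
  int sample_count;
} MyProbeJob;

static void my_probe_render(void *vedata, void *user_data)
{
  MyProbeJob *job = user_data;
  (void)vedata;
  (void)job;
  /* Build and submit the engine's passes for job->sample_count here;
   * the default framebuffer/texture lists are not available (see the
   * warning above DRW_custom_pipeline). */
}

/* Caller (a valid GL context must already be bound, see comment above):
 *   MyProbeJob job = {.sample_count = 64};
 *   DRW_custom_pipeline(&my_probe_engine_type, depsgraph, my_probe_render, &job); */
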
static struct DRWSelectBuffer {
- struct GPUFrameBuffer *framebuffer_depth_only;
- struct GPUFrameBuffer *framebuffer_select_id;
- struct GPUTexture *texture_depth;
- struct GPUTexture *texture_u32;
+ struct GPUFrameBuffer *framebuffer_depth_only;
+ struct GPUFrameBuffer *framebuffer_select_id;
+ struct GPUTexture *texture_depth;
+ struct GPUTexture *texture_u32;
} g_select_buffer = {NULL};
static void draw_select_framebuffer_depth_only_setup(const int size[2])
{
- if (g_select_buffer.framebuffer_depth_only == NULL) {
- g_select_buffer.framebuffer_depth_only = GPU_framebuffer_create();
- g_select_buffer.framebuffer_select_id = GPU_framebuffer_create();
- }
+ if (g_select_buffer.framebuffer_depth_only == NULL) {
+ g_select_buffer.framebuffer_depth_only = GPU_framebuffer_create();
+ g_select_buffer.framebuffer_select_id = GPU_framebuffer_create();
+ }
- if ((g_select_buffer.texture_depth != NULL) &&
- ((GPU_texture_width(g_select_buffer.texture_depth) != size[0]) ||
- (GPU_texture_height(g_select_buffer.texture_depth) != size[1])))
- {
- GPU_texture_free(g_select_buffer.texture_depth);
- g_select_buffer.texture_depth = NULL;
- }
+ if ((g_select_buffer.texture_depth != NULL) &&
+ ((GPU_texture_width(g_select_buffer.texture_depth) != size[0]) ||
+ (GPU_texture_height(g_select_buffer.texture_depth) != size[1]))) {
+ GPU_texture_free(g_select_buffer.texture_depth);
+ g_select_buffer.texture_depth = NULL;
+ }
- if (g_select_buffer.texture_depth == NULL) {
- g_select_buffer.texture_depth = GPU_texture_create_2d(
- size[0], size[1], GPU_DEPTH_COMPONENT24, NULL, NULL);
+ if (g_select_buffer.texture_depth == NULL) {
+ g_select_buffer.texture_depth = GPU_texture_create_2d(
+ size[0], size[1], GPU_DEPTH_COMPONENT24, NULL, NULL);
- GPU_framebuffer_texture_attach(
- g_select_buffer.framebuffer_depth_only,
- g_select_buffer.texture_depth, 0, 0);
+ GPU_framebuffer_texture_attach(
+ g_select_buffer.framebuffer_depth_only, g_select_buffer.texture_depth, 0, 0);
- GPU_framebuffer_texture_attach(
- g_select_buffer.framebuffer_select_id,
- g_select_buffer.texture_depth, 0, 0);
+ GPU_framebuffer_texture_attach(
+ g_select_buffer.framebuffer_select_id, g_select_buffer.texture_depth, 0, 0);
- GPU_framebuffer_check_valid(
- g_select_buffer.framebuffer_depth_only, NULL);
- GPU_framebuffer_check_valid(
- g_select_buffer.framebuffer_select_id, NULL);
- }
+ GPU_framebuffer_check_valid(g_select_buffer.framebuffer_depth_only, NULL);
+ GPU_framebuffer_check_valid(g_select_buffer.framebuffer_select_id, NULL);
+ }
}
static void draw_select_framebuffer_select_id_setup(const int size[2])
{
- draw_select_framebuffer_depth_only_setup(size);
+ draw_select_framebuffer_depth_only_setup(size);
- if ((g_select_buffer.texture_u32 != NULL) &&
- ((GPU_texture_width(g_select_buffer.texture_u32) != size[0]) ||
- (GPU_texture_height(g_select_buffer.texture_u32) != size[1])))
- {
- GPU_texture_free(g_select_buffer.texture_u32);
- g_select_buffer.texture_u32 = NULL;
- }
+ if ((g_select_buffer.texture_u32 != NULL) &&
+ ((GPU_texture_width(g_select_buffer.texture_u32) != size[0]) ||
+ (GPU_texture_height(g_select_buffer.texture_u32) != size[1]))) {
+ GPU_texture_free(g_select_buffer.texture_u32);
+ g_select_buffer.texture_u32 = NULL;
+ }
- if (g_select_buffer.texture_u32 == NULL) {
- g_select_buffer.texture_u32 = GPU_texture_create_2d(
- size[0], size[1], GPU_R32UI, NULL, NULL);
+ if (g_select_buffer.texture_u32 == NULL) {
+ g_select_buffer.texture_u32 = GPU_texture_create_2d(size[0], size[1], GPU_R32UI, NULL, NULL);
- GPU_framebuffer_texture_attach(
- g_select_buffer.framebuffer_select_id,
- g_select_buffer.texture_u32, 0, 0);
+ GPU_framebuffer_texture_attach(
+ g_select_buffer.framebuffer_select_id, g_select_buffer.texture_u32, 0, 0);
- GPU_framebuffer_check_valid(
- g_select_buffer.framebuffer_select_id, NULL);
- }
+ GPU_framebuffer_check_valid(g_select_buffer.framebuffer_select_id, NULL);
+ }
}
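
Both setup functions above follow the same pattern: free the texture when the requested size changed, then lazily recreate and re-attach it. A hedged sketch factoring that pattern out; the helper name is hypothetical, `eGPUTextureFormat` as the format type is an assumption, and only the GPU_texture_* calls are taken from the code above:

static GPUTexture *ensure_sized_texture_2d(GPUTexture *tex,
                                           const int size[2],
                                           eGPUTextureFormat format)
{
  /* Free when the cached size no longer matches the request. */
  if (tex != NULL &&
      (GPU_texture_width(tex) != size[0] || GPU_texture_height(tex) != size[1])) {
    GPU_texture_free(tex);
    tex = NULL;
  }
  /* Lazily (re)create; the caller re-attaches it to its framebuffers. */
  if (tex == NULL) {
    tex = GPU_texture_create_2d(size[0], size[1], format, NULL, NULL);
  }
  return tex;
}
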
/* Must run after all instance data has been added. */
void DRW_render_instance_buffer_finish(void)
{
- BLI_assert(!DST.buffer_finish_called && "DRW_render_instance_buffer_finish called twice!");
- DST.buffer_finish_called = true;
- DRW_instance_buffer_finish(DST.idatalist);
+ BLI_assert(!DST.buffer_finish_called && "DRW_render_instance_buffer_finish called twice!");
+ DST.buffer_finish_called = true;
+ DRW_instance_buffer_finish(DST.idatalist);
}
/**
* object mode select-loop, see: ED_view3d_draw_select_loop (legacy drawing).
*/
-void DRW_draw_select_loop(
- struct Depsgraph *depsgraph,
- ARegion *ar, View3D *v3d,
- bool UNUSED(use_obedit_skip), bool draw_surface, bool UNUSED(use_nearest), const rcti *rect,
- DRW_SelectPassFn select_pass_fn, void *select_pass_user_data,
- DRW_ObjectFilterFn object_filter_fn, void *object_filter_user_data)
-{
- Scene *scene = DEG_get_evaluated_scene(depsgraph);
- RenderEngineType *engine_type = ED_view3d_engine_type(scene, v3d->shading.type);
- ViewLayer *view_layer = DEG_get_evaluated_view_layer(depsgraph);
- Object *obact = OBACT(view_layer);
- Object *obedit = OBEDIT_FROM_OBACT(obact);
+void DRW_draw_select_loop(struct Depsgraph *depsgraph,
+ ARegion *ar,
+ View3D *v3d,
+ bool UNUSED(use_obedit_skip),
+ bool draw_surface,
+ bool UNUSED(use_nearest),
+ const rcti *rect,
+ DRW_SelectPassFn select_pass_fn,
+ void *select_pass_user_data,
+ DRW_ObjectFilterFn object_filter_fn,
+ void *object_filter_user_data)
+{
+ Scene *scene = DEG_get_evaluated_scene(depsgraph);
+ RenderEngineType *engine_type = ED_view3d_engine_type(scene, v3d->shading.type);
+ ViewLayer *view_layer = DEG_get_evaluated_view_layer(depsgraph);
+ Object *obact = OBACT(view_layer);
+ Object *obedit = OBEDIT_FROM_OBACT(obact);
#ifndef USE_GPU_SELECT
- UNUSED_VARS(vc, scene, view_layer, v3d, ar, rect);
-#else
- RegionView3D *rv3d = ar->regiondata;
-
- /* Reset before using it. */
- drw_state_prepare_clean_for_draw(&DST);
-
- bool use_obedit = false;
- int obedit_mode = 0;
- if (obedit != NULL) {
- if (obedit->type == OB_MBALL) {
- use_obedit = true;
- obedit_mode = CTX_MODE_EDIT_METABALL;
- }
- else if (obedit->type == OB_ARMATURE) {
- use_obedit = true;
- obedit_mode = CTX_MODE_EDIT_ARMATURE;
- }
- }
- if (v3d->overlay.flag & V3D_OVERLAY_BONE_SELECT) {
- if (!(v3d->flag2 & V3D_HIDE_OVERLAYS)) {
- /* Note: don't use "BKE_object_pose_armature_get" here, it breaks selection. */
- Object *obpose = OBPOSE_FROM_OBACT(obact);
- if (obpose) {
- use_obedit = true;
- obedit_mode = CTX_MODE_POSE;
- }
- }
- }
-
- int viewport_size[2] = {BLI_rcti_size_x(rect), BLI_rcti_size_y(rect)};
- struct GPUViewport *viewport = GPU_viewport_create();
- GPU_viewport_size_set(viewport, viewport_size);
-
- DST.viewport = viewport;
- DST.options.is_select = true;
-
- /* Get list of enabled engines */
- if (use_obedit) {
- drw_engines_enable_from_paint_mode(obedit_mode);
- drw_engines_enable_from_mode(obedit_mode);
- }
- else if (!draw_surface) {
- /* grease pencil selection */
- use_drw_engine(&draw_engine_gpencil_type);
-
- drw_engines_enable_from_overlays(v3d->overlay.flag);
- drw_engines_enable_from_object_mode();
- }
- else {
- drw_engines_enable_basic();
- /* grease pencil selection */
- use_drw_engine(&draw_engine_gpencil_type);
-
- drw_engines_enable_from_overlays(v3d->overlay.flag);
- drw_engines_enable_from_object_mode();
- }
-
- /* Setup viewport */
-
- /* Instead of 'DRW_context_state_init(C, &DST.draw_ctx)', assign from args */
- DST.draw_ctx = (DRWContextState){
- .ar = ar, .rv3d = rv3d, .v3d = v3d,
- .scene = scene, .view_layer = view_layer, .obact = obact,
- .engine_type = engine_type,
- .depsgraph = depsgraph,
- };
- drw_context_state_init();
- drw_viewport_var_init();
-
- /* Update ubos */
- DRW_globals_update();
-
- /* Init engines */
- drw_engines_init();
- DRW_hair_init();
-
- {
- drw_engines_cache_init();
- drw_engines_world_update(scene);
-
- if (use_obedit) {
-#if 0
- drw_engines_cache_populate(obact);
+ UNUSED_VARS(vc, scene, view_layer, v3d, ar, rect);
#else
- FOREACH_OBJECT_IN_MODE_BEGIN (view_layer, v3d, obact->type, obact->mode, ob_iter) {
- drw_engines_cache_populate(ob_iter);
- }
- FOREACH_OBJECT_IN_MODE_END;
-#endif
- }
- else {
- const int object_type_exclude_select = (
- v3d->object_type_exclude_viewport | v3d->object_type_exclude_select
- );
- bool filter_exclude = false;
- DEG_OBJECT_ITER_BEGIN(depsgraph, ob,
- DEG_ITER_OBJECT_FLAG_LINKED_DIRECTLY |
- DEG_ITER_OBJECT_FLAG_LINKED_VIA_SET |
- DEG_ITER_OBJECT_FLAG_VISIBLE |
- DEG_ITER_OBJECT_FLAG_DUPLI)
- {
- if (v3d->localvd && ((v3d->local_view_uuid & ob->base_local_view_bits) == 0)) {
- continue;
- }
-
- if ((ob->base_flag & BASE_SELECTABLE) &&
- (object_type_exclude_select & (1 << ob->type)) == 0)
- {
- if (object_filter_fn != NULL) {
- if (ob->base_flag & BASE_FROM_DUPLI) {
- /* pass (use previous filter_exclude value) */
- }
- else {
- filter_exclude = (object_filter_fn(ob, object_filter_user_data) == false);
- }
- if (filter_exclude) {
- continue;
- }
- }
-
- /* This relies on dupli instances being after their instancing object. */
- if ((ob->base_flag & BASE_FROM_DUPLI) == 0) {
- Object *ob_orig = DEG_get_original_object(ob);
- DRW_select_load_id(ob_orig->select_id);
- }
- DST.dupli_parent = data_.dupli_parent;
- DST.dupli_source = data_.dupli_object_current;
- drw_engines_cache_populate(ob);
- }
- }
- DEG_OBJECT_ITER_END;
- }
-
- drw_engines_cache_finish();
-
- DRW_render_instance_buffer_finish();
- }
-
- /* Setup framebuffer */
- draw_select_framebuffer_depth_only_setup(viewport_size);
- GPU_framebuffer_bind(g_select_buffer.framebuffer_depth_only);
- GPU_framebuffer_clear_depth(g_select_buffer.framebuffer_depth_only, 1.0f);
-
- /* Start Drawing */
- DRW_state_reset();
- DRW_draw_callbacks_pre_scene();
-
- DRW_hair_update();
-
- DRW_state_lock(
- DRW_STATE_WRITE_DEPTH |
- DRW_STATE_DEPTH_ALWAYS |
- DRW_STATE_DEPTH_LESS_EQUAL |
- DRW_STATE_DEPTH_EQUAL |
- DRW_STATE_DEPTH_GREATER |
- DRW_STATE_DEPTH_ALWAYS);
-
- /* Only 1-2 passes. */
- while (true) {
- if (!select_pass_fn(DRW_SELECT_PASS_PRE, select_pass_user_data)) {
- break;
- }
-
- drw_engines_draw_scene();
-
- if (!select_pass_fn(DRW_SELECT_PASS_POST, select_pass_user_data)) {
- break;
- }
- }
-
- DRW_state_lock(0);
-
- DRW_draw_callbacks_post_scene();
-
- DRW_state_reset();
- drw_engines_disable();
-
-#ifdef DEBUG
- /* Avoid accidental reuse. */
- drw_state_ensure_not_reused(&DST);
-#endif
- GPU_framebuffer_restore();
-
- /* Cleanup for selection state */
- GPU_viewport_free(viewport);
-#endif /* USE_GPU_SELECT */
+ RegionView3D *rv3d = ar->regiondata;
+
+ /* Reset before using it. */
+ drw_state_prepare_clean_for_draw(&DST);
+
+ bool use_obedit = false;
+ int obedit_mode = 0;
+ if (obedit != NULL) {
+ if (obedit->type == OB_MBALL) {
+ use_obedit = true;
+ obedit_mode = CTX_MODE_EDIT_METABALL;
+ }
+ else if (obedit->type == OB_ARMATURE) {
+ use_obedit = true;
+ obedit_mode = CTX_MODE_EDIT_ARMATURE;
+ }
+ }
+ if (v3d->overlay.flag & V3D_OVERLAY_BONE_SELECT) {
+ if (!(v3d->flag2 & V3D_HIDE_OVERLAYS)) {
+ /* Note: don't use "BKE_object_pose_armature_get" here, it breaks selection. */
+ Object *obpose = OBPOSE_FROM_OBACT(obact);
+ if (obpose) {
+ use_obedit = true;
+ obedit_mode = CTX_MODE_POSE;
+ }
+ }
+ }
+
+ int viewport_size[2] = {BLI_rcti_size_x(rect), BLI_rcti_size_y(rect)};
+ struct GPUViewport *viewport = GPU_viewport_create();
+ GPU_viewport_size_set(viewport, viewport_size);
+
+ DST.viewport = viewport;
+ DST.options.is_select = true;
+
+ /* Get list of enabled engines */
+ if (use_obedit) {
+ drw_engines_enable_from_paint_mode(obedit_mode);
+ drw_engines_enable_from_mode(obedit_mode);
+ }
+ else if (!draw_surface) {
+ /* grease pencil selection */
+ use_drw_engine(&draw_engine_gpencil_type);
+
+ drw_engines_enable_from_overlays(v3d->overlay.flag);
+ drw_engines_enable_from_object_mode();
+ }
+ else {
+ drw_engines_enable_basic();
+ /* grease pencil selection */
+ use_drw_engine(&draw_engine_gpencil_type);
+
+ drw_engines_enable_from_overlays(v3d->overlay.flag);
+ drw_engines_enable_from_object_mode();
+ }
+
+ /* Setup viewport */
+
+ /* Instead of 'DRW_context_state_init(C, &DST.draw_ctx)', assign from args */
+ DST.draw_ctx = (DRWContextState){
+ .ar = ar,
+ .rv3d = rv3d,
+ .v3d = v3d,
+ .scene = scene,
+ .view_layer = view_layer,
+ .obact = obact,
+ .engine_type = engine_type,
+ .depsgraph = depsgraph,
+ };
+ drw_context_state_init();
+ drw_viewport_var_init();
+
+ /* Update ubos */
+ DRW_globals_update();
+
+ /* Init engines */
+ drw_engines_init();
+ DRW_hair_init();
+
+ {
+ drw_engines_cache_init();
+ drw_engines_world_update(scene);
+
+ if (use_obedit) {
+# if 0
+ drw_engines_cache_populate(obact);
+# else
+ FOREACH_OBJECT_IN_MODE_BEGIN (view_layer, v3d, obact->type, obact->mode, ob_iter) {
+ drw_engines_cache_populate(ob_iter);
+ }
+ FOREACH_OBJECT_IN_MODE_END;
+# endif
+ }
+ else {
+ const int object_type_exclude_select = (v3d->object_type_exclude_viewport |
+ v3d->object_type_exclude_select);
+ bool filter_exclude = false;
+ DEG_OBJECT_ITER_BEGIN (depsgraph,
+ ob,
+ DEG_ITER_OBJECT_FLAG_LINKED_DIRECTLY |
+ DEG_ITER_OBJECT_FLAG_LINKED_VIA_SET |
+ DEG_ITER_OBJECT_FLAG_VISIBLE | DEG_ITER_OBJECT_FLAG_DUPLI) {
+ if (v3d->localvd && ((v3d->local_view_uuid & ob->base_local_view_bits) == 0)) {
+ continue;
+ }
+
+ if ((ob->base_flag & BASE_SELECTABLE) &&
+ (object_type_exclude_select & (1 << ob->type)) == 0) {
+ if (object_filter_fn != NULL) {
+ if (ob->base_flag & BASE_FROM_DUPLI) {
+ /* pass (use previous filter_exclude value) */
+ }
+ else {
+ filter_exclude = (object_filter_fn(ob, object_filter_user_data) == false);
+ }
+ if (filter_exclude) {
+ continue;
+ }
+ }
+
+ /* This relies on dupli instances being after their instancing object. */
+ if ((ob->base_flag & BASE_FROM_DUPLI) == 0) {
+ Object *ob_orig = DEG_get_original_object(ob);
+ DRW_select_load_id(ob_orig->select_id);
+ }
+ DST.dupli_parent = data_.dupli_parent;
+ DST.dupli_source = data_.dupli_object_current;
+ drw_engines_cache_populate(ob);
+ }
+ }
+ DEG_OBJECT_ITER_END;
+ }
+
+ drw_engines_cache_finish();
+
+ DRW_render_instance_buffer_finish();
+ }
+
+ /* Setup framebuffer */
+ draw_select_framebuffer_depth_only_setup(viewport_size);
+ GPU_framebuffer_bind(g_select_buffer.framebuffer_depth_only);
+ GPU_framebuffer_clear_depth(g_select_buffer.framebuffer_depth_only, 1.0f);
+
+ /* Start Drawing */
+ DRW_state_reset();
+ DRW_draw_callbacks_pre_scene();
+
+ DRW_hair_update();
+
+ DRW_state_lock(DRW_STATE_WRITE_DEPTH | DRW_STATE_DEPTH_ALWAYS | DRW_STATE_DEPTH_LESS_EQUAL |
+ DRW_STATE_DEPTH_EQUAL | DRW_STATE_DEPTH_GREATER | DRW_STATE_DEPTH_ALWAYS);
+
+ /* Only 1-2 passes. */
+ while (true) {
+ if (!select_pass_fn(DRW_SELECT_PASS_PRE, select_pass_user_data)) {
+ break;
+ }
+
+ drw_engines_draw_scene();
+
+ if (!select_pass_fn(DRW_SELECT_PASS_POST, select_pass_user_data)) {
+ break;
+ }
+ }
+
+ DRW_state_lock(0);
+
+ DRW_draw_callbacks_post_scene();
+
+ DRW_state_reset();
+ drw_engines_disable();
+
+# ifdef DEBUG
+ /* Avoid accidental reuse. */
+ drw_state_ensure_not_reused(&DST);
+# endif
+ GPU_framebuffer_restore();
+
+ /* Cleanup for selection state */
+ GPU_viewport_free(viewport);
+#endif /* USE_GPU_SELECT */
}
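
The `while (true)` block above defines a small protocol: `select_pass_fn` is called with DRW_SELECT_PASS_PRE before each scene draw and DRW_SELECT_PASS_POST after it, and the loop stops as soon as the callback returns false, giving the advertised 1-2 passes. A hedged sketch of a conforming callback; `MySelectState` is hypothetical and `eDRWSelectStage` as the stage type is an assumption:

#include <stdbool.h>

typedef struct MySelectState {
  int posts_seen;
} MySelectState;

/* Matches DRW_SelectPassFn; `eDRWSelectStage` is assumed to be the enum
 * carrying DRW_SELECT_PASS_PRE / DRW_SELECT_PASS_POST. */
static bool my_select_pass(eDRWSelectStage stage, void *user_data)
{
  MySelectState *state = user_data;
  if (stage == DRW_SELECT_PASS_PRE) {
    /* Set up the selection buffer for this pass, then allow drawing. */
    return true;
  }
  /* DRW_SELECT_PASS_POST: request at most one extra pass, so the loop
   * runs twice and then stops. */
  return (++state->posts_seen < 2);
}
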
static void draw_depth_texture_to_screen(GPUTexture *texture)
{
- const float w = (float)GPU_texture_width(texture);
- const float h = (float)GPU_texture_height(texture);
+ const float w = (float)GPU_texture_width(texture);
+ const float h = (float)GPU_texture_height(texture);
- GPUVertFormat *format = immVertexFormat();
- uint texcoord = GPU_vertformat_attr_add(format, "texCoord", GPU_COMP_F32, 2, GPU_FETCH_FLOAT);
- uint pos = GPU_vertformat_attr_add(format, "pos", GPU_COMP_F32, 2, GPU_FETCH_FLOAT);
+ GPUVertFormat *format = immVertexFormat();
+ uint texcoord = GPU_vertformat_attr_add(format, "texCoord", GPU_COMP_F32, 2, GPU_FETCH_FLOAT);
+ uint pos = GPU_vertformat_attr_add(format, "pos", GPU_COMP_F32, 2, GPU_FETCH_FLOAT);
- immBindBuiltinProgram(GPU_SHADER_3D_IMAGE_DEPTH_COPY);
+ immBindBuiltinProgram(GPU_SHADER_3D_IMAGE_DEPTH_COPY);
- GPU_texture_bind(texture, 0);
+ GPU_texture_bind(texture, 0);
- immUniform1i("image", 0); /* default GL_TEXTURE0 unit */
+ immUniform1i("image", 0); /* default GL_TEXTURE0 unit */
- immBegin(GPU_PRIM_TRI_STRIP, 4);
+ immBegin(GPU_PRIM_TRI_STRIP, 4);
- immAttr2f(texcoord, 0.0f, 0.0f);
- immVertex2f(pos, 0.0f, 0.0f);
+ immAttr2f(texcoord, 0.0f, 0.0f);
+ immVertex2f(pos, 0.0f, 0.0f);
- immAttr2f(texcoord, 1.0f, 0.0f);
- immVertex2f(pos, w, 0.0f);
+ immAttr2f(texcoord, 1.0f, 0.0f);
+ immVertex2f(pos, w, 0.0f);
- immAttr2f(texcoord, 0.0f, 1.0f);
- immVertex2f(pos, 0.0f, h);
+ immAttr2f(texcoord, 0.0f, 1.0f);
+ immVertex2f(pos, 0.0f, h);
- immAttr2f(texcoord, 1.0f, 1.0f);
- immVertex2f(pos, w, h);
+ immAttr2f(texcoord, 1.0f, 1.0f);
+ immVertex2f(pos, w, h);
- immEnd();
+ immEnd();
- GPU_texture_unbind(texture);
+ GPU_texture_unbind(texture);
- immUnbindProgram();
+ immUnbindProgram();
}
-
/**
 * object mode depth-loop, see: ED_view3d_draw_depth_loop (legacy drawing).
*/
static void drw_draw_depth_loop_imp(void)
{
- DRW_opengl_context_enable();
+ DRW_opengl_context_enable();
- /* Setup framebuffer */
- DefaultFramebufferList *fbl = (DefaultFramebufferList *)GPU_viewport_framebuffer_list_get(DST.viewport);
- GPU_framebuffer_bind(fbl->depth_only_fb);
- GPU_framebuffer_clear_depth(fbl->depth_only_fb, 1.0f);
+ /* Setup framebuffer */
+ DefaultFramebufferList *fbl = (DefaultFramebufferList *)GPU_viewport_framebuffer_list_get(
+ DST.viewport);
+ GPU_framebuffer_bind(fbl->depth_only_fb);
+ GPU_framebuffer_clear_depth(fbl->depth_only_fb, 1.0f);
- /* Setup viewport */
- drw_context_state_init();
- drw_viewport_var_init();
+ /* Setup viewport */
+ drw_context_state_init();
+ drw_viewport_var_init();
- /* Update ubos */
- DRW_globals_update();
+ /* Update ubos */
+ DRW_globals_update();
- /* Init engines */
- drw_engines_init();
- DRW_hair_init();
+ /* Init engines */
+ drw_engines_init();
+ DRW_hair_init();
- {
- drw_engines_cache_init();
- drw_engines_world_update(DST.draw_ctx.scene);
+ {
+ drw_engines_cache_init();
+ drw_engines_world_update(DST.draw_ctx.scene);
- View3D *v3d = DST.draw_ctx.v3d;
- const int object_type_exclude_viewport = v3d->object_type_exclude_viewport;
- DEG_OBJECT_ITER_BEGIN(DST.draw_ctx.depsgraph, ob,
- DEG_ITER_OBJECT_FLAG_LINKED_DIRECTLY |
- DEG_ITER_OBJECT_FLAG_LINKED_VIA_SET |
- DEG_ITER_OBJECT_FLAG_VISIBLE |
- DEG_ITER_OBJECT_FLAG_DUPLI)
- {
- if ((object_type_exclude_viewport & (1 << ob->type)) != 0) {
- continue;
- }
+ View3D *v3d = DST.draw_ctx.v3d;
+ const int object_type_exclude_viewport = v3d->object_type_exclude_viewport;
+ DEG_OBJECT_ITER_BEGIN (DST.draw_ctx.depsgraph,
+ ob,
+ DEG_ITER_OBJECT_FLAG_LINKED_DIRECTLY |
+ DEG_ITER_OBJECT_FLAG_LINKED_VIA_SET | DEG_ITER_OBJECT_FLAG_VISIBLE |
+ DEG_ITER_OBJECT_FLAG_DUPLI) {
+ if ((object_type_exclude_viewport & (1 << ob->type)) != 0) {
+ continue;
+ }
- if (v3d->localvd && ((v3d->local_view_uuid & ob->base_local_view_bits) == 0)) {
- continue;
- }
+ if (v3d->localvd && ((v3d->local_view_uuid & ob->base_local_view_bits) == 0)) {
+ continue;
+ }
- DST.dupli_parent = data_.dupli_parent;
- DST.dupli_source = data_.dupli_object_current;
- drw_engines_cache_populate(ob);
- }
- DEG_OBJECT_ITER_END;
+ DST.dupli_parent = data_.dupli_parent;
+ DST.dupli_source = data_.dupli_object_current;
+ drw_engines_cache_populate(ob);
+ }
+ DEG_OBJECT_ITER_END;
- drw_engines_cache_finish();
+ drw_engines_cache_finish();
- DRW_render_instance_buffer_finish();
- }
+ DRW_render_instance_buffer_finish();
+ }
- /* Start Drawing */
- DRW_state_reset();
+ /* Start Drawing */
+ DRW_state_reset();
- DRW_hair_update();
+ DRW_hair_update();
- DRW_draw_callbacks_pre_scene();
- drw_engines_draw_scene();
- DRW_draw_callbacks_post_scene();
+ DRW_draw_callbacks_pre_scene();
+ drw_engines_draw_scene();
+ DRW_draw_callbacks_post_scene();
- DRW_state_reset();
+ DRW_state_reset();
- /* TODO: Reading depth for operators should be done here. */
+ /* TODO: Reading depth for operators should be done here. */
- GPU_framebuffer_restore();
+ GPU_framebuffer_restore();
- /* Changin context */
- DRW_opengl_context_disable();
+  /* Changing context */
+ DRW_opengl_context_disable();
}
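The per-object filtering inside the iterator above reduces to two checks:
the viewport's object-type exclusion mask and the local-view bits. A sketch
of the local-view test as a stand-alone predicate (field names taken from
the code above):

static bool object_visible_in_local_view(const View3D *v3d, const Object *ob)
{
  if (v3d->localvd == NULL) {
    return true; /* region is not in local view */
  }
  /* Each local view owns one bit of local_view_uuid; the object is shown
   * only if it carries that bit in base_local_view_bits. */
  return (v3d->local_view_uuid & ob->base_local_view_bits) != 0;
}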
/**
 * object mode depth-loop, see: ED_view3d_draw_depth_loop (legacy drawing).
*/
-void DRW_draw_depth_loop(
- struct Depsgraph *depsgraph,
- ARegion *ar, View3D *v3d,
- GPUViewport *viewport)
-{
- Scene *scene = DEG_get_evaluated_scene(depsgraph);
- RenderEngineType *engine_type = ED_view3d_engine_type(scene, v3d->shading.type);
- ViewLayer *view_layer = DEG_get_evaluated_view_layer(depsgraph);
- RegionView3D *rv3d = ar->regiondata;
-
- /* Reset before using it. */
- drw_state_prepare_clean_for_draw(&DST);
-
- DST.viewport = viewport;
- DST.options.is_depth = true;
-
- /* Instead of 'DRW_context_state_init(C, &DST.draw_ctx)', assign from args */
- DST.draw_ctx = (DRWContextState){
- .ar = ar, .rv3d = rv3d, .v3d = v3d,
- .scene = scene, .view_layer = view_layer, .obact = OBACT(view_layer),
- .engine_type = engine_type,
- .depsgraph = depsgraph,
- };
-
- /* Get list of enabled engines */
- {
- drw_engines_enable_basic();
- if (DRW_state_draw_support()) {
- drw_engines_enable_from_object_mode();
- }
- }
-
- drw_draw_depth_loop_imp();
-
- drw_engines_disable();
-
- /* XXX Drawing the resulting buffer to the BACK_BUFFER */
- GPU_matrix_push();
- GPU_matrix_push_projection();
- wmOrtho2_region_pixelspace(DST.draw_ctx.ar);
- GPU_matrix_identity_set();
-
- glEnable(GL_DEPTH_TEST); /* Cannot write to depth buffer without testing */
- glDepthFunc(GL_ALWAYS);
- DefaultTextureList *dtxl = (DefaultTextureList *)GPU_viewport_texture_list_get(DST.viewport);
- draw_depth_texture_to_screen(dtxl->depth);
- glDepthFunc(GL_LEQUAL);
-
- GPU_matrix_pop();
- GPU_matrix_pop_projection();
+void DRW_draw_depth_loop(struct Depsgraph *depsgraph,
+ ARegion *ar,
+ View3D *v3d,
+ GPUViewport *viewport)
+{
+ Scene *scene = DEG_get_evaluated_scene(depsgraph);
+ RenderEngineType *engine_type = ED_view3d_engine_type(scene, v3d->shading.type);
+ ViewLayer *view_layer = DEG_get_evaluated_view_layer(depsgraph);
+ RegionView3D *rv3d = ar->regiondata;
+
+ /* Reset before using it. */
+ drw_state_prepare_clean_for_draw(&DST);
+
+ DST.viewport = viewport;
+ DST.options.is_depth = true;
+
+ /* Instead of 'DRW_context_state_init(C, &DST.draw_ctx)', assign from args */
+ DST.draw_ctx = (DRWContextState){
+ .ar = ar,
+ .rv3d = rv3d,
+ .v3d = v3d,
+ .scene = scene,
+ .view_layer = view_layer,
+ .obact = OBACT(view_layer),
+ .engine_type = engine_type,
+ .depsgraph = depsgraph,
+ };
+
+ /* Get list of enabled engines */
+ {
+ drw_engines_enable_basic();
+ if (DRW_state_draw_support()) {
+ drw_engines_enable_from_object_mode();
+ }
+ }
+
+ drw_draw_depth_loop_imp();
+
+ drw_engines_disable();
+
+ /* XXX Drawing the resulting buffer to the BACK_BUFFER */
+ GPU_matrix_push();
+ GPU_matrix_push_projection();
+ wmOrtho2_region_pixelspace(DST.draw_ctx.ar);
+ GPU_matrix_identity_set();
+
+ glEnable(GL_DEPTH_TEST); /* Cannot write to depth buffer without testing */
+ glDepthFunc(GL_ALWAYS);
+ DefaultTextureList *dtxl = (DefaultTextureList *)GPU_viewport_texture_list_get(DST.viewport);
+ draw_depth_texture_to_screen(dtxl->depth);
+ glDepthFunc(GL_LEQUAL);
+
+ GPU_matrix_pop();
+ GPU_matrix_pop_projection();
#ifdef DEBUG
- /* Avoid accidental reuse. */
- drw_state_ensure_not_reused(&DST);
+ /* Avoid accidental reuse. */
+ drw_state_ensure_not_reused(&DST);
#endif
}
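A hypothetical call site, assuming the caller already holds an evaluated
depsgraph and the region's GPUViewport (all variable names here are
placeholders):

/* Refresh the depth buffer so it can be sampled by an operator. */
DRW_draw_depth_loop(depsgraph, ar, v3d, viewport);
/* At this point the viewport depth texture is valid and has also been
 * copied to the back buffer by draw_depth_texture_to_screen() above. */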
/**
* Converted from ED_view3d_draw_depth_gpencil (legacy drawing).
*/
-void DRW_draw_depth_loop_gpencil(
- struct Depsgraph *depsgraph,
- ARegion *ar, View3D *v3d,
- GPUViewport *viewport)
-{
- Scene *scene = DEG_get_evaluated_scene(depsgraph);
- ViewLayer *view_layer = DEG_get_evaluated_view_layer(depsgraph);
- RegionView3D *rv3d = ar->regiondata;
-
- /* Reset before using it. */
- drw_state_prepare_clean_for_draw(&DST);
-
- DST.viewport = viewport;
- DST.options.is_depth = true;
-
- /* Instead of 'DRW_context_state_init(C, &DST.draw_ctx)', assign from args */
- DST.draw_ctx = (DRWContextState){
- .ar = ar, .rv3d = rv3d, .v3d = v3d,
- .scene = scene, .view_layer = view_layer, .obact = OBACT(view_layer),
- .depsgraph = depsgraph,
- };
-
- use_drw_engine(&draw_engine_gpencil_type);
- drw_draw_depth_loop_imp();
- drw_engines_disable();
+void DRW_draw_depth_loop_gpencil(struct Depsgraph *depsgraph,
+ ARegion *ar,
+ View3D *v3d,
+ GPUViewport *viewport)
+{
+ Scene *scene = DEG_get_evaluated_scene(depsgraph);
+ ViewLayer *view_layer = DEG_get_evaluated_view_layer(depsgraph);
+ RegionView3D *rv3d = ar->regiondata;
+
+ /* Reset before using it. */
+ drw_state_prepare_clean_for_draw(&DST);
+
+ DST.viewport = viewport;
+ DST.options.is_depth = true;
+
+ /* Instead of 'DRW_context_state_init(C, &DST.draw_ctx)', assign from args */
+ DST.draw_ctx = (DRWContextState){
+ .ar = ar,
+ .rv3d = rv3d,
+ .v3d = v3d,
+ .scene = scene,
+ .view_layer = view_layer,
+ .obact = OBACT(view_layer),
+ .depsgraph = depsgraph,
+ };
+
+ use_drw_engine(&draw_engine_gpencil_type);
+ drw_draw_depth_loop_imp();
+ drw_engines_disable();
#ifdef DEBUG
- /* Avoid accidental reuse. */
- drw_state_ensure_not_reused(&DST);
+ /* Avoid accidental reuse. */
+ drw_state_ensure_not_reused(&DST);
#endif
}
-
/* Set up an OpenGL context to be used with shaders that draw 32-bit (U32) color ids. */
void DRW_framebuffer_select_id_setup(ARegion *ar, const bool clear)
{
- RegionView3D *rv3d = ar->regiondata;
+ RegionView3D *rv3d = ar->regiondata;
- DRW_opengl_context_enable();
+ DRW_opengl_context_enable();
- /* Setup framebuffer */
- int viewport_size[2] = {ar->winx, ar->winy};
- draw_select_framebuffer_select_id_setup(viewport_size);
- GPU_framebuffer_bind(g_select_buffer.framebuffer_select_id);
+ /* Setup framebuffer */
+ int viewport_size[2] = {ar->winx, ar->winy};
+ draw_select_framebuffer_select_id_setup(viewport_size);
+ GPU_framebuffer_bind(g_select_buffer.framebuffer_select_id);
- /* dithering and AA break color coding, so disable */
- glDisable(GL_DITHER);
+ /* dithering and AA break color coding, so disable */
+ glDisable(GL_DITHER);
- GPU_depth_test(true);
- glDisable(GL_SCISSOR_TEST);
+ GPU_depth_test(true);
+ glDisable(GL_SCISSOR_TEST);
- if (clear) {
- GPU_framebuffer_clear_color_depth(
- g_select_buffer.framebuffer_select_id, (const float[4]){0.0f}, 1.0f);
- }
+ if (clear) {
+ GPU_framebuffer_clear_color_depth(
+ g_select_buffer.framebuffer_select_id, (const float[4]){0.0f}, 1.0f);
+ }
- if (rv3d->rflag & RV3D_CLIPPING) {
- ED_view3d_clipping_set(rv3d);
- }
+ if (rv3d->rflag & RV3D_CLIPPING) {
+ ED_view3d_clipping_set(rv3d);
+ }
}
-
/* Ends the context for selection, restoring the previous one. */
void DRW_framebuffer_select_id_release(ARegion *ar)
{
- RegionView3D *rv3d = ar->regiondata;
+ RegionView3D *rv3d = ar->regiondata;
- if (rv3d->rflag & RV3D_CLIPPING) {
- ED_view3d_clipping_disable();
- }
+ if (rv3d->rflag & RV3D_CLIPPING) {
+ ED_view3d_clipping_disable();
+ }
- GPU_depth_test(false);
+ GPU_depth_test(false);
- GPU_framebuffer_restore();
+ GPU_framebuffer_restore();
- DRW_opengl_context_disable();
+ DRW_opengl_context_disable();
}
-
/* Read a block of pixels from the select frame buffer. */
void DRW_framebuffer_select_id_read(const rcti *rect, uint *r_buf)
{
- /* clamp rect by texture */
- rcti r = {
- .xmin = 0,
- .xmax = GPU_texture_width(g_select_buffer.texture_u32),
- .ymin = 0,
- .ymax = GPU_texture_height(g_select_buffer.texture_u32),
- };
-
- rcti rect_clamp = *rect;
- if (BLI_rcti_isect(&r, &rect_clamp, &rect_clamp)) {
- GPU_texture_read_rect(
- g_select_buffer.texture_u32,
- GPU_DATA_UNSIGNED_INT, &rect_clamp, r_buf);
-
- if (!BLI_rcti_compare(rect, &rect_clamp)) {
- GPU_select_buffer_stride_realign(rect, &rect_clamp, r_buf);
- }
- }
- else {
- size_t buf_size = BLI_rcti_size_x(rect) *
- BLI_rcti_size_y(rect) *
- sizeof(*r_buf);
-
- memset(r_buf, 0, buf_size);
- }
+ /* clamp rect by texture */
+ rcti r = {
+ .xmin = 0,
+ .xmax = GPU_texture_width(g_select_buffer.texture_u32),
+ .ymin = 0,
+ .ymax = GPU_texture_height(g_select_buffer.texture_u32),
+ };
+
+ rcti rect_clamp = *rect;
+ if (BLI_rcti_isect(&r, &rect_clamp, &rect_clamp)) {
+ GPU_texture_read_rect(g_select_buffer.texture_u32, GPU_DATA_UNSIGNED_INT, &rect_clamp, r_buf);
+
+ if (!BLI_rcti_compare(rect, &rect_clamp)) {
+ GPU_select_buffer_stride_realign(rect, &rect_clamp, r_buf);
+ }
+ }
+ else {
+ size_t buf_size = BLI_rcti_size_x(rect) * BLI_rcti_size_y(rect) * sizeof(*r_buf);
+
+ memset(r_buf, 0, buf_size);
+ }
}
/** \} */
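Together these three functions form a setup / draw / read / release
sequence; a sketch of the expected call pattern (the rectangle bounds and
the drawing step are placeholders):

DRW_framebuffer_select_id_setup(ar, true);
/* ... submit draw calls that write 32-bit select ids ... */

rcti rect = {.xmin = x, .xmax = x + w, .ymin = y, .ymax = y + h};
uint *ids = MEM_mallocN(sizeof(uint) * w * h, __func__);
DRW_framebuffer_select_id_read(&rect, ids);

DRW_framebuffer_select_id_release(ar);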
-
/* -------------------------------------------------------------------- */
/** \name Draw Manager State (DRW_state)
* \{ */
void DRW_state_dfdy_factors_get(float dfdyfac[2])
{
- GPU_get_dfdy_factors(dfdyfac);
+ GPU_get_dfdy_factors(dfdyfac);
}
/**
@@ -2584,7 +2593,7 @@ void DRW_state_dfdy_factors_get(float dfdyfac[2])
*/
bool DRW_state_is_fbo(void)
{
- return ((DST.default_framebuffer != NULL) || DST.options.is_image_render);
+ return ((DST.default_framebuffer != NULL) || DST.options.is_image_render);
}
/**
@@ -2592,12 +2601,12 @@ bool DRW_state_is_fbo(void)
*/
bool DRW_state_is_select(void)
{
- return DST.options.is_select;
+ return DST.options.is_select;
}
bool DRW_state_is_depth(void)
{
- return DST.options.is_depth;
+ return DST.options.is_depth;
}
/**
@@ -2605,7 +2614,7 @@ bool DRW_state_is_depth(void)
*/
bool DRW_state_is_image_render(void)
{
- return DST.options.is_image_render;
+ return DST.options.is_image_render;
}
/**
@@ -2614,9 +2623,8 @@ bool DRW_state_is_image_render(void)
*/
bool DRW_state_is_scene_render(void)
{
- BLI_assert(DST.options.is_scene_render ?
- DST.options.is_image_render : true);
- return DST.options.is_scene_render;
+ BLI_assert(DST.options.is_scene_render ? DST.options.is_image_render : true);
+ return DST.options.is_scene_render;
}
/**
@@ -2624,28 +2632,25 @@ bool DRW_state_is_scene_render(void)
*/
bool DRW_state_is_opengl_render(void)
{
- return DST.options.is_image_render && !DST.options.is_scene_render;
+ return DST.options.is_image_render && !DST.options.is_scene_render;
}
bool DRW_state_is_playback(void)
{
- if (DST.draw_ctx.evil_C != NULL) {
- struct wmWindowManager *wm = CTX_wm_manager(DST.draw_ctx.evil_C);
- return ED_screen_animation_playing(wm) != NULL;
- }
- return false;
+ if (DST.draw_ctx.evil_C != NULL) {
+ struct wmWindowManager *wm = CTX_wm_manager(DST.draw_ctx.evil_C);
+ return ED_screen_animation_playing(wm) != NULL;
+ }
+ return false;
}
-
/**
* Should text draw in this mode?
*/
bool DRW_state_show_text(void)
{
- return (DST.options.is_select) == 0 &&
- (DST.options.is_depth) == 0 &&
- (DST.options.is_scene_render) == 0 &&
- (DST.options.draw_text) == 0;
+ return (DST.options.is_select) == 0 && (DST.options.is_depth) == 0 &&
+ (DST.options.is_scene_render) == 0 && (DST.options.draw_text) == 0;
}
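A typical guard in engine drawing code (the text-drawing helper is
hypothetical):

if (DRW_state_show_text()) {
  draw_object_name_labels(); /* skipped during select/depth/render passes */
}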
/**
@@ -2654,10 +2659,9 @@ bool DRW_state_show_text(void)
*/
bool DRW_state_draw_support(void)
{
- View3D *v3d = DST.draw_ctx.v3d;
- return (DRW_state_is_scene_render() == false) &&
- (v3d != NULL) &&
- ((v3d->flag2 & V3D_HIDE_OVERLAYS) == 0);
+ View3D *v3d = DST.draw_ctx.v3d;
+ return (DRW_state_is_scene_render() == false) && (v3d != NULL) &&
+ ((v3d->flag2 & V3D_HIDE_OVERLAYS) == 0);
}
/**
@@ -2665,127 +2669,125 @@ bool DRW_state_draw_support(void)
*/
bool DRW_state_draw_background(void)
{
- if (DRW_state_is_image_render() == false) {
- return true;
- }
- return DST.options.draw_background;
+ if (DRW_state_is_image_render() == false) {
+ return true;
+ }
+ return DST.options.draw_background;
}
/** \} */
-
/* -------------------------------------------------------------------- */
/** \name Context State (DRW_context_state)
* \{ */
const DRWContextState *DRW_context_state_get(void)
{
- return &DST.draw_ctx;
+ return &DST.draw_ctx;
}
/** \} */
-
/* -------------------------------------------------------------------- */
/** \name Init/Exit (DRW_engines)
* \{ */
bool DRW_engine_render_support(DrawEngineType *draw_engine_type)
{
- return draw_engine_type->render_to_image;
+ return draw_engine_type->render_to_image;
}
void DRW_engine_register(DrawEngineType *draw_engine_type)
{
- BLI_addtail(&DRW_engines, draw_engine_type);
+ BLI_addtail(&DRW_engines, draw_engine_type);
}
void DRW_engines_register(void)
{
- RE_engines_register(&DRW_engine_viewport_eevee_type);
- RE_engines_register(&DRW_engine_viewport_workbench_type);
+ RE_engines_register(&DRW_engine_viewport_eevee_type);
+ RE_engines_register(&DRW_engine_viewport_workbench_type);
- DRW_engine_register(&draw_engine_workbench_solid);
- DRW_engine_register(&draw_engine_workbench_transparent);
+ DRW_engine_register(&draw_engine_workbench_solid);
+ DRW_engine_register(&draw_engine_workbench_transparent);
- DRW_engine_register(&draw_engine_object_type);
- DRW_engine_register(&draw_engine_edit_armature_type);
- DRW_engine_register(&draw_engine_edit_curve_type);
- DRW_engine_register(&draw_engine_edit_lattice_type);
- DRW_engine_register(&draw_engine_edit_mesh_type);
- DRW_engine_register(&draw_engine_edit_metaball_type);
- DRW_engine_register(&draw_engine_edit_text_type);
- DRW_engine_register(&draw_engine_motion_path_type);
- DRW_engine_register(&draw_engine_overlay_type);
- DRW_engine_register(&draw_engine_paint_texture_type);
- DRW_engine_register(&draw_engine_paint_vertex_type);
- DRW_engine_register(&draw_engine_particle_type);
- DRW_engine_register(&draw_engine_pose_type);
- DRW_engine_register(&draw_engine_sculpt_type);
- DRW_engine_register(&draw_engine_gpencil_type);
+ DRW_engine_register(&draw_engine_object_type);
+ DRW_engine_register(&draw_engine_edit_armature_type);
+ DRW_engine_register(&draw_engine_edit_curve_type);
+ DRW_engine_register(&draw_engine_edit_lattice_type);
+ DRW_engine_register(&draw_engine_edit_mesh_type);
+ DRW_engine_register(&draw_engine_edit_metaball_type);
+ DRW_engine_register(&draw_engine_edit_text_type);
+ DRW_engine_register(&draw_engine_motion_path_type);
+ DRW_engine_register(&draw_engine_overlay_type);
+ DRW_engine_register(&draw_engine_paint_texture_type);
+ DRW_engine_register(&draw_engine_paint_vertex_type);
+ DRW_engine_register(&draw_engine_particle_type);
+ DRW_engine_register(&draw_engine_pose_type);
+ DRW_engine_register(&draw_engine_sculpt_type);
+ DRW_engine_register(&draw_engine_gpencil_type);
- /* setup callbacks */
- {
- BKE_mball_batch_cache_dirty_tag_cb = DRW_mball_batch_cache_dirty_tag;
- BKE_mball_batch_cache_free_cb = DRW_mball_batch_cache_free;
+ /* setup callbacks */
+ {
+ BKE_mball_batch_cache_dirty_tag_cb = DRW_mball_batch_cache_dirty_tag;
+ BKE_mball_batch_cache_free_cb = DRW_mball_batch_cache_free;
- BKE_curve_batch_cache_dirty_tag_cb = DRW_curve_batch_cache_dirty_tag;
- BKE_curve_batch_cache_free_cb = DRW_curve_batch_cache_free;
+ BKE_curve_batch_cache_dirty_tag_cb = DRW_curve_batch_cache_dirty_tag;
+ BKE_curve_batch_cache_free_cb = DRW_curve_batch_cache_free;
- BKE_mesh_batch_cache_dirty_tag_cb = DRW_mesh_batch_cache_dirty_tag;
- BKE_mesh_batch_cache_free_cb = DRW_mesh_batch_cache_free;
+ BKE_mesh_batch_cache_dirty_tag_cb = DRW_mesh_batch_cache_dirty_tag;
+ BKE_mesh_batch_cache_free_cb = DRW_mesh_batch_cache_free;
- BKE_lattice_batch_cache_dirty_tag_cb = DRW_lattice_batch_cache_dirty_tag;
- BKE_lattice_batch_cache_free_cb = DRW_lattice_batch_cache_free;
+ BKE_lattice_batch_cache_dirty_tag_cb = DRW_lattice_batch_cache_dirty_tag;
+ BKE_lattice_batch_cache_free_cb = DRW_lattice_batch_cache_free;
- BKE_particle_batch_cache_dirty_tag_cb = DRW_particle_batch_cache_dirty_tag;
- BKE_particle_batch_cache_free_cb = DRW_particle_batch_cache_free;
+ BKE_particle_batch_cache_dirty_tag_cb = DRW_particle_batch_cache_dirty_tag;
+ BKE_particle_batch_cache_free_cb = DRW_particle_batch_cache_free;
- BKE_gpencil_batch_cache_dirty_tag_cb = DRW_gpencil_batch_cache_dirty_tag;
- BKE_gpencil_batch_cache_free_cb = DRW_gpencil_batch_cache_free;
- }
+ BKE_gpencil_batch_cache_dirty_tag_cb = DRW_gpencil_batch_cache_dirty_tag;
+ BKE_gpencil_batch_cache_free_cb = DRW_gpencil_batch_cache_free;
+ }
}
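The callback block above is how blenkernel invalidates draw caches without
depending on the draw module: BKE stores these function pointers and calls
them when data changes. A sketch of the kernel-side call (an assumption
about the BKE wrapper, not code from this file):

void BKE_mesh_batch_cache_dirty_tag(Mesh *me, int mode)
{
  if (BKE_mesh_batch_cache_dirty_tag_cb != NULL) {
    BKE_mesh_batch_cache_dirty_tag_cb(me, mode);
  }
}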
void DRW_engines_free(void)
{
- if (DST.gl_context == NULL) {
- /* Nothing has been setup. Nothing to clear.
- * Otherwise, DRW_opengl_context_enable can
- * create a context in background mode. (see T62355) */
- return;
- }
+ if (DST.gl_context == NULL) {
+    /* Nothing has been set up. Nothing to clear.
+ * Otherwise, DRW_opengl_context_enable can
+ * create a context in background mode. (see T62355) */
+ return;
+ }
- DRW_opengl_context_enable();
+ DRW_opengl_context_enable();
- DRW_TEXTURE_FREE_SAFE(g_select_buffer.texture_u32);
- DRW_TEXTURE_FREE_SAFE(g_select_buffer.texture_depth);
- GPU_FRAMEBUFFER_FREE_SAFE(g_select_buffer.framebuffer_select_id);
- GPU_FRAMEBUFFER_FREE_SAFE(g_select_buffer.framebuffer_depth_only);
+ DRW_TEXTURE_FREE_SAFE(g_select_buffer.texture_u32);
+ DRW_TEXTURE_FREE_SAFE(g_select_buffer.texture_depth);
+ GPU_FRAMEBUFFER_FREE_SAFE(g_select_buffer.framebuffer_select_id);
+ GPU_FRAMEBUFFER_FREE_SAFE(g_select_buffer.framebuffer_depth_only);
- DRW_hair_free();
- DRW_shape_cache_free();
- DRW_stats_free();
- DRW_globals_free();
+ DRW_hair_free();
+ DRW_shape_cache_free();
+ DRW_stats_free();
+ DRW_globals_free();
- DrawEngineType *next;
- for (DrawEngineType *type = DRW_engines.first; type; type = next) {
- next = type->next;
- BLI_remlink(&R_engines, type);
+ DrawEngineType *next;
+ for (DrawEngineType *type = DRW_engines.first; type; type = next) {
+ next = type->next;
+ BLI_remlink(&R_engines, type);
- if (type->engine_free) {
- type->engine_free();
- }
- }
+ if (type->engine_free) {
+ type->engine_free();
+ }
+ }
- DRW_UBO_FREE_SAFE(G_draw.block_ubo);
- DRW_UBO_FREE_SAFE(G_draw.view_ubo);
- DRW_TEXTURE_FREE_SAFE(G_draw.ramp);
- DRW_TEXTURE_FREE_SAFE(G_draw.weight_ramp);
- MEM_SAFE_FREE(g_pos_format);
+ DRW_UBO_FREE_SAFE(G_draw.block_ubo);
+ DRW_UBO_FREE_SAFE(G_draw.view_ubo);
+ DRW_TEXTURE_FREE_SAFE(G_draw.ramp);
+ DRW_TEXTURE_FREE_SAFE(G_draw.weight_ramp);
+ MEM_SAFE_FREE(g_pos_format);
- MEM_SAFE_FREE(DST.uniform_names.buffer);
+ MEM_SAFE_FREE(DST.uniform_names.buffer);
- DRW_opengl_context_disable();
+ DRW_opengl_context_disable();
}
/** \} */
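The *_FREE_SAFE macros used in DRW_engines_free() follow the usual
free-and-clear pattern; a sketch of what DRW_TEXTURE_FREE_SAFE is assumed
to expand to (see draw_manager.h for the actual definition):

#define DRW_TEXTURE_FREE_SAFE(tex) \
  do { \
    if (tex != NULL) { \
      DRW_texture_free(tex); \
      tex = NULL; \
    } \
  } while (0)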
@@ -2795,128 +2797,128 @@ void DRW_engines_free(void)
void DRW_opengl_context_create(void)
{
- BLI_assert(DST.gl_context == NULL); /* Ensure it's called once */
-
- DST.gl_context_mutex = BLI_ticket_mutex_alloc();
- if (!G.background) {
- immDeactivate();
- }
- /* This changes the active context. */
- DST.gl_context = WM_opengl_context_create();
- WM_opengl_context_activate(DST.gl_context);
- /* Be sure to create gawain.context too. */
- DST.gpu_context = GPU_context_create();
- if (!G.background) {
- immActivate();
- }
- /* Set default Blender OpenGL state */
- GPU_state_init();
- /* So we activate the window's one afterwards. */
- wm_window_reset_drawable();
+ BLI_assert(DST.gl_context == NULL); /* Ensure it's called once */
+
+ DST.gl_context_mutex = BLI_ticket_mutex_alloc();
+ if (!G.background) {
+ immDeactivate();
+ }
+ /* This changes the active context. */
+ DST.gl_context = WM_opengl_context_create();
+ WM_opengl_context_activate(DST.gl_context);
+ /* Be sure to create gawain.context too. */
+ DST.gpu_context = GPU_context_create();
+ if (!G.background) {
+ immActivate();
+ }
+ /* Set default Blender OpenGL state */
+ GPU_state_init();
+ /* So we activate the window's one afterwards. */
+ wm_window_reset_drawable();
}
void DRW_opengl_context_destroy(void)
{
- BLI_assert(BLI_thread_is_main());
- if (DST.gl_context != NULL) {
- WM_opengl_context_activate(DST.gl_context);
- GPU_context_active_set(DST.gpu_context);
- GPU_context_discard(DST.gpu_context);
- WM_opengl_context_dispose(DST.gl_context);
- BLI_ticket_mutex_free(DST.gl_context_mutex);
- }
+ BLI_assert(BLI_thread_is_main());
+ if (DST.gl_context != NULL) {
+ WM_opengl_context_activate(DST.gl_context);
+ GPU_context_active_set(DST.gpu_context);
+ GPU_context_discard(DST.gpu_context);
+ WM_opengl_context_dispose(DST.gl_context);
+ BLI_ticket_mutex_free(DST.gl_context_mutex);
+ }
}
void DRW_opengl_context_enable_ex(bool restore)
{
- if (DST.gl_context != NULL) {
- /* IMPORTANT: We dont support immediate mode in render mode!
- * This shall remain in effect until immediate mode supports
- * multiple threads. */
- BLI_ticket_mutex_lock(DST.gl_context_mutex);
- if (BLI_thread_is_main() && restore) {
- if (!G.background) {
- immDeactivate();
- }
- }
- WM_opengl_context_activate(DST.gl_context);
- GPU_context_active_set(DST.gpu_context);
- if (BLI_thread_is_main() && restore) {
- if (!G.background) {
- immActivate();
- }
- BLF_batch_reset();
- }
- }
+ if (DST.gl_context != NULL) {
+    /* IMPORTANT: We don't support immediate mode in render mode!
+ * This shall remain in effect until immediate mode supports
+ * multiple threads. */
+ BLI_ticket_mutex_lock(DST.gl_context_mutex);
+ if (BLI_thread_is_main() && restore) {
+ if (!G.background) {
+ immDeactivate();
+ }
+ }
+ WM_opengl_context_activate(DST.gl_context);
+ GPU_context_active_set(DST.gpu_context);
+ if (BLI_thread_is_main() && restore) {
+ if (!G.background) {
+ immActivate();
+ }
+ BLF_batch_reset();
+ }
+ }
}
void DRW_opengl_context_disable_ex(bool restore)
{
- if (DST.gl_context != NULL) {
+ if (DST.gl_context != NULL) {
#ifdef __APPLE__
- /* Need to flush before disabling draw context, otherwise it does not
- * always finish drawing and viewport can be empty or partially drawn */
- GPU_flush();
+ /* Need to flush before disabling draw context, otherwise it does not
+ * always finish drawing and viewport can be empty or partially drawn */
+ GPU_flush();
#endif
- if (BLI_thread_is_main() && restore) {
- wm_window_reset_drawable();
- }
- else {
- WM_opengl_context_release(DST.gl_context);
- GPU_context_active_set(NULL);
- }
+ if (BLI_thread_is_main() && restore) {
+ wm_window_reset_drawable();
+ }
+ else {
+ WM_opengl_context_release(DST.gl_context);
+ GPU_context_active_set(NULL);
+ }
- BLI_ticket_mutex_unlock(DST.gl_context_mutex);
- }
+ BLI_ticket_mutex_unlock(DST.gl_context_mutex);
+ }
}
void DRW_opengl_context_enable(void)
{
- if (G.background && DST.gl_context == NULL) {
- WM_init_opengl(G_MAIN);
- }
- DRW_opengl_context_enable_ex(true);
+ if (G.background && DST.gl_context == NULL) {
+ WM_init_opengl(G_MAIN);
+ }
+ DRW_opengl_context_enable_ex(true);
}
void DRW_opengl_context_disable(void)
{
- DRW_opengl_context_disable_ex(true);
+ DRW_opengl_context_disable_ex(true);
}
void DRW_opengl_render_context_enable(void *re_gl_context)
{
- /* If thread is main you should use DRW_opengl_context_enable(). */
- BLI_assert(!BLI_thread_is_main());
+ /* If thread is main you should use DRW_opengl_context_enable(). */
+ BLI_assert(!BLI_thread_is_main());
- /* TODO get rid of the blocking. Only here because of the static global DST. */
- BLI_ticket_mutex_lock(DST.gl_context_mutex);
- WM_opengl_context_activate(re_gl_context);
+ /* TODO get rid of the blocking. Only here because of the static global DST. */
+ BLI_ticket_mutex_lock(DST.gl_context_mutex);
+ WM_opengl_context_activate(re_gl_context);
}
void DRW_opengl_render_context_disable(void *re_gl_context)
{
- GPU_flush();
- WM_opengl_context_release(re_gl_context);
- /* TODO get rid of the blocking. */
- BLI_ticket_mutex_unlock(DST.gl_context_mutex);
+ GPU_flush();
+ WM_opengl_context_release(re_gl_context);
+ /* TODO get rid of the blocking. */
+ BLI_ticket_mutex_unlock(DST.gl_context_mutex);
}
/* Needs to be called AFTER DRW_opengl_render_context_enable() */
void DRW_gawain_render_context_enable(void *re_gpu_context)
{
- /* If thread is main you should use DRW_opengl_context_enable(). */
- BLI_assert(!BLI_thread_is_main());
+ /* If thread is main you should use DRW_opengl_context_enable(). */
+ BLI_assert(!BLI_thread_is_main());
- GPU_context_active_set(re_gpu_context);
- DRW_shape_cache_reset(); /* XXX fix that too. */
+ GPU_context_active_set(re_gpu_context);
+ DRW_shape_cache_reset(); /* XXX fix that too. */
}
/* Needs to be called BEFORE DRW_opengl_render_context_disable() */
void DRW_gawain_render_context_disable(void *UNUSED(re_gpu_context))
{
- DRW_shape_cache_reset(); /* XXX fix that too. */
- GPU_context_active_set(NULL);
+ DRW_shape_cache_reset(); /* XXX fix that too. */
+ GPU_context_active_set(NULL);
}
/** \} */
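The enable/disable pairs above must nest as their comments state; a sketch
of the ordering on a render thread (the context pointers come from the
render engine and are placeholders here):

DRW_opengl_render_context_enable(re_gl_context);
DRW_gawain_render_context_enable(re_gpu_context);
/* ... render ... */
DRW_gawain_render_context_disable(re_gpu_context);
DRW_opengl_render_context_disable(re_gl_context);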
diff --git a/source/blender/draw/intern/draw_manager.h b/source/blender/draw/intern/draw_manager.h
index a489e3dd1a8..35e2ab86a80 100644
--- a/source/blender/draw/intern/draw_manager.h
+++ b/source/blender/draw/intern/draw_manager.h
@@ -56,27 +56,30 @@
# define PROFILE_TIMER_FALLOFF 0.04
-# define PROFILE_START(time_start) \
- double time_start = PIL_check_seconds_timer();
+# define PROFILE_START(time_start) double time_start = PIL_check_seconds_timer();
-# define PROFILE_END_ACCUM(time_accum, time_start) { \
- time_accum += (PIL_check_seconds_timer() - time_start) * 1e3; \
-} ((void)0)
+# define PROFILE_END_ACCUM(time_accum, time_start) \
+ { \
+ time_accum += (PIL_check_seconds_timer() - time_start) * 1e3; \
+ } \
+ ((void)0)
/* exp average */
-# define PROFILE_END_UPDATE(time_update, time_start) { \
- double _time_delta = (PIL_check_seconds_timer() - time_start) * 1e3; \
- time_update = (time_update * (1.0 - PROFILE_TIMER_FALLOFF)) + \
- (_time_delta * PROFILE_TIMER_FALLOFF); \
-} ((void)0)
+# define PROFILE_END_UPDATE(time_update, time_start) \
+ { \
+ double _time_delta = (PIL_check_seconds_timer() - time_start) * 1e3; \
+ time_update = (time_update * (1.0 - PROFILE_TIMER_FALLOFF)) + \
+ (_time_delta * PROFILE_TIMER_FALLOFF); \
+ } \
+ ((void)0)
-#else /* USE_PROFILE */
+#else /* USE_PROFILE */
# define PROFILE_START(time_start) ((void)0)
# define PROFILE_END_ACCUM(time_accum, time_start) ((void)0)
# define PROFILE_END_UPDATE(time_update, time_start) ((void)0)
-#endif /* USE_PROFILE */
+#endif /* USE_PROFILE */
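Usage sketch for the profile macros above (the accumulator variables are
hypothetical; times are in milliseconds, matching the 1e3 factor in the
macros):

static double g_time_accum, g_time_avg;

static void timed_section(void)
{
  PROFILE_START(time_start);
  /* ... work being measured ... */
  PROFILE_END_ACCUM(g_time_accum, time_start); /* running total */
  PROFILE_END_UPDATE(g_time_avg, time_start);  /* exponential moving average */
}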
/* ------------ Data Structure --------------- */
/**
@@ -87,337 +90,339 @@
/* Used by DRWCallState.flag */
enum {
- DRW_CALL_CULLED = (1 << 0),
- DRW_CALL_NEGSCALE = (1 << 1),
- DRW_CALL_BYPASS_CULLING = (1 << 2),
+ DRW_CALL_CULLED = (1 << 0),
+ DRW_CALL_NEGSCALE = (1 << 1),
+ DRW_CALL_BYPASS_CULLING = (1 << 2),
};
/* Used by DRWCallState.matflag */
enum {
- DRW_CALL_MODELINVERSE = (1 << 0),
- DRW_CALL_MODELVIEW = (1 << 1),
- DRW_CALL_MODELVIEWINVERSE = (1 << 2),
- DRW_CALL_MODELVIEWPROJECTION = (1 << 3),
- DRW_CALL_NORMALVIEW = (1 << 4),
- DRW_CALL_NORMALVIEWINVERSE = (1 << 5),
- DRW_CALL_NORMALWORLD = (1 << 6),
- DRW_CALL_ORCOTEXFAC = (1 << 7),
- DRW_CALL_EYEVEC = (1 << 8),
- DRW_CALL_OBJECTINFO = (1 << 9),
+ DRW_CALL_MODELINVERSE = (1 << 0),
+ DRW_CALL_MODELVIEW = (1 << 1),
+ DRW_CALL_MODELVIEWINVERSE = (1 << 2),
+ DRW_CALL_MODELVIEWPROJECTION = (1 << 3),
+ DRW_CALL_NORMALVIEW = (1 << 4),
+ DRW_CALL_NORMALVIEWINVERSE = (1 << 5),
+ DRW_CALL_NORMALWORLD = (1 << 6),
+ DRW_CALL_ORCOTEXFAC = (1 << 7),
+ DRW_CALL_EYEVEC = (1 << 8),
+ DRW_CALL_OBJECTINFO = (1 << 9),
};
typedef struct DRWCallState {
- DRWCallVisibilityFn *visibility_cb;
- void *user_data;
-
- uchar flag;
- uchar cache_id; /* Compared with DST.state_cache_id to see if matrices are still valid. */
- uint16_t matflag; /* Which matrices to compute. */
- /* Culling: Using Bounding Sphere for now for faster culling.
- * Not ideal for planes. */
- BoundSphere bsphere;
- /* Matrices */
- float model[4][4];
- float modelinverse[4][4];
- float modelview[4][4];
- float modelviewinverse[4][4];
- float modelviewprojection[4][4];
- float normalview[3][3];
- float normalviewinverse[3][3];
- float normalworld[3][3]; /* Not view dependent */
- float orcotexfac[2][3]; /* Not view dependent */
- float objectinfo[2];
- float eyevec[3];
+ DRWCallVisibilityFn *visibility_cb;
+ void *user_data;
+
+ uchar flag;
+ uchar cache_id; /* Compared with DST.state_cache_id to see if matrices are still valid. */
+ uint16_t matflag; /* Which matrices to compute. */
+ /* Culling: Using Bounding Sphere for now for faster culling.
+ * Not ideal for planes. */
+ BoundSphere bsphere;
+ /* Matrices */
+ float model[4][4];
+ float modelinverse[4][4];
+ float modelview[4][4];
+ float modelviewinverse[4][4];
+ float modelviewprojection[4][4];
+ float normalview[3][3];
+ float normalviewinverse[3][3];
+ float normalworld[3][3]; /* Not view dependent */
+ float orcotexfac[2][3]; /* Not view dependent */
+ float objectinfo[2];
+ float eyevec[3];
} DRWCallState;
typedef enum {
- /** A single batch. */
- DRW_CALL_SINGLE,
- /** Like single but only draw a range of vertices/indices. */
- DRW_CALL_RANGE,
- /** Draw instances without any instancing attributes. */
- DRW_CALL_INSTANCES,
- /** Uses a callback to draw with any number of batches. */
- DRW_CALL_GENERATE,
- /** Generate a drawcall without any #GPUBatch. */
- DRW_CALL_PROCEDURAL,
+ /** A single batch. */
+ DRW_CALL_SINGLE,
+ /** Like single but only draw a range of vertices/indices. */
+ DRW_CALL_RANGE,
+ /** Draw instances without any instancing attributes. */
+ DRW_CALL_INSTANCES,
+ /** Uses a callback to draw with any number of batches. */
+ DRW_CALL_GENERATE,
+ /** Generate a drawcall without any #GPUBatch. */
+ DRW_CALL_PROCEDURAL,
} DRWCallType;
typedef struct DRWCall {
- struct DRWCall *next;
- DRWCallState *state;
-
- union {
- struct { /* type == DRW_CALL_SINGLE */
- GPUBatch *geometry;
- short ma_index;
- } single;
- struct { /* type == DRW_CALL_RANGE */
- GPUBatch *geometry;
- uint start, count;
- } range;
- struct { /* type == DRW_CALL_INSTANCES */
- GPUBatch *geometry;
- /* Count can be adjusted between redraw. If needed, we can add fixed count. */
- uint *count;
- } instances;
- struct { /* type == DRW_CALL_GENERATE */
- DRWCallGenerateFn *geometry_fn;
- void *user_data;
- } generate;
- struct { /* type == DRW_CALL_PROCEDURAL */
- uint vert_count;
- GPUPrimType prim_type;
- } procedural;
- };
-
- DRWCallType type;
+ struct DRWCall *next;
+ DRWCallState *state;
+
+ union {
+ struct { /* type == DRW_CALL_SINGLE */
+ GPUBatch *geometry;
+ short ma_index;
+ } single;
+ struct { /* type == DRW_CALL_RANGE */
+ GPUBatch *geometry;
+ uint start, count;
+ } range;
+ struct { /* type == DRW_CALL_INSTANCES */
+ GPUBatch *geometry;
+ /* Count can be adjusted between redraw. If needed, we can add fixed count. */
+ uint *count;
+ } instances;
+ struct { /* type == DRW_CALL_GENERATE */
+ DRWCallGenerateFn *geometry_fn;
+ void *user_data;
+ } generate;
+ struct { /* type == DRW_CALL_PROCEDURAL */
+ uint vert_count;
+ GPUPrimType prim_type;
+ } procedural;
+ };
+
+ DRWCallType type;
#ifdef USE_GPU_SELECT
- int select_id;
+ int select_id;
#endif
} DRWCall;
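A sketch of how a consumer dispatches on the union above (the real dispatch
in the draw manager also handles state, ranges and instancing; this shows
only the type switch):

switch (call->type) {
  case DRW_CALL_SINGLE:
    GPU_batch_draw(call->single.geometry);
    break;
  case DRW_CALL_RANGE:
    /* draw only vertices/indices [start, start + count). */
    break;
  case DRW_CALL_PROCEDURAL:
    /* emit call->procedural.vert_count vertices with no GPUBatch bound. */
    break;
  default:
    break;
}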
/* Used by DRWUniform.type */
typedef enum {
- DRW_UNIFORM_BOOL,
- DRW_UNIFORM_BOOL_COPY,
- DRW_UNIFORM_SHORT_TO_INT,
- DRW_UNIFORM_SHORT_TO_FLOAT,
- DRW_UNIFORM_INT,
- DRW_UNIFORM_INT_COPY,
- DRW_UNIFORM_FLOAT,
- DRW_UNIFORM_FLOAT_COPY,
- DRW_UNIFORM_TEXTURE,
- DRW_UNIFORM_TEXTURE_PERSIST,
- DRW_UNIFORM_TEXTURE_REF,
- DRW_UNIFORM_BLOCK,
- DRW_UNIFORM_BLOCK_PERSIST,
+ DRW_UNIFORM_BOOL,
+ DRW_UNIFORM_BOOL_COPY,
+ DRW_UNIFORM_SHORT_TO_INT,
+ DRW_UNIFORM_SHORT_TO_FLOAT,
+ DRW_UNIFORM_INT,
+ DRW_UNIFORM_INT_COPY,
+ DRW_UNIFORM_FLOAT,
+ DRW_UNIFORM_FLOAT_COPY,
+ DRW_UNIFORM_TEXTURE,
+ DRW_UNIFORM_TEXTURE_PERSIST,
+ DRW_UNIFORM_TEXTURE_REF,
+ DRW_UNIFORM_BLOCK,
+ DRW_UNIFORM_BLOCK_PERSIST,
} DRWUniformType;
struct DRWUniform {
- DRWUniform *next; /* single-linked list */
- union {
- /* For reference or array/vector types. */
- const void *pvalue;
- /* Single values. */
- float fvalue;
- int ivalue;
- };
- int name_ofs; /* name offset in name buffer. */
- int location;
- char type; /* DRWUniformType */
- char length; /* cannot be more than 16 */
- char arraysize; /* cannot be more than 16 too */
+ DRWUniform *next; /* single-linked list */
+ union {
+ /* For reference or array/vector types. */
+ const void *pvalue;
+ /* Single values. */
+ float fvalue;
+ int ivalue;
+ };
+ int name_ofs; /* name offset in name buffer. */
+ int location;
+ char type; /* DRWUniformType */
+ char length; /* cannot be more than 16 */
+ char arraysize; /* cannot be more than 16 too */
};
typedef enum {
- DRW_SHG_NORMAL,
- DRW_SHG_POINT_BATCH,
- DRW_SHG_LINE_BATCH,
- DRW_SHG_TRIANGLE_BATCH,
- DRW_SHG_INSTANCE,
- DRW_SHG_INSTANCE_EXTERNAL,
- DRW_SHG_FEEDBACK_TRANSFORM,
+ DRW_SHG_NORMAL,
+ DRW_SHG_POINT_BATCH,
+ DRW_SHG_LINE_BATCH,
+ DRW_SHG_TRIANGLE_BATCH,
+ DRW_SHG_INSTANCE,
+ DRW_SHG_INSTANCE_EXTERNAL,
+ DRW_SHG_FEEDBACK_TRANSFORM,
} DRWShadingGroupType;
struct DRWShadingGroup {
- DRWShadingGroup *next;
-
- GPUShader *shader; /* Shader to bind */
- DRWUniform *uniforms; /* Uniforms pointers */
-
- /* Watch this! Can be nasty for debugging. */
- union {
- struct { /* DRW_SHG_NORMAL */
- DRWCall *first, *last; /* Linked list of DRWCall or DRWCallDynamic depending of type */
- } calls;
- struct { /* DRW_SHG_FEEDBACK_TRANSFORM */
- DRWCall *first, *last; /* Linked list of DRWCall or DRWCallDynamic depending of type */
- struct GPUVertBuf *tfeedback_target; /* Transform Feedback target. */
- };
- struct { /* DRW_SHG_***_BATCH */
- struct GPUBatch *batch_geom; /* Result of call batching */
- struct GPUVertBuf *batch_vbo;
- uint primitive_count;
- };
- struct { /* DRW_SHG_INSTANCE[_EXTERNAL] */
- struct GPUBatch *instance_geom;
- struct GPUVertBuf *instance_vbo;
- uint instance_count;
- float instance_orcofac[2][3]; /* TODO find a better place. */
- };
- };
-
- DRWState state_extra; /* State changes for this batch only (or'd with the pass's state) */
- DRWState state_extra_disable; /* State changes for this batch only (and'd with the pass's state) */
- uint stencil_mask; /* Stencil mask to use for stencil test / write operations */
- DRWShadingGroupType type;
-
- /* Builtin matrices locations */
- int model;
- int modelinverse;
- int modelview;
- int modelviewinverse;
- int modelviewprojection;
- int normalview;
- int normalviewinverse;
- int normalworld;
- int orcotexfac;
- int eye;
- int callid;
- int objectinfo;
- uint16_t matflag; /* Matrices needed, same as DRWCall.flag */
-
- DRWPass *pass_parent; /* backlink to pass we're in */
+ DRWShadingGroup *next;
+
+ GPUShader *shader; /* Shader to bind */
+ DRWUniform *uniforms; /* Uniforms pointers */
+
+ /* Watch this! Can be nasty for debugging. */
+ union {
+ struct { /* DRW_SHG_NORMAL */
+ DRWCall *first, *last; /* Linked list of DRWCall or DRWCallDynamic depending of type */
+ } calls;
+ struct { /* DRW_SHG_FEEDBACK_TRANSFORM */
+ DRWCall *first, *last; /* Linked list of DRWCall or DRWCallDynamic depending of type */
+ struct GPUVertBuf *tfeedback_target; /* Transform Feedback target. */
+ };
+ struct { /* DRW_SHG_***_BATCH */
+ struct GPUBatch *batch_geom; /* Result of call batching */
+ struct GPUVertBuf *batch_vbo;
+ uint primitive_count;
+ };
+ struct { /* DRW_SHG_INSTANCE[_EXTERNAL] */
+ struct GPUBatch *instance_geom;
+ struct GPUVertBuf *instance_vbo;
+ uint instance_count;
+ float instance_orcofac[2][3]; /* TODO find a better place. */
+ };
+ };
+
+ DRWState state_extra; /* State changes for this batch only (or'd with the pass's state) */
+ DRWState
+ state_extra_disable; /* State changes for this batch only (and'd with the pass's state) */
+ uint stencil_mask; /* Stencil mask to use for stencil test / write operations */
+ DRWShadingGroupType type;
+
+ /* Builtin matrices locations */
+ int model;
+ int modelinverse;
+ int modelview;
+ int modelviewinverse;
+ int modelviewprojection;
+ int normalview;
+ int normalviewinverse;
+ int normalworld;
+ int orcotexfac;
+ int eye;
+ int callid;
+ int objectinfo;
+ uint16_t matflag; /* Matrices needed, same as DRWCall.flag */
+
+ DRWPass *pass_parent; /* backlink to pass we're in */
#ifndef NDEBUG
- char attrs_count;
+ char attrs_count;
#endif
#ifdef USE_GPU_SELECT
- GPUVertBuf *inst_selectid;
- int override_selectid; /* Override for single object instances. */
+ GPUVertBuf *inst_selectid;
+ int override_selectid; /* Override for single object instances. */
#endif
};
#define MAX_PASS_NAME 32
struct DRWPass {
- /* Linked list */
- struct {
- DRWShadingGroup *first;
- DRWShadingGroup *last;
- } shgroups;
-
- DRWState state;
- char name[MAX_PASS_NAME];
+ /* Linked list */
+ struct {
+ DRWShadingGroup *first;
+ DRWShadingGroup *last;
+ } shgroups;
+
+ DRWState state;
+ char name[MAX_PASS_NAME];
};
typedef struct ViewUboStorage {
- DRWMatrixState matstate;
- float viewcamtexcofac[4];
- float clipplanes[2][4];
+ DRWMatrixState matstate;
+ float viewcamtexcofac[4];
+ float clipplanes[2][4];
} ViewUboStorage;
/* ------------- DRAW DEBUG ------------ */
typedef struct DRWDebugLine {
- struct DRWDebugLine *next; /* linked list */
- float pos[2][3];
- float color[4];
+ struct DRWDebugLine *next; /* linked list */
+ float pos[2][3];
+ float color[4];
} DRWDebugLine;
typedef struct DRWDebugSphere {
- struct DRWDebugSphere *next; /* linked list */
- float mat[4][4];
- float color[4];
+ struct DRWDebugSphere *next; /* linked list */
+ float mat[4][4];
+ float color[4];
} DRWDebugSphere;
/* ------------- DRAW MANAGER ------------ */
-#define DST_MAX_SLOTS 64 /* Cannot be changed without modifying RST.bound_tex_slots */
+#define DST_MAX_SLOTS 64 /* Cannot be changed without modifying RST.bound_tex_slots */
#define MAX_CLIP_PLANES 6 /* GL_MAX_CLIP_PLANES is at least 6 */
#define STENCIL_UNDEFINED 256
typedef struct DRWManager {
- /* TODO clean up this struct a bit */
- /* Cache generation */
- ViewportMemoryPool *vmempool;
- DRWInstanceDataList *idatalist;
- DRWInstanceData *object_instance_data[MAX_INSTANCE_DATA_SIZE];
- /* State of the object being evaluated if already allocated. */
- DRWCallState *ob_state;
- uchar state_cache_id; /* Could be larger but 254 view changes is already a lot! */
- struct DupliObject *dupli_source;
- struct Object *dupli_parent;
-
- /* Rendering state */
- GPUShader *shader;
-
- /* Managed by `DRW_state_set`, `DRW_state_reset` */
- DRWState state;
- DRWState state_lock;
- uint stencil_mask;
-
- /* Per viewport */
- GPUViewport *viewport;
- struct GPUFrameBuffer *default_framebuffer;
- float size[2];
- float inv_size[2];
- float screenvecs[2][3];
- float pixsize;
-
- GLenum backface, frontface;
-
- struct {
- uint is_select : 1;
- uint is_depth : 1;
- uint is_image_render : 1;
- uint is_scene_render : 1;
- uint draw_background : 1;
- uint draw_text : 1;
- } options;
-
- /* Current rendering context */
- DRWContextState draw_ctx;
-
- /* Convenience pointer to text_store owned by the viewport */
- struct DRWTextStore **text_store_p;
-
- ListBase enabled_engines; /* RenderEngineType */
-
- bool buffer_finish_called; /* Avoid bad usage of DRW_render_instance_buffer_finish */
-
- /* View dependent uniforms. */
- DRWMatrixState original_mat; /* Original rv3d matrices. */
- int override_mat; /* Bitflag of which matrices are overridden. */
- int clip_planes_len; /* Number of active clipplanes. */
- bool dirty_mat;
-
- /* keep in sync with viewBlock */
- ViewUboStorage view_data;
-
- struct {
- float frustum_planes[6][4];
- BoundBox frustum_corners;
- BoundSphere frustum_bsphere;
- bool updated;
- } clipping;
+ /* TODO clean up this struct a bit */
+ /* Cache generation */
+ ViewportMemoryPool *vmempool;
+ DRWInstanceDataList *idatalist;
+ DRWInstanceData *object_instance_data[MAX_INSTANCE_DATA_SIZE];
+ /* State of the object being evaluated if already allocated. */
+ DRWCallState *ob_state;
+ uchar state_cache_id; /* Could be larger but 254 view changes is already a lot! */
+ struct DupliObject *dupli_source;
+ struct Object *dupli_parent;
+
+ /* Rendering state */
+ GPUShader *shader;
+
+ /* Managed by `DRW_state_set`, `DRW_state_reset` */
+ DRWState state;
+ DRWState state_lock;
+ uint stencil_mask;
+
+ /* Per viewport */
+ GPUViewport *viewport;
+ struct GPUFrameBuffer *default_framebuffer;
+ float size[2];
+ float inv_size[2];
+ float screenvecs[2][3];
+ float pixsize;
+
+ GLenum backface, frontface;
+
+ struct {
+ uint is_select : 1;
+ uint is_depth : 1;
+ uint is_image_render : 1;
+ uint is_scene_render : 1;
+ uint draw_background : 1;
+ uint draw_text : 1;
+ } options;
+
+ /* Current rendering context */
+ DRWContextState draw_ctx;
+
+ /* Convenience pointer to text_store owned by the viewport */
+ struct DRWTextStore **text_store_p;
+
+ ListBase enabled_engines; /* RenderEngineType */
+
+ bool buffer_finish_called; /* Avoid bad usage of DRW_render_instance_buffer_finish */
+
+ /* View dependent uniforms. */
+ DRWMatrixState original_mat; /* Original rv3d matrices. */
+ int override_mat; /* Bitflag of which matrices are overridden. */
+ int clip_planes_len; /* Number of active clipplanes. */
+ bool dirty_mat;
+
+ /* keep in sync with viewBlock */
+ ViewUboStorage view_data;
+
+ struct {
+ float frustum_planes[6][4];
+ BoundBox frustum_corners;
+ BoundSphere frustum_bsphere;
+ bool updated;
+ } clipping;
#ifdef USE_GPU_SELECT
- uint select_id;
+ uint select_id;
#endif
- /* ---------- Nothing after this point is cleared after use ----------- */
-
- /* gl_context serves as the offset for clearing only
- * the top portion of the struct so DO NOT MOVE IT! */
- void *gl_context; /* Unique ghost context used by the draw manager. */
- GPUContext *gpu_context;
- TicketMutex *gl_context_mutex; /* Mutex to lock the drw manager and avoid concurrent context usage. */
-
- /** GPU Resource State: Memory storage between drawing. */
- struct {
- /* High end GPUs supports up to 32 binds per shader stage.
- * We only use textures during the vertex and fragment stage,
- * so 2 * 32 slots is a nice limit. */
- GPUTexture *bound_texs[DST_MAX_SLOTS];
- uint64_t bound_tex_slots;
- uint64_t bound_tex_slots_persist;
-
- GPUUniformBuffer *bound_ubos[DST_MAX_SLOTS];
- uint64_t bound_ubo_slots;
- uint64_t bound_ubo_slots_persist;
- } RST;
-
- struct {
- /* TODO(fclem) optimize: use chunks. */
- DRWDebugLine *lines;
- DRWDebugSphere *spheres;
- } debug;
-
- struct {
- char *buffer;
- uint buffer_len;
- uint buffer_ofs;
- } uniform_names;
+ /* ---------- Nothing after this point is cleared after use ----------- */
+
+ /* gl_context serves as the offset for clearing only
+ * the top portion of the struct so DO NOT MOVE IT! */
+ void *gl_context; /* Unique ghost context used by the draw manager. */
+ GPUContext *gpu_context;
+ TicketMutex
+ *gl_context_mutex; /* Mutex to lock the drw manager and avoid concurrent context usage. */
+
+ /** GPU Resource State: Memory storage between drawing. */
+ struct {
+    /* High-end GPUs support up to 32 binds per shader stage.
+ * We only use textures during the vertex and fragment stage,
+ * so 2 * 32 slots is a nice limit. */
+ GPUTexture *bound_texs[DST_MAX_SLOTS];
+ uint64_t bound_tex_slots;
+ uint64_t bound_tex_slots_persist;
+
+ GPUUniformBuffer *bound_ubos[DST_MAX_SLOTS];
+ uint64_t bound_ubo_slots;
+ uint64_t bound_ubo_slots_persist;
+ } RST;
+
+ struct {
+ /* TODO(fclem) optimize: use chunks. */
+ DRWDebugLine *lines;
+ DRWDebugSphere *spheres;
+ } debug;
+
+ struct {
+ char *buffer;
+ uint buffer_len;
+ uint buffer_ofs;
+ } uniform_names;
} DRWManager;
extern DRWManager DST; /* TODO : get rid of this and allow multithreaded rendering */
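The bound_tex_slots / bound_ubo_slots fields are 64-bit masks with one bit
per slot, which is why DST_MAX_SLOTS cannot change without touching RST (as
the comment above notes). A sketch of the implied bit tests:

static inline bool slot_is_bound(uint64_t slots, int slot)
{
  return (slots & (1llu << slot)) != 0;
}

static inline uint64_t slot_mark_bound(uint64_t slots, int slot)
{
  return slots | (1llu << slot);
}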
diff --git a/source/blender/draw/intern/draw_manager_data.c b/source/blender/draw/intern/draw_manager_data.c
index 7f553c0926d..4a9f4fe910b 100644
--- a/source/blender/draw/intern/draw_manager_data.c
+++ b/source/blender/draw/intern/draw_manager_data.c
@@ -48,17 +48,17 @@ struct GPUVertFormat *g_pos_format = NULL;
GPUUniformBuffer *DRW_uniformbuffer_create(int size, const void *data)
{
- return GPU_uniformbuffer_create(size, data, NULL);
+ return GPU_uniformbuffer_create(size, data, NULL);
}
void DRW_uniformbuffer_update(GPUUniformBuffer *ubo, const void *data)
{
- GPU_uniformbuffer_update(ubo, data);
+ GPU_uniformbuffer_update(ubo, data);
}
void DRW_uniformbuffer_free(GPUUniformBuffer *ubo)
{
- GPU_uniformbuffer_free(ubo);
+ GPU_uniformbuffer_free(ubo);
}
/** \} */
@@ -67,198 +67,245 @@ void DRW_uniformbuffer_free(GPUUniformBuffer *ubo)
/** \name Uniforms (DRW_shgroup_uniform)
* \{ */
-static void drw_shgroup_uniform_create_ex(DRWShadingGroup *shgroup, int loc,
- DRWUniformType type, const void *value, int length, int arraysize)
-{
- DRWUniform *uni = BLI_mempool_alloc(DST.vmempool->uniforms);
- uni->location = loc;
- uni->type = type;
- uni->length = length;
- uni->arraysize = arraysize;
-
- switch (type) {
- case DRW_UNIFORM_INT_COPY:
- uni->ivalue = *((int *)value);
- break;
- case DRW_UNIFORM_BOOL_COPY:
- uni->ivalue = (int)*((bool *)value);
- break;
- case DRW_UNIFORM_FLOAT_COPY:
- uni->fvalue = *((float *)value);
- break;
- default:
- uni->pvalue = value;
- break;
- }
-
- BLI_LINKS_PREPEND(shgroup->uniforms, uni);
+static void drw_shgroup_uniform_create_ex(DRWShadingGroup *shgroup,
+ int loc,
+ DRWUniformType type,
+ const void *value,
+ int length,
+ int arraysize)
+{
+ DRWUniform *uni = BLI_mempool_alloc(DST.vmempool->uniforms);
+ uni->location = loc;
+ uni->type = type;
+ uni->length = length;
+ uni->arraysize = arraysize;
+
+ switch (type) {
+ case DRW_UNIFORM_INT_COPY:
+ uni->ivalue = *((int *)value);
+ break;
+ case DRW_UNIFORM_BOOL_COPY:
+ uni->ivalue = (int)*((bool *)value);
+ break;
+ case DRW_UNIFORM_FLOAT_COPY:
+ uni->fvalue = *((float *)value);
+ break;
+ default:
+ uni->pvalue = value;
+ break;
+ }
+
+ BLI_LINKS_PREPEND(shgroup->uniforms, uni);
}
static void drw_shgroup_builtin_uniform(
- DRWShadingGroup *shgroup, int builtin, const void *value, int length, int arraysize)
+ DRWShadingGroup *shgroup, int builtin, const void *value, int length, int arraysize)
{
- int loc = GPU_shader_get_builtin_uniform(shgroup->shader, builtin);
+ int loc = GPU_shader_get_builtin_uniform(shgroup->shader, builtin);
- if (loc != -1) {
- drw_shgroup_uniform_create_ex(shgroup, loc, DRW_UNIFORM_FLOAT, value, length, arraysize);
- }
+ if (loc != -1) {
+ drw_shgroup_uniform_create_ex(shgroup, loc, DRW_UNIFORM_FLOAT, value, length, arraysize);
+ }
}
-static void drw_shgroup_uniform(DRWShadingGroup *shgroup, const char *name,
- DRWUniformType type, const void *value, int length, int arraysize)
+static void drw_shgroup_uniform(DRWShadingGroup *shgroup,
+ const char *name,
+ DRWUniformType type,
+ const void *value,
+ int length,
+ int arraysize)
{
- int location;
- if (ELEM(type, DRW_UNIFORM_BLOCK, DRW_UNIFORM_BLOCK_PERSIST)) {
- location = GPU_shader_get_uniform_block(shgroup->shader, name);
- }
- else {
- location = GPU_shader_get_uniform(shgroup->shader, name);
- }
+ int location;
+ if (ELEM(type, DRW_UNIFORM_BLOCK, DRW_UNIFORM_BLOCK_PERSIST)) {
+ location = GPU_shader_get_uniform_block(shgroup->shader, name);
+ }
+ else {
+ location = GPU_shader_get_uniform(shgroup->shader, name);
+ }
- if (location == -1) {
- /* Nice to enable eventually, for now eevee uses uniforms that might not exist. */
- // BLI_assert(0);
- return;
- }
+ if (location == -1) {
+ /* Nice to enable eventually, for now eevee uses uniforms that might not exist. */
+ // BLI_assert(0);
+ return;
+ }
- BLI_assert(arraysize > 0 && arraysize <= 16);
- BLI_assert(length >= 0 && length <= 16);
+ BLI_assert(arraysize > 0 && arraysize <= 16);
+ BLI_assert(length >= 0 && length <= 16);
- drw_shgroup_uniform_create_ex(shgroup, location, type, value, length, arraysize);
+ drw_shgroup_uniform_create_ex(shgroup, location, type, value, length, arraysize);
- /* If location is -2, the uniform has not yet been queried.
- * We save the name for query just before drawing. */
- if (location == -2 || DRW_DEBUG_USE_UNIFORM_NAME) {
- int ofs = DST.uniform_names.buffer_ofs;
- int max_len = DST.uniform_names.buffer_len - ofs;
- size_t len = strlen(name) + 1;
+ /* If location is -2, the uniform has not yet been queried.
+ * We save the name for query just before drawing. */
+ if (location == -2 || DRW_DEBUG_USE_UNIFORM_NAME) {
+ int ofs = DST.uniform_names.buffer_ofs;
+ int max_len = DST.uniform_names.buffer_len - ofs;
+ size_t len = strlen(name) + 1;
- if (len >= max_len) {
- DST.uniform_names.buffer_len += DRW_UNIFORM_BUFFER_NAME_INC;
- DST.uniform_names.buffer = MEM_reallocN(DST.uniform_names.buffer, DST.uniform_names.buffer_len);
- }
+ if (len >= max_len) {
+ DST.uniform_names.buffer_len += DRW_UNIFORM_BUFFER_NAME_INC;
+ DST.uniform_names.buffer = MEM_reallocN(DST.uniform_names.buffer,
+ DST.uniform_names.buffer_len);
+ }
- char *dst = DST.uniform_names.buffer + ofs;
- memcpy(dst, name, len); /* Copies NULL terminator. */
+ char *dst = DST.uniform_names.buffer + ofs;
+ memcpy(dst, name, len); /* Copies NULL terminator. */
- DST.uniform_names.buffer_ofs += len;
- shgroup->uniforms->name_ofs = ofs;
- }
+ DST.uniform_names.buffer_ofs += len;
+ shgroup->uniforms->name_ofs = ofs;
+ }
}
void DRW_shgroup_uniform_texture(DRWShadingGroup *shgroup, const char *name, const GPUTexture *tex)
{
- BLI_assert(tex != NULL);
- drw_shgroup_uniform(shgroup, name, DRW_UNIFORM_TEXTURE, tex, 0, 1);
+ BLI_assert(tex != NULL);
+ drw_shgroup_uniform(shgroup, name, DRW_UNIFORM_TEXTURE, tex, 0, 1);
}
/* Same as DRW_shgroup_uniform_texture but is guaranteed to be bound if shader does not change between shgrp. */
-void DRW_shgroup_uniform_texture_persistent(DRWShadingGroup *shgroup, const char *name, const GPUTexture *tex)
+void DRW_shgroup_uniform_texture_persistent(DRWShadingGroup *shgroup,
+ const char *name,
+ const GPUTexture *tex)
{
- BLI_assert(tex != NULL);
- drw_shgroup_uniform(shgroup, name, DRW_UNIFORM_TEXTURE_PERSIST, tex, 0, 1);
+ BLI_assert(tex != NULL);
+ drw_shgroup_uniform(shgroup, name, DRW_UNIFORM_TEXTURE_PERSIST, tex, 0, 1);
}
-void DRW_shgroup_uniform_block(DRWShadingGroup *shgroup, const char *name, const GPUUniformBuffer *ubo)
+void DRW_shgroup_uniform_block(DRWShadingGroup *shgroup,
+ const char *name,
+ const GPUUniformBuffer *ubo)
{
- BLI_assert(ubo != NULL);
- drw_shgroup_uniform(shgroup, name, DRW_UNIFORM_BLOCK, ubo, 0, 1);
+ BLI_assert(ubo != NULL);
+ drw_shgroup_uniform(shgroup, name, DRW_UNIFORM_BLOCK, ubo, 0, 1);
}
/* Same as DRW_shgroup_uniform_block but is guaranteed to be bound if shader does not change between shgrp. */
-void DRW_shgroup_uniform_block_persistent(DRWShadingGroup *shgroup, const char *name, const GPUUniformBuffer *ubo)
+void DRW_shgroup_uniform_block_persistent(DRWShadingGroup *shgroup,
+ const char *name,
+ const GPUUniformBuffer *ubo)
{
- BLI_assert(ubo != NULL);
- drw_shgroup_uniform(shgroup, name, DRW_UNIFORM_BLOCK_PERSIST, ubo, 0, 1);
+ BLI_assert(ubo != NULL);
+ drw_shgroup_uniform(shgroup, name, DRW_UNIFORM_BLOCK_PERSIST, ubo, 0, 1);
}
void DRW_shgroup_uniform_texture_ref(DRWShadingGroup *shgroup, const char *name, GPUTexture **tex)
{
- drw_shgroup_uniform(shgroup, name, DRW_UNIFORM_TEXTURE_REF, tex, 0, 1);
+ drw_shgroup_uniform(shgroup, name, DRW_UNIFORM_TEXTURE_REF, tex, 0, 1);
}
-void DRW_shgroup_uniform_bool(DRWShadingGroup *shgroup, const char *name, const int *value, int arraysize)
+void DRW_shgroup_uniform_bool(DRWShadingGroup *shgroup,
+ const char *name,
+ const int *value,
+ int arraysize)
{
- drw_shgroup_uniform(shgroup, name, DRW_UNIFORM_BOOL, value, 1, arraysize);
+ drw_shgroup_uniform(shgroup, name, DRW_UNIFORM_BOOL, value, 1, arraysize);
}
-void DRW_shgroup_uniform_float(DRWShadingGroup *shgroup, const char *name, const float *value, int arraysize)
+void DRW_shgroup_uniform_float(DRWShadingGroup *shgroup,
+ const char *name,
+ const float *value,
+ int arraysize)
{
- drw_shgroup_uniform(shgroup, name, DRW_UNIFORM_FLOAT, value, 1, arraysize);
+ drw_shgroup_uniform(shgroup, name, DRW_UNIFORM_FLOAT, value, 1, arraysize);
}
-void DRW_shgroup_uniform_vec2(DRWShadingGroup *shgroup, const char *name, const float *value, int arraysize)
+void DRW_shgroup_uniform_vec2(DRWShadingGroup *shgroup,
+ const char *name,
+ const float *value,
+ int arraysize)
{
- drw_shgroup_uniform(shgroup, name, DRW_UNIFORM_FLOAT, value, 2, arraysize);
+ drw_shgroup_uniform(shgroup, name, DRW_UNIFORM_FLOAT, value, 2, arraysize);
}
-void DRW_shgroup_uniform_vec3(DRWShadingGroup *shgroup, const char *name, const float *value, int arraysize)
+void DRW_shgroup_uniform_vec3(DRWShadingGroup *shgroup,
+ const char *name,
+ const float *value,
+ int arraysize)
{
- drw_shgroup_uniform(shgroup, name, DRW_UNIFORM_FLOAT, value, 3, arraysize);
+ drw_shgroup_uniform(shgroup, name, DRW_UNIFORM_FLOAT, value, 3, arraysize);
}
-void DRW_shgroup_uniform_vec4(DRWShadingGroup *shgroup, const char *name, const float *value, int arraysize)
+void DRW_shgroup_uniform_vec4(DRWShadingGroup *shgroup,
+ const char *name,
+ const float *value,
+ int arraysize)
{
- drw_shgroup_uniform(shgroup, name, DRW_UNIFORM_FLOAT, value, 4, arraysize);
+ drw_shgroup_uniform(shgroup, name, DRW_UNIFORM_FLOAT, value, 4, arraysize);
}
-void DRW_shgroup_uniform_short_to_int(DRWShadingGroup *shgroup, const char *name, const short *value, int arraysize)
+void DRW_shgroup_uniform_short_to_int(DRWShadingGroup *shgroup,
+ const char *name,
+ const short *value,
+ int arraysize)
{
- drw_shgroup_uniform(shgroup, name, DRW_UNIFORM_SHORT_TO_INT, value, 1, arraysize);
+ drw_shgroup_uniform(shgroup, name, DRW_UNIFORM_SHORT_TO_INT, value, 1, arraysize);
}
-void DRW_shgroup_uniform_short_to_float(DRWShadingGroup *shgroup, const char *name, const short *value, int arraysize)
+void DRW_shgroup_uniform_short_to_float(DRWShadingGroup *shgroup,
+ const char *name,
+ const short *value,
+ int arraysize)
{
- drw_shgroup_uniform(shgroup, name, DRW_UNIFORM_SHORT_TO_FLOAT, value, 1, arraysize);
+ drw_shgroup_uniform(shgroup, name, DRW_UNIFORM_SHORT_TO_FLOAT, value, 1, arraysize);
}
-void DRW_shgroup_uniform_int(DRWShadingGroup *shgroup, const char *name, const int *value, int arraysize)
+void DRW_shgroup_uniform_int(DRWShadingGroup *shgroup,
+ const char *name,
+ const int *value,
+ int arraysize)
{
- drw_shgroup_uniform(shgroup, name, DRW_UNIFORM_INT, value, 1, arraysize);
+ drw_shgroup_uniform(shgroup, name, DRW_UNIFORM_INT, value, 1, arraysize);
}
-void DRW_shgroup_uniform_ivec2(DRWShadingGroup *shgroup, const char *name, const int *value, int arraysize)
+void DRW_shgroup_uniform_ivec2(DRWShadingGroup *shgroup,
+ const char *name,
+ const int *value,
+ int arraysize)
{
- drw_shgroup_uniform(shgroup, name, DRW_UNIFORM_INT, value, 2, arraysize);
+ drw_shgroup_uniform(shgroup, name, DRW_UNIFORM_INT, value, 2, arraysize);
}
-void DRW_shgroup_uniform_ivec3(DRWShadingGroup *shgroup, const char *name, const int *value, int arraysize)
+void DRW_shgroup_uniform_ivec3(DRWShadingGroup *shgroup,
+ const char *name,
+ const int *value,
+ int arraysize)
{
- drw_shgroup_uniform(shgroup, name, DRW_UNIFORM_INT, value, 3, arraysize);
+ drw_shgroup_uniform(shgroup, name, DRW_UNIFORM_INT, value, 3, arraysize);
}
-void DRW_shgroup_uniform_ivec4(DRWShadingGroup *shgroup, const char *name, const int *value, int arraysize)
+void DRW_shgroup_uniform_ivec4(DRWShadingGroup *shgroup,
+ const char *name,
+ const int *value,
+ int arraysize)
{
- drw_shgroup_uniform(shgroup, name, DRW_UNIFORM_INT, value, 4, arraysize);
+ drw_shgroup_uniform(shgroup, name, DRW_UNIFORM_INT, value, 4, arraysize);
}
void DRW_shgroup_uniform_mat3(DRWShadingGroup *shgroup, const char *name, const float (*value)[3])
{
- drw_shgroup_uniform(shgroup, name, DRW_UNIFORM_FLOAT, (float *)value, 9, 1);
+ drw_shgroup_uniform(shgroup, name, DRW_UNIFORM_FLOAT, (float *)value, 9, 1);
}
void DRW_shgroup_uniform_mat4(DRWShadingGroup *shgroup, const char *name, const float (*value)[4])
{
- drw_shgroup_uniform(shgroup, name, DRW_UNIFORM_FLOAT, (float *)value, 16, 1);
+ drw_shgroup_uniform(shgroup, name, DRW_UNIFORM_FLOAT, (float *)value, 16, 1);
}
/* Stores the int instead of a pointer. */
void DRW_shgroup_uniform_int_copy(DRWShadingGroup *shgroup, const char *name, const int value)
{
- drw_shgroup_uniform(shgroup, name, DRW_UNIFORM_INT_COPY, &value, 1, 1);
+ drw_shgroup_uniform(shgroup, name, DRW_UNIFORM_INT_COPY, &value, 1, 1);
}
void DRW_shgroup_uniform_bool_copy(DRWShadingGroup *shgroup, const char *name, const bool value)
{
- drw_shgroup_uniform(shgroup, name, DRW_UNIFORM_BOOL_COPY, &value, 1, 1);
+ drw_shgroup_uniform(shgroup, name, DRW_UNIFORM_BOOL_COPY, &value, 1, 1);
}
void DRW_shgroup_uniform_float_copy(DRWShadingGroup *shgroup, const char *name, const float value)
{
- drw_shgroup_uniform(shgroup, name, DRW_UNIFORM_FLOAT_COPY, &value, 1, 1);
+ drw_shgroup_uniform(shgroup, name, DRW_UNIFORM_FLOAT_COPY, &value, 1, 1);
}
-
/** \} */
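A minimal usage sketch of the setters above (hypothetical engine code; "color"
and "sampleCount" are made-up uniform names). The pointer-based setters store
only the pointer, so the referenced storage must outlive drawing; the *_copy
variants capture the value immediately.

static float g_color[4] = {1.0f, 0.5f, 0.0f, 1.0f}; /* must outlive drawing */

static void myengine_setup_uniforms(DRWShadingGroup *grp)
{
  /* Pointer-based: read at draw time, so later edits to g_color are visible. */
  DRW_shgroup_uniform_vec4(grp, "color", g_color, 1);
  /* Copy-based: the value 4 is captured now; no lifetime concern. */
  DRW_shgroup_uniform_int_copy(grp, "sampleCount", 4);
}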
/* -------------------------------------------------------------------- */
@@ -267,390 +314,410 @@ void DRW_shgroup_uniform_float_copy(DRWShadingGroup *shgroup, const char *name,
static void drw_call_calc_orco(Object *ob, float (*r_orcofacs)[3])
{
- ID *ob_data = (ob) ? ob->data : NULL;
- float *texcoloc = NULL;
- float *texcosize = NULL;
- if (ob_data != NULL) {
- switch (GS(ob_data->name)) {
- case ID_ME:
- BKE_mesh_texspace_get_reference((Mesh *)ob_data, NULL, &texcoloc, NULL, &texcosize);
- break;
- case ID_CU:
- {
- Curve *cu = (Curve *)ob_data;
- if (cu->bb == NULL || (cu->bb->flag & BOUNDBOX_DIRTY)) {
- BKE_curve_texspace_calc(cu);
- }
- texcoloc = cu->loc;
- texcosize = cu->size;
- break;
- }
- case ID_MB:
- {
- MetaBall *mb = (MetaBall *)ob_data;
- texcoloc = mb->loc;
- texcosize = mb->size;
- break;
- }
- default:
- break;
- }
- }
-
- if ((texcoloc != NULL) && (texcosize != NULL)) {
- mul_v3_v3fl(r_orcofacs[1], texcosize, 2.0f);
- invert_v3(r_orcofacs[1]);
- sub_v3_v3v3(r_orcofacs[0], texcoloc, texcosize);
- negate_v3(r_orcofacs[0]);
- mul_v3_v3(r_orcofacs[0], r_orcofacs[1]); /* result in a nice MADD in the shader */
- }
- else {
- copy_v3_fl(r_orcofacs[0], 0.0f);
- copy_v3_fl(r_orcofacs[1], 1.0f);
- }
-}
-
-static void drw_call_state_update_matflag(DRWCallState *state, DRWShadingGroup *shgroup, Object *ob)
-{
- uint16_t new_flags = ((state->matflag ^ shgroup->matflag) & shgroup->matflag);
-
- /* HACK: Here we set the matflags bit to 1 when computing the value
- * so that it's not recomputed for other drawcalls.
- * This is the opposite of what draw_matrices_model_prepare() does. */
- state->matflag |= shgroup->matflag;
-
- /* Orco factors: We compute this at creation to not have to save the *ob_data */
- if ((new_flags & DRW_CALL_ORCOTEXFAC) != 0) {
- drw_call_calc_orco(ob, state->orcotexfac);
- }
-
- if ((new_flags & DRW_CALL_OBJECTINFO) != 0) {
- state->objectinfo[0] = ob ? ob->index : 0;
- uint random;
- if (DST.dupli_source) {
- random = DST.dupli_source->random_id;
- }
- else {
- random = BLI_hash_int_2d(BLI_hash_string(ob->id.name + 2), 0);
- }
- state->objectinfo[1] = random * (1.0f / (float)0xFFFFFFFF);
- }
+ ID *ob_data = (ob) ? ob->data : NULL;
+ float *texcoloc = NULL;
+ float *texcosize = NULL;
+ if (ob_data != NULL) {
+ switch (GS(ob_data->name)) {
+ case ID_ME:
+ BKE_mesh_texspace_get_reference((Mesh *)ob_data, NULL, &texcoloc, NULL, &texcosize);
+ break;
+ case ID_CU: {
+ Curve *cu = (Curve *)ob_data;
+ if (cu->bb == NULL || (cu->bb->flag & BOUNDBOX_DIRTY)) {
+ BKE_curve_texspace_calc(cu);
+ }
+ texcoloc = cu->loc;
+ texcosize = cu->size;
+ break;
+ }
+ case ID_MB: {
+ MetaBall *mb = (MetaBall *)ob_data;
+ texcoloc = mb->loc;
+ texcosize = mb->size;
+ break;
+ }
+ default:
+ break;
+ }
+ }
+
+ if ((texcoloc != NULL) && (texcosize != NULL)) {
+ mul_v3_v3fl(r_orcofacs[1], texcosize, 2.0f);
+ invert_v3(r_orcofacs[1]);
+ sub_v3_v3v3(r_orcofacs[0], texcoloc, texcosize);
+ negate_v3(r_orcofacs[0]);
+ mul_v3_v3(r_orcofacs[0], r_orcofacs[1]); /* result in a nice MADD in the shader */
+ }
+ else {
+ copy_v3_fl(r_orcofacs[0], 0.0f);
+ copy_v3_fl(r_orcofacs[1], 1.0f);
+ }
+}
+
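The two factors computed above encode an affine remap of object-space
positions into [0, 1] generated texture coordinates over the texspace box
[loc - size, loc + size], which is why a single MADD per axis suffices in the
shader. A scalar sketch of the same math:

static float orco_from_pos(float pos, float loc, float size)
{
  const float fac1 = 1.0f / (2.0f * size); /* r_orcofacs[1] */
  const float fac0 = -(loc - size) * fac1; /* r_orcofacs[0] */
  return pos * fac1 + fac0; /* 0.0 at loc - size, 1.0 at loc + size */
}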
+static void drw_call_state_update_matflag(DRWCallState *state,
+ DRWShadingGroup *shgroup,
+ Object *ob)
+{
+ uint16_t new_flags = ((state->matflag ^ shgroup->matflag) & shgroup->matflag);
+
+ /* HACK: Here we set the matflags bit to 1 when computing the value
+ * so that it's not recomputed for other drawcalls.
+ * This is the opposite of what draw_matrices_model_prepare() does. */
+ state->matflag |= shgroup->matflag;
+
+ /* Orco factors: We compute this at creation so we don't have to store the ob_data pointer. */
+ if ((new_flags & DRW_CALL_ORCOTEXFAC) != 0) {
+ drw_call_calc_orco(ob, state->orcotexfac);
+ }
+
+ if ((new_flags & DRW_CALL_OBJECTINFO) != 0) {
+ state->objectinfo[0] = ob ? ob->index : 0;
+ uint random;
+ if (DST.dupli_source) {
+ random = DST.dupli_source->random_id;
+ }
+ else {
+ random = BLI_hash_int_2d(BLI_hash_string(ob->id.name + 2), 0);
+ }
+ state->objectinfo[1] = random * (1.0f / (float)0xFFFFFFFF);
+ }
}
static DRWCallState *drw_call_state_create(DRWShadingGroup *shgroup, float (*obmat)[4], Object *ob)
{
- DRWCallState *state = BLI_mempool_alloc(DST.vmempool->states);
- state->flag = 0;
- state->cache_id = 0;
- state->visibility_cb = NULL;
- state->matflag = 0;
-
- /* Matrices */
- if (obmat != NULL) {
- copy_m4_m4(state->model, obmat);
-
- if (is_negative_m4(state->model)) {
- state->flag |= DRW_CALL_NEGSCALE;
- }
- }
- else {
- unit_m4(state->model);
- }
-
- if (ob != NULL) {
- float corner[3];
- BoundBox *bbox = BKE_object_boundbox_get(ob);
- /* Get BoundSphere center and radius from the BoundBox. */
- mid_v3_v3v3(state->bsphere.center, bbox->vec[0], bbox->vec[6]);
- mul_v3_m4v3(corner, obmat, bbox->vec[0]);
- mul_m4_v3(obmat, state->bsphere.center);
- state->bsphere.radius = len_v3v3(state->bsphere.center, corner);
- }
- else {
- /* Bypass test. */
- state->bsphere.radius = -1.0f;
- }
-
- drw_call_state_update_matflag(state, shgroup, ob);
-
- return state;
+ DRWCallState *state = BLI_mempool_alloc(DST.vmempool->states);
+ state->flag = 0;
+ state->cache_id = 0;
+ state->visibility_cb = NULL;
+ state->matflag = 0;
+
+ /* Matrices */
+ if (obmat != NULL) {
+ copy_m4_m4(state->model, obmat);
+
+ if (is_negative_m4(state->model)) {
+ state->flag |= DRW_CALL_NEGSCALE;
+ }
+ }
+ else {
+ unit_m4(state->model);
+ }
+
+ if (ob != NULL) {
+ float corner[3];
+ BoundBox *bbox = BKE_object_boundbox_get(ob);
+ /* Get BoundSphere center and radius from the BoundBox. */
+ mid_v3_v3v3(state->bsphere.center, bbox->vec[0], bbox->vec[6]);
+ mul_v3_m4v3(corner, obmat, bbox->vec[0]);
+ mul_m4_v3(obmat, state->bsphere.center);
+ state->bsphere.radius = len_v3v3(state->bsphere.center, corner);
+ }
+ else {
+ /* Bypass the culling test. */
+ state->bsphere.radius = -1.0f;
+ }
+
+ drw_call_state_update_matflag(state, shgroup, ob);
+
+ return state;
}
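The bound sphere above comes from two opposite bounding-box corners: their
midpoint, transformed by obmat, becomes the center, and the distance to one
transformed corner becomes the radius (radius = -1.0f is the sentinel that
bypasses the visibility test). A standalone sketch of the object-space part,
using hypothetical types:

#include <math.h>

typedef struct SphereSketch { float center[3], radius; } SphereSketch;

static SphereSketch sphere_from_aabb(const float min[3], const float max[3])
{
  SphereSketch s;
  float d[3];
  for (int i = 0; i < 3; i++) {
    s.center[i] = 0.5f * (min[i] + max[i]); /* midpoint of opposite corners */
    d[i] = s.center[i] - min[i];            /* half-diagonal */
  }
  s.radius = sqrtf(d[0] * d[0] + d[1] * d[1] + d[2] * d[2]);
  return s;
}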
static DRWCallState *drw_call_state_object(DRWShadingGroup *shgroup, float (*obmat)[4], Object *ob)
{
- if (DST.ob_state == NULL) {
- DST.ob_state = drw_call_state_create(shgroup, obmat, ob);
- }
- else {
- /* If the DRWCallState is reused, add necessary matrices. */
- drw_call_state_update_matflag(DST.ob_state, shgroup, ob);
- }
+ if (DST.ob_state == NULL) {
+ DST.ob_state = drw_call_state_create(shgroup, obmat, ob);
+ }
+ else {
+ /* If the DRWCallState is reused, add necessary matrices. */
+ drw_call_state_update_matflag(DST.ob_state, shgroup, ob);
+ }
- return DST.ob_state;
+ return DST.ob_state;
}
void DRW_shgroup_call_add(DRWShadingGroup *shgroup, GPUBatch *geom, float (*obmat)[4])
{
- BLI_assert(geom != NULL);
- BLI_assert(ELEM(shgroup->type, DRW_SHG_NORMAL, DRW_SHG_FEEDBACK_TRANSFORM));
+ BLI_assert(geom != NULL);
+ BLI_assert(ELEM(shgroup->type, DRW_SHG_NORMAL, DRW_SHG_FEEDBACK_TRANSFORM));
- DRWCall *call = BLI_mempool_alloc(DST.vmempool->calls);
- call->state = drw_call_state_create(shgroup, obmat, NULL);
- call->type = DRW_CALL_SINGLE;
- call->single.geometry = geom;
+ DRWCall *call = BLI_mempool_alloc(DST.vmempool->calls);
+ call->state = drw_call_state_create(shgroup, obmat, NULL);
+ call->type = DRW_CALL_SINGLE;
+ call->single.geometry = geom;
#ifdef USE_GPU_SELECT
- call->select_id = DST.select_id;
+ call->select_id = DST.select_id;
#endif
- BLI_LINKS_APPEND(&shgroup->calls, call);
+ BLI_LINKS_APPEND(&shgroup->calls, call);
}
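Hypothetical population code for the function above; DRW_cache_object_surface_get
is assumed here as the geometry source (a draw-cache helper, not part of this
hunk):

static void myengine_cache_populate(DRWShadingGroup *grp, Object *ob)
{
  struct GPUBatch *geom = DRW_cache_object_surface_get(ob);
  if (geom != NULL) {
    DRW_shgroup_call_add(grp, geom, ob->obmat);
  }
}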
-void DRW_shgroup_call_range_add(DRWShadingGroup *shgroup, GPUBatch *geom, float (*obmat)[4], uint v_sta, uint v_count)
+void DRW_shgroup_call_range_add(
+ DRWShadingGroup *shgroup, GPUBatch *geom, float (*obmat)[4], uint v_sta, uint v_count)
{
- BLI_assert(geom != NULL);
- BLI_assert(ELEM(shgroup->type, DRW_SHG_NORMAL, DRW_SHG_FEEDBACK_TRANSFORM));
- BLI_assert(v_count);
+ BLI_assert(geom != NULL);
+ BLI_assert(ELEM(shgroup->type, DRW_SHG_NORMAL, DRW_SHG_FEEDBACK_TRANSFORM));
+ BLI_assert(v_count);
- DRWCall *call = BLI_mempool_alloc(DST.vmempool->calls);
- call->state = drw_call_state_create(shgroup, obmat, NULL);
- call->type = DRW_CALL_RANGE;
- call->range.geometry = geom;
- call->range.start = v_sta;
- call->range.count = v_count;
+ DRWCall *call = BLI_mempool_alloc(DST.vmempool->calls);
+ call->state = drw_call_state_create(shgroup, obmat, NULL);
+ call->type = DRW_CALL_RANGE;
+ call->range.geometry = geom;
+ call->range.start = v_sta;
+ call->range.count = v_count;
#ifdef USE_GPU_SELECT
- call->select_id = DST.select_id;
+ call->select_id = DST.select_id;
#endif
- BLI_LINKS_APPEND(&shgroup->calls, call);
+ BLI_LINKS_APPEND(&shgroup->calls, call);
}
-static void drw_shgroup_call_procedural_add_ex(
- DRWShadingGroup *shgroup, GPUPrimType prim_type, uint vert_count, float (*obmat)[4], Object *ob)
+static void drw_shgroup_call_procedural_add_ex(DRWShadingGroup *shgroup,
+ GPUPrimType prim_type,
+ uint vert_count,
+ float (*obmat)[4],
+ Object *ob)
{
- BLI_assert(ELEM(shgroup->type, DRW_SHG_NORMAL, DRW_SHG_FEEDBACK_TRANSFORM));
+ BLI_assert(ELEM(shgroup->type, DRW_SHG_NORMAL, DRW_SHG_FEEDBACK_TRANSFORM));
- DRWCall *call = BLI_mempool_alloc(DST.vmempool->calls);
- if (ob) {
- call->state = drw_call_state_object(shgroup, ob->obmat, ob);
- }
- else {
- call->state = drw_call_state_create(shgroup, obmat, NULL);
- }
- call->type = DRW_CALL_PROCEDURAL;
- call->procedural.prim_type = prim_type;
- call->procedural.vert_count = vert_count;
+ DRWCall *call = BLI_mempool_alloc(DST.vmempool->calls);
+ if (ob) {
+ call->state = drw_call_state_object(shgroup, ob->obmat, ob);
+ }
+ else {
+ call->state = drw_call_state_create(shgroup, obmat, NULL);
+ }
+ call->type = DRW_CALL_PROCEDURAL;
+ call->procedural.prim_type = prim_type;
+ call->procedural.vert_count = vert_count;
#ifdef USE_GPU_SELECT
- call->select_id = DST.select_id;
+ call->select_id = DST.select_id;
#endif
- BLI_LINKS_APPEND(&shgroup->calls, call);
+ BLI_LINKS_APPEND(&shgroup->calls, call);
}
-void DRW_shgroup_call_procedural_points_add(DRWShadingGroup *shgroup, uint point_len, float (*obmat)[4])
+void DRW_shgroup_call_procedural_points_add(DRWShadingGroup *shgroup,
+ uint point_len,
+ float (*obmat)[4])
{
- drw_shgroup_call_procedural_add_ex(shgroup, GPU_PRIM_POINTS, point_len, obmat, NULL);
+ drw_shgroup_call_procedural_add_ex(shgroup, GPU_PRIM_POINTS, point_len, obmat, NULL);
}
-void DRW_shgroup_call_procedural_lines_add(DRWShadingGroup *shgroup, uint line_count, float (*obmat)[4])
+void DRW_shgroup_call_procedural_lines_add(DRWShadingGroup *shgroup,
+ uint line_count,
+ float (*obmat)[4])
{
- drw_shgroup_call_procedural_add_ex(shgroup, GPU_PRIM_LINES, line_count * 2, obmat, NULL);
+ drw_shgroup_call_procedural_add_ex(shgroup, GPU_PRIM_LINES, line_count * 2, obmat, NULL);
}
-void DRW_shgroup_call_procedural_triangles_add(DRWShadingGroup *shgroup, uint tria_count, float (*obmat)[4])
+void DRW_shgroup_call_procedural_triangles_add(DRWShadingGroup *shgroup,
+ uint tria_count,
+ float (*obmat)[4])
{
- drw_shgroup_call_procedural_add_ex(shgroup, GPU_PRIM_TRIS, tria_count * 3, obmat, NULL);
+ drw_shgroup_call_procedural_add_ex(shgroup, GPU_PRIM_TRIS, tria_count * 3, obmat, NULL);
}
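These procedural variants bind no vertex buffer; the vertex shader is expected
to derive positions from the vertex index, so the wrappers only translate
primitive counts into vertex counts (lines * 2, triangles * 3). A hypothetical
usage sketch:

static void myengine_add_grid(DRWShadingGroup *grp, float (*obmat)[4])
{
  const uint line_count = 64;
  /* Recorded as a GPU_PRIM_LINES call with line_count * 2 vertices. */
  DRW_shgroup_call_procedural_lines_add(grp, line_count, obmat);
}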
/* TODO (fclem): this is a sign that the API is starting to be limiting.
 * Maybe add a general-purpose function for these special cases. */
-void DRW_shgroup_call_object_procedural_triangles_culled_add(DRWShadingGroup *shgroup, uint tria_count, Object *ob)
+void DRW_shgroup_call_object_procedural_triangles_culled_add(DRWShadingGroup *shgroup,
+ uint tria_count,
+ Object *ob)
{
- drw_shgroup_call_procedural_add_ex(shgroup, GPU_PRIM_TRIS, tria_count * 3, NULL, ob);
+ drw_shgroup_call_procedural_add_ex(shgroup, GPU_PRIM_TRIS, tria_count * 3, NULL, ob);
}
/* These calls can be culled and are optimized for redraw */
-void DRW_shgroup_call_object_add_ex(DRWShadingGroup *shgroup, GPUBatch *geom, Object *ob, Material *ma, bool bypass_culling)
+void DRW_shgroup_call_object_add_ex(
+ DRWShadingGroup *shgroup, GPUBatch *geom, Object *ob, Material *ma, bool bypass_culling)
{
- BLI_assert(geom != NULL);
- BLI_assert(ELEM(shgroup->type, DRW_SHG_NORMAL, DRW_SHG_FEEDBACK_TRANSFORM));
+ BLI_assert(geom != NULL);
+ BLI_assert(ELEM(shgroup->type, DRW_SHG_NORMAL, DRW_SHG_FEEDBACK_TRANSFORM));
- DRWCall *call = BLI_mempool_alloc(DST.vmempool->calls);
- call->state = drw_call_state_object(shgroup, ob->obmat, ob);
- call->type = DRW_CALL_SINGLE;
- call->single.geometry = geom;
- call->single.ma_index = ma ? ma->index : 0;
+ DRWCall *call = BLI_mempool_alloc(DST.vmempool->calls);
+ call->state = drw_call_state_object(shgroup, ob->obmat, ob);
+ call->type = DRW_CALL_SINGLE;
+ call->single.geometry = geom;
+ call->single.ma_index = ma ? ma->index : 0;
#ifdef USE_GPU_SELECT
- call->select_id = DST.select_id;
+ call->select_id = DST.select_id;
#endif
- /* NOTE this will disable culling for the whole object. */
- call->state->flag |= (bypass_culling) ? DRW_CALL_BYPASS_CULLING : 0;
+ /* NOTE: this will disable culling for the whole object. */
+ call->state->flag |= (bypass_culling) ? DRW_CALL_BYPASS_CULLING : 0;
- BLI_LINKS_APPEND(&shgroup->calls, call);
+ BLI_LINKS_APPEND(&shgroup->calls, call);
}
-void DRW_shgroup_call_object_add_with_callback(
- DRWShadingGroup *shgroup, GPUBatch *geom, Object *ob, Material *ma,
- DRWCallVisibilityFn *callback, void *user_data)
+void DRW_shgroup_call_object_add_with_callback(DRWShadingGroup *shgroup,
+ GPUBatch *geom,
+ Object *ob,
+ Material *ma,
+ DRWCallVisibilityFn *callback,
+ void *user_data)
{
- BLI_assert(geom != NULL);
- BLI_assert(ELEM(shgroup->type, DRW_SHG_NORMAL, DRW_SHG_FEEDBACK_TRANSFORM));
+ BLI_assert(geom != NULL);
+ BLI_assert(ELEM(shgroup->type, DRW_SHG_NORMAL, DRW_SHG_FEEDBACK_TRANSFORM));
- DRWCall *call = BLI_mempool_alloc(DST.vmempool->calls);
- call->state = drw_call_state_object(shgroup, ob->obmat, ob);
- call->state->visibility_cb = callback;
- call->state->user_data = user_data;
- call->type = DRW_CALL_SINGLE;
- call->single.geometry = geom;
- call->single.ma_index = ma ? ma->index : 0;
+ DRWCall *call = BLI_mempool_alloc(DST.vmempool->calls);
+ call->state = drw_call_state_object(shgroup, ob->obmat, ob);
+ call->state->visibility_cb = callback;
+ call->state->user_data = user_data;
+ call->type = DRW_CALL_SINGLE;
+ call->single.geometry = geom;
+ call->single.ma_index = ma ? ma->index : 0;
#ifdef USE_GPU_SELECT
- call->select_id = DST.select_id;
+ call->select_id = DST.select_id;
#endif
- BLI_LINKS_APPEND(&shgroup->calls, call);
+ BLI_LINKS_APPEND(&shgroup->calls, call);
}
-void DRW_shgroup_call_instances_add(DRWShadingGroup *shgroup, GPUBatch *geom, float (*obmat)[4], uint *count)
+void DRW_shgroup_call_instances_add(DRWShadingGroup *shgroup,
+ GPUBatch *geom,
+ float (*obmat)[4],
+ uint *count)
{
- BLI_assert(geom != NULL);
- BLI_assert(ELEM(shgroup->type, DRW_SHG_NORMAL, DRW_SHG_FEEDBACK_TRANSFORM));
+ BLI_assert(geom != NULL);
+ BLI_assert(ELEM(shgroup->type, DRW_SHG_NORMAL, DRW_SHG_FEEDBACK_TRANSFORM));
- DRWCall *call = BLI_mempool_alloc(DST.vmempool->calls);
- call->state = drw_call_state_create(shgroup, obmat, NULL);
- call->type = DRW_CALL_INSTANCES;
- call->instances.geometry = geom;
- call->instances.count = count;
+ DRWCall *call = BLI_mempool_alloc(DST.vmempool->calls);
+ call->state = drw_call_state_create(shgroup, obmat, NULL);
+ call->type = DRW_CALL_INSTANCES;
+ call->instances.geometry = geom;
+ call->instances.count = count;
#ifdef USE_GPU_SELECT
- call->select_id = DST.select_id;
+ call->select_id = DST.select_id;
#endif
- BLI_LINKS_APPEND(&shgroup->calls, call);
+ BLI_LINKS_APPEND(&shgroup->calls, call);
}
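Note that the instance count is taken by pointer, presumably so the value read
at draw time is the final one. A hypothetical sketch:

static uint g_inst_count = 0; /* must outlive drawing, like uniform pointers */

static void myengine_add_instances(DRWShadingGroup *grp,
                                   GPUBatch *geom,
                                   float (*obmat)[4])
{
  DRW_shgroup_call_instances_add(grp, geom, obmat, &g_inst_count);
  g_inst_count = 128; /* may still be updated after the call is recorded */
}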
/* These calls can be culled and are optimized for redraw */
-void DRW_shgroup_call_object_instances_add(DRWShadingGroup *shgroup, GPUBatch *geom, Object *ob, uint *count)
-{
- BLI_assert(geom != NULL);
- BLI_assert(ELEM(shgroup->type, DRW_SHG_NORMAL, DRW_SHG_FEEDBACK_TRANSFORM));
-
- DRWCall *call = BLI_mempool_alloc(DST.vmempool->calls);
- call->state = drw_call_state_object(shgroup, ob->obmat, ob);
- call->type = DRW_CALL_INSTANCES;
- call->instances.geometry = geom;
- call->instances.count = count;
+void DRW_shgroup_call_object_instances_add(DRWShadingGroup *shgroup,
+ GPUBatch *geom,
+ Object *ob,
+ uint *count)
+{
+ BLI_assert(geom != NULL);
+ BLI_assert(ELEM(shgroup->type, DRW_SHG_NORMAL, DRW_SHG_FEEDBACK_TRANSFORM));
+
+ DRWCall *call = BLI_mempool_alloc(DST.vmempool->calls);
+ call->state = drw_call_state_object(shgroup, ob->obmat, ob);
+ call->type = DRW_CALL_INSTANCES;
+ call->instances.geometry = geom;
+ call->instances.count = count;
#ifdef USE_GPU_SELECT
- call->select_id = DST.select_id;
+ call->select_id = DST.select_id;
#endif
- BLI_LINKS_APPEND(&shgroup->calls, call);
+ BLI_LINKS_APPEND(&shgroup->calls, call);
}
-void DRW_shgroup_call_generate_add(
- DRWShadingGroup *shgroup,
- DRWCallGenerateFn *geometry_fn, void *user_data,
- float (*obmat)[4])
+void DRW_shgroup_call_generate_add(DRWShadingGroup *shgroup,
+ DRWCallGenerateFn *geometry_fn,
+ void *user_data,
+ float (*obmat)[4])
{
- BLI_assert(geometry_fn != NULL);
- BLI_assert(ELEM(shgroup->type, DRW_SHG_NORMAL, DRW_SHG_FEEDBACK_TRANSFORM));
+ BLI_assert(geometry_fn != NULL);
+ BLI_assert(ELEM(shgroup->type, DRW_SHG_NORMAL, DRW_SHG_FEEDBACK_TRANSFORM));
- DRWCall *call = BLI_mempool_alloc(DST.vmempool->calls);
- call->state = drw_call_state_create(shgroup, obmat, NULL);
- call->type = DRW_CALL_GENERATE;
- call->generate.geometry_fn = geometry_fn;
- call->generate.user_data = user_data;
+ DRWCall *call = BLI_mempool_alloc(DST.vmempool->calls);
+ call->state = drw_call_state_create(shgroup, obmat, NULL);
+ call->type = DRW_CALL_GENERATE;
+ call->generate.geometry_fn = geometry_fn;
+ call->generate.user_data = user_data;
#ifdef USE_GPU_SELECT
- call->select_id = DST.select_id;
+ call->select_id = DST.select_id;
#endif
- BLI_LINKS_APPEND(&shgroup->calls, call);
+ BLI_LINKS_APPEND(&shgroup->calls, call);
}
-static void sculpt_draw_cb(
- DRWShadingGroup *shgroup,
- void (*draw_fn)(DRWShadingGroup *shgroup, GPUBatch *geom),
- void *user_data)
+static void sculpt_draw_cb(DRWShadingGroup *shgroup,
+ void (*draw_fn)(DRWShadingGroup *shgroup, GPUBatch *geom),
+ void *user_data)
{
- Object *ob = user_data;
+ Object *ob = user_data;
- /* XXX should be ensured before but sometime it's not... go figure (see T57040). */
- PBVH *pbvh = BKE_sculpt_object_pbvh_ensure(DST.draw_ctx.depsgraph, ob);
+ /* XXX should be ensured before, but sometimes it's not... go figure (see T57040). */
+ PBVH *pbvh = BKE_sculpt_object_pbvh_ensure(DST.draw_ctx.depsgraph, ob);
- const DRWContextState *drwctx = DRW_context_state_get();
- int fast_mode = 0;
+ const DRWContextState *drwctx = DRW_context_state_get();
+ int fast_mode = 0;
- if (drwctx->evil_C != NULL) {
- Paint *p = BKE_paint_get_active_from_context(drwctx->evil_C);
- if (p && (p->flags & PAINT_FAST_NAVIGATE)) {
- fast_mode = drwctx->rv3d->rflag & RV3D_NAVIGATING;
- }
- }
+ if (drwctx->evil_C != NULL) {
+ Paint *p = BKE_paint_get_active_from_context(drwctx->evil_C);
+ if (p && (p->flags & PAINT_FAST_NAVIGATE)) {
+ fast_mode = drwctx->rv3d->rflag & RV3D_NAVIGATING;
+ }
+ }
- if (pbvh) {
- BKE_pbvh_draw_cb(
- pbvh, NULL, NULL, fast_mode, false, false,
- (void (*)(void *, GPUBatch *))draw_fn, shgroup);
- }
+ if (pbvh) {
+ BKE_pbvh_draw_cb(
+ pbvh, NULL, NULL, fast_mode, false, false, (void (*)(void *, GPUBatch *))draw_fn, shgroup);
+ }
}
-static void sculpt_draw_wires_cb(
- DRWShadingGroup *shgroup,
- void (*draw_fn)(DRWShadingGroup *shgroup, GPUBatch *geom),
- void *user_data)
+static void sculpt_draw_wires_cb(DRWShadingGroup *shgroup,
+ void (*draw_fn)(DRWShadingGroup *shgroup, GPUBatch *geom),
+ void *user_data)
{
- Object *ob = user_data;
+ Object *ob = user_data;
- /* XXX should be ensured before but sometime it's not... go figure (see T57040). */
- PBVH *pbvh = BKE_sculpt_object_pbvh_ensure(DST.draw_ctx.depsgraph, ob);
+ /* XXX should be ensured before, but sometimes it's not... go figure (see T57040). */
+ PBVH *pbvh = BKE_sculpt_object_pbvh_ensure(DST.draw_ctx.depsgraph, ob);
- const DRWContextState *drwctx = DRW_context_state_get();
- int fast_mode = 0;
+ const DRWContextState *drwctx = DRW_context_state_get();
+ int fast_mode = 0;
- if (drwctx->evil_C != NULL) {
- Paint *p = BKE_paint_get_active_from_context(drwctx->evil_C);
- if (p && (p->flags & PAINT_FAST_NAVIGATE)) {
- fast_mode = drwctx->rv3d->rflag & RV3D_NAVIGATING;
- }
- }
+ if (drwctx->evil_C != NULL) {
+ Paint *p = BKE_paint_get_active_from_context(drwctx->evil_C);
+ if (p && (p->flags & PAINT_FAST_NAVIGATE)) {
+ fast_mode = drwctx->rv3d->rflag & RV3D_NAVIGATING;
+ }
+ }
- if (pbvh) {
- BKE_pbvh_draw_cb(
- pbvh, NULL, NULL, fast_mode, true, false,
- (void (*)(void *, GPUBatch *))draw_fn, shgroup);
- }
+ if (pbvh) {
+ BKE_pbvh_draw_cb(
+ pbvh, NULL, NULL, fast_mode, true, false, (void (*)(void *, GPUBatch *))draw_fn, shgroup);
+ }
}
void DRW_shgroup_call_sculpt_add(DRWShadingGroup *shgroup, Object *ob, float (*obmat)[4])
{
- DRW_shgroup_call_generate_add(shgroup, sculpt_draw_cb, ob, obmat);
+ DRW_shgroup_call_generate_add(shgroup, sculpt_draw_cb, ob, obmat);
}
void DRW_shgroup_call_sculpt_wires_add(DRWShadingGroup *shgroup, Object *ob, float (*obmat)[4])
{
- DRW_shgroup_call_generate_add(shgroup, sculpt_draw_wires_cb, ob, obmat);
+ DRW_shgroup_call_generate_add(shgroup, sculpt_draw_wires_cb, ob, obmat);
}
-void DRW_shgroup_call_dynamic_add_array(DRWShadingGroup *shgroup, const void *attr[], uint attr_len)
+void DRW_shgroup_call_dynamic_add_array(DRWShadingGroup *shgroup,
+ const void *attr[],
+ uint attr_len)
{
#ifdef USE_GPU_SELECT
- if (G.f & G_FLAG_PICKSEL) {
- if (shgroup->instance_count == shgroup->inst_selectid->vertex_len) {
- GPU_vertbuf_data_resize(shgroup->inst_selectid, shgroup->instance_count + 32);
- }
- GPU_vertbuf_attr_set(shgroup->inst_selectid, 0, shgroup->instance_count, &DST.select_id);
- }
+ if (G.f & G_FLAG_PICKSEL) {
+ if (shgroup->instance_count == shgroup->inst_selectid->vertex_len) {
+ GPU_vertbuf_data_resize(shgroup->inst_selectid, shgroup->instance_count + 32);
+ }
+ GPU_vertbuf_attr_set(shgroup->inst_selectid, 0, shgroup->instance_count, &DST.select_id);
+ }
#endif
- BLI_assert(attr_len == shgroup->attrs_count);
- UNUSED_VARS_NDEBUG(attr_len);
+ BLI_assert(attr_len == shgroup->attrs_count);
+ UNUSED_VARS_NDEBUG(attr_len);
- for (int i = 0; i < attr_len; ++i) {
- if (shgroup->instance_count == shgroup->instance_vbo->vertex_len) {
- GPU_vertbuf_data_resize(shgroup->instance_vbo, shgroup->instance_count + 32);
- }
- GPU_vertbuf_attr_set(shgroup->instance_vbo, i, shgroup->instance_count, attr[i]);
- }
+ for (int i = 0; i < attr_len; ++i) {
+ if (shgroup->instance_count == shgroup->instance_vbo->vertex_len) {
+ GPU_vertbuf_data_resize(shgroup->instance_vbo, shgroup->instance_count + 32);
+ }
+ GPU_vertbuf_attr_set(shgroup->instance_vbo, i, shgroup->instance_count, attr[i]);
+ }
- shgroup->instance_count += 1;
+ shgroup->instance_count += 1;
}
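Each call of the function above appends one instance, growing the attribute
VBO in blocks of 32 vertices whenever it is full. Hypothetical usage pushing
one instance with a position and a color (the array order must match the
GPUVertFormat the group was created with):

static void myengine_add_point(DRWShadingGroup *grp,
                               const float pos[3],
                               const float col[4])
{
  const void *attrs[2] = {pos, col};
  DRW_shgroup_call_dynamic_add_array(grp, attrs, 2);
}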
/** \} */
@@ -661,336 +728,371 @@ void DRW_shgroup_call_dynamic_add_array(DRWShadingGroup *shgroup, const void *at
static void drw_shgroup_init(DRWShadingGroup *shgroup, GPUShader *shader)
{
- shgroup->instance_geom = NULL;
- shgroup->instance_vbo = NULL;
- shgroup->instance_count = 0;
- shgroup->uniforms = NULL;
+ shgroup->instance_geom = NULL;
+ shgroup->instance_vbo = NULL;
+ shgroup->instance_count = 0;
+ shgroup->uniforms = NULL;
#ifdef USE_GPU_SELECT
- shgroup->inst_selectid = NULL;
- shgroup->override_selectid = -1;
+ shgroup->inst_selectid = NULL;
+ shgroup->override_selectid = -1;
#endif
#ifndef NDEBUG
- shgroup->attrs_count = 0;
+ shgroup->attrs_count = 0;
#endif
- int view_ubo_location = GPU_shader_get_uniform_block(shader, "viewBlock");
-
- if (view_ubo_location != -1) {
- drw_shgroup_uniform_create_ex(shgroup, view_ubo_location, DRW_UNIFORM_BLOCK_PERSIST, G_draw.view_ubo, 0, 1);
- }
- else {
- /* Only here to support builtin shaders. This should not be used by engines. */
- drw_shgroup_builtin_uniform(shgroup, GPU_UNIFORM_VIEW, DST.view_data.matstate.mat[DRW_MAT_VIEW], 16, 1);
- drw_shgroup_builtin_uniform(shgroup, GPU_UNIFORM_VIEW_INV, DST.view_data.matstate.mat[DRW_MAT_VIEWINV], 16, 1);
- drw_shgroup_builtin_uniform(shgroup, GPU_UNIFORM_VIEWPROJECTION, DST.view_data.matstate.mat[DRW_MAT_PERS], 16, 1);
- drw_shgroup_builtin_uniform(shgroup, GPU_UNIFORM_VIEWPROJECTION_INV, DST.view_data.matstate.mat[DRW_MAT_PERSINV], 16, 1);
- drw_shgroup_builtin_uniform(shgroup, GPU_UNIFORM_PROJECTION, DST.view_data.matstate.mat[DRW_MAT_WIN], 16, 1);
- drw_shgroup_builtin_uniform(shgroup, GPU_UNIFORM_PROJECTION_INV, DST.view_data.matstate.mat[DRW_MAT_WININV], 16, 1);
- drw_shgroup_builtin_uniform(shgroup, GPU_UNIFORM_CAMERATEXCO, DST.view_data.viewcamtexcofac, 3, 2);
- }
-
- shgroup->model = GPU_shader_get_builtin_uniform(shader, GPU_UNIFORM_MODEL);
- shgroup->modelinverse = GPU_shader_get_builtin_uniform(shader, GPU_UNIFORM_MODEL_INV);
- shgroup->modelview = GPU_shader_get_builtin_uniform(shader, GPU_UNIFORM_MODELVIEW);
- shgroup->modelviewinverse = GPU_shader_get_builtin_uniform(shader, GPU_UNIFORM_MODELVIEW_INV);
- shgroup->modelviewprojection = GPU_shader_get_builtin_uniform(shader, GPU_UNIFORM_MVP);
- shgroup->normalview = GPU_shader_get_builtin_uniform(shader, GPU_UNIFORM_NORMAL);
- shgroup->normalviewinverse = GPU_shader_get_builtin_uniform(shader, GPU_UNIFORM_NORMAL_INV);
- shgroup->normalworld = GPU_shader_get_builtin_uniform(shader, GPU_UNIFORM_WORLDNORMAL);
- shgroup->orcotexfac = GPU_shader_get_builtin_uniform(shader, GPU_UNIFORM_ORCO);
- shgroup->objectinfo = GPU_shader_get_builtin_uniform(shader, GPU_UNIFORM_OBJECT_INFO);
- shgroup->eye = GPU_shader_get_builtin_uniform(shader, GPU_UNIFORM_EYE);
- shgroup->callid = GPU_shader_get_builtin_uniform(shader, GPU_UNIFORM_CALLID);
-
- shgroup->matflag = 0;
- if (shgroup->modelinverse > -1) {
- shgroup->matflag |= DRW_CALL_MODELINVERSE;
- }
- if (shgroup->modelview > -1) {
- shgroup->matflag |= DRW_CALL_MODELVIEW;
- }
- if (shgroup->modelviewinverse > -1) {
- shgroup->matflag |= DRW_CALL_MODELVIEWINVERSE;
- }
- if (shgroup->modelviewprojection > -1) {
- shgroup->matflag |= DRW_CALL_MODELVIEWPROJECTION;
- }
- if (shgroup->normalview > -1) {
- shgroup->matflag |= DRW_CALL_NORMALVIEW;
- }
- if (shgroup->normalviewinverse > -1) {
- shgroup->matflag |= DRW_CALL_NORMALVIEWINVERSE;
- }
- if (shgroup->normalworld > -1) {
- shgroup->matflag |= DRW_CALL_NORMALWORLD;
- }
- if (shgroup->orcotexfac > -1) {
- shgroup->matflag |= DRW_CALL_ORCOTEXFAC;
- }
- if (shgroup->objectinfo > -1) {
- shgroup->matflag |= DRW_CALL_OBJECTINFO;
- }
- if (shgroup->eye > -1) {
- shgroup->matflag |= DRW_CALL_EYEVEC;
- }
-}
-
-static void drw_shgroup_instance_init(
- DRWShadingGroup *shgroup, GPUShader *shader, GPUBatch *batch, GPUVertFormat *format)
-{
- BLI_assert(shgroup->type == DRW_SHG_INSTANCE);
- BLI_assert(batch != NULL);
- BLI_assert(format != NULL);
-
- drw_shgroup_init(shgroup, shader);
-
- shgroup->instance_geom = batch;
+ int view_ubo_location = GPU_shader_get_uniform_block(shader, "viewBlock");
+
+ if (view_ubo_location != -1) {
+ drw_shgroup_uniform_create_ex(
+ shgroup, view_ubo_location, DRW_UNIFORM_BLOCK_PERSIST, G_draw.view_ubo, 0, 1);
+ }
+ else {
+ /* Only here to support builtin shaders. This should not be used by engines. */
+ drw_shgroup_builtin_uniform(
+ shgroup, GPU_UNIFORM_VIEW, DST.view_data.matstate.mat[DRW_MAT_VIEW], 16, 1);
+ drw_shgroup_builtin_uniform(
+ shgroup, GPU_UNIFORM_VIEW_INV, DST.view_data.matstate.mat[DRW_MAT_VIEWINV], 16, 1);
+ drw_shgroup_builtin_uniform(
+ shgroup, GPU_UNIFORM_VIEWPROJECTION, DST.view_data.matstate.mat[DRW_MAT_PERS], 16, 1);
+ drw_shgroup_builtin_uniform(shgroup,
+ GPU_UNIFORM_VIEWPROJECTION_INV,
+ DST.view_data.matstate.mat[DRW_MAT_PERSINV],
+ 16,
+ 1);
+ drw_shgroup_builtin_uniform(
+ shgroup, GPU_UNIFORM_PROJECTION, DST.view_data.matstate.mat[DRW_MAT_WIN], 16, 1);
+ drw_shgroup_builtin_uniform(
+ shgroup, GPU_UNIFORM_PROJECTION_INV, DST.view_data.matstate.mat[DRW_MAT_WININV], 16, 1);
+ drw_shgroup_builtin_uniform(
+ shgroup, GPU_UNIFORM_CAMERATEXCO, DST.view_data.viewcamtexcofac, 3, 2);
+ }
+
+ shgroup->model = GPU_shader_get_builtin_uniform(shader, GPU_UNIFORM_MODEL);
+ shgroup->modelinverse = GPU_shader_get_builtin_uniform(shader, GPU_UNIFORM_MODEL_INV);
+ shgroup->modelview = GPU_shader_get_builtin_uniform(shader, GPU_UNIFORM_MODELVIEW);
+ shgroup->modelviewinverse = GPU_shader_get_builtin_uniform(shader, GPU_UNIFORM_MODELVIEW_INV);
+ shgroup->modelviewprojection = GPU_shader_get_builtin_uniform(shader, GPU_UNIFORM_MVP);
+ shgroup->normalview = GPU_shader_get_builtin_uniform(shader, GPU_UNIFORM_NORMAL);
+ shgroup->normalviewinverse = GPU_shader_get_builtin_uniform(shader, GPU_UNIFORM_NORMAL_INV);
+ shgroup->normalworld = GPU_shader_get_builtin_uniform(shader, GPU_UNIFORM_WORLDNORMAL);
+ shgroup->orcotexfac = GPU_shader_get_builtin_uniform(shader, GPU_UNIFORM_ORCO);
+ shgroup->objectinfo = GPU_shader_get_builtin_uniform(shader, GPU_UNIFORM_OBJECT_INFO);
+ shgroup->eye = GPU_shader_get_builtin_uniform(shader, GPU_UNIFORM_EYE);
+ shgroup->callid = GPU_shader_get_builtin_uniform(shader, GPU_UNIFORM_CALLID);
+
+ shgroup->matflag = 0;
+ if (shgroup->modelinverse > -1) {
+ shgroup->matflag |= DRW_CALL_MODELINVERSE;
+ }
+ if (shgroup->modelview > -1) {
+ shgroup->matflag |= DRW_CALL_MODELVIEW;
+ }
+ if (shgroup->modelviewinverse > -1) {
+ shgroup->matflag |= DRW_CALL_MODELVIEWINVERSE;
+ }
+ if (shgroup->modelviewprojection > -1) {
+ shgroup->matflag |= DRW_CALL_MODELVIEWPROJECTION;
+ }
+ if (shgroup->normalview > -1) {
+ shgroup->matflag |= DRW_CALL_NORMALVIEW;
+ }
+ if (shgroup->normalviewinverse > -1) {
+ shgroup->matflag |= DRW_CALL_NORMALVIEWINVERSE;
+ }
+ if (shgroup->normalworld > -1) {
+ shgroup->matflag |= DRW_CALL_NORMALWORLD;
+ }
+ if (shgroup->orcotexfac > -1) {
+ shgroup->matflag |= DRW_CALL_ORCOTEXFAC;
+ }
+ if (shgroup->objectinfo > -1) {
+ shgroup->matflag |= DRW_CALL_OBJECTINFO;
+ }
+ if (shgroup->eye > -1) {
+ shgroup->matflag |= DRW_CALL_EYEVEC;
+ }
+}
+
+static void drw_shgroup_instance_init(DRWShadingGroup *shgroup,
+ GPUShader *shader,
+ GPUBatch *batch,
+ GPUVertFormat *format)
+{
+ BLI_assert(shgroup->type == DRW_SHG_INSTANCE);
+ BLI_assert(batch != NULL);
+ BLI_assert(format != NULL);
+
+ drw_shgroup_init(shgroup, shader);
+
+ shgroup->instance_geom = batch;
#ifndef NDEBUG
- shgroup->attrs_count = format->attr_len;
+ shgroup->attrs_count = format->attr_len;
#endif
- DRW_instancing_buffer_request(DST.idatalist, format, batch, shgroup,
- &shgroup->instance_geom, &shgroup->instance_vbo);
+ DRW_instancing_buffer_request(
+ DST.idatalist, format, batch, shgroup, &shgroup->instance_geom, &shgroup->instance_vbo);
#ifdef USE_GPU_SELECT
- if (G.f & G_FLAG_PICKSEL) {
- /* Not actually used for rendering but alloced in one chunk.
- * Plus we don't have to care about ownership. */
- static GPUVertFormat inst_select_format = {0};
- if (inst_select_format.attr_len == 0) {
- GPU_vertformat_attr_add(&inst_select_format, "selectId", GPU_COMP_I32, 1, GPU_FETCH_INT);
- }
- GPUBatch *batch_dummy; /* Not used */
- DRW_batching_buffer_request(DST.idatalist, &inst_select_format,
- GPU_PRIM_POINTS, shgroup,
- &batch_dummy, &shgroup->inst_selectid);
- }
+ if (G.f & G_FLAG_PICKSEL) {
+ /* Not actually used for rendering but allocated in one chunk.
+ * Plus we don't have to care about ownership. */
+ static GPUVertFormat inst_select_format = {0};
+ if (inst_select_format.attr_len == 0) {
+ GPU_vertformat_attr_add(&inst_select_format, "selectId", GPU_COMP_I32, 1, GPU_FETCH_INT);
+ }
+ GPUBatch *batch_dummy; /* Not used */
+ DRW_batching_buffer_request(DST.idatalist,
+ &inst_select_format,
+ GPU_PRIM_POINTS,
+ shgroup,
+ &batch_dummy,
+ &shgroup->inst_selectid);
+ }
#endif
}
-static void drw_shgroup_batching_init(
- DRWShadingGroup *shgroup, GPUShader *shader, GPUVertFormat *format)
+static void drw_shgroup_batching_init(DRWShadingGroup *shgroup,
+ GPUShader *shader,
+ GPUVertFormat *format)
{
- drw_shgroup_init(shgroup, shader);
+ drw_shgroup_init(shgroup, shader);
#ifndef NDEBUG
- shgroup->attrs_count = (format != NULL) ? format->attr_len : 0;
+ shgroup->attrs_count = (format != NULL) ? format->attr_len : 0;
#endif
- BLI_assert(format != NULL);
-
- GPUPrimType type;
- switch (shgroup->type) {
- case DRW_SHG_POINT_BATCH: type = GPU_PRIM_POINTS; break;
- case DRW_SHG_LINE_BATCH: type = GPU_PRIM_LINES; break;
- case DRW_SHG_TRIANGLE_BATCH: type = GPU_PRIM_TRIS; break;
- default: type = GPU_PRIM_NONE; BLI_assert(0); break;
- }
-
- DRW_batching_buffer_request(DST.idatalist, format, type, shgroup,
- &shgroup->batch_geom, &shgroup->batch_vbo);
+ BLI_assert(format != NULL);
+
+ GPUPrimType type;
+ switch (shgroup->type) {
+ case DRW_SHG_POINT_BATCH:
+ type = GPU_PRIM_POINTS;
+ break;
+ case DRW_SHG_LINE_BATCH:
+ type = GPU_PRIM_LINES;
+ break;
+ case DRW_SHG_TRIANGLE_BATCH:
+ type = GPU_PRIM_TRIS;
+ break;
+ default:
+ type = GPU_PRIM_NONE;
+ BLI_assert(0);
+ break;
+ }
+
+ DRW_batching_buffer_request(
+ DST.idatalist, format, type, shgroup, &shgroup->batch_geom, &shgroup->batch_vbo);
#ifdef USE_GPU_SELECT
- if (G.f & G_FLAG_PICKSEL) {
- /* Not actually used for rendering but alloced in one chunk. */
- static GPUVertFormat inst_select_format = {0};
- if (inst_select_format.attr_len == 0) {
- GPU_vertformat_attr_add(&inst_select_format, "selectId", GPU_COMP_I32, 1, GPU_FETCH_INT);
- }
- GPUBatch *batch; /* Not used */
- DRW_batching_buffer_request(DST.idatalist, &inst_select_format,
- GPU_PRIM_POINTS, shgroup,
- &batch, &shgroup->inst_selectid);
- }
+ if (G.f & G_FLAG_PICKSEL) {
+ /* Not actually used for rendering but allocated in one chunk. */
+ static GPUVertFormat inst_select_format = {0};
+ if (inst_select_format.attr_len == 0) {
+ GPU_vertformat_attr_add(&inst_select_format, "selectId", GPU_COMP_I32, 1, GPU_FETCH_INT);
+ }
+ GPUBatch *batch; /* Not used */
+ DRW_batching_buffer_request(DST.idatalist,
+ &inst_select_format,
+ GPU_PRIM_POINTS,
+ shgroup,
+ &batch,
+ &shgroup->inst_selectid);
+ }
#endif
}
static DRWShadingGroup *drw_shgroup_create_ex(struct GPUShader *shader, DRWPass *pass)
{
- DRWShadingGroup *shgroup = BLI_mempool_alloc(DST.vmempool->shgroups);
+ DRWShadingGroup *shgroup = BLI_mempool_alloc(DST.vmempool->shgroups);
- BLI_LINKS_APPEND(&pass->shgroups, shgroup);
+ BLI_LINKS_APPEND(&pass->shgroups, shgroup);
- shgroup->type = DRW_SHG_NORMAL;
- shgroup->shader = shader;
- shgroup->state_extra = 0;
- shgroup->state_extra_disable = ~0x0;
- shgroup->stencil_mask = 0;
- shgroup->calls.first = NULL;
- shgroup->calls.last = NULL;
+ shgroup->type = DRW_SHG_NORMAL;
+ shgroup->shader = shader;
+ shgroup->state_extra = 0;
+ shgroup->state_extra_disable = ~0x0;
+ shgroup->stencil_mask = 0;
+ shgroup->calls.first = NULL;
+ shgroup->calls.last = NULL;
#if 0 /* All the same in the union! */
- shgroup->batch_geom = NULL;
- shgroup->batch_vbo = NULL;
+ shgroup->batch_geom = NULL;
+ shgroup->batch_vbo = NULL;
- shgroup->instance_geom = NULL;
- shgroup->instance_vbo = NULL;
+ shgroup->instance_geom = NULL;
+ shgroup->instance_vbo = NULL;
#endif
- shgroup->pass_parent = pass;
+ shgroup->pass_parent = pass;
- return shgroup;
+ return shgroup;
}
static DRWShadingGroup *drw_shgroup_material_create_ex(GPUPass *gpupass, DRWPass *pass)
{
- if (!gpupass) {
- /* Shader compilation error */
- return NULL;
- }
+ if (!gpupass) {
+ /* Shader compilation error */
+ return NULL;
+ }
- GPUShader *sh = GPU_pass_shader_get(gpupass);
+ GPUShader *sh = GPU_pass_shader_get(gpupass);
- if (!sh) {
- /* Shader not yet compiled */
- return NULL;
- }
+ if (!sh) {
+ /* Shader not yet compiled */
+ return NULL;
+ }
- DRWShadingGroup *grp = drw_shgroup_create_ex(sh, pass);
- return grp;
+ DRWShadingGroup *grp = drw_shgroup_create_ex(sh, pass);
+ return grp;
}
-static DRWShadingGroup *drw_shgroup_material_inputs(DRWShadingGroup *grp, struct GPUMaterial *material)
+static DRWShadingGroup *drw_shgroup_material_inputs(DRWShadingGroup *grp,
+ struct GPUMaterial *material)
{
- ListBase *inputs = GPU_material_get_inputs(material);
+ ListBase *inputs = GPU_material_get_inputs(material);
- /* Converting dynamic GPUInput to DRWUniform */
- for (GPUInput *input = inputs->first; input; input = input->next) {
- /* Textures */
- if (input->source == GPU_SOURCE_TEX) {
- GPUTexture *tex = NULL;
+ /* Converting dynamic GPUInput to DRWUniform */
+ for (GPUInput *input = inputs->first; input; input = input->next) {
+ /* Textures */
+ if (input->source == GPU_SOURCE_TEX) {
+ GPUTexture *tex = NULL;
- if (input->ima) {
- tex = GPU_texture_from_blender(input->ima, input->iuser, GL_TEXTURE_2D, input->image_isdata);
- }
- else {
- /* Color Ramps */
- tex = *input->coba;
- }
+ if (input->ima) {
+ tex = GPU_texture_from_blender(
+ input->ima, input->iuser, GL_TEXTURE_2D, input->image_isdata);
+ }
+ else {
+ /* Color Ramps */
+ tex = *input->coba;
+ }
- if (input->bindtex) {
- drw_shgroup_uniform_create_ex(grp, input->shaderloc, DRW_UNIFORM_TEXTURE, tex, 0, 1);
- }
- }
- }
+ if (input->bindtex) {
+ drw_shgroup_uniform_create_ex(grp, input->shaderloc, DRW_UNIFORM_TEXTURE, tex, 0, 1);
+ }
+ }
+ }
- GPUUniformBuffer *ubo = GPU_material_uniform_buffer_get(material);
- if (ubo != NULL) {
- DRW_shgroup_uniform_block(grp, GPU_UBO_BLOCK_NAME, ubo);
- }
+ GPUUniformBuffer *ubo = GPU_material_uniform_buffer_get(material);
+ if (ubo != NULL) {
+ DRW_shgroup_uniform_block(grp, GPU_UBO_BLOCK_NAME, ubo);
+ }
- return grp;
+ return grp;
}
-GPUVertFormat *DRW_shgroup_instance_format_array(const DRWInstanceAttrFormat attrs[], int arraysize)
+GPUVertFormat *DRW_shgroup_instance_format_array(const DRWInstanceAttrFormat attrs[],
+ int arraysize)
{
- GPUVertFormat *format = MEM_callocN(sizeof(GPUVertFormat), "GPUVertFormat");
+ GPUVertFormat *format = MEM_callocN(sizeof(GPUVertFormat), "GPUVertFormat");
- for (int i = 0; i < arraysize; ++i) {
- GPU_vertformat_attr_add(
- format, attrs[i].name,
- (attrs[i].type == DRW_ATTR_INT) ? GPU_COMP_I32 : GPU_COMP_F32,
- attrs[i].components,
- (attrs[i].type == DRW_ATTR_INT) ? GPU_FETCH_INT : GPU_FETCH_FLOAT);
- }
- return format;
+ for (int i = 0; i < arraysize; ++i) {
+ GPU_vertformat_attr_add(format,
+ attrs[i].name,
+ (attrs[i].type == DRW_ATTR_INT) ? GPU_COMP_I32 : GPU_COMP_F32,
+ attrs[i].components,
+ (attrs[i].type == DRW_ATTR_INT) ? GPU_FETCH_INT : GPU_FETCH_FLOAT);
+ }
+ return format;
}
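The returned format is heap-allocated with MEM_callocN, which is presumably
why the DRW_shgroup_instance_format() macro used below keeps it in a static
pointer (g_pos_format). A sketch describing two per-instance attributes
("color" is a made-up name), matching the push order used with
DRW_shgroup_call_dynamic_add_array():

static GPUVertFormat *myengine_instance_format(void)
{
  static const DRWInstanceAttrFormat attrs[] = {
      {"pos", DRW_ATTR_FLOAT, 3},
      {"color", DRW_ATTR_FLOAT, 4},
  };
  return DRW_shgroup_instance_format_array(attrs, 2);
}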
-DRWShadingGroup *DRW_shgroup_material_create(
- struct GPUMaterial *material, DRWPass *pass)
+DRWShadingGroup *DRW_shgroup_material_create(struct GPUMaterial *material, DRWPass *pass)
{
- GPUPass *gpupass = GPU_material_get_pass(material);
- DRWShadingGroup *shgroup = drw_shgroup_material_create_ex(gpupass, pass);
+ GPUPass *gpupass = GPU_material_get_pass(material);
+ DRWShadingGroup *shgroup = drw_shgroup_material_create_ex(gpupass, pass);
- if (shgroup) {
- drw_shgroup_init(shgroup, GPU_pass_shader_get(gpupass));
- drw_shgroup_material_inputs(shgroup, material);
- }
+ if (shgroup) {
+ drw_shgroup_init(shgroup, GPU_pass_shader_get(gpupass));
+ drw_shgroup_material_inputs(shgroup, material);
+ }
- return shgroup;
+ return shgroup;
}
DRWShadingGroup *DRW_shgroup_material_instance_create(
- struct GPUMaterial *material, DRWPass *pass, GPUBatch *geom, Object *ob, GPUVertFormat *format)
+ struct GPUMaterial *material, DRWPass *pass, GPUBatch *geom, Object *ob, GPUVertFormat *format)
{
- GPUPass *gpupass = GPU_material_get_pass(material);
- DRWShadingGroup *shgroup = drw_shgroup_material_create_ex(gpupass, pass);
+ GPUPass *gpupass = GPU_material_get_pass(material);
+ DRWShadingGroup *shgroup = drw_shgroup_material_create_ex(gpupass, pass);
- if (shgroup) {
- shgroup->type = DRW_SHG_INSTANCE;
- shgroup->instance_geom = geom;
- drw_call_calc_orco(ob, shgroup->instance_orcofac);
- drw_shgroup_instance_init(shgroup, GPU_pass_shader_get(gpupass), geom, format);
- drw_shgroup_material_inputs(shgroup, material);
- }
+ if (shgroup) {
+ shgroup->type = DRW_SHG_INSTANCE;
+ shgroup->instance_geom = geom;
+ drw_call_calc_orco(ob, shgroup->instance_orcofac);
+ drw_shgroup_instance_init(shgroup, GPU_pass_shader_get(gpupass), geom, format);
+ drw_shgroup_material_inputs(shgroup, material);
+ }
- return shgroup;
+ return shgroup;
}
-DRWShadingGroup *DRW_shgroup_material_empty_tri_batch_create(
- struct GPUMaterial *material, DRWPass *pass, int tri_count)
+DRWShadingGroup *DRW_shgroup_material_empty_tri_batch_create(struct GPUMaterial *material,
+ DRWPass *pass,
+ int tri_count)
{
#ifdef USE_GPU_SELECT
- BLI_assert((G.f & G_FLAG_PICKSEL) == 0);
+ BLI_assert((G.f & G_FLAG_PICKSEL) == 0);
#endif
- GPUPass *gpupass = GPU_material_get_pass(material);
- DRWShadingGroup *shgroup = drw_shgroup_material_create_ex(gpupass, pass);
+ GPUPass *gpupass = GPU_material_get_pass(material);
+ DRWShadingGroup *shgroup = drw_shgroup_material_create_ex(gpupass, pass);
- if (shgroup) {
- /* Calling drw_shgroup_init will cause it to call GPU_draw_primitive(). */
- drw_shgroup_init(shgroup, GPU_pass_shader_get(gpupass));
- shgroup->type = DRW_SHG_TRIANGLE_BATCH;
- shgroup->instance_count = tri_count * 3;
- drw_shgroup_material_inputs(shgroup, material);
- }
+ if (shgroup) {
+ /* Calling drw_shgroup_init will cause it to call GPU_draw_primitive(). */
+ drw_shgroup_init(shgroup, GPU_pass_shader_get(gpupass));
+ shgroup->type = DRW_SHG_TRIANGLE_BATCH;
+ shgroup->instance_count = tri_count * 3;
+ drw_shgroup_material_inputs(shgroup, material);
+ }
- return shgroup;
+ return shgroup;
}
DRWShadingGroup *DRW_shgroup_create(struct GPUShader *shader, DRWPass *pass)
{
- DRWShadingGroup *shgroup = drw_shgroup_create_ex(shader, pass);
- drw_shgroup_init(shgroup, shader);
- return shgroup;
+ DRWShadingGroup *shgroup = drw_shgroup_create_ex(shader, pass);
+ drw_shgroup_init(shgroup, shader);
+ return shgroup;
}
-DRWShadingGroup *DRW_shgroup_instance_create(
- struct GPUShader *shader, DRWPass *pass, GPUBatch *geom, GPUVertFormat *format)
+DRWShadingGroup *DRW_shgroup_instance_create(struct GPUShader *shader,
+ DRWPass *pass,
+ GPUBatch *geom,
+ GPUVertFormat *format)
{
- DRWShadingGroup *shgroup = drw_shgroup_create_ex(shader, pass);
- shgroup->type = DRW_SHG_INSTANCE;
- shgroup->instance_geom = geom;
- drw_call_calc_orco(NULL, shgroup->instance_orcofac);
- drw_shgroup_instance_init(shgroup, shader, geom, format);
+ DRWShadingGroup *shgroup = drw_shgroup_create_ex(shader, pass);
+ shgroup->type = DRW_SHG_INSTANCE;
+ shgroup->instance_geom = geom;
+ drw_call_calc_orco(NULL, shgroup->instance_orcofac);
+ drw_shgroup_instance_init(shgroup, shader, geom, format);
- return shgroup;
+ return shgroup;
}
DRWShadingGroup *DRW_shgroup_point_batch_create(struct GPUShader *shader, DRWPass *pass)
{
- DRW_shgroup_instance_format(g_pos_format, {{"pos", DRW_ATTR_FLOAT, 3}});
+ DRW_shgroup_instance_format(g_pos_format, {{"pos", DRW_ATTR_FLOAT, 3}});
- DRWShadingGroup *shgroup = drw_shgroup_create_ex(shader, pass);
- shgroup->type = DRW_SHG_POINT_BATCH;
+ DRWShadingGroup *shgroup = drw_shgroup_create_ex(shader, pass);
+ shgroup->type = DRW_SHG_POINT_BATCH;
- drw_shgroup_batching_init(shgroup, shader, g_pos_format);
+ drw_shgroup_batching_init(shgroup, shader, g_pos_format);
- return shgroup;
+ return shgroup;
}
-DRWShadingGroup *DRW_shgroup_line_batch_create_with_format(
- struct GPUShader *shader, DRWPass *pass, GPUVertFormat *format)
+DRWShadingGroup *DRW_shgroup_line_batch_create_with_format(struct GPUShader *shader,
+ DRWPass *pass,
+ GPUVertFormat *format)
{
- DRWShadingGroup *shgroup = drw_shgroup_create_ex(shader, pass);
- shgroup->type = DRW_SHG_LINE_BATCH;
+ DRWShadingGroup *shgroup = drw_shgroup_create_ex(shader, pass);
+ shgroup->type = DRW_SHG_LINE_BATCH;
- drw_shgroup_batching_init(shgroup, shader, format);
+ drw_shgroup_batching_init(shgroup, shader, format);
- return shgroup;
+ return shgroup;
}
DRWShadingGroup *DRW_shgroup_line_batch_create(struct GPUShader *shader, DRWPass *pass)
{
- DRW_shgroup_instance_format(g_pos_format, {{"pos", DRW_ATTR_FLOAT, 3}});
+ DRW_shgroup_instance_format(g_pos_format, {{"pos", DRW_ATTR_FLOAT, 3}});
- return DRW_shgroup_line_batch_create_with_format(shader, pass, g_pos_format);
+ return DRW_shgroup_line_batch_create_with_format(shader, pass, g_pos_format);
}
/**
@@ -998,33 +1100,37 @@ DRWShadingGroup *DRW_shgroup_line_batch_create(struct GPUShader *shader, DRWPass
* your vertices with the vertex shader
 * and don't need any VBO attribute.
*/
-DRWShadingGroup *DRW_shgroup_empty_tri_batch_create(struct GPUShader *shader, DRWPass *pass, int tri_count)
+DRWShadingGroup *DRW_shgroup_empty_tri_batch_create(struct GPUShader *shader,
+ DRWPass *pass,
+ int tri_count)
{
#ifdef USE_GPU_SELECT
- BLI_assert((G.f & G_FLAG_PICKSEL) == 0);
+ BLI_assert((G.f & G_FLAG_PICKSEL) == 0);
#endif
- DRWShadingGroup *shgroup = drw_shgroup_create_ex(shader, pass);
+ DRWShadingGroup *shgroup = drw_shgroup_create_ex(shader, pass);
- /* Calling drw_shgroup_init will cause it to call GPU_draw_primitive(). */
- drw_shgroup_init(shgroup, shader);
+ /* Calling drw_shgroup_init will cause it to call GPU_draw_primitive(). */
+ drw_shgroup_init(shgroup, shader);
- shgroup->type = DRW_SHG_TRIANGLE_BATCH;
- shgroup->instance_count = tri_count * 3;
+ shgroup->type = DRW_SHG_TRIANGLE_BATCH;
+ shgroup->instance_count = tri_count * 3;
- return shgroup;
+ return shgroup;
}
-DRWShadingGroup *DRW_shgroup_transform_feedback_create(struct GPUShader *shader, DRWPass *pass, GPUVertBuf *tf_target)
+DRWShadingGroup *DRW_shgroup_transform_feedback_create(struct GPUShader *shader,
+ DRWPass *pass,
+ GPUVertBuf *tf_target)
{
- BLI_assert(tf_target != NULL);
- DRWShadingGroup *shgroup = drw_shgroup_create_ex(shader, pass);
- shgroup->type = DRW_SHG_FEEDBACK_TRANSFORM;
+ BLI_assert(tf_target != NULL);
+ DRWShadingGroup *shgroup = drw_shgroup_create_ex(shader, pass);
+ shgroup->type = DRW_SHG_FEEDBACK_TRANSFORM;
- drw_shgroup_init(shgroup, shader);
+ drw_shgroup_init(shgroup, shader);
- shgroup->tfeedback_target = tf_target;
+ shgroup->tfeedback_target = tf_target;
- return shgroup;
+ return shgroup;
}
/**
@@ -1032,27 +1138,27 @@ DRWShadingGroup *DRW_shgroup_transform_feedback_create(struct GPUShader *shader,
*/
void DRW_shgroup_instance_batch(DRWShadingGroup *shgroup, struct GPUBatch *batch)
{
- BLI_assert(shgroup->type == DRW_SHG_INSTANCE);
- BLI_assert(shgroup->instance_count == 0);
- /* You cannot use external instancing batch without a dummy format. */
- BLI_assert(shgroup->attrs_count != 0);
+ BLI_assert(shgroup->type == DRW_SHG_INSTANCE);
+ BLI_assert(shgroup->instance_count == 0);
+ /* You cannot use an external instancing batch without a dummy format. */
+ BLI_assert(shgroup->attrs_count != 0);
- shgroup->type = DRW_SHG_INSTANCE_EXTERNAL;
- drw_call_calc_orco(NULL, shgroup->instance_orcofac);
- /* PERF : This destroys the vaos cache so better check if it's necessary. */
- /* Note: This WILL break if batch->verts[0] is destroyed and reallocated
- * at the same address. Bindings/VAOs would remain obsolete. */
- //if (shgroup->instancing_geom->inst != batch->verts[0])
- GPU_batch_instbuf_set(shgroup->instance_geom, batch->verts[0], false);
+ shgroup->type = DRW_SHG_INSTANCE_EXTERNAL;
+ drw_call_calc_orco(NULL, shgroup->instance_orcofac);
+ /* PERF: This destroys the VAO cache, so better check whether it's necessary. */
+ /* Note: This WILL break if batch->verts[0] is destroyed and reallocated
+ * at the same address. Bindings/VAOs would remain obsolete. */
+ //if (shgroup->instancing_geom->inst != batch->verts[0])
+ GPU_batch_instbuf_set(shgroup->instance_geom, batch->verts[0], false);
#ifdef USE_GPU_SELECT
- shgroup->override_selectid = DST.select_id;
+ shgroup->override_selectid = DST.select_id;
#endif
}
uint DRW_shgroup_get_instance_count(const DRWShadingGroup *shgroup)
{
- return shgroup->instance_count;
+ return shgroup->instance_count;
}
/**
@@ -1061,52 +1167,52 @@ uint DRW_shgroup_get_instance_count(const DRWShadingGroup *shgroup)
*/
void DRW_shgroup_state_enable(DRWShadingGroup *shgroup, DRWState state)
{
- shgroup->state_extra |= state;
+ shgroup->state_extra |= state;
}
void DRW_shgroup_state_disable(DRWShadingGroup *shgroup, DRWState state)
{
- shgroup->state_extra_disable &= ~state;
+ shgroup->state_extra_disable &= ~state;
}
void DRW_shgroup_stencil_mask(DRWShadingGroup *shgroup, uint mask)
{
- BLI_assert(mask <= 255);
- shgroup->stencil_mask = mask;
+ BLI_assert(mask <= 255);
+ shgroup->stencil_mask = mask;
}
bool DRW_shgroup_is_empty(DRWShadingGroup *shgroup)
{
- switch (shgroup->type) {
- case DRW_SHG_NORMAL:
- case DRW_SHG_FEEDBACK_TRANSFORM:
- return shgroup->calls.first == NULL;
- case DRW_SHG_POINT_BATCH:
- case DRW_SHG_LINE_BATCH:
- case DRW_SHG_TRIANGLE_BATCH:
- case DRW_SHG_INSTANCE:
- case DRW_SHG_INSTANCE_EXTERNAL:
- return shgroup->instance_count == 0;
- }
- BLI_assert(!"Shading Group type not supported");
- return true;
+ switch (shgroup->type) {
+ case DRW_SHG_NORMAL:
+ case DRW_SHG_FEEDBACK_TRANSFORM:
+ return shgroup->calls.first == NULL;
+ case DRW_SHG_POINT_BATCH:
+ case DRW_SHG_LINE_BATCH:
+ case DRW_SHG_TRIANGLE_BATCH:
+ case DRW_SHG_INSTANCE:
+ case DRW_SHG_INSTANCE_EXTERNAL:
+ return shgroup->instance_count == 0;
+ }
+ BLI_assert(!"Shading Group type not supported");
+ return true;
}
DRWShadingGroup *DRW_shgroup_create_sub(DRWShadingGroup *shgroup)
{
- /* Remove this assertion if needed but implement the other cases first! */
- BLI_assert(shgroup->type == DRW_SHG_NORMAL);
+ /* Remove this assertion if needed but implement the other cases first! */
+ BLI_assert(shgroup->type == DRW_SHG_NORMAL);
- DRWShadingGroup *shgroup_new = BLI_mempool_alloc(DST.vmempool->shgroups);
+ DRWShadingGroup *shgroup_new = BLI_mempool_alloc(DST.vmempool->shgroups);
- *shgroup_new = *shgroup;
- shgroup_new->uniforms = NULL;
- shgroup_new->calls.first = NULL;
- shgroup_new->calls.last = NULL;
+ *shgroup_new = *shgroup;
+ shgroup_new->uniforms = NULL;
+ shgroup_new->calls.first = NULL;
+ shgroup_new->calls.last = NULL;
- BLI_LINKS_INSERT_AFTER(&shgroup->pass_parent->shgroups, shgroup, shgroup_new);
+ BLI_LINKS_INSERT_AFTER(&shgroup->pass_parent->shgroups, shgroup, shgroup_new);
- return shgroup_new;
+ return shgroup_new;
}
/** \} */
@@ -1117,95 +1223,103 @@ DRWShadingGroup *DRW_shgroup_create_sub(DRWShadingGroup *shgroup)
DRWPass *DRW_pass_create(const char *name, DRWState state)
{
- DRWPass *pass = BLI_mempool_alloc(DST.vmempool->passes);
- pass->state = state;
- if (((G.debug_value > 20) && (G.debug_value < 30)) ||
- (G.debug & G_DEBUG))
- {
- BLI_strncpy(pass->name, name, MAX_PASS_NAME);
- }
+ DRWPass *pass = BLI_mempool_alloc(DST.vmempool->passes);
+ pass->state = state;
+ if (((G.debug_value > 20) && (G.debug_value < 30)) || (G.debug & G_DEBUG)) {
+ BLI_strncpy(pass->name, name, MAX_PASS_NAME);
+ }
- pass->shgroups.first = NULL;
- pass->shgroups.last = NULL;
+ pass->shgroups.first = NULL;
+ pass->shgroups.last = NULL;
- return pass;
+ return pass;
}
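Typical setup, as a hypothetical sketch: a pass whose state is an OR of
DRWState flags, with one shading group attached (as the code above shows, the
pass name is only stored when a debug mode is active):

static void myengine_cache_init(struct GPUShader *shader)
{
  DRWPass *pass = DRW_pass_create("My Pass",
                                  DRW_STATE_WRITE_COLOR | DRW_STATE_WRITE_DEPTH |
                                      DRW_STATE_DEPTH_LESS_EQUAL);
  DRWShadingGroup *grp = DRW_shgroup_create(shader, pass);
  (void)grp; /* uniforms and draw calls would be added here */
}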
bool DRW_pass_is_empty(DRWPass *pass)
{
- for (DRWShadingGroup *shgroup = pass->shgroups.first; shgroup; shgroup = shgroup->next) {
- if (!DRW_shgroup_is_empty(shgroup)) {
- return false;
- }
- }
- return true;
+ for (DRWShadingGroup *shgroup = pass->shgroups.first; shgroup; shgroup = shgroup->next) {
+ if (!DRW_shgroup_is_empty(shgroup)) {
+ return false;
+ }
+ }
+ return true;
}
void DRW_pass_state_set(DRWPass *pass, DRWState state)
{
- pass->state = state;
+ pass->state = state;
}
void DRW_pass_state_add(DRWPass *pass, DRWState state)
{
- pass->state |= state;
+ pass->state |= state;
}
void DRW_pass_state_remove(DRWPass *pass, DRWState state)
{
- pass->state &= ~state;
+ pass->state &= ~state;
}
void DRW_pass_free(DRWPass *pass)
{
- pass->shgroups.first = NULL;
- pass->shgroups.last = NULL;
+ pass->shgroups.first = NULL;
+ pass->shgroups.last = NULL;
}
-void DRW_pass_foreach_shgroup(DRWPass *pass, void (*callback)(void *userData, DRWShadingGroup *shgrp), void *userData)
+void DRW_pass_foreach_shgroup(DRWPass *pass,
+ void (*callback)(void *userData, DRWShadingGroup *shgrp),
+ void *userData)
{
- for (DRWShadingGroup *shgroup = pass->shgroups.first; shgroup; shgroup = shgroup->next) {
- callback(userData, shgroup);
- }
+ for (DRWShadingGroup *shgroup = pass->shgroups.first; shgroup; shgroup = shgroup->next) {
+ callback(userData, shgroup);
+ }
}
typedef struct ZSortData {
- float *axis;
- float *origin;
+ float *axis;
+ float *origin;
} ZSortData;
static int pass_shgroup_dist_sort(void *thunk, const void *a, const void *b)
{
- const ZSortData *zsortdata = (ZSortData *)thunk;
- const DRWShadingGroup *shgrp_a = (const DRWShadingGroup *)a;
- const DRWShadingGroup *shgrp_b = (const DRWShadingGroup *)b;
-
- const DRWCall *call_a = (DRWCall *)shgrp_a->calls.first;
- const DRWCall *call_b = (DRWCall *)shgrp_b->calls.first;
-
- if (call_a == NULL) { return -1; }
- if (call_b == NULL) { return -1; }
-
- float tmp[3];
- sub_v3_v3v3(tmp, zsortdata->origin, call_a->state->model[3]);
- const float a_sq = dot_v3v3(zsortdata->axis, tmp);
- sub_v3_v3v3(tmp, zsortdata->origin, call_b->state->model[3]);
- const float b_sq = dot_v3v3(zsortdata->axis, tmp);
-
- if (a_sq < b_sq) { return 1; }
- else if (a_sq > b_sq) { return -1; }
- else {
- /* If there is a depth prepass put it before */
- if ((shgrp_a->state_extra & DRW_STATE_WRITE_DEPTH) != 0) {
- return -1;
- }
- else if ((shgrp_b->state_extra & DRW_STATE_WRITE_DEPTH) != 0) {
- return 1;
- }
- else {
- return 0;
- }
- }
+ const ZSortData *zsortdata = (ZSortData *)thunk;
+ const DRWShadingGroup *shgrp_a = (const DRWShadingGroup *)a;
+ const DRWShadingGroup *shgrp_b = (const DRWShadingGroup *)b;
+
+ const DRWCall *call_a = (DRWCall *)shgrp_a->calls.first;
+ const DRWCall *call_b = (DRWCall *)shgrp_b->calls.first;
+
+ if (call_a == NULL) {
+ return -1;
+ }
+ if (call_b == NULL) {
+ return -1;
+ }
+
+ float tmp[3];
+ sub_v3_v3v3(tmp, zsortdata->origin, call_a->state->model[3]);
+ const float a_sq = dot_v3v3(zsortdata->axis, tmp);
+ sub_v3_v3v3(tmp, zsortdata->origin, call_b->state->model[3]);
+ const float b_sq = dot_v3v3(zsortdata->axis, tmp);
+
+ if (a_sq < b_sq) {
+ return 1;
+ }
+ else if (a_sq > b_sq) {
+ return -1;
+ }
+ else {
+ /* If there is a depth prepass, put it before. */
+ if ((shgrp_a->state_extra & DRW_STATE_WRITE_DEPTH) != 0) {
+ return -1;
+ }
+ else if ((shgrp_b->state_extra & DRW_STATE_WRITE_DEPTH) != 0) {
+ return 1;
+ }
+ else {
+ return 0;
+ }
+ }
}
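The comparator keys off each group's *first* call only and sorts by the signed
distance of that call's world-space origin along the view axis, larger
distances first, with depth-writing groups winning ties. A sketch of the sort
key, assuming axis and origin come from the inverse view matrix as in
DRW_pass_sort_shgroup_z() below:

static float zsort_key(const float axis[3], const float origin[3], const float pos[3])
{
  const float tmp[3] = {origin[0] - pos[0], origin[1] - pos[1], origin[2] - pos[2]};
  return axis[0] * tmp[0] + axis[1] * tmp[1] + axis[2] * tmp[2];
}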
/* ------------------ Shading group sorting --------------------- */
@@ -1226,21 +1340,22 @@ static int pass_shgroup_dist_sort(void *thunk, const void *a, const void *b)
*/
void DRW_pass_sort_shgroup_z(DRWPass *pass)
{
- float (*viewinv)[4];
- viewinv = DST.view_data.matstate.mat[DRW_MAT_VIEWINV];
+ float(*viewinv)[4];
+ viewinv = DST.view_data.matstate.mat[DRW_MAT_VIEWINV];
- ZSortData zsortdata = {viewinv[2], viewinv[3]};
+ ZSortData zsortdata = {viewinv[2], viewinv[3]};
- if (pass->shgroups.first && pass->shgroups.first->next) {
- pass->shgroups.first = shgroup_sort_fn_r(pass->shgroups.first, pass_shgroup_dist_sort, &zsortdata);
+ if (pass->shgroups.first && pass->shgroups.first->next) {
+ pass->shgroups.first = shgroup_sort_fn_r(
+ pass->shgroups.first, pass_shgroup_dist_sort, &zsortdata);
- /* Find the next last */
- DRWShadingGroup *last = pass->shgroups.first;
- while ((last = last->next)) {
- /* Do nothing */
- }
- pass->shgroups.last = last;
- }
+ /* Find the new last element. */
+ DRWShadingGroup *last = pass->shgroups.first;
+ while ((last = last->next)) {
+ /* Do nothing */
+ }
+ pass->shgroups.last = last;
+ }
}
/** \} */
diff --git a/source/blender/draw/intern/draw_manager_exec.c b/source/blender/draw/intern/draw_manager_exec.c
index 52c3f773e77..db675ee0210 100644
--- a/source/blender/draw/intern/draw_manager_exec.c
+++ b/source/blender/draw/intern/draw_manager_exec.c
@@ -38,8 +38,8 @@
#ifdef USE_GPU_SELECT
void DRW_select_load_id(uint id)
{
- BLI_assert(G.f & G_FLAG_PICKSEL);
- DST.select_id = id;
+ BLI_assert(G.f & G_FLAG_PICKSEL);
+ DST.select_id = id;
}
#endif
@@ -51,349 +51,335 @@ void DRW_select_load_id(uint id)
void drw_state_set(DRWState state)
{
- if (DST.state == state) {
- return;
- }
+ if (DST.state == state) {
+ return;
+ }
#define CHANGED_TO(f) \
- ((DST.state_lock & (f)) ? 0 : \
- (((DST.state & (f)) ? \
- ((state & (f)) ? 0 : -1) : \
- ((state & (f)) ? 1 : 0))))
+ ((DST.state_lock & (f)) ? \
+ 0 : \
+ (((DST.state & (f)) ? ((state & (f)) ? 0 : -1) : ((state & (f)) ? 1 : 0))))
-#define CHANGED_ANY(f) \
- (((DST.state & (f)) != (state & (f))) && \
- ((DST.state_lock & (f)) == 0))
+#define CHANGED_ANY(f) (((DST.state & (f)) != (state & (f))) && ((DST.state_lock & (f)) == 0))
#define CHANGED_ANY_STORE_VAR(f, enabled) \
- (((DST.state & (f)) != (enabled = (state & (f)))) && \
- (((DST.state_lock & (f)) == 0)))
-
- /* Depth Write */
- {
- int test;
- if ((test = CHANGED_TO(DRW_STATE_WRITE_DEPTH))) {
- if (test == 1) {
- glDepthMask(GL_TRUE);
- }
- else {
- glDepthMask(GL_FALSE);
- }
- }
- }
-
- /* Color Write */
- {
- int test;
- if ((test = CHANGED_TO(DRW_STATE_WRITE_COLOR))) {
- if (test == 1) {
- glColorMask(GL_TRUE, GL_TRUE, GL_TRUE, GL_TRUE);
- }
- else {
- glColorMask(GL_FALSE, GL_FALSE, GL_FALSE, GL_FALSE);
- }
- }
- }
-
- /* Raster Discard */
- {
- if (CHANGED_ANY(DRW_STATE_RASTERIZER_ENABLED)) {
- if ((state & DRW_STATE_RASTERIZER_ENABLED) != 0) {
- glDisable(GL_RASTERIZER_DISCARD);
- }
- else {
- glEnable(GL_RASTERIZER_DISCARD);
- }
- }
- }
-
- /* Cull */
- {
- DRWState test;
- if (CHANGED_ANY_STORE_VAR(
- DRW_STATE_CULL_BACK | DRW_STATE_CULL_FRONT,
- test))
- {
- if (test) {
- glEnable(GL_CULL_FACE);
-
- if ((state & DRW_STATE_CULL_BACK) != 0) {
- glCullFace(GL_BACK);
- }
- else if ((state & DRW_STATE_CULL_FRONT) != 0) {
- glCullFace(GL_FRONT);
- }
- else {
- BLI_assert(0);
- }
- }
- else {
- glDisable(GL_CULL_FACE);
- }
- }
- }
-
- /* Depth Test */
- {
- DRWState test;
- if (CHANGED_ANY_STORE_VAR(
- DRW_STATE_DEPTH_LESS | DRW_STATE_DEPTH_LESS_EQUAL | DRW_STATE_DEPTH_EQUAL |
- DRW_STATE_DEPTH_GREATER | DRW_STATE_DEPTH_GREATER_EQUAL | DRW_STATE_DEPTH_ALWAYS,
- test))
- {
- if (test) {
- glEnable(GL_DEPTH_TEST);
-
- if (state & DRW_STATE_DEPTH_LESS) {
- glDepthFunc(GL_LESS);
- }
- else if (state & DRW_STATE_DEPTH_LESS_EQUAL) {
- glDepthFunc(GL_LEQUAL);
- }
- else if (state & DRW_STATE_DEPTH_EQUAL) {
- glDepthFunc(GL_EQUAL);
- }
- else if (state & DRW_STATE_DEPTH_GREATER) {
- glDepthFunc(GL_GREATER);
- }
- else if (state & DRW_STATE_DEPTH_GREATER_EQUAL) {
- glDepthFunc(GL_GEQUAL);
- }
- else if (state & DRW_STATE_DEPTH_ALWAYS) {
- glDepthFunc(GL_ALWAYS);
- }
- else {
- BLI_assert(0);
- }
- }
- else {
- glDisable(GL_DEPTH_TEST);
- }
- }
- }
-
- /* Wire Width */
- {
- int test;
- if (CHANGED_ANY_STORE_VAR(
- DRW_STATE_WIRE | DRW_STATE_WIRE_WIDE | DRW_STATE_WIRE_SMOOTH,
- test))
- {
- if (test & DRW_STATE_WIRE_WIDE) {
- GPU_line_width(3.0f);
- }
- else if (test & DRW_STATE_WIRE_SMOOTH) {
- GPU_line_width(2.0f);
- GPU_line_smooth(true);
- }
- else if (test & DRW_STATE_WIRE) {
- GPU_line_width(1.0f);
- }
- else {
- GPU_line_width(1.0f);
- GPU_line_smooth(false);
- }
- }
- }
-
- /* Points Size */
- {
- int test;
- if ((test = CHANGED_TO(DRW_STATE_POINT))) {
- if (test == 1) {
- GPU_enable_program_point_size();
- glPointSize(5.0f);
- }
- else {
- GPU_disable_program_point_size();
- }
- }
- }
-
- /* Blending (all buffer) */
- {
- int test;
- if (CHANGED_ANY_STORE_VAR(
- DRW_STATE_BLEND | DRW_STATE_BLEND_PREMUL | DRW_STATE_ADDITIVE |
- DRW_STATE_MULTIPLY | DRW_STATE_ADDITIVE_FULL |
- DRW_STATE_BLEND_OIT,
- test))
- {
- if (test) {
- glEnable(GL_BLEND);
-
- if ((state & DRW_STATE_BLEND) != 0) {
- glBlendFuncSeparate(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA, /* RGB */
- GL_ONE, GL_ONE_MINUS_SRC_ALPHA); /* Alpha */
- }
- else if ((state & DRW_STATE_BLEND_PREMUL) != 0) {
- glBlendFunc(GL_ONE, GL_ONE_MINUS_SRC_ALPHA);
- }
- else if ((state & DRW_STATE_MULTIPLY) != 0) {
- glBlendFunc(GL_DST_COLOR, GL_ZERO);
- }
- else if ((state & DRW_STATE_BLEND_OIT) != 0) {
- glBlendFuncSeparate(GL_ONE, GL_ONE, /* RGB */
- GL_ZERO, GL_ONE_MINUS_SRC_ALPHA); /* Alpha */
- }
- else if ((state & DRW_STATE_ADDITIVE) != 0) {
- /* Do not let alpha accumulate but premult the source RGB by it. */
- glBlendFuncSeparate(GL_SRC_ALPHA, GL_ONE, /* RGB */
- GL_ZERO, GL_ONE); /* Alpha */
- }
- else if ((state & DRW_STATE_ADDITIVE_FULL) != 0) {
- /* Let alpha accumulate. */
- glBlendFunc(GL_ONE, GL_ONE);
- }
- else {
- BLI_assert(0);
- }
- }
- else {
- glDisable(GL_BLEND);
- glBlendFunc(GL_ONE, GL_ONE); /* Don't multiply incoming color by alpha. */
- }
- }
- }
-
- /* Clip Planes */
- {
- int test;
- if ((test = CHANGED_TO(DRW_STATE_CLIP_PLANES))) {
- if (test == 1) {
- for (int i = 0; i < DST.clip_planes_len; ++i) {
- glEnable(GL_CLIP_DISTANCE0 + i);
- }
- }
- else {
- for (int i = 0; i < MAX_CLIP_PLANES; ++i) {
- glDisable(GL_CLIP_DISTANCE0 + i);
- }
- }
- }
- }
-
- /* Stencil */
- {
- DRWState test;
- if (CHANGED_ANY_STORE_VAR(
- DRW_STATE_WRITE_STENCIL |
- DRW_STATE_WRITE_STENCIL_SHADOW_PASS |
- DRW_STATE_WRITE_STENCIL_SHADOW_FAIL |
- DRW_STATE_STENCIL_EQUAL |
- DRW_STATE_STENCIL_NEQUAL,
- test))
- {
- if (test) {
- glEnable(GL_STENCIL_TEST);
- /* Stencil Write */
- if ((state & DRW_STATE_WRITE_STENCIL) != 0) {
- glStencilMask(0xFF);
- glStencilOp(GL_KEEP, GL_KEEP, GL_REPLACE);
- }
- else if ((state & DRW_STATE_WRITE_STENCIL_SHADOW_PASS) != 0) {
- glStencilMask(0xFF);
- glStencilOpSeparate(GL_BACK, GL_KEEP, GL_KEEP, GL_INCR_WRAP);
- glStencilOpSeparate(GL_FRONT, GL_KEEP, GL_KEEP, GL_DECR_WRAP);
- }
- else if ((state & DRW_STATE_WRITE_STENCIL_SHADOW_FAIL) != 0) {
- glStencilMask(0xFF);
- glStencilOpSeparate(GL_BACK, GL_KEEP, GL_DECR_WRAP, GL_KEEP);
- glStencilOpSeparate(GL_FRONT, GL_KEEP, GL_INCR_WRAP, GL_KEEP);
- }
- /* Stencil Test */
- else if ((state & (DRW_STATE_STENCIL_EQUAL | DRW_STATE_STENCIL_NEQUAL)) != 0) {
- glStencilMask(0x00); /* disable write */
- DST.stencil_mask = STENCIL_UNDEFINED;
- }
- else {
- BLI_assert(0);
- }
- }
- else {
- /* disable write & test */
- DST.stencil_mask = 0;
- glStencilMask(0x00);
- glStencilFunc(GL_ALWAYS, 0, 0xFF);
- glDisable(GL_STENCIL_TEST);
- }
- }
- }
-
- /* Provoking Vertex */
- {
- int test;
- if ((test = CHANGED_TO(DRW_STATE_FIRST_VERTEX_CONVENTION))) {
- if (test == 1) {
- glProvokingVertex(GL_FIRST_VERTEX_CONVENTION);
- }
- else {
- glProvokingVertex(GL_LAST_VERTEX_CONVENTION);
- }
- }
- }
-
- /* Polygon Offset */
- {
- int test;
- if (CHANGED_ANY_STORE_VAR(
- DRW_STATE_OFFSET_POSITIVE |
- DRW_STATE_OFFSET_NEGATIVE,
- test))
- {
- if (test) {
- glEnable(GL_POLYGON_OFFSET_FILL);
- glEnable(GL_POLYGON_OFFSET_LINE);
- glEnable(GL_POLYGON_OFFSET_POINT);
- /* Polygon offset direction */
- if ((state & DRW_STATE_OFFSET_POSITIVE) != 0) {
- glPolygonOffset(1.0f, 1.0f);
- }
- else if ((state & DRW_STATE_OFFSET_NEGATIVE) != 0) {
- glPolygonOffset(-1.0f, -1.0f);
- }
- else {
- BLI_assert(0);
- }
- }
- else {
- glDisable(GL_POLYGON_OFFSET_FILL);
- glDisable(GL_POLYGON_OFFSET_LINE);
- glDisable(GL_POLYGON_OFFSET_POINT);
- }
- }
- }
+ (((DST.state & (f)) != (enabled = (state & (f)))) && (((DST.state_lock & (f)) == 0)))
+
+ /* Depth Write */
+ {
+ int test;
+ if ((test = CHANGED_TO(DRW_STATE_WRITE_DEPTH))) {
+ if (test == 1) {
+ glDepthMask(GL_TRUE);
+ }
+ else {
+ glDepthMask(GL_FALSE);
+ }
+ }
+ }
+
+ /* Color Write */
+ {
+ int test;
+ if ((test = CHANGED_TO(DRW_STATE_WRITE_COLOR))) {
+ if (test == 1) {
+ glColorMask(GL_TRUE, GL_TRUE, GL_TRUE, GL_TRUE);
+ }
+ else {
+ glColorMask(GL_FALSE, GL_FALSE, GL_FALSE, GL_FALSE);
+ }
+ }
+ }
+
+ /* Raster Discard */
+ {
+ if (CHANGED_ANY(DRW_STATE_RASTERIZER_ENABLED)) {
+ if ((state & DRW_STATE_RASTERIZER_ENABLED) != 0) {
+ glDisable(GL_RASTERIZER_DISCARD);
+ }
+ else {
+ glEnable(GL_RASTERIZER_DISCARD);
+ }
+ }
+ }
+
+ /* Cull */
+ {
+ DRWState test;
+ if (CHANGED_ANY_STORE_VAR(DRW_STATE_CULL_BACK | DRW_STATE_CULL_FRONT, test)) {
+ if (test) {
+ glEnable(GL_CULL_FACE);
+
+ if ((state & DRW_STATE_CULL_BACK) != 0) {
+ glCullFace(GL_BACK);
+ }
+ else if ((state & DRW_STATE_CULL_FRONT) != 0) {
+ glCullFace(GL_FRONT);
+ }
+ else {
+ BLI_assert(0);
+ }
+ }
+ else {
+ glDisable(GL_CULL_FACE);
+ }
+ }
+ }
+
+ /* Depth Test */
+ {
+ DRWState test;
+ if (CHANGED_ANY_STORE_VAR(DRW_STATE_DEPTH_LESS | DRW_STATE_DEPTH_LESS_EQUAL |
+ DRW_STATE_DEPTH_EQUAL | DRW_STATE_DEPTH_GREATER |
+ DRW_STATE_DEPTH_GREATER_EQUAL | DRW_STATE_DEPTH_ALWAYS,
+ test)) {
+ if (test) {
+ glEnable(GL_DEPTH_TEST);
+
+ if (state & DRW_STATE_DEPTH_LESS) {
+ glDepthFunc(GL_LESS);
+ }
+ else if (state & DRW_STATE_DEPTH_LESS_EQUAL) {
+ glDepthFunc(GL_LEQUAL);
+ }
+ else if (state & DRW_STATE_DEPTH_EQUAL) {
+ glDepthFunc(GL_EQUAL);
+ }
+ else if (state & DRW_STATE_DEPTH_GREATER) {
+ glDepthFunc(GL_GREATER);
+ }
+ else if (state & DRW_STATE_DEPTH_GREATER_EQUAL) {
+ glDepthFunc(GL_GEQUAL);
+ }
+ else if (state & DRW_STATE_DEPTH_ALWAYS) {
+ glDepthFunc(GL_ALWAYS);
+ }
+ else {
+ BLI_assert(0);
+ }
+ }
+ else {
+ glDisable(GL_DEPTH_TEST);
+ }
+ }
+ }
+
+ /* Wire Width */
+ {
+ int test;
+ if (CHANGED_ANY_STORE_VAR(DRW_STATE_WIRE | DRW_STATE_WIRE_WIDE | DRW_STATE_WIRE_SMOOTH,
+ test)) {
+ if (test & DRW_STATE_WIRE_WIDE) {
+ GPU_line_width(3.0f);
+ }
+ else if (test & DRW_STATE_WIRE_SMOOTH) {
+ GPU_line_width(2.0f);
+ GPU_line_smooth(true);
+ }
+ else if (test & DRW_STATE_WIRE) {
+ GPU_line_width(1.0f);
+ }
+ else {
+ GPU_line_width(1.0f);
+ GPU_line_smooth(false);
+ }
+ }
+ }
+
+ /* Points Size */
+ {
+ int test;
+ if ((test = CHANGED_TO(DRW_STATE_POINT))) {
+ if (test == 1) {
+ GPU_enable_program_point_size();
+ glPointSize(5.0f);
+ }
+ else {
+ GPU_disable_program_point_size();
+ }
+ }
+ }
+
+ /* Blending (all buffer) */
+ {
+ int test;
+ if (CHANGED_ANY_STORE_VAR(DRW_STATE_BLEND | DRW_STATE_BLEND_PREMUL | DRW_STATE_ADDITIVE |
+ DRW_STATE_MULTIPLY | DRW_STATE_ADDITIVE_FULL |
+ DRW_STATE_BLEND_OIT,
+ test)) {
+ if (test) {
+ glEnable(GL_BLEND);
+
+ if ((state & DRW_STATE_BLEND) != 0) {
+ glBlendFuncSeparate(GL_SRC_ALPHA,
+ GL_ONE_MINUS_SRC_ALPHA, /* RGB */
+ GL_ONE,
+ GL_ONE_MINUS_SRC_ALPHA); /* Alpha */
+ }
+ else if ((state & DRW_STATE_BLEND_PREMUL) != 0) {
+ glBlendFunc(GL_ONE, GL_ONE_MINUS_SRC_ALPHA);
+ }
+ else if ((state & DRW_STATE_MULTIPLY) != 0) {
+ glBlendFunc(GL_DST_COLOR, GL_ZERO);
+ }
+ else if ((state & DRW_STATE_BLEND_OIT) != 0) {
+ glBlendFuncSeparate(GL_ONE,
+ GL_ONE, /* RGB */
+ GL_ZERO,
+ GL_ONE_MINUS_SRC_ALPHA); /* Alpha */
+ }
+ else if ((state & DRW_STATE_ADDITIVE) != 0) {
+ /* Do not let alpha accumulate but premult the source RGB by it. */
+ glBlendFuncSeparate(GL_SRC_ALPHA,
+ GL_ONE, /* RGB */
+ GL_ZERO,
+ GL_ONE); /* Alpha */
+ }
+ else if ((state & DRW_STATE_ADDITIVE_FULL) != 0) {
+ /* Let alpha accumulate. */
+ glBlendFunc(GL_ONE, GL_ONE);
+ }
+ else {
+ BLI_assert(0);
+ }
+ }
+ else {
+ glDisable(GL_BLEND);
+ glBlendFunc(GL_ONE, GL_ONE); /* Don't multiply incoming color by alpha. */
+ }
+ }
+ }
+
+ /* Clip Planes */
+ {
+ int test;
+ if ((test = CHANGED_TO(DRW_STATE_CLIP_PLANES))) {
+ if (test == 1) {
+ for (int i = 0; i < DST.clip_planes_len; ++i) {
+ glEnable(GL_CLIP_DISTANCE0 + i);
+ }
+ }
+ else {
+ for (int i = 0; i < MAX_CLIP_PLANES; ++i) {
+ glDisable(GL_CLIP_DISTANCE0 + i);
+ }
+ }
+ }
+ }
+
+ /* Stencil */
+ {
+ DRWState test;
+ if (CHANGED_ANY_STORE_VAR(DRW_STATE_WRITE_STENCIL | DRW_STATE_WRITE_STENCIL_SHADOW_PASS |
+ DRW_STATE_WRITE_STENCIL_SHADOW_FAIL | DRW_STATE_STENCIL_EQUAL |
+ DRW_STATE_STENCIL_NEQUAL,
+ test)) {
+ if (test) {
+ glEnable(GL_STENCIL_TEST);
+ /* Stencil Write */
+ if ((state & DRW_STATE_WRITE_STENCIL) != 0) {
+ glStencilMask(0xFF);
+ glStencilOp(GL_KEEP, GL_KEEP, GL_REPLACE);
+ }
+ else if ((state & DRW_STATE_WRITE_STENCIL_SHADOW_PASS) != 0) {
+ glStencilMask(0xFF);
+ glStencilOpSeparate(GL_BACK, GL_KEEP, GL_KEEP, GL_INCR_WRAP);
+ glStencilOpSeparate(GL_FRONT, GL_KEEP, GL_KEEP, GL_DECR_WRAP);
+ }
+ else if ((state & DRW_STATE_WRITE_STENCIL_SHADOW_FAIL) != 0) {
+ glStencilMask(0xFF);
+ glStencilOpSeparate(GL_BACK, GL_KEEP, GL_DECR_WRAP, GL_KEEP);
+ glStencilOpSeparate(GL_FRONT, GL_KEEP, GL_INCR_WRAP, GL_KEEP);
+ }
+ /* Stencil Test */
+ else if ((state & (DRW_STATE_STENCIL_EQUAL | DRW_STATE_STENCIL_NEQUAL)) != 0) {
+ glStencilMask(0x00); /* disable write */
+ DST.stencil_mask = STENCIL_UNDEFINED;
+ }
+ else {
+ BLI_assert(0);
+ }
+ }
+ else {
+ /* disable write & test */
+ DST.stencil_mask = 0;
+ glStencilMask(0x00);
+ glStencilFunc(GL_ALWAYS, 0, 0xFF);
+ glDisable(GL_STENCIL_TEST);
+ }
+ }
+ }
+
+ /* Provoking Vertex */
+ {
+ int test;
+ if ((test = CHANGED_TO(DRW_STATE_FIRST_VERTEX_CONVENTION))) {
+ if (test == 1) {
+ glProvokingVertex(GL_FIRST_VERTEX_CONVENTION);
+ }
+ else {
+ glProvokingVertex(GL_LAST_VERTEX_CONVENTION);
+ }
+ }
+ }
+
+ /* Polygon Offset */
+ {
+ int test;
+ if (CHANGED_ANY_STORE_VAR(DRW_STATE_OFFSET_POSITIVE | DRW_STATE_OFFSET_NEGATIVE, test)) {
+ if (test) {
+ glEnable(GL_POLYGON_OFFSET_FILL);
+ glEnable(GL_POLYGON_OFFSET_LINE);
+ glEnable(GL_POLYGON_OFFSET_POINT);
+ /* Polygon offset direction */
+ if ((state & DRW_STATE_OFFSET_POSITIVE) != 0) {
+ glPolygonOffset(1.0f, 1.0f);
+ }
+ else if ((state & DRW_STATE_OFFSET_NEGATIVE) != 0) {
+ glPolygonOffset(-1.0f, -1.0f);
+ }
+ else {
+ BLI_assert(0);
+ }
+ }
+ else {
+ glDisable(GL_POLYGON_OFFSET_FILL);
+ glDisable(GL_POLYGON_OFFSET_LINE);
+ glDisable(GL_POLYGON_OFFSET_POINT);
+ }
+ }
+ }
#undef CHANGED_TO
#undef CHANGED_ANY
#undef CHANGED_ANY_STORE_VAR
- DST.state = state;
+ DST.state = state;
}
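CHANGED_TO() collapses the previous state, the requested state, and the lock mask into a tri-state answer: -1 when a flag turns off, 1 when it turns on, 0 when it is unchanged or locked. The same logic as a function, for readability (a sketch; the flag-word width is an assumption):

  #include <stdint.h>

  /* -1 = flag turned off, 1 = flag turned on, 0 = no change or locked. */
  static int changed_to(uint64_t prev, uint64_t next, uint64_t lock, uint64_t flag)
  {
    if (lock & flag) {
      return 0; /* Locked flags never report a transition. */
    }
    if (prev & flag) {
      return (next & flag) ? 0 : -1;
    }
    return (next & flag) ? 1 : 0;
  }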
static void drw_stencil_set(uint mask)
{
- if (DST.stencil_mask != mask) {
- DST.stencil_mask = mask;
- /* Stencil Write */
- if ((DST.state & DRW_STATE_WRITE_STENCIL) != 0) {
- glStencilFunc(GL_ALWAYS, mask, 0xFF);
- }
- /* Stencil Test */
- else if ((DST.state & DRW_STATE_STENCIL_EQUAL) != 0) {
- glStencilFunc(GL_EQUAL, mask, 0xFF);
- }
- else if ((DST.state & DRW_STATE_STENCIL_NEQUAL) != 0) {
- glStencilFunc(GL_NOTEQUAL, mask, 0xFF);
- }
- }
+ if (DST.stencil_mask != mask) {
+ DST.stencil_mask = mask;
+ /* Stencil Write */
+ if ((DST.state & DRW_STATE_WRITE_STENCIL) != 0) {
+ glStencilFunc(GL_ALWAYS, mask, 0xFF);
+ }
+ /* Stencil Test */
+ else if ((DST.state & DRW_STATE_STENCIL_EQUAL) != 0) {
+ glStencilFunc(GL_EQUAL, mask, 0xFF);
+ }
+ else if ((DST.state & DRW_STATE_STENCIL_NEQUAL) != 0) {
+ glStencilFunc(GL_NOTEQUAL, mask, 0xFF);
+ }
+ }
}
/* Reset state to not interfere with other UI drawcalls */
void DRW_state_reset_ex(DRWState state)
{
- DST.state = ~state;
- drw_state_set(state);
+ DST.state = ~state;
+ drw_state_set(state);
}
/**
@@ -404,22 +390,22 @@ void DRW_state_reset_ex(DRWState state)
*/
void DRW_state_lock(DRWState state)
{
- DST.state_lock = state;
+ DST.state_lock = state;
}
void DRW_state_reset(void)
{
- DRW_state_reset_ex(DRW_STATE_DEFAULT);
+ DRW_state_reset_ex(DRW_STATE_DEFAULT);
- /* Reset blending function */
- glBlendFuncSeparate(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA, GL_ONE, GL_ONE_MINUS_SRC_ALPHA);
+ /* Reset blending function */
+ glBlendFuncSeparate(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA, GL_ONE, GL_ONE_MINUS_SRC_ALPHA);
}
/* NOTE: Make sure to reset after use! */
void DRW_state_invert_facing(void)
{
- SWAP(GLenum, DST.backface, DST.frontface);
- glFrontFace(DST.frontface);
+ SWAP(GLenum, DST.backface, DST.frontface);
+ glFrontFace(DST.frontface);
}
/**
@@ -429,26 +415,26 @@ void DRW_state_invert_facing(void)
*/
void DRW_state_clip_planes_len_set(uint plane_len)
{
- BLI_assert(plane_len <= MAX_CLIP_PLANES);
- DST.clip_planes_len = plane_len;
+ BLI_assert(plane_len <= MAX_CLIP_PLANES);
+ DST.clip_planes_len = plane_len;
}
void DRW_state_clip_planes_reset(void)
{
- DST.clip_planes_len = 0;
+ DST.clip_planes_len = 0;
}
void DRW_state_clip_planes_set_from_rv3d(RegionView3D *rv3d)
{
- int max_len = 6;
- int real_len = (rv3d->viewlock & RV3D_BOXCLIP) ? 4 : max_len;
- while (real_len < max_len) {
- /* Fill in dummy values that won't change results (6 is hard-coded in shaders). */
- copy_v4_v4(rv3d->clip[real_len], rv3d->clip[3]);
- real_len++;
- }
-
- DRW_state_clip_planes_len_set(max_len);
+ int max_len = 6;
+ int real_len = (rv3d->viewlock & RV3D_BOXCLIP) ? 4 : max_len;
+ while (real_len < max_len) {
+ /* Fill in dummy values that won't change results (6 is hard-coded in shaders). */
+ copy_v4_v4(rv3d->clip[real_len], rv3d->clip[3]);
+ real_len++;
+ }
+
+ DRW_state_clip_planes_len_set(max_len);
}
/** \} */
@@ -462,284 +448,309 @@ void DRW_state_clip_planes_set_from_rv3d(RegionView3D *rv3d)
* BKE_boundbox_init_from_minmax(&bbox, (const float[3]){-1.0f, -1.0f, -1.0f}, (const float[3]){1.0f, 1.0f, 1.0f});
* for (int i = 0; i < 8; i++) {mul_project_m4_v3(projinv, bbox.vec[i]);}
*/
-static void draw_frustum_boundbox_calc(const float(*projmat)[4], BoundBox *r_bbox)
+static void draw_frustum_boundbox_calc(const float (*projmat)[4], BoundBox *r_bbox)
{
- float left, right, bottom, top, near, far;
- bool is_persp = projmat[3][3] == 0.0f;
-
- projmat_dimensions(
- projmat, &left, &right, &bottom, &top, &near, &far);
-
- if (is_persp) {
- left *= near;
- right *= near;
- bottom *= near;
- top *= near;
- }
-
- r_bbox->vec[0][2] = r_bbox->vec[3][2] = r_bbox->vec[7][2] = r_bbox->vec[4][2] = -near;
- r_bbox->vec[0][0] = r_bbox->vec[3][0] = left;
- r_bbox->vec[4][0] = r_bbox->vec[7][0] = right;
- r_bbox->vec[0][1] = r_bbox->vec[4][1] = bottom;
- r_bbox->vec[7][1] = r_bbox->vec[3][1] = top;
-
- /* Get the coordinates of the far plane. */
- if (is_persp) {
- float sca_far = far / near;
- left *= sca_far;
- right *= sca_far;
- bottom *= sca_far;
- top *= sca_far;
- }
-
- r_bbox->vec[1][2] = r_bbox->vec[2][2] = r_bbox->vec[6][2] = r_bbox->vec[5][2] = -far;
- r_bbox->vec[1][0] = r_bbox->vec[2][0] = left;
- r_bbox->vec[6][0] = r_bbox->vec[5][0] = right;
- r_bbox->vec[1][1] = r_bbox->vec[5][1] = bottom;
- r_bbox->vec[2][1] = r_bbox->vec[6][1] = top;
+ float left, right, bottom, top, near, far;
+ bool is_persp = projmat[3][3] == 0.0f;
+
+ projmat_dimensions(projmat, &left, &right, &bottom, &top, &near, &far);
+
+ if (is_persp) {
+ left *= near;
+ right *= near;
+ bottom *= near;
+ top *= near;
+ }
+
+ r_bbox->vec[0][2] = r_bbox->vec[3][2] = r_bbox->vec[7][2] = r_bbox->vec[4][2] = -near;
+ r_bbox->vec[0][0] = r_bbox->vec[3][0] = left;
+ r_bbox->vec[4][0] = r_bbox->vec[7][0] = right;
+ r_bbox->vec[0][1] = r_bbox->vec[4][1] = bottom;
+ r_bbox->vec[7][1] = r_bbox->vec[3][1] = top;
+
+ /* Get the coordinates of the far plane. */
+ if (is_persp) {
+ float sca_far = far / near;
+ left *= sca_far;
+ right *= sca_far;
+ bottom *= sca_far;
+ top *= sca_far;
+ }
+
+ r_bbox->vec[1][2] = r_bbox->vec[2][2] = r_bbox->vec[6][2] = r_bbox->vec[5][2] = -far;
+ r_bbox->vec[1][0] = r_bbox->vec[2][0] = left;
+ r_bbox->vec[6][0] = r_bbox->vec[5][0] = right;
+ r_bbox->vec[1][1] = r_bbox->vec[5][1] = bottom;
+ r_bbox->vec[2][1] = r_bbox->vec[6][1] = top;
}
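For a perspective matrix (projmat[3][3] == 0.0f), the extents returned by projmat_dimensions() are scaled by the near distance to obtain the near-plane rectangle, and the far-plane rectangle then follows by similar triangles; under that reading of the code:

  \[ x_{near} = l \cdot n, \qquad x_{far} = x_{near} \cdot \frac{f}{n} = l \cdot f \]

and likewise for right, bottom and top. An orthographic matrix uses the extents unscaled on both planes.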
static void draw_clipping_setup_from_view(void)
{
- if (DST.clipping.updated) {
- return;
- }
+ if (DST.clipping.updated) {
+ return;
+ }
- float (*viewinv)[4] = DST.view_data.matstate.mat[DRW_MAT_VIEWINV];
- float (*projmat)[4] = DST.view_data.matstate.mat[DRW_MAT_WIN];
- float (*projinv)[4] = DST.view_data.matstate.mat[DRW_MAT_WININV];
- BoundSphere *bsphere = &DST.clipping.frustum_bsphere;
+ float(*viewinv)[4] = DST.view_data.matstate.mat[DRW_MAT_VIEWINV];
+ float(*projmat)[4] = DST.view_data.matstate.mat[DRW_MAT_WIN];
+ float(*projinv)[4] = DST.view_data.matstate.mat[DRW_MAT_WININV];
+ BoundSphere *bsphere = &DST.clipping.frustum_bsphere;
- /* Extract Clipping Planes */
- BoundBox bbox;
+ /* Extract Clipping Planes */
+ BoundBox bbox;
#if 0 /* It has accuracy problems. */
- BKE_boundbox_init_from_minmax(&bbox, (const float[3]){-1.0f, -1.0f, -1.0f}, (const float[3]){1.0f, 1.0f, 1.0f});
- for (int i = 0; i < 8; i++) {
- mul_project_m4_v3(projinv, bbox.vec[i]);
- }
+ BKE_boundbox_init_from_minmax(&bbox, (const float[3]){-1.0f, -1.0f, -1.0f}, (const float[3]){1.0f, 1.0f, 1.0f});
+ for (int i = 0; i < 8; i++) {
+ mul_project_m4_v3(projinv, bbox.vec[i]);
+ }
#else
- draw_frustum_boundbox_calc(projmat, &bbox);
+ draw_frustum_boundbox_calc(projmat, &bbox);
#endif
- /* Transform into world space. */
- for (int i = 0; i < 8; i++) {
- mul_m4_v3(viewinv, bbox.vec[i]);
- }
-
- memcpy(&DST.clipping.frustum_corners, &bbox, sizeof(BoundBox));
-
- /* Compute clip planes using the world space frustum corners. */
- for (int p = 0; p < 6; p++) {
- int q, r, s;
- switch (p) {
- case 0: q = 1; r = 2; s = 3; break; /* -X */
- case 1: q = 0; r = 4; s = 5; break; /* -Y */
- case 2: q = 1; r = 5; s = 6; break; /* +Z (far) */
- case 3: q = 2; r = 6; s = 7; break; /* +Y */
- case 4: q = 0; r = 3; s = 7; break; /* -Z (near) */
- default: q = 4; r = 7; s = 6; break; /* +X */
- }
- if (DST.frontface == GL_CW) {
- SWAP(int, q, s);
- }
-
- normal_quad_v3(DST.clipping.frustum_planes[p], bbox.vec[p], bbox.vec[q], bbox.vec[r], bbox.vec[s]);
- /* Increase precision and use the mean of all 4 corners. */
- DST.clipping.frustum_planes[p][3] = -dot_v3v3(DST.clipping.frustum_planes[p], bbox.vec[p]);
- DST.clipping.frustum_planes[p][3] += -dot_v3v3(DST.clipping.frustum_planes[p], bbox.vec[q]);
- DST.clipping.frustum_planes[p][3] += -dot_v3v3(DST.clipping.frustum_planes[p], bbox.vec[r]);
- DST.clipping.frustum_planes[p][3] += -dot_v3v3(DST.clipping.frustum_planes[p], bbox.vec[s]);
- DST.clipping.frustum_planes[p][3] *= 0.25f;
- }
-
- /* Extract Bounding Sphere */
- if (projmat[3][3] != 0.0f) {
- /* Orthographic */
- /* The most extreme points on the near and far plane. (normalized device coords). */
- float *nearpoint = bbox.vec[0];
- float *farpoint = bbox.vec[6];
-
- /* just use median point */
- mid_v3_v3v3(bsphere->center, farpoint, nearpoint);
- bsphere->radius = len_v3v3(bsphere->center, farpoint);
- }
- else if (projmat[2][0] == 0.0f && projmat[2][1] == 0.0f) {
- /* Perspective with symmetrical frustum. */
-
- /* We obtain the center and radius of the circumscribed circle of the
- * isosceles trapezoid formed by the diagonals of the near and far clipping planes */
-
- /* center of each clipping plane */
- float mid_min[3], mid_max[3];
- mid_v3_v3v3(mid_min, bbox.vec[3], bbox.vec[4]);
- mid_v3_v3v3(mid_max, bbox.vec[2], bbox.vec[5]);
-
- /* square length of the diagonals of each clipping plane */
- float a_sq = len_squared_v3v3(bbox.vec[3], bbox.vec[4]);
- float b_sq = len_squared_v3v3(bbox.vec[2], bbox.vec[5]);
-
- /* distance squared between clipping planes */
- float h_sq = len_squared_v3v3(mid_min, mid_max);
-
- float fac = (4 * h_sq + b_sq - a_sq) / (8 * h_sq);
-
- /* The goal is to get the smallest sphere,
- * not the sphere that passes through each corner */
- CLAMP(fac, 0.0f, 1.0f);
-
- interp_v3_v3v3(bsphere->center, mid_min, mid_max, fac);
-
- /* distance from the center to one of the points of the far plane (1, 2, 5, 6) */
- bsphere->radius = len_v3v3(bsphere->center, bbox.vec[1]);
- }
- else {
- /* Perspective with asymmetrical frustum. */
-
- /* We put the sphere center on the line that goes from origin
- * to the center of the far clipping plane. */
-
- /* Detect which corner of the far clipping plane is farthest from the origin */
- float nfar[4]; /* most extreme far point in NDC space */
- float farxy[2]; /* farpoint projection onto the near plane */
- float farpoint[3] = {0.0f}; /* most extreme far point in camera coordinate */
- float nearpoint[3]; /* most extreme near point in camera coordinate */
- float farcenter[3] = {0.0f}; /* center of far clipping plane in camera coordinates */
- float F = -1.0f, N; /* square distance of far and near point to origin */
- float f, n; /* distance of far and near point to z axis. f is always > 0 but n can be < 0 */
- float e, s; /* far and near clipping distance (<0) */
- float c; /* slope of center line = distance of far clipping center to z axis / far clipping distance */
- float z; /* projection of sphere center on z axis (<0) */
-
- /* Find farthest corner and center of far clip plane. */
- float corner[3] = {1.0f, 1.0f, 1.0f}; /* in clip space */
- for (int i = 0; i < 4; i++) {
- float point[3];
- mul_v3_project_m4_v3(point, projinv, corner);
- float len = len_squared_v3(point);
- if (len > F) {
- copy_v3_v3(nfar, corner);
- copy_v3_v3(farpoint, point);
- F = len;
- }
- add_v3_v3(farcenter, point);
- /* rotate by 90 degrees to walk through the 4 points of the far clip plane */
- float tmp = corner[0];
- corner[0] = -corner[1];
- corner[1] = tmp;
- }
-
- /* the far center is the average of the far clipping points */
- mul_v3_fl(farcenter, 0.25f);
- /* the extreme near point is the opposite point on the near clipping plane */
- copy_v3_fl3(nfar, -nfar[0], -nfar[1], -1.0f);
- mul_v3_project_m4_v3(nearpoint, projinv, nfar);
- /* this is a frustum projection */
- N = len_squared_v3(nearpoint);
- e = farpoint[2];
- s = nearpoint[2];
- /* distance to view Z axis */
- f = len_v2(farpoint);
- /* get corresponding point on the near plane */
- mul_v2_v2fl(farxy, farpoint, s / e);
- /* this formula preserves the sign of n */
- sub_v2_v2(nearpoint, farxy);
- n = f * s / e - len_v2(nearpoint);
- c = len_v2(farcenter) / e;
- /* the big formula, it simplifies to (F-N)/(2(e-s)) for the symmetric case */
- z = (F - N) / (2.0f * (e - s + c * (f - n)));
-
- bsphere->center[0] = farcenter[0] * z / e;
- bsphere->center[1] = farcenter[1] * z / e;
- bsphere->center[2] = z;
- bsphere->radius = len_v3v3(bsphere->center, farpoint);
-
- /* Transform to world space. */
- mul_m4_v3(viewinv, bsphere->center);
- }
-
- DST.clipping.updated = true;
+ /* Transform into world space. */
+ for (int i = 0; i < 8; i++) {
+ mul_m4_v3(viewinv, bbox.vec[i]);
+ }
+
+ memcpy(&DST.clipping.frustum_corners, &bbox, sizeof(BoundBox));
+
+ /* Compute clip planes using the world space frustum corners. */
+ for (int p = 0; p < 6; p++) {
+ int q, r, s;
+ switch (p) {
+ case 0:
+ q = 1;
+ r = 2;
+ s = 3;
+ break; /* -X */
+ case 1:
+ q = 0;
+ r = 4;
+ s = 5;
+ break; /* -Y */
+ case 2:
+ q = 1;
+ r = 5;
+ s = 6;
+ break; /* +Z (far) */
+ case 3:
+ q = 2;
+ r = 6;
+ s = 7;
+ break; /* +Y */
+ case 4:
+ q = 0;
+ r = 3;
+ s = 7;
+ break; /* -Z (near) */
+ default:
+ q = 4;
+ r = 7;
+ s = 6;
+ break; /* +X */
+ }
+ if (DST.frontface == GL_CW) {
+ SWAP(int, q, s);
+ }
+
+ normal_quad_v3(
+ DST.clipping.frustum_planes[p], bbox.vec[p], bbox.vec[q], bbox.vec[r], bbox.vec[s]);
+ /* Increase precision and use the mean of all 4 corners. */
+ DST.clipping.frustum_planes[p][3] = -dot_v3v3(DST.clipping.frustum_planes[p], bbox.vec[p]);
+ DST.clipping.frustum_planes[p][3] += -dot_v3v3(DST.clipping.frustum_planes[p], bbox.vec[q]);
+ DST.clipping.frustum_planes[p][3] += -dot_v3v3(DST.clipping.frustum_planes[p], bbox.vec[r]);
+ DST.clipping.frustum_planes[p][3] += -dot_v3v3(DST.clipping.frustum_planes[p], bbox.vec[s]);
+ DST.clipping.frustum_planes[p][3] *= 0.25f;
+ }
+
+ /* Extract Bounding Sphere */
+ if (projmat[3][3] != 0.0f) {
+ /* Orthographic */
+ /* The most extreme points on the near and far plane. (normalized device coords). */
+ float *nearpoint = bbox.vec[0];
+ float *farpoint = bbox.vec[6];
+
+ /* just use median point */
+ mid_v3_v3v3(bsphere->center, farpoint, nearpoint);
+ bsphere->radius = len_v3v3(bsphere->center, farpoint);
+ }
+ else if (projmat[2][0] == 0.0f && projmat[2][1] == 0.0f) {
+ /* Perspective with symmetrical frustum. */
+
+ /* We obtain the center and radius of the circumscribed circle of the
+ * isosceles trapezoid formed by the diagonals of the near and far clipping planes */
+
+ /* center of each clipping plane */
+ float mid_min[3], mid_max[3];
+ mid_v3_v3v3(mid_min, bbox.vec[3], bbox.vec[4]);
+ mid_v3_v3v3(mid_max, bbox.vec[2], bbox.vec[5]);
+
+ /* square length of the diagonals of each clipping plane */
+ float a_sq = len_squared_v3v3(bbox.vec[3], bbox.vec[4]);
+ float b_sq = len_squared_v3v3(bbox.vec[2], bbox.vec[5]);
+
+ /* distance squared between clipping planes */
+ float h_sq = len_squared_v3v3(mid_min, mid_max);
+
+ float fac = (4 * h_sq + b_sq - a_sq) / (8 * h_sq);
+
+ /* The goal is to get the smallest sphere,
+ * not the sphere that passes through each corner */
+ CLAMP(fac, 0.0f, 1.0f);
+
+ interp_v3_v3v3(bsphere->center, mid_min, mid_max, fac);
+
+ /* distance from the center to one of the points of the far plane (1, 2, 5, 6) */
+ bsphere->radius = len_v3v3(bsphere->center, bbox.vec[1]);
+ }
+ else {
+ /* Perspective with asymmetrical frustum. */
+
+ /* We put the sphere center on the line that goes from origin
+ * to the center of the far clipping plane. */
+
+ /* Detect which corner of the far clipping plane is farthest from the origin */
+ float nfar[4]; /* most extreme far point in NDC space */
+ float farxy[2]; /* farpoint projection onto the near plane */
+ float farpoint[3] = {0.0f}; /* most extreme far point in camera coordinate */
+ float nearpoint[3]; /* most extreme near point in camera coordinate */
+ float farcenter[3] = {0.0f}; /* center of far clipping plane in camera coordinates */
+ float F = -1.0f, N; /* square distance of far and near point to origin */
+ float f, n; /* distance of far and near point to z axis. f is always > 0 but n can be < 0 */
+ float e, s; /* far and near clipping distance (<0) */
+ float
+ c; /* slope of center line = distance of far clipping center to z axis / far clipping distance */
+ float z; /* projection of sphere center on z axis (<0) */
+
+ /* Find farthest corner and center of far clip plane. */
+ float corner[3] = {1.0f, 1.0f, 1.0f}; /* in clip space */
+ for (int i = 0; i < 4; i++) {
+ float point[3];
+ mul_v3_project_m4_v3(point, projinv, corner);
+ float len = len_squared_v3(point);
+ if (len > F) {
+ copy_v3_v3(nfar, corner);
+ copy_v3_v3(farpoint, point);
+ F = len;
+ }
+ add_v3_v3(farcenter, point);
+ /* rotate by 90 degrees to walk through the 4 points of the far clip plane */
+ float tmp = corner[0];
+ corner[0] = -corner[1];
+ corner[1] = tmp;
+ }
+
+ /* the far center is the average of the far clipping points */
+ mul_v3_fl(farcenter, 0.25f);
+ /* the extreme near point is the opposite point on the near clipping plane */
+ copy_v3_fl3(nfar, -nfar[0], -nfar[1], -1.0f);
+ mul_v3_project_m4_v3(nearpoint, projinv, nfar);
+ /* this is a frustum projection */
+ N = len_squared_v3(nearpoint);
+ e = farpoint[2];
+ s = nearpoint[2];
+ /* distance to view Z axis */
+ f = len_v2(farpoint);
+ /* get corresponding point on the near plane */
+ mul_v2_v2fl(farxy, farpoint, s / e);
+ /* this formula preserves the sign of n */
+ sub_v2_v2(nearpoint, farxy);
+ n = f * s / e - len_v2(nearpoint);
+ c = len_v2(farcenter) / e;
+ /* the big formula, it simplifies to (F-N)/(2(e-s)) for the symmetric case */
+ z = (F - N) / (2.0f * (e - s + c * (f - n)));
+
+ bsphere->center[0] = farcenter[0] * z / e;
+ bsphere->center[1] = farcenter[1] * z / e;
+ bsphere->center[2] = z;
+ bsphere->radius = len_v3v3(bsphere->center, farpoint);
+
+ /* Transform to world space. */
+ mul_m4_v3(viewinv, bsphere->center);
+ }
+
+ DST.clipping.updated = true;
}
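Each clip plane above comes from a quad of world-space frustum corners: normal_quad_v3() supplies the unit normal, and the plane offset is averaged over all four corners to damp floating-point error. A self-contained sketch of that construction (vector helpers written out for illustration):

  #include <math.h>

  static void v3_sub(float r[3], const float a[3], const float b[3])
  {
    r[0] = a[0] - b[0];
    r[1] = a[1] - b[1];
    r[2] = a[2] - b[2];
  }

  static float v3_dot(const float a[3], const float b[3])
  {
    return a[0] * b[0] + a[1] * b[1] + a[2] * b[2];
  }

  static void v3_cross(float r[3], const float a[3], const float b[3])
  {
    r[0] = a[1] * b[2] - a[2] * b[1];
    r[1] = a[2] * b[0] - a[0] * b[2];
    r[2] = a[0] * b[1] - a[1] * b[0];
  }

  /* Plane through the (planar) quad p-q-r-s: normal from two edges,
   * offset averaged over the four corners as in the function above. */
  static void plane_from_quad(float plane[4],
                              const float p[3],
                              const float q[3],
                              const float r[3],
                              const float s[3])
  {
    float e1[3], e2[3];
    v3_sub(e1, q, p);
    v3_sub(e2, s, p);
    v3_cross(plane, e1, e2);
    const float len = sqrtf(v3_dot(plane, plane));
    for (int i = 0; i < 3; i++) {
      plane[i] /= len;
    }
    plane[3] = -0.25f * (v3_dot(plane, p) + v3_dot(plane, q) +
                         v3_dot(plane, r) + v3_dot(plane, s));
  }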
/* Return True if the given BoundSphere intersects the current view frustum */
bool DRW_culling_sphere_test(BoundSphere *bsphere)
{
- draw_clipping_setup_from_view();
-
- /* Bypass test if radius is negative. */
- if (bsphere->radius < 0.0f) {
- return true;
- }
-
- /* Do a rough test first: sphere vs. sphere intersection. */
- BoundSphere *frustum_bsphere = &DST.clipping.frustum_bsphere;
- float center_dist = len_squared_v3v3(bsphere->center, frustum_bsphere->center);
- if (center_dist > SQUARE(bsphere->radius + frustum_bsphere->radius)) {
- return false;
- }
-
- /* Test against the 6 frustum planes. */
- for (int p = 0; p < 6; p++) {
- float dist = plane_point_side_v3(DST.clipping.frustum_planes[p], bsphere->center);
- if (dist < -bsphere->radius) {
- return false;
- }
- }
-
- return true;
+ draw_clipping_setup_from_view();
+
+ /* Bypass test if radius is negative. */
+ if (bsphere->radius < 0.0f) {
+ return true;
+ }
+
+ /* Do a rough test first: sphere vs. sphere intersection. */
+ BoundSphere *frustum_bsphere = &DST.clipping.frustum_bsphere;
+ float center_dist = len_squared_v3v3(bsphere->center, frustum_bsphere->center);
+ if (center_dist > SQUARE(bsphere->radius + frustum_bsphere->radius)) {
+ return false;
+ }
+
+ /* Test against the 6 frustum planes. */
+ for (int p = 0; p < 6; p++) {
+ float dist = plane_point_side_v3(DST.clipping.frustum_planes[p], bsphere->center);
+ if (dist < -bsphere->radius) {
+ return false;
+ }
+ }
+
+ return true;
}
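A hypothetical caller-side sketch (the object variables are placeholders, not from the source): wrap an object's world-space position and radius in a BoundSphere, using a negative radius when the test should be bypassed.

  /* Hypothetical usage; 'ob_center' and 'ob_radius' are illustrative. */
  BoundSphere bsphere;
  copy_v3_v3(bsphere.center, ob_center);
  bsphere.radius = use_culling ? ob_radius : -1.0f; /* < 0.0f bypasses the test. */
  if (!DRW_culling_sphere_test(&bsphere)) {
    return; /* Entirely outside the frustum: skip the drawcall. */
  }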
/* Return True if the given BoundBox intersects the current view frustum.
* bbox must be in world space. */
bool DRW_culling_box_test(BoundBox *bbox)
{
- draw_clipping_setup_from_view();
-
- /* 6 view frustum planes */
- for (int p = 0; p < 6; p++) {
- /* 8 box vertices. */
- for (int v = 0; v < 8 ; v++) {
- float dist = plane_point_side_v3(DST.clipping.frustum_planes[p], bbox->vec[v]);
- if (dist > 0.0f) {
- /* At least one point in front of this plane.
- * Go to next plane. */
- break;
- }
- else if (v == 7) {
- /* 8 points behind this plane. */
- return false;
- }
- }
- }
-
- return true;
+ draw_clipping_setup_from_view();
+
+ /* 6 view frustum planes */
+ for (int p = 0; p < 6; p++) {
+ /* 8 box vertices. */
+ for (int v = 0; v < 8; v++) {
+ float dist = plane_point_side_v3(DST.clipping.frustum_planes[p], bbox->vec[v]);
+ if (dist > 0.0f) {
+ /* At least one point in front of this plane.
+ * Go to next plane. */
+ break;
+ }
+ else if (v == 7) {
+ /* 8 points behind this plane. */
+ return false;
+ }
+ }
+ }
+
+ return true;
}
/* Return True if the current view frustum is inside or intersects the given plane */
bool DRW_culling_plane_test(float plane[4])
{
- draw_clipping_setup_from_view();
+ draw_clipping_setup_from_view();
- /* Test against the 8 frustum corners. */
- for (int c = 0; c < 8; c++) {
- float dist = plane_point_side_v3(plane, DST.clipping.frustum_corners.vec[c]);
- if (dist < 0.0f) {
- return true;
- }
- }
+ /* Test against the 8 frustum corners. */
+ for (int c = 0; c < 8; c++) {
+ float dist = plane_point_side_v3(plane, DST.clipping.frustum_corners.vec[c]);
+ if (dist < 0.0f) {
+ return true;
+ }
+ }
- return false;
+ return false;
}
void DRW_culling_frustum_corners_get(BoundBox *corners)
{
- draw_clipping_setup_from_view();
- memcpy(corners, &DST.clipping.frustum_corners, sizeof(BoundBox));
+ draw_clipping_setup_from_view();
+ memcpy(corners, &DST.clipping.frustum_corners, sizeof(BoundBox));
}
/* See draw_clipping_setup_from_view() for the plane order. */
void DRW_culling_frustum_planes_get(float planes[6][4])
{
- draw_clipping_setup_from_view();
- memcpy(planes, &DST.clipping.frustum_planes, sizeof(DST.clipping.frustum_planes));
+ draw_clipping_setup_from_view();
+ memcpy(planes, &DST.clipping.frustum_planes, sizeof(DST.clipping.frustum_planes));
}
/** \} */
@@ -750,239 +761,254 @@ void DRW_culling_frustum_planes_get(float planes[6][4])
static void draw_visibility_eval(DRWCallState *st)
{
- bool culled = st->flag & DRW_CALL_CULLED;
+ bool culled = st->flag & DRW_CALL_CULLED;
- if (st->cache_id != DST.state_cache_id) {
- /* Update culling result for this view. */
- culled = !DRW_culling_sphere_test(&st->bsphere);
- }
+ if (st->cache_id != DST.state_cache_id) {
+ /* Update culling result for this view. */
+ culled = !DRW_culling_sphere_test(&st->bsphere);
+ }
- if (st->visibility_cb) {
- culled = !st->visibility_cb(!culled, st->user_data);
- }
+ if (st->visibility_cb) {
+ culled = !st->visibility_cb(!culled, st->user_data);
+ }
- SET_FLAG_FROM_TEST(st->flag, culled, DRW_CALL_CULLED);
+ SET_FLAG_FROM_TEST(st->flag, culled, DRW_CALL_CULLED);
}
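SET_FLAG_FROM_TEST() sets or clears a flag bit from a boolean in a single expression; spelled out, the update above is equivalent to:

  if (culled) {
    st->flag |= DRW_CALL_CULLED;
  }
  else {
    st->flag &= ~DRW_CALL_CULLED;
  }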
static void draw_matrices_model_prepare(DRWCallState *st)
{
- if (st->cache_id == DST.state_cache_id) {
- /* Values are already updated for this view. */
- return;
- }
- else {
- st->cache_id = DST.state_cache_id;
- }
-
- /* No need to go further, the call will not be used. */
- if ((st->flag & DRW_CALL_CULLED) != 0 &&
- (st->flag & DRW_CALL_BYPASS_CULLING) == 0)
- {
- return;
- }
- /* Order matters */
- if (st->matflag & (DRW_CALL_MODELVIEW | DRW_CALL_MODELVIEWINVERSE |
- DRW_CALL_NORMALVIEW | DRW_CALL_EYEVEC))
- {
- mul_m4_m4m4(st->modelview, DST.view_data.matstate.mat[DRW_MAT_VIEW], st->model);
- }
- if (st->matflag & DRW_CALL_MODELVIEWINVERSE) {
- invert_m4_m4(st->modelviewinverse, st->modelview);
- }
- if (st->matflag & DRW_CALL_MODELVIEWPROJECTION) {
- mul_m4_m4m4(st->modelviewprojection, DST.view_data.matstate.mat[DRW_MAT_PERS], st->model);
- }
- if (st->matflag & (DRW_CALL_NORMALVIEW | DRW_CALL_NORMALVIEWINVERSE | DRW_CALL_EYEVEC)) {
- copy_m3_m4(st->normalview, st->modelview);
- invert_m3(st->normalview);
- transpose_m3(st->normalview);
- }
- if (st->matflag & (DRW_CALL_NORMALVIEWINVERSE | DRW_CALL_EYEVEC)) {
- invert_m3_m3(st->normalviewinverse, st->normalview);
- }
- /* TODO remove eye vec (unused) */
- if (st->matflag & DRW_CALL_EYEVEC) {
- /* Used by orthographic wires */
- copy_v3_fl3(st->eyevec, 0.0f, 0.0f, 1.0f);
- /* set eye vector, transformed to object coords */
- mul_m3_v3(st->normalviewinverse, st->eyevec);
- }
- /* Non view dependent */
- if (st->matflag & DRW_CALL_MODELINVERSE) {
- invert_m4_m4(st->modelinverse, st->model);
- st->matflag &= ~DRW_CALL_MODELINVERSE;
- }
- if (st->matflag & DRW_CALL_NORMALWORLD) {
- copy_m3_m4(st->normalworld, st->model);
- invert_m3(st->normalworld);
- transpose_m3(st->normalworld);
- st->matflag &= ~DRW_CALL_NORMALWORLD;
- }
+ if (st->cache_id == DST.state_cache_id) {
+ /* Values are already updated for this view. */
+ return;
+ }
+ else {
+ st->cache_id = DST.state_cache_id;
+ }
+
+ /* No need to go further, the call will not be used. */
+ if ((st->flag & DRW_CALL_CULLED) != 0 && (st->flag & DRW_CALL_BYPASS_CULLING) == 0) {
+ return;
+ }
+ /* Order matters */
+ if (st->matflag &
+ (DRW_CALL_MODELVIEW | DRW_CALL_MODELVIEWINVERSE | DRW_CALL_NORMALVIEW | DRW_CALL_EYEVEC)) {
+ mul_m4_m4m4(st->modelview, DST.view_data.matstate.mat[DRW_MAT_VIEW], st->model);
+ }
+ if (st->matflag & DRW_CALL_MODELVIEWINVERSE) {
+ invert_m4_m4(st->modelviewinverse, st->modelview);
+ }
+ if (st->matflag & DRW_CALL_MODELVIEWPROJECTION) {
+ mul_m4_m4m4(st->modelviewprojection, DST.view_data.matstate.mat[DRW_MAT_PERS], st->model);
+ }
+ if (st->matflag & (DRW_CALL_NORMALVIEW | DRW_CALL_NORMALVIEWINVERSE | DRW_CALL_EYEVEC)) {
+ copy_m3_m4(st->normalview, st->modelview);
+ invert_m3(st->normalview);
+ transpose_m3(st->normalview);
+ }
+ if (st->matflag & (DRW_CALL_NORMALVIEWINVERSE | DRW_CALL_EYEVEC)) {
+ invert_m3_m3(st->normalviewinverse, st->normalview);
+ }
+ /* TODO remove eye vec (unused) */
+ if (st->matflag & DRW_CALL_EYEVEC) {
+ /* Used by orthographic wires */
+ copy_v3_fl3(st->eyevec, 0.0f, 0.0f, 1.0f);
+ /* set eye vector, transformed to object coords */
+ mul_m3_v3(st->normalviewinverse, st->eyevec);
+ }
+ /* Non view dependent */
+ if (st->matflag & DRW_CALL_MODELINVERSE) {
+ invert_m4_m4(st->modelinverse, st->model);
+ st->matflag &= ~DRW_CALL_MODELINVERSE;
+ }
+ if (st->matflag & DRW_CALL_NORMALWORLD) {
+ copy_m3_m4(st->normalworld, st->model);
+ invert_m3(st->normalworld);
+ transpose_m3(st->normalworld);
+ st->matflag &= ~DRW_CALL_NORMALWORLD;
+ }
}
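The normalview/normalworld matrices above are the inverse-transpose of the upper-left 3x3 of the (model-)view matrix, which keeps transformed normals perpendicular to surfaces under non-uniform scaling:

  \[ N = \left( M_{3 \times 3} \right)^{-T}, \qquad n' = N \, n \]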
static void draw_geometry_prepare(DRWShadingGroup *shgroup, DRWCall *call)
{
- /* step 1 : bind object dependent matrices */
- if (call != NULL) {
- DRWCallState *state = call->state;
- float objectinfo[4];
- objectinfo[0] = state->objectinfo[0];
- objectinfo[1] = call->single.ma_index; /* WATCH this is only valid for single drawcalls. */
- objectinfo[2] = state->objectinfo[1];
- objectinfo[3] = (state->flag & DRW_CALL_NEGSCALE) ? -1.0f : 1.0f;
-
- GPU_shader_uniform_vector(shgroup->shader, shgroup->model, 16, 1, (float *)state->model);
- GPU_shader_uniform_vector(shgroup->shader, shgroup->modelinverse, 16, 1, (float *)state->modelinverse);
- GPU_shader_uniform_vector(shgroup->shader, shgroup->modelview, 16, 1, (float *)state->modelview);
- GPU_shader_uniform_vector(shgroup->shader, shgroup->modelviewinverse, 16, 1, (float *)state->modelviewinverse);
- GPU_shader_uniform_vector(shgroup->shader, shgroup->modelviewprojection, 16, 1, (float *)state->modelviewprojection);
- GPU_shader_uniform_vector(shgroup->shader, shgroup->normalview, 9, 1, (float *)state->normalview);
- GPU_shader_uniform_vector(shgroup->shader, shgroup->normalviewinverse, 9, 1, (float *)state->normalviewinverse);
- GPU_shader_uniform_vector(shgroup->shader, shgroup->normalworld, 9, 1, (float *)state->normalworld);
- GPU_shader_uniform_vector(shgroup->shader, shgroup->objectinfo, 4, 1, (float *)objectinfo);
- GPU_shader_uniform_vector(shgroup->shader, shgroup->orcotexfac, 3, 2, (float *)state->orcotexfac);
- GPU_shader_uniform_vector(shgroup->shader, shgroup->eye, 3, 1, (float *)state->eyevec);
- }
- else {
- BLI_assert((shgroup->normalview == -1) && (shgroup->normalworld == -1) && (shgroup->eye == -1));
- /* For instancing and batching. */
- float unitmat[4][4];
- unit_m4(unitmat);
- GPU_shader_uniform_vector(shgroup->shader, shgroup->model, 16, 1, (float *)unitmat);
- GPU_shader_uniform_vector(shgroup->shader, shgroup->modelinverse, 16, 1, (float *)unitmat);
- GPU_shader_uniform_vector(shgroup->shader, shgroup->modelview, 16, 1, (float *)DST.view_data.matstate.mat[DRW_MAT_VIEW]);
- GPU_shader_uniform_vector(shgroup->shader, shgroup->modelviewinverse, 16, 1, (float *)DST.view_data.matstate.mat[DRW_MAT_VIEWINV]);
- GPU_shader_uniform_vector(shgroup->shader, shgroup->modelviewprojection, 16, 1, (float *)DST.view_data.matstate.mat[DRW_MAT_PERS]);
- GPU_shader_uniform_vector(shgroup->shader, shgroup->objectinfo, 4, 1, (float *)unitmat);
- GPU_shader_uniform_vector(shgroup->shader, shgroup->orcotexfac, 3, 2, (float *)shgroup->instance_orcofac);
- }
+ /* step 1 : bind object dependent matrices */
+ if (call != NULL) {
+ DRWCallState *state = call->state;
+ float objectinfo[4];
+ objectinfo[0] = state->objectinfo[0];
+ objectinfo[1] = call->single.ma_index; /* WATCH this is only valid for single drawcalls. */
+ objectinfo[2] = state->objectinfo[1];
+ objectinfo[3] = (state->flag & DRW_CALL_NEGSCALE) ? -1.0f : 1.0f;
+
+ GPU_shader_uniform_vector(shgroup->shader, shgroup->model, 16, 1, (float *)state->model);
+ GPU_shader_uniform_vector(
+ shgroup->shader, shgroup->modelinverse, 16, 1, (float *)state->modelinverse);
+ GPU_shader_uniform_vector(
+ shgroup->shader, shgroup->modelview, 16, 1, (float *)state->modelview);
+ GPU_shader_uniform_vector(
+ shgroup->shader, shgroup->modelviewinverse, 16, 1, (float *)state->modelviewinverse);
+ GPU_shader_uniform_vector(
+ shgroup->shader, shgroup->modelviewprojection, 16, 1, (float *)state->modelviewprojection);
+ GPU_shader_uniform_vector(
+ shgroup->shader, shgroup->normalview, 9, 1, (float *)state->normalview);
+ GPU_shader_uniform_vector(
+ shgroup->shader, shgroup->normalviewinverse, 9, 1, (float *)state->normalviewinverse);
+ GPU_shader_uniform_vector(
+ shgroup->shader, shgroup->normalworld, 9, 1, (float *)state->normalworld);
+ GPU_shader_uniform_vector(shgroup->shader, shgroup->objectinfo, 4, 1, (float *)objectinfo);
+ GPU_shader_uniform_vector(
+ shgroup->shader, shgroup->orcotexfac, 3, 2, (float *)state->orcotexfac);
+ GPU_shader_uniform_vector(shgroup->shader, shgroup->eye, 3, 1, (float *)state->eyevec);
+ }
+ else {
+ BLI_assert((shgroup->normalview == -1) && (shgroup->normalworld == -1) &&
+ (shgroup->eye == -1));
+ /* For instancing and batching. */
+ float unitmat[4][4];
+ unit_m4(unitmat);
+ GPU_shader_uniform_vector(shgroup->shader, shgroup->model, 16, 1, (float *)unitmat);
+ GPU_shader_uniform_vector(shgroup->shader, shgroup->modelinverse, 16, 1, (float *)unitmat);
+ GPU_shader_uniform_vector(shgroup->shader,
+ shgroup->modelview,
+ 16,
+ 1,
+ (float *)DST.view_data.matstate.mat[DRW_MAT_VIEW]);
+ GPU_shader_uniform_vector(shgroup->shader,
+ shgroup->modelviewinverse,
+ 16,
+ 1,
+ (float *)DST.view_data.matstate.mat[DRW_MAT_VIEWINV]);
+ GPU_shader_uniform_vector(shgroup->shader,
+ shgroup->modelviewprojection,
+ 16,
+ 1,
+ (float *)DST.view_data.matstate.mat[DRW_MAT_PERS]);
+ GPU_shader_uniform_vector(shgroup->shader, shgroup->objectinfo, 4, 1, (float *)unitmat);
+ GPU_shader_uniform_vector(
+ shgroup->shader, shgroup->orcotexfac, 3, 2, (float *)shgroup->instance_orcofac);
+ }
}
static void draw_geometry_execute_ex(
- DRWShadingGroup *shgroup, GPUBatch *geom, uint start, uint count, bool draw_instance)
+ DRWShadingGroup *shgroup, GPUBatch *geom, uint start, uint count, bool draw_instance)
{
- /* Special case: empty drawcall, placement is done via shader, don't bind anything. */
- /* TODO use DRW_CALL_PROCEDURAL instead */
- if (geom == NULL) {
- BLI_assert(shgroup->type == DRW_SHG_TRIANGLE_BATCH); /* Add other type if needed. */
- /* Shader is already bound. */
- GPU_draw_primitive(GPU_PRIM_TRIS, count);
- return;
- }
-
- /* step 2 : bind vertex array & draw */
- GPU_batch_program_set_no_use(
- geom, GPU_shader_get_program(shgroup->shader), GPU_shader_get_interface(shgroup->shader));
- /* XXX hacking gawain. we don't want to call glUseProgram! (huge performance loss) */
- geom->program_in_use = true;
-
- GPU_batch_draw_range_ex(geom, start, count, draw_instance);
-
- geom->program_in_use = false; /* XXX hacking gawain */
+ /* Special case: empty drawcall, placement is done via shader, don't bind anything. */
+ /* TODO use DRW_CALL_PROCEDURAL instead */
+ if (geom == NULL) {
+ BLI_assert(shgroup->type == DRW_SHG_TRIANGLE_BATCH); /* Add other type if needed. */
+ /* Shader is already bound. */
+ GPU_draw_primitive(GPU_PRIM_TRIS, count);
+ return;
+ }
+
+ /* step 2 : bind vertex array & draw */
+ GPU_batch_program_set_no_use(
+ geom, GPU_shader_get_program(shgroup->shader), GPU_shader_get_interface(shgroup->shader));
+ /* XXX hacking gawain. we don't want to call glUseProgram! (huge performance loss) */
+ geom->program_in_use = true;
+
+ GPU_batch_draw_range_ex(geom, start, count, draw_instance);
+
+ geom->program_in_use = false; /* XXX hacking gawain */
}
static void draw_geometry_execute(DRWShadingGroup *shgroup, GPUBatch *geom)
{
- draw_geometry_execute_ex(shgroup, geom, 0, 0, false);
+ draw_geometry_execute_ex(shgroup, geom, 0, 0, false);
}
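Writing geom->program_in_use directly avoids a redundant glUseProgram per drawcall: the program is bound once when the shading group's shader changes, then every range draw reuses it. Schematically (a sketch of the call pattern, not the exact call sites):

  GPU_shader_bind(shgroup->shader);           /* Once, when the shader changes. */
  for (int i = 0; i < geom_count; i++) {      /* 'geom_count'/'geoms' are illustrative. */
    draw_geometry_execute(shgroup, geoms[i]); /* Binds the vertex array, not the program. */
  }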
enum {
- BIND_NONE = 0,
- BIND_TEMP = 1, /* Release slot after this shading group. */
- BIND_PERSIST = 2, /* Release slot only after the next shader change. */
+ BIND_NONE = 0,
+ BIND_TEMP = 1, /* Release slot after this shading group. */
+ BIND_PERSIST = 2, /* Release slot only after the next shader change. */
};
static void set_bound_flags(uint64_t *slots, uint64_t *persist_slots, int slot_idx, char bind_type)
{
- uint64_t slot = 1lu << slot_idx;
- *slots |= slot;
- if (bind_type == BIND_PERSIST) {
- *persist_slots |= slot;
- }
+ uint64_t slot = 1lu << slot_idx;
+ *slots |= slot;
+ if (bind_type == BIND_PERSIST) {
+ *persist_slots |= slot;
+ }
}
static int get_empty_slot_index(uint64_t slots)
{
- uint64_t empty_slots = ~slots;
- /* Find first empty slot using bitscan. */
- if (empty_slots != 0) {
- if ((empty_slots & 0xFFFFFFFFlu) != 0) {
- return (int)bitscan_forward_uint(empty_slots);
- }
- else {
- return (int)bitscan_forward_uint(empty_slots >> 32) + 32;
- }
- }
- else {
- /* Greater than GPU_max_textures() */
- return 99999;
- }
+ uint64_t empty_slots = ~slots;
+ /* Find first empty slot using bitscan. */
+ if (empty_slots != 0) {
+ if ((empty_slots & 0xFFFFFFFFlu) != 0) {
+ return (int)bitscan_forward_uint(empty_slots);
+ }
+ else {
+ return (int)bitscan_forward_uint(empty_slots >> 32) + 32;
+ }
+ }
+ else {
+ /* Greater than GPU_max_textures() */
+ return 99999;
+ }
}
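The function splits the 64-bit mask into two 32-bit scans, apparently because bitscan_forward_uint() takes a 32-bit value. With a compiler intrinsic the lookup shrinks to one line (a sketch assuming GCC/Clang):

  #include <stdint.h>

  /* Index of the lowest zero bit of 'slots'. __builtin_ctzll() is undefined
   * for 0, so the fully-occupied mask is handled first. */
  static int first_empty_slot(uint64_t slots)
  {
    if (~slots == 0) {
      return 99999; /* No free slot; same sentinel as above. */
    }
    return (int)__builtin_ctzll(~slots);
  }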
static void bind_texture(GPUTexture *tex, char bind_type)
{
- int idx = GPU_texture_bound_number(tex);
- if (idx == -1) {
- /* Texture isn't bound yet. Find an empty slot and bind it. */
- idx = get_empty_slot_index(DST.RST.bound_tex_slots);
-
- if (idx < GPU_max_textures()) {
- GPUTexture **gpu_tex_slot = &DST.RST.bound_texs[idx];
- /* Unbind any previous texture. */
- if (*gpu_tex_slot != NULL) {
- GPU_texture_unbind(*gpu_tex_slot);
- }
- GPU_texture_bind(tex, idx);
- *gpu_tex_slot = tex;
- }
- else {
- printf("Not enough texture slots! Reduce number of textures used by your shader.\n");
- return;
- }
- }
- else {
- /* This texture slot was released but the tex
- * is still bound. Just flag the slot again. */
- BLI_assert(DST.RST.bound_texs[idx] == tex);
- }
- set_bound_flags(&DST.RST.bound_tex_slots,
- &DST.RST.bound_tex_slots_persist,
- idx, bind_type);
+ int idx = GPU_texture_bound_number(tex);
+ if (idx == -1) {
+ /* Texture isn't bound yet. Find an empty slot and bind it. */
+ idx = get_empty_slot_index(DST.RST.bound_tex_slots);
+
+ if (idx < GPU_max_textures()) {
+ GPUTexture **gpu_tex_slot = &DST.RST.bound_texs[idx];
+ /* Unbind any previous texture. */
+ if (*gpu_tex_slot != NULL) {
+ GPU_texture_unbind(*gpu_tex_slot);
+ }
+ GPU_texture_bind(tex, idx);
+ *gpu_tex_slot = tex;
+ }
+ else {
+ printf("Not enough texture slots! Reduce number of textures used by your shader.\n");
+ return;
+ }
+ }
+ else {
+ /* This texture slot was released but the tex
+ * is still bound. Just flag the slot again. */
+ BLI_assert(DST.RST.bound_texs[idx] == tex);
+ }
+ set_bound_flags(&DST.RST.bound_tex_slots, &DST.RST.bound_tex_slots_persist, idx, bind_type);
}
static void bind_ubo(GPUUniformBuffer *ubo, char bind_type)
{
- int idx = GPU_uniformbuffer_bindpoint(ubo);
- if (idx == -1) {
- /* UBO isn't bound yet. Find an empty slot and bind it. */
- idx = get_empty_slot_index(DST.RST.bound_ubo_slots);
-
- if (idx < GPU_max_ubo_binds()) {
- GPUUniformBuffer **gpu_ubo_slot = &DST.RST.bound_ubos[idx];
- /* Unbind any previous UBO. */
- if (*gpu_ubo_slot != NULL) {
- GPU_uniformbuffer_unbind(*gpu_ubo_slot);
- }
- GPU_uniformbuffer_bind(ubo, idx);
- *gpu_ubo_slot = ubo;
- }
- else {
- /* printf so user can report bad behavior */
- printf("Not enough ubo slots! This should not happen!\n");
- /* This does not depend on user input.
- * It is our responsibility to make sure there are enough slots. */
- BLI_assert(0);
- return;
- }
- }
- else {
- /* This UBO slot was released but the UBO is
- * still bound here. Just flag the slot again. */
- BLI_assert(DST.RST.bound_ubos[idx] == ubo);
- }
- set_bound_flags(&DST.RST.bound_ubo_slots,
- &DST.RST.bound_ubo_slots_persist,
- idx, bind_type);
+ int idx = GPU_uniformbuffer_bindpoint(ubo);
+ if (idx == -1) {
+ /* UBO isn't bound yet. Find an empty slot and bind it. */
+ idx = get_empty_slot_index(DST.RST.bound_ubo_slots);
+
+ if (idx < GPU_max_ubo_binds()) {
+ GPUUniformBuffer **gpu_ubo_slot = &DST.RST.bound_ubos[idx];
+ /* Unbind any previous UBO. */
+ if (*gpu_ubo_slot != NULL) {
+ GPU_uniformbuffer_unbind(*gpu_ubo_slot);
+ }
+ GPU_uniformbuffer_bind(ubo, idx);
+ *gpu_ubo_slot = ubo;
+ }
+ else {
+ /* printf so user can report bad behavior */
+ printf("Not enough ubo slots! This should not happen!\n");
+ /* This does not depend on user input.
+ * It is our responsibility to make sure there are enough slots. */
+ BLI_assert(0);
+ return;
+ }
+ }
+ else {
+ /* This UBO slot was released but the UBO is
+ * still bound here. Just flag the slot again. */
+ BLI_assert(DST.RST.bound_ubos[idx] == ubo);
+ }
+ set_bound_flags(&DST.RST.bound_ubo_slots, &DST.RST.bound_ubo_slots_persist, idx, bind_type);
}
#ifndef NDEBUG
@@ -1001,403 +1027,418 @@ static void bind_ubo(GPUUniformBuffer *ubo, char bind_type)
* */
static bool ubo_bindings_validate(DRWShadingGroup *shgroup)
{
- bool valid = true;
+ bool valid = true;
# ifdef DEBUG_UBO_BINDING
- /* Check that all active uniform blocks have a non-zero buffer bound. */
- GLint program = 0;
- GLint active_blocks = 0;
-
- glGetIntegerv(GL_CURRENT_PROGRAM, &program);
- glGetProgramiv(program, GL_ACTIVE_UNIFORM_BLOCKS, &active_blocks);
-
- for (uint i = 0; i < active_blocks; ++i) {
- int binding = 0;
- int buffer = 0;
-
- glGetActiveUniformBlockiv(program, i, GL_UNIFORM_BLOCK_BINDING, &binding);
- glGetIntegeri_v(GL_UNIFORM_BUFFER_BINDING, binding, &buffer);
-
- if (buffer == 0) {
- char blockname[64];
- glGetActiveUniformBlockName(program, i, sizeof(blockname), NULL, blockname);
-
- if (valid) {
- printf("Trying to draw with missing UBO binding.\n");
- valid = false;
- }
- printf("Pass : %s, Shader : %s, Block : %s\n", shgroup->pass_parent->name, shgroup->shader->name, blockname);
- }
- }
+ /* Check that all active uniform blocks have a non-zero buffer bound. */
+ GLint program = 0;
+ GLint active_blocks = 0;
+
+ glGetIntegerv(GL_CURRENT_PROGRAM, &program);
+ glGetProgramiv(program, GL_ACTIVE_UNIFORM_BLOCKS, &active_blocks);
+
+ for (uint i = 0; i < active_blocks; ++i) {
+ int binding = 0;
+ int buffer = 0;
+
+ glGetActiveUniformBlockiv(program, i, GL_UNIFORM_BLOCK_BINDING, &binding);
+ glGetIntegeri_v(GL_UNIFORM_BUFFER_BINDING, binding, &buffer);
+
+ if (buffer == 0) {
+ char blockname[64];
+ glGetActiveUniformBlockName(program, i, sizeof(blockname), NULL, blockname);
+
+ if (valid) {
+ printf("Trying to draw with missing UBO binding.\n");
+ valid = false;
+ }
+ printf("Pass : %s, Shader : %s, Block : %s\n",
+ shgroup->pass_parent->name,
+ shgroup->shader->name,
+ blockname);
+ }
+ }
# endif
- return valid;
+ return valid;
}
#endif
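The validation above only does real work when DEBUG_UBO_BINDING is defined; otherwise it returns true immediately. A plausible call site (hypothetical, for illustration):

  #ifndef NDEBUG
    /* Catch a missing UBO binding before issuing the drawcall. */
    BLI_assert(ubo_bindings_validate(shgroup));
  #endif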
static void release_texture_slots(bool with_persist)
{
- if (with_persist) {
- DST.RST.bound_tex_slots = 0;
- DST.RST.bound_tex_slots_persist = 0;
- }
- else {
- DST.RST.bound_tex_slots &= DST.RST.bound_tex_slots_persist;
- }
+ if (with_persist) {
+ DST.RST.bound_tex_slots = 0;
+ DST.RST.bound_tex_slots_persist = 0;
+ }
+ else {
+ DST.RST.bound_tex_slots &= DST.RST.bound_tex_slots_persist;
+ }
}
static void release_ubo_slots(bool with_persist)
{
- if (with_persist) {
- DST.RST.bound_ubo_slots = 0;
- DST.RST.bound_ubo_slots_persist = 0;
- }
- else {
- DST.RST.bound_ubo_slots &= DST.RST.bound_ubo_slots_persist;
- }
+ if (with_persist) {
+ DST.RST.bound_ubo_slots = 0;
+ DST.RST.bound_ubo_slots_persist = 0;
+ }
+ else {
+ DST.RST.bound_ubo_slots &= DST.RST.bound_ubo_slots_persist;
+ }
}
static void draw_shgroup(DRWShadingGroup *shgroup, DRWState pass_state)
{
- BLI_assert(shgroup->shader);
-
- GPUTexture *tex;
- GPUUniformBuffer *ubo;
- int val;
- float fval;
- const bool shader_changed = (DST.shader != shgroup->shader);
- bool use_tfeedback = false;
-
- if (shader_changed) {
- if (DST.shader) {
- GPU_shader_unbind();
- }
- GPU_shader_bind(shgroup->shader);
- DST.shader = shgroup->shader;
- }
-
- if ((pass_state & DRW_STATE_TRANS_FEEDBACK) != 0 &&
- (shgroup->type == DRW_SHG_FEEDBACK_TRANSFORM))
- {
- use_tfeedback = GPU_shader_transform_feedback_enable(shgroup->shader,
- shgroup->tfeedback_target->vbo_id);
- }
-
- release_ubo_slots(shader_changed);
- release_texture_slots(shader_changed);
-
- drw_state_set((pass_state & shgroup->state_extra_disable) | shgroup->state_extra);
- drw_stencil_set(shgroup->stencil_mask);
-
- /* Binding Uniform */
- for (DRWUniform *uni = shgroup->uniforms; uni; uni = uni->next) {
- if (uni->location == -2) {
- uni->location = GPU_shader_get_uniform_ensure(shgroup->shader, DST.uniform_names.buffer + uni->name_ofs);
- if (uni->location == -1) {
- continue;
- }
- }
- switch (uni->type) {
- case DRW_UNIFORM_SHORT_TO_INT:
- val = (int)*((short *)uni->pvalue);
- GPU_shader_uniform_vector_int(
- shgroup->shader, uni->location, uni->length, uni->arraysize, &val);
- break;
- case DRW_UNIFORM_SHORT_TO_FLOAT:
- fval = (float)*((short *)uni->pvalue);
- GPU_shader_uniform_vector(
- shgroup->shader, uni->location, uni->length, uni->arraysize, (float *)&fval);
- break;
- case DRW_UNIFORM_BOOL_COPY:
- case DRW_UNIFORM_INT_COPY:
- GPU_shader_uniform_vector_int(
- shgroup->shader, uni->location, uni->length, uni->arraysize, &uni->ivalue);
- break;
- case DRW_UNIFORM_BOOL:
- case DRW_UNIFORM_INT:
- GPU_shader_uniform_vector_int(
- shgroup->shader, uni->location, uni->length, uni->arraysize, (int *)uni->pvalue);
- break;
- case DRW_UNIFORM_FLOAT_COPY:
- GPU_shader_uniform_vector(
- shgroup->shader, uni->location, uni->length, uni->arraysize, &uni->fvalue);
- break;
- case DRW_UNIFORM_FLOAT:
- GPU_shader_uniform_vector(
- shgroup->shader, uni->location, uni->length, uni->arraysize, (float *)uni->pvalue);
- break;
- case DRW_UNIFORM_TEXTURE:
- tex = (GPUTexture *)uni->pvalue;
- BLI_assert(tex);
- bind_texture(tex, BIND_TEMP);
- GPU_shader_uniform_texture(shgroup->shader, uni->location, tex);
- break;
- case DRW_UNIFORM_TEXTURE_PERSIST:
- tex = (GPUTexture *)uni->pvalue;
- BLI_assert(tex);
- bind_texture(tex, BIND_PERSIST);
- GPU_shader_uniform_texture(shgroup->shader, uni->location, tex);
- break;
- case DRW_UNIFORM_TEXTURE_REF:
- tex = *((GPUTexture **)uni->pvalue);
- BLI_assert(tex);
- bind_texture(tex, BIND_TEMP);
- GPU_shader_uniform_texture(shgroup->shader, uni->location, tex);
- break;
- case DRW_UNIFORM_BLOCK:
- ubo = (GPUUniformBuffer *)uni->pvalue;
- bind_ubo(ubo, BIND_TEMP);
- GPU_shader_uniform_buffer(shgroup->shader, uni->location, ubo);
- break;
- case DRW_UNIFORM_BLOCK_PERSIST:
- ubo = (GPUUniformBuffer *)uni->pvalue;
- bind_ubo(ubo, BIND_PERSIST);
- GPU_shader_uniform_buffer(shgroup->shader, uni->location, ubo);
- break;
- }
- }
+ BLI_assert(shgroup->shader);
+
+ GPUTexture *tex;
+ GPUUniformBuffer *ubo;
+ int val;
+ float fval;
+ const bool shader_changed = (DST.shader != shgroup->shader);
+ bool use_tfeedback = false;
+
+ if (shader_changed) {
+ if (DST.shader) {
+ GPU_shader_unbind();
+ }
+ GPU_shader_bind(shgroup->shader);
+ DST.shader = shgroup->shader;
+ }
+
+ if ((pass_state & DRW_STATE_TRANS_FEEDBACK) != 0 &&
+ (shgroup->type == DRW_SHG_FEEDBACK_TRANSFORM)) {
+ use_tfeedback = GPU_shader_transform_feedback_enable(shgroup->shader,
+ shgroup->tfeedback_target->vbo_id);
+ }
+
+ release_ubo_slots(shader_changed);
+ release_texture_slots(shader_changed);
+
+ drw_state_set((pass_state & shgroup->state_extra_disable) | shgroup->state_extra);
+ drw_stencil_set(shgroup->stencil_mask);
+
+ /* Bind uniforms. */
+ for (DRWUniform *uni = shgroup->uniforms; uni; uni = uni->next) {
+ if (uni->location == -2) {
+ uni->location = GPU_shader_get_uniform_ensure(shgroup->shader,
+ DST.uniform_names.buffer + uni->name_ofs);
+ if (uni->location == -1) {
+ continue;
+ }
+ }
+ switch (uni->type) {
+ case DRW_UNIFORM_SHORT_TO_INT:
+ val = (int)*((short *)uni->pvalue);
+ GPU_shader_uniform_vector_int(
+ shgroup->shader, uni->location, uni->length, uni->arraysize, &val);
+ break;
+ case DRW_UNIFORM_SHORT_TO_FLOAT:
+ fval = (float)*((short *)uni->pvalue);
+ GPU_shader_uniform_vector(
+ shgroup->shader, uni->location, uni->length, uni->arraysize, (float *)&fval);
+ break;
+ case DRW_UNIFORM_BOOL_COPY:
+ case DRW_UNIFORM_INT_COPY:
+ GPU_shader_uniform_vector_int(
+ shgroup->shader, uni->location, uni->length, uni->arraysize, &uni->ivalue);
+ break;
+ case DRW_UNIFORM_BOOL:
+ case DRW_UNIFORM_INT:
+ GPU_shader_uniform_vector_int(
+ shgroup->shader, uni->location, uni->length, uni->arraysize, (int *)uni->pvalue);
+ break;
+ case DRW_UNIFORM_FLOAT_COPY:
+ GPU_shader_uniform_vector(
+ shgroup->shader, uni->location, uni->length, uni->arraysize, &uni->fvalue);
+ break;
+ case DRW_UNIFORM_FLOAT:
+ GPU_shader_uniform_vector(
+ shgroup->shader, uni->location, uni->length, uni->arraysize, (float *)uni->pvalue);
+ break;
+ case DRW_UNIFORM_TEXTURE:
+ tex = (GPUTexture *)uni->pvalue;
+ BLI_assert(tex);
+ bind_texture(tex, BIND_TEMP);
+ GPU_shader_uniform_texture(shgroup->shader, uni->location, tex);
+ break;
+ case DRW_UNIFORM_TEXTURE_PERSIST:
+ tex = (GPUTexture *)uni->pvalue;
+ BLI_assert(tex);
+ bind_texture(tex, BIND_PERSIST);
+ GPU_shader_uniform_texture(shgroup->shader, uni->location, tex);
+ break;
+ case DRW_UNIFORM_TEXTURE_REF:
+ tex = *((GPUTexture **)uni->pvalue);
+ BLI_assert(tex);
+ bind_texture(tex, BIND_TEMP);
+ GPU_shader_uniform_texture(shgroup->shader, uni->location, tex);
+ break;
+ case DRW_UNIFORM_BLOCK:
+ ubo = (GPUUniformBuffer *)uni->pvalue;
+ bind_ubo(ubo, BIND_TEMP);
+ GPU_shader_uniform_buffer(shgroup->shader, uni->location, ubo);
+ break;
+ case DRW_UNIFORM_BLOCK_PERSIST:
+ ubo = (GPUUniformBuffer *)uni->pvalue;
+ bind_ubo(ubo, BIND_PERSIST);
+ GPU_shader_uniform_buffer(shgroup->shader, uni->location, ubo);
+ break;
+ }
+ }
#ifdef USE_GPU_SELECT
# define GPU_SELECT_LOAD_IF_PICKSEL(_select_id) \
- if (G.f & G_FLAG_PICKSEL) { \
- GPU_select_load_id(_select_id); \
- } ((void)0)
+ if (G.f & G_FLAG_PICKSEL) { \
+ GPU_select_load_id(_select_id); \
+ } \
+ ((void)0)
# define GPU_SELECT_LOAD_IF_PICKSEL_CALL(_call) \
- if ((G.f & G_FLAG_PICKSEL) && (_call)) { \
- GPU_select_load_id((_call)->select_id); \
- } ((void)0)
-
-# define GPU_SELECT_LOAD_IF_PICKSEL_LIST(_shgroup, _start, _count) \
- _start = 0; \
- _count = _shgroup->instance_count; \
- int *select_id = NULL; \
- if (G.f & G_FLAG_PICKSEL) { \
- if (_shgroup->override_selectid == -1) { \
- /* Hack: get vbo data without actually drawing. */ \
- GPUVertBufRaw raw; \
- GPU_vertbuf_attr_get_raw_data(_shgroup->inst_selectid, 0, &raw); \
- select_id = GPU_vertbuf_raw_step(&raw); \
- switch (_shgroup->type) { \
- case DRW_SHG_TRIANGLE_BATCH: _count = 3; break; \
- case DRW_SHG_LINE_BATCH: _count = 2; break; \
- default: _count = 1; break; \
- } \
- } \
- else { \
- GPU_select_load_id(_shgroup->override_selectid); \
- } \
- } \
- while (_start < _shgroup->instance_count) { \
- if (select_id) { \
- GPU_select_load_id(select_id[_start]); \
- }
+ if ((G.f & G_FLAG_PICKSEL) && (_call)) { \
+ GPU_select_load_id((_call)->select_id); \
+ } \
+ ((void)0)
+
+# define GPU_SELECT_LOAD_IF_PICKSEL_LIST(_shgroup, _start, _count) \
+ _start = 0; \
+ _count = _shgroup->instance_count; \
+ int *select_id = NULL; \
+ if (G.f & G_FLAG_PICKSEL) { \
+ if (_shgroup->override_selectid == -1) { \
+ /* Hack: get vbo data without actually drawing. */ \
+ GPUVertBufRaw raw; \
+ GPU_vertbuf_attr_get_raw_data(_shgroup->inst_selectid, 0, &raw); \
+ select_id = GPU_vertbuf_raw_step(&raw); \
+ switch (_shgroup->type) { \
+ case DRW_SHG_TRIANGLE_BATCH: \
+ _count = 3; \
+ break; \
+ case DRW_SHG_LINE_BATCH: \
+ _count = 2; \
+ break; \
+ default: \
+ _count = 1; \
+ break; \
+ } \
+ } \
+ else { \
+ GPU_select_load_id(_shgroup->override_selectid); \
+ } \
+ } \
+ while (_start < _shgroup->instance_count) { \
+ if (select_id) { \
+ GPU_select_load_id(select_id[_start]); \
+ }
# define GPU_SELECT_LOAD_IF_PICKSEL_LIST_END(_start, _count) \
- _start += _count; \
- }
+ _start += _count; \
+ }
#else
# define GPU_SELECT_LOAD_IF_PICKSEL(select_id)
# define GPU_SELECT_LOAD_IF_PICKSEL_CALL(call)
# define GPU_SELECT_LOAD_IF_PICKSEL_LIST_END(start, count)
# define GPU_SELECT_LOAD_IF_PICKSEL_LIST(_shgroup, _start, _count) \
- _start = 0; \
- _count = _shgroup->instance_count;
+ _start = 0; \
+ _count = _shgroup->instance_count;
#endif
- BLI_assert(ubo_bindings_validate(shgroup));
-
- /* Rendering Calls */
- if (!ELEM(shgroup->type, DRW_SHG_NORMAL, DRW_SHG_FEEDBACK_TRANSFORM)) {
- /* Replacing multiple calls with only one */
- if (ELEM(shgroup->type, DRW_SHG_INSTANCE, DRW_SHG_INSTANCE_EXTERNAL)) {
- if (shgroup->type == DRW_SHG_INSTANCE_EXTERNAL) {
- if (shgroup->instance_geom != NULL) {
- GPU_SELECT_LOAD_IF_PICKSEL(shgroup->override_selectid);
- draw_geometry_prepare(shgroup, NULL);
- draw_geometry_execute_ex(shgroup, shgroup->instance_geom, 0, 0, true);
- }
- }
- else {
- if (shgroup->instance_count > 0) {
- uint count, start;
- draw_geometry_prepare(shgroup, NULL);
- GPU_SELECT_LOAD_IF_PICKSEL_LIST(shgroup, start, count)
- {
- draw_geometry_execute_ex(shgroup, shgroup->instance_geom, start, count, true);
- }
- GPU_SELECT_LOAD_IF_PICKSEL_LIST_END(start, count)
- }
- }
- }
- else { /* DRW_SHG_***_BATCH */
- /* Some dynamic batches can have no geometry (no call to aggregate). */
- if (shgroup->instance_count > 0) {
- uint count, start;
- draw_geometry_prepare(shgroup, NULL);
- GPU_SELECT_LOAD_IF_PICKSEL_LIST(shgroup, start, count)
- {
- draw_geometry_execute_ex(shgroup, shgroup->batch_geom, start, count, false);
- }
- GPU_SELECT_LOAD_IF_PICKSEL_LIST_END(start, count)
- }
- }
- }
- else {
- bool prev_neg_scale = false;
- int callid = 0;
- for (DRWCall *call = shgroup->calls.first; call; call = call->next) {
-
- /* OPTI/IDEA(clem): Do this preparation in another thread. */
- draw_visibility_eval(call->state);
- draw_matrices_model_prepare(call->state);
-
- if ((call->state->flag & DRW_CALL_CULLED) != 0 &&
- (call->state->flag & DRW_CALL_BYPASS_CULLING) == 0)
- {
- continue;
- }
-
- /* XXX small exception/optimisation for outline rendering. */
- if (shgroup->callid != -1) {
- GPU_shader_uniform_vector_int(shgroup->shader, shgroup->callid, 1, 1, &callid);
- callid += 1;
- }
-
- /* Negative scale objects */
- bool neg_scale = call->state->flag & DRW_CALL_NEGSCALE;
- if (neg_scale != prev_neg_scale) {
- glFrontFace((neg_scale) ? DST.backface : DST.frontface);
- prev_neg_scale = neg_scale;
- }
-
- GPU_SELECT_LOAD_IF_PICKSEL_CALL(call);
- draw_geometry_prepare(shgroup, call);
-
- switch (call->type) {
- case DRW_CALL_SINGLE:
- draw_geometry_execute(shgroup, call->single.geometry);
- break;
- case DRW_CALL_RANGE:
- draw_geometry_execute_ex(shgroup, call->range.geometry, call->range.start, call->range.count, false);
- break;
- case DRW_CALL_INSTANCES:
- draw_geometry_execute_ex(shgroup, call->instances.geometry, 0, *call->instances.count, true);
- break;
- case DRW_CALL_GENERATE:
- call->generate.geometry_fn(shgroup, draw_geometry_execute, call->generate.user_data);
- break;
- case DRW_CALL_PROCEDURAL:
- GPU_draw_primitive(call->procedural.prim_type, call->procedural.vert_count);
- break;
- default:
- BLI_assert(0);
- }
- }
- /* Reset state */
- glFrontFace(DST.frontface);
- }
-
- if (use_tfeedback) {
- GPU_shader_transform_feedback_disable(shgroup->shader);
- }
+ BLI_assert(ubo_bindings_validate(shgroup));
+
+ /* Rendering Calls */
+ if (!ELEM(shgroup->type, DRW_SHG_NORMAL, DRW_SHG_FEEDBACK_TRANSFORM)) {
+ /* Replacing multiple calls with only one */
+ if (ELEM(shgroup->type, DRW_SHG_INSTANCE, DRW_SHG_INSTANCE_EXTERNAL)) {
+ if (shgroup->type == DRW_SHG_INSTANCE_EXTERNAL) {
+ if (shgroup->instance_geom != NULL) {
+ GPU_SELECT_LOAD_IF_PICKSEL(shgroup->override_selectid);
+ draw_geometry_prepare(shgroup, NULL);
+ draw_geometry_execute_ex(shgroup, shgroup->instance_geom, 0, 0, true);
+ }
+ }
+ else {
+ if (shgroup->instance_count > 0) {
+ uint count, start;
+ draw_geometry_prepare(shgroup, NULL);
+ GPU_SELECT_LOAD_IF_PICKSEL_LIST(shgroup, start, count)
+ {
+ draw_geometry_execute_ex(shgroup, shgroup->instance_geom, start, count, true);
+ }
+ GPU_SELECT_LOAD_IF_PICKSEL_LIST_END(start, count)
+ }
+ }
+ }
+ else { /* DRW_SHG_***_BATCH */
+ /* Some dynamic batches can have no geometry (no call to aggregate). */
+ if (shgroup->instance_count > 0) {
+ uint count, start;
+ draw_geometry_prepare(shgroup, NULL);
+ GPU_SELECT_LOAD_IF_PICKSEL_LIST(shgroup, start, count)
+ {
+ draw_geometry_execute_ex(shgroup, shgroup->batch_geom, start, count, false);
+ }
+ GPU_SELECT_LOAD_IF_PICKSEL_LIST_END(start, count)
+ }
+ }
+ }
+ else {
+ bool prev_neg_scale = false;
+ int callid = 0;
+ for (DRWCall *call = shgroup->calls.first; call; call = call->next) {
+
+ /* OPTI/IDEA(clem): Do this preparation in another thread. */
+ draw_visibility_eval(call->state);
+ draw_matrices_model_prepare(call->state);
+
+ if ((call->state->flag & DRW_CALL_CULLED) != 0 &&
+ (call->state->flag & DRW_CALL_BYPASS_CULLING) == 0) {
+ continue;
+ }
+
+ /* XXX small exception/optimisation for outline rendering. */
+ if (shgroup->callid != -1) {
+ GPU_shader_uniform_vector_int(shgroup->shader, shgroup->callid, 1, 1, &callid);
+ callid += 1;
+ }
+
+ /* Negative scale objects */
+ bool neg_scale = call->state->flag & DRW_CALL_NEGSCALE;
+ if (neg_scale != prev_neg_scale) {
+ glFrontFace((neg_scale) ? DST.backface : DST.frontface);
+ prev_neg_scale = neg_scale;
+ }
+
+ GPU_SELECT_LOAD_IF_PICKSEL_CALL(call);
+ draw_geometry_prepare(shgroup, call);
+
+ switch (call->type) {
+ case DRW_CALL_SINGLE:
+ draw_geometry_execute(shgroup, call->single.geometry);
+ break;
+ case DRW_CALL_RANGE:
+ draw_geometry_execute_ex(
+ shgroup, call->range.geometry, call->range.start, call->range.count, false);
+ break;
+ case DRW_CALL_INSTANCES:
+ draw_geometry_execute_ex(
+ shgroup, call->instances.geometry, 0, *call->instances.count, true);
+ break;
+ case DRW_CALL_GENERATE:
+ call->generate.geometry_fn(shgroup, draw_geometry_execute, call->generate.user_data);
+ break;
+ case DRW_CALL_PROCEDURAL:
+ GPU_draw_primitive(call->procedural.prim_type, call->procedural.vert_count);
+ break;
+ default:
+ BLI_assert(0);
+ }
+ }
+ /* Reset state */
+ glFrontFace(DST.frontface);
+ }
+
+ if (use_tfeedback) {
+ GPU_shader_transform_feedback_disable(shgroup->shader);
+ }
}
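
/* A note on the uniform switch in draw_shgroup() above, with a sketch using
 * hypothetical names (not the real DRWUniform layout): the *_COPY variants
 * snapshot the value into the uniform node when the shading group is built,
 * while the pointer variants are dereferenced at draw time, so the caller's
 * storage must still be alive when the pass is drawn. */
typedef enum eUnifType { UNIF_INT, UNIF_INT_COPY } eUnifType;

typedef struct Unif {
  eUnifType type;
  union {
    const int *pvalue; /* caller-owned storage, read at every draw */
    int ivalue;        /* snapshot taken when the uniform was added */
  };
} Unif;

static int unif_resolve(const Unif *u)
{
  /* The copy variant stays valid even if the original variable is gone. */
  return (u->type == UNIF_INT_COPY) ? u->ivalue : *u->pvalue;
}
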
static void drw_update_view(void)
{
- if (DST.dirty_mat) {
- DST.state_cache_id++;
- DST.dirty_mat = false;
-
- DRW_uniformbuffer_update(G_draw.view_ubo, &DST.view_data);
-
- /* Catch integer wrap around. */
- if (UNLIKELY(DST.state_cache_id == 0)) {
- DST.state_cache_id = 1;
- /* We must reset all CallStates to ensure that none
- * of them remains with a cache_id equal to 1. */
- BLI_mempool_iter iter;
- DRWCallState *state;
- BLI_mempool_iternew(DST.vmempool->states, &iter);
- while ((state = BLI_mempool_iterstep(&iter))) {
- state->cache_id = 0;
- }
- }
-
- /* TODO dispatch threads to compute matrices/culling */
- }
-
- draw_clipping_setup_from_view();
+ if (DST.dirty_mat) {
+ DST.state_cache_id++;
+ DST.dirty_mat = false;
+
+ DRW_uniformbuffer_update(G_draw.view_ubo, &DST.view_data);
+
+ /* Catch integer wrap around. */
+ if (UNLIKELY(DST.state_cache_id == 0)) {
+ DST.state_cache_id = 1;
+ /* We must reset all CallStates to ensure that none
+ * of them remains with a cache_id equal to 1. */
+ BLI_mempool_iter iter;
+ DRWCallState *state;
+ BLI_mempool_iternew(DST.vmempool->states, &iter);
+ while ((state = BLI_mempool_iterstep(&iter))) {
+ state->cache_id = 0;
+ }
+ }
+
+ /* TODO dispatch threads to compute matrices/culling */
+ }
+
+ draw_clipping_setup_from_view();
}
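
/* A sketch of the wrap-around guard in drw_update_view() above (standalone,
 * hypothetical names): a growing id tags which view each call state was
 * last prepared for, with 0 reserved for "never prepared". When the counter
 * wraps, every stale tag must be cleared, or an ancient state could
 * falsely match id 1. */
static unsigned int g_view_id = 0;

static unsigned int view_id_advance(unsigned int *tags, int tag_count)
{
  if (++g_view_id == 0) { /* integer wrap: rare but must be handled */
    g_view_id = 1;
    for (int i = 0; i < tag_count; i++) {
      tags[i] = 0; /* invalidate every cached tag */
    }
  }
  return g_view_id;
}
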
-static void drw_draw_pass_ex(DRWPass *pass, DRWShadingGroup *start_group, DRWShadingGroup *end_group)
+static void drw_draw_pass_ex(DRWPass *pass,
+ DRWShadingGroup *start_group,
+ DRWShadingGroup *end_group)
{
- if (start_group == NULL) {
- return;
- }
-
- DST.shader = NULL;
-
- BLI_assert(DST.buffer_finish_called && "DRW_render_instance_buffer_finish had not been called before drawing");
-
- drw_update_view();
-
- /* GPU_framebuffer_clear calls can change the state outside the DRW module.
- * Force reset the affected states to avoid problems later. */
- drw_state_set(DST.state | DRW_STATE_WRITE_DEPTH | DRW_STATE_WRITE_COLOR);
-
- drw_state_set(pass->state);
-
- DRW_stats_query_start(pass->name);
-
- for (DRWShadingGroup *shgroup = start_group; shgroup; shgroup = shgroup->next) {
- draw_shgroup(shgroup, pass->state);
- /* break if upper limit */
- if (shgroup == end_group) {
- break;
- }
- }
-
- /* Clear Bound textures */
- for (int i = 0; i < DST_MAX_SLOTS; i++) {
- if (DST.RST.bound_texs[i] != NULL) {
- GPU_texture_unbind(DST.RST.bound_texs[i]);
- DST.RST.bound_texs[i] = NULL;
- }
- }
-
- /* Clear Bound Ubos */
- for (int i = 0; i < DST_MAX_SLOTS; i++) {
- if (DST.RST.bound_ubos[i] != NULL) {
- GPU_uniformbuffer_unbind(DST.RST.bound_ubos[i]);
- DST.RST.bound_ubos[i] = NULL;
- }
- }
-
- if (DST.shader) {
- GPU_shader_unbind();
- DST.shader = NULL;
- }
-
- /* HACK: Rasterized discard can affect clear commands which are not
- * part of a DRWPass (as of now). So disable rasterized discard here
- * if it has been enabled. */
- if ((DST.state & DRW_STATE_RASTERIZER_ENABLED) == 0) {
- drw_state_set((DST.state & ~DRW_STATE_RASTERIZER_ENABLED) | DRW_STATE_DEFAULT);
- }
-
- DRW_stats_query_end();
+ if (start_group == NULL) {
+ return;
+ }
+
+ DST.shader = NULL;
+
+ BLI_assert(DST.buffer_finish_called &&
+ "DRW_render_instance_buffer_finish had not been called before drawing");
+
+ drw_update_view();
+
+ /* GPU_framebuffer_clear calls can change the state outside the DRW module.
+ * Force reset the affected states to avoid problems later. */
+ drw_state_set(DST.state | DRW_STATE_WRITE_DEPTH | DRW_STATE_WRITE_COLOR);
+
+ drw_state_set(pass->state);
+
+ DRW_stats_query_start(pass->name);
+
+ for (DRWShadingGroup *shgroup = start_group; shgroup; shgroup = shgroup->next) {
+ draw_shgroup(shgroup, pass->state);
+ /* break if upper limit */
+ if (shgroup == end_group) {
+ break;
+ }
+ }
+
+ /* Clear Bound textures */
+ for (int i = 0; i < DST_MAX_SLOTS; i++) {
+ if (DST.RST.bound_texs[i] != NULL) {
+ GPU_texture_unbind(DST.RST.bound_texs[i]);
+ DST.RST.bound_texs[i] = NULL;
+ }
+ }
+
+ /* Clear Bound Ubos */
+ for (int i = 0; i < DST_MAX_SLOTS; i++) {
+ if (DST.RST.bound_ubos[i] != NULL) {
+ GPU_uniformbuffer_unbind(DST.RST.bound_ubos[i]);
+ DST.RST.bound_ubos[i] = NULL;
+ }
+ }
+
+ if (DST.shader) {
+ GPU_shader_unbind();
+ DST.shader = NULL;
+ }
+
+ /* HACK: Rasterized discard can affect clear commands which are not
+ * part of a DRWPass (as of now). So disable rasterized discard here
+ * if it has been enabled. */
+ if ((DST.state & DRW_STATE_RASTERIZER_ENABLED) == 0) {
+ drw_state_set((DST.state & ~DRW_STATE_RASTERIZER_ENABLED) | DRW_STATE_DEFAULT);
+ }
+
+ DRW_stats_query_end();
}
void DRW_draw_pass(DRWPass *pass)
{
- drw_draw_pass_ex(pass, pass->shgroups.first, pass->shgroups.last);
+ drw_draw_pass_ex(pass, pass->shgroups.first, pass->shgroups.last);
}
/* Draw only a subset of shgroups. Used in special situations such as grease pencil strokes. */
void DRW_draw_pass_subset(DRWPass *pass, DRWShadingGroup *start_group, DRWShadingGroup *end_group)
{
- drw_draw_pass_ex(pass, start_group, end_group);
+ drw_draw_pass_ex(pass, start_group, end_group);
}
/** \} */
diff --git a/source/blender/draw/intern/draw_manager_profiling.c b/source/blender/draw/intern/draw_manager_profiling.c
index 51c0f4c4640..5e21e5e576c 100644
--- a/source/blender/draw/intern/draw_manager_profiling.c
+++ b/source/blender/draw/intern/draw_manager_profiling.c
@@ -35,7 +35,6 @@
#include "UI_resources.h"
-
#include "draw_manager_profiling.h"
#define MAX_TIMER_NAME 32
@@ -44,317 +43,321 @@
#define GPU_TIMER_FALLOFF 0.1
typedef struct DRWTimer {
- GLuint query[2];
- GLuint64 time_average;
- char name[MAX_TIMER_NAME];
- int lvl; /* Hierarchy level for nested timers. */
- bool is_query; /* Does this timer actually perform queries, or is it just a group? */
+ GLuint query[2];
+ GLuint64 time_average;
+ char name[MAX_TIMER_NAME];
+ int lvl; /* Hierarchy level for nested timers. */
+ bool is_query; /* Does this timer actually perform queries, or is it just a group? */
} DRWTimer;
static struct DRWTimerPool {
- DRWTimer *timers;
- int chunk_count; /* Number of chunks allocated. */
- int timer_count; /* chunk_count * CHUNK_SIZE */
- int timer_increment; /* Keep track of where we are in the stack. */
- int end_increment; /* Keep track of bad usage. */
- bool is_recording; /* Are we in the render loop? */
- bool is_querying; /* Keep track of bad usage. */
+ DRWTimer *timers;
+ int chunk_count; /* Number of chunks allocated. */
+ int timer_count; /* chunk_count * CHUNK_SIZE */
+ int timer_increment; /* Keep track of where we are in the stack. */
+ int end_increment; /* Keep track of bad usage. */
+ bool is_recording; /* Are we in the render loop? */
+ bool is_querying; /* Keep track of bad usage. */
} DTP = {NULL};
void DRW_stats_free(void)
{
- if (DTP.timers != NULL) {
- for (int i = 0; i < DTP.timer_count; ++i) {
- DRWTimer *timer = &DTP.timers[i];
- glDeleteQueries(2, timer->query);
- }
- MEM_freeN(DTP.timers);
- DTP.timers = NULL;
- }
+ if (DTP.timers != NULL) {
+ for (int i = 0; i < DTP.timer_count; ++i) {
+ DRWTimer *timer = &DTP.timers[i];
+ glDeleteQueries(2, timer->query);
+ }
+ MEM_freeN(DTP.timers);
+ DTP.timers = NULL;
+ }
}
void DRW_stats_begin(void)
{
- if (G.debug_value > 20 && G.debug_value < 30) {
- DTP.is_recording = true;
- }
-
- if (DTP.is_recording && DTP.timers == NULL) {
- DTP.chunk_count = 1;
- DTP.timer_count = DTP.chunk_count * CHUNK_SIZE;
- DTP.timers = MEM_callocN(sizeof(DRWTimer) * DTP.timer_count, "DRWTimer stack");
- }
- else if (!DTP.is_recording && DTP.timers != NULL) {
- DRW_stats_free();
- }
-
- DTP.is_querying = false;
- DTP.timer_increment = 0;
- DTP.end_increment = 0;
+ if (G.debug_value > 20 && G.debug_value < 30) {
+ DTP.is_recording = true;
+ }
+
+ if (DTP.is_recording && DTP.timers == NULL) {
+ DTP.chunk_count = 1;
+ DTP.timer_count = DTP.chunk_count * CHUNK_SIZE;
+ DTP.timers = MEM_callocN(sizeof(DRWTimer) * DTP.timer_count, "DRWTimer stack");
+ }
+ else if (!DTP.is_recording && DTP.timers != NULL) {
+ DRW_stats_free();
+ }
+
+ DTP.is_querying = false;
+ DTP.timer_increment = 0;
+ DTP.end_increment = 0;
}
static DRWTimer *drw_stats_timer_get(void)
{
- if (UNLIKELY(DTP.timer_increment >= DTP.timer_count)) {
- /* Resize the stack. */
- DTP.chunk_count++;
- DTP.timer_count = DTP.chunk_count * CHUNK_SIZE;
- DTP.timers = MEM_recallocN(DTP.timers, sizeof(DRWTimer) * DTP.timer_count);
- }
-
- return &DTP.timers[DTP.timer_increment++];
+ if (UNLIKELY(DTP.timer_increment >= DTP.timer_count)) {
+ /* Resize the stack. */
+ DTP.chunk_count++;
+ DTP.timer_count = DTP.chunk_count * CHUNK_SIZE;
+ DTP.timers = MEM_recallocN(DTP.timers, sizeof(DRWTimer) * DTP.timer_count);
+ }
+
+ return &DTP.timers[DTP.timer_increment++];
}
static void drw_stats_timer_start_ex(const char *name, const bool is_query)
{
- if (DTP.is_recording) {
- DRWTimer *timer = drw_stats_timer_get();
- BLI_strncpy(timer->name, name, MAX_TIMER_NAME);
- timer->lvl = DTP.timer_increment - DTP.end_increment - 1;
- timer->is_query = is_query;
-
- /* Queries cannot be nested or interleaved. */
- BLI_assert(!DTP.is_querying);
- if (timer->is_query) {
- if (timer->query[0] == 0) {
- glGenQueries(1, timer->query);
- }
-
- glFinish();
- /* Issue query for the next frame */
- glBeginQuery(GL_TIME_ELAPSED, timer->query[0]);
- DTP.is_querying = true;
- }
- }
+ if (DTP.is_recording) {
+ DRWTimer *timer = drw_stats_timer_get();
+ BLI_strncpy(timer->name, name, MAX_TIMER_NAME);
+ timer->lvl = DTP.timer_increment - DTP.end_increment - 1;
+ timer->is_query = is_query;
+
+ /* Queries cannot be nested or interleaved. */
+ BLI_assert(!DTP.is_querying);
+ if (timer->is_query) {
+ if (timer->query[0] == 0) {
+ glGenQueries(1, timer->query);
+ }
+
+ glFinish();
+ /* Issue query for the next frame */
+ glBeginQuery(GL_TIME_ELAPSED, timer->query[0]);
+ DTP.is_querying = true;
+ }
+ }
}
/* Use this to group the queries. It does NOT keep track
 * of the time itself; it only sums the queries inside it. */
void DRW_stats_group_start(const char *name)
{
- drw_stats_timer_start_ex(name, false);
+ drw_stats_timer_start_ex(name, false);
}
void DRW_stats_group_end(void)
{
- if (DTP.is_recording) {
- BLI_assert(!DTP.is_querying);
- DTP.end_increment++;
- }
+ if (DTP.is_recording) {
+ BLI_assert(!DTP.is_querying);
+ DTP.end_increment++;
+ }
}
/* NOTE: Only call this when no sub timer will be called. */
void DRW_stats_query_start(const char *name)
{
- drw_stats_timer_start_ex(name, true);
+ drw_stats_timer_start_ex(name, true);
}
void DRW_stats_query_end(void)
{
- if (DTP.is_recording) {
- DTP.end_increment++;
- BLI_assert(DTP.is_querying);
- glEndQuery(GL_TIME_ELAPSED);
- DTP.is_querying = false;
- }
+ if (DTP.is_recording) {
+ DTP.end_increment++;
+ BLI_assert(DTP.is_querying);
+ glEndQuery(GL_TIME_ELAPSED);
+ DTP.is_querying = false;
+ }
}
void DRW_stats_reset(void)
{
- BLI_assert((DTP.timer_increment - DTP.end_increment) <= 0 && "You forgot a DRW_stats_group/query_end somewhere!");
- BLI_assert((DTP.timer_increment - DTP.end_increment) >= 0 && "You forgot a DRW_stats_group/query_start somewhere!");
-
- if (DTP.is_recording) {
- GLuint64 lvl_time[MAX_NESTED_TIMER] = {0};
-
- /* Swap queries for the next frame and sum up each lvl time. */
- for (int i = DTP.timer_increment - 1; i >= 0; --i) {
- DRWTimer *timer = &DTP.timers[i];
- SWAP(GLuint, timer->query[0], timer->query[1]);
-
- BLI_assert(timer->lvl < MAX_NESTED_TIMER);
-
- if (timer->is_query) {
- GLuint64 time;
- if (timer->query[0] != 0) {
- glGetQueryObjectui64v(timer->query[0], GL_QUERY_RESULT, &time);
- }
- else {
- time = 1000000000; /* 1s default (result is in nanoseconds) */
- }
-
- timer->time_average = timer->time_average * (1.0 - GPU_TIMER_FALLOFF) + time * GPU_TIMER_FALLOFF;
- timer->time_average = MIN2(timer->time_average, 1000000000);
- }
- else {
- timer->time_average = lvl_time[timer->lvl + 1];
- lvl_time[timer->lvl + 1] = 0;
- }
-
- lvl_time[timer->lvl] += timer->time_average;
- }
-
- DTP.is_recording = false;
- }
+ BLI_assert((DTP.timer_increment - DTP.end_increment) <= 0 &&
+ "You forgot a DRW_stats_group/query_end somewhere!");
+ BLI_assert((DTP.timer_increment - DTP.end_increment) >= 0 &&
+ "You forgot a DRW_stats_group/query_start somewhere!");
+
+ if (DTP.is_recording) {
+ GLuint64 lvl_time[MAX_NESTED_TIMER] = {0};
+
+ /* Swap queries for the next frame and sum up each lvl time. */
+ for (int i = DTP.timer_increment - 1; i >= 0; --i) {
+ DRWTimer *timer = &DTP.timers[i];
+ SWAP(GLuint, timer->query[0], timer->query[1]);
+
+ BLI_assert(timer->lvl < MAX_NESTED_TIMER);
+
+ if (timer->is_query) {
+ GLuint64 time;
+ if (timer->query[0] != 0) {
+ glGetQueryObjectui64v(timer->query[0], GL_QUERY_RESULT, &time);
+ }
+ else {
+ time = 1000000000; /* 1s default (result is in nanoseconds) */
+ }
+
+ timer->time_average = timer->time_average * (1.0 - GPU_TIMER_FALLOFF) +
+ time * GPU_TIMER_FALLOFF;
+ timer->time_average = MIN2(timer->time_average, 1000000000);
+ }
+ else {
+ timer->time_average = lvl_time[timer->lvl + 1];
+ lvl_time[timer->lvl + 1] = 0;
+ }
+
+ lvl_time[timer->lvl] += timer->time_average;
+ }
+
+ DTP.is_recording = false;
+ }
}
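
/* Two details in DRW_stats_reset() above are easy to miss. Each timer owns
 * two query objects that are swapped every frame, so the result read back
 * was issued a frame earlier and the readback does not stall on an
 * in-flight query. The smoothing is a plain exponential moving average with
 * GPU_TIMER_FALLOFF: avg' = avg * (1 - 0.1) + sample * 0.1. A standalone
 * numeric check of that formula (values made up): */
#include <stdio.h>

int main(void)
{
  double avg = 4.0e6;    /* 4 ms running average, in nanoseconds */
  double sample = 8.0e6; /* one 8 ms spike */
  avg = avg * (1.0 - 0.1) + sample * 0.1;
  printf("%.2f ms\n", avg / 1.0e6); /* prints 4.40: spikes bleed in slowly */
  return 0;
}
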
static void draw_stat_5row(rcti *rect, int u, int v, const char *txt, const int size)
{
- BLF_draw_default_ascii(rect->xmin + (1 + u * 5) * U.widget_unit,
- rect->ymax - (3 + v) * U.widget_unit, 0.0f,
- txt, size);
+ BLF_draw_default_ascii(rect->xmin + (1 + u * 5) * U.widget_unit,
+ rect->ymax - (3 + v) * U.widget_unit,
+ 0.0f,
+ txt,
+ size);
}
static void draw_stat(rcti *rect, int u, int v, const char *txt, const int size)
{
- BLF_draw_default_ascii(rect->xmin + (1 + u) * U.widget_unit,
- rect->ymax - (3 + v) * U.widget_unit, 0.0f,
- txt, size);
+ BLF_draw_default_ascii(
+ rect->xmin + (1 + u) * U.widget_unit, rect->ymax - (3 + v) * U.widget_unit, 0.0f, txt, size);
}
void DRW_stats_draw(rcti *rect)
{
- char stat_string[64];
- int lvl_index[MAX_NESTED_TIMER];
- int v = 0, u = 0;
-
- double init_tot_time = 0.0, background_tot_time = 0.0, render_tot_time = 0.0, tot_time = 0.0;
-
- int fontid = BLF_default();
- UI_FontThemeColor(fontid, TH_TEXT_HI);
- BLF_enable(fontid, BLF_SHADOW);
- BLF_shadow(fontid, 5, (const float[4]){0.0f, 0.0f, 0.0f, 0.75f});
- BLF_shadow_offset(fontid, 0, -1);
-
- BLF_batch_draw_begin();
-
- /* ------------------------------------------ */
- /* ---------------- CPU stats --------------- */
- /* ------------------------------------------ */
- /* Label row */
- char col_label[32];
- sprintf(col_label, "Engine");
- draw_stat_5row(rect, u++, v, col_label, sizeof(col_label));
- sprintf(col_label, "Init");
- draw_stat_5row(rect, u++, v, col_label, sizeof(col_label));
- sprintf(col_label, "Background");
- draw_stat_5row(rect, u++, v, col_label, sizeof(col_label));
- sprintf(col_label, "Render");
- draw_stat_5row(rect, u++, v, col_label, sizeof(col_label));
- sprintf(col_label, "Total (w/o cache)");
- draw_stat_5row(rect, u++, v, col_label, sizeof(col_label));
- v++;
-
- /* Engines rows */
- char time_to_txt[16];
- for (LinkData *link = DST.enabled_engines.first; link; link = link->next) {
- u = 0;
- DrawEngineType *engine = link->data;
- ViewportEngineData *data = drw_viewport_engine_data_ensure(engine);
-
- draw_stat_5row(rect, u++, v, engine->idname, sizeof(engine->idname));
-
- init_tot_time += data->init_time;
- sprintf(time_to_txt, "%.2fms", data->init_time);
- draw_stat_5row(rect, u++, v, time_to_txt, sizeof(time_to_txt));
-
- background_tot_time += data->background_time;
- sprintf(time_to_txt, "%.2fms", data->background_time);
- draw_stat_5row(rect, u++, v, time_to_txt, sizeof(time_to_txt));
-
- render_tot_time += data->render_time;
- sprintf(time_to_txt, "%.2fms", data->render_time);
- draw_stat_5row(rect, u++, v, time_to_txt, sizeof(time_to_txt));
-
- tot_time += data->init_time + data->background_time + data->render_time;
- sprintf(time_to_txt, "%.2fms", data->init_time + data->background_time + data->render_time);
- draw_stat_5row(rect, u++, v, time_to_txt, sizeof(time_to_txt));
- v++;
- }
-
- /* Totals row */
- u = 0;
- sprintf(col_label, "Sub Total");
- draw_stat_5row(rect, u++, v, col_label, sizeof(col_label));
- sprintf(time_to_txt, "%.2fms", init_tot_time);
- draw_stat_5row(rect, u++, v, time_to_txt, sizeof(time_to_txt));
- sprintf(time_to_txt, "%.2fms", background_tot_time);
- draw_stat_5row(rect, u++, v, time_to_txt, sizeof(time_to_txt));
- sprintf(time_to_txt, "%.2fms", render_tot_time);
- draw_stat_5row(rect, u++, v, time_to_txt, sizeof(time_to_txt));
- sprintf(time_to_txt, "%.2fms", tot_time);
- draw_stat_5row(rect, u++, v, time_to_txt, sizeof(time_to_txt));
- v += 2;
-
- u = 0;
- double *cache_time = GPU_viewport_cache_time_get(DST.viewport);
- sprintf(col_label, "Cache Time");
- draw_stat_5row(rect, u++, v, col_label, sizeof(col_label));
- sprintf(time_to_txt, "%.2fms", *cache_time);
- draw_stat_5row(rect, u++, v, time_to_txt, sizeof(time_to_txt));
- v += 2;
-
- /* ------------------------------------------ */
- /* ---------------- GPU stats --------------- */
- /* ------------------------------------------ */
-
- /* Memory Stats */
- uint tex_mem = GPU_texture_memory_usage_get();
- uint vbo_mem = GPU_vertbuf_get_memory_usage();
-
- sprintf(stat_string, "GPU Memory");
- draw_stat(rect, 0, v, stat_string, sizeof(stat_string));
- sprintf(stat_string, "%.2fMB", (double)(tex_mem + vbo_mem) / 1000000.0);
- draw_stat_5row(rect, 1, v++, stat_string, sizeof(stat_string));
- sprintf(stat_string, "Textures");
- draw_stat(rect, 1, v, stat_string, sizeof(stat_string));
- sprintf(stat_string, "%.2fMB", (double)tex_mem / 1000000.0);
- draw_stat_5row(rect, 1, v++, stat_string, sizeof(stat_string));
- sprintf(stat_string, "Meshes");
- draw_stat(rect, 1, v, stat_string, sizeof(stat_string));
- sprintf(stat_string, "%.2fMB", (double)vbo_mem / 1000000.0);
- draw_stat_5row(rect, 1, v++, stat_string, sizeof(stat_string));
- v += 1;
-
- /* GPU Timings */
- BLI_strncpy(stat_string, "GPU Render Timings", sizeof(stat_string));
- draw_stat(rect, 0, v++, stat_string, sizeof(stat_string));
-
- for (int i = 0; i < DTP.timer_increment; ++i) {
- double time_ms, time_percent;
- DRWTimer *timer = &DTP.timers[i];
- DRWTimer *timer_parent = (timer->lvl > 0) ? &DTP.timers[lvl_index[timer->lvl - 1]] : NULL;
-
- /* Only display a limited number of levels at a time. */
- if ((G.debug_value - 21) < timer->lvl) {
- continue;
- }
-
- BLI_assert(timer->lvl < MAX_NESTED_TIMER);
- lvl_index[timer->lvl] = i;
-
- time_ms = timer->time_average / 1000000.0;
-
- if (timer_parent != NULL) {
- time_percent = ((double)timer->time_average / (double)timer_parent->time_average) * 100.0;
- }
- else {
- time_percent = 100.0;
- }
-
- /* avoid very long numbers */
- time_ms = MIN2(time_ms, 999.0);
- time_percent = MIN2(time_percent, 100.0);
-
- BLI_snprintf(stat_string, sizeof(stat_string), "%s", timer->name);
- draw_stat(rect, 0 + timer->lvl, v, stat_string, sizeof(stat_string));
- BLI_snprintf(stat_string, sizeof(stat_string), "%.2fms", time_ms);
- draw_stat(rect, 12 + timer->lvl, v, stat_string, sizeof(stat_string));
- BLI_snprintf(stat_string, sizeof(stat_string), "%.0f", time_percent);
- draw_stat(rect, 16 + timer->lvl, v, stat_string, sizeof(stat_string));
- v++;
- }
-
- BLF_batch_draw_end();
- BLF_disable(fontid, BLF_SHADOW);
+ char stat_string[64];
+ int lvl_index[MAX_NESTED_TIMER];
+ int v = 0, u = 0;
+
+ double init_tot_time = 0.0, background_tot_time = 0.0, render_tot_time = 0.0, tot_time = 0.0;
+
+ int fontid = BLF_default();
+ UI_FontThemeColor(fontid, TH_TEXT_HI);
+ BLF_enable(fontid, BLF_SHADOW);
+ BLF_shadow(fontid, 5, (const float[4]){0.0f, 0.0f, 0.0f, 0.75f});
+ BLF_shadow_offset(fontid, 0, -1);
+
+ BLF_batch_draw_begin();
+
+ /* ------------------------------------------ */
+ /* ---------------- CPU stats --------------- */
+ /* ------------------------------------------ */
+ /* Label row */
+ char col_label[32];
+ sprintf(col_label, "Engine");
+ draw_stat_5row(rect, u++, v, col_label, sizeof(col_label));
+ sprintf(col_label, "Init");
+ draw_stat_5row(rect, u++, v, col_label, sizeof(col_label));
+ sprintf(col_label, "Background");
+ draw_stat_5row(rect, u++, v, col_label, sizeof(col_label));
+ sprintf(col_label, "Render");
+ draw_stat_5row(rect, u++, v, col_label, sizeof(col_label));
+ sprintf(col_label, "Total (w/o cache)");
+ draw_stat_5row(rect, u++, v, col_label, sizeof(col_label));
+ v++;
+
+ /* Engines rows */
+ char time_to_txt[16];
+ for (LinkData *link = DST.enabled_engines.first; link; link = link->next) {
+ u = 0;
+ DrawEngineType *engine = link->data;
+ ViewportEngineData *data = drw_viewport_engine_data_ensure(engine);
+
+ draw_stat_5row(rect, u++, v, engine->idname, sizeof(engine->idname));
+
+ init_tot_time += data->init_time;
+ sprintf(time_to_txt, "%.2fms", data->init_time);
+ draw_stat_5row(rect, u++, v, time_to_txt, sizeof(time_to_txt));
+
+ background_tot_time += data->background_time;
+ sprintf(time_to_txt, "%.2fms", data->background_time);
+ draw_stat_5row(rect, u++, v, time_to_txt, sizeof(time_to_txt));
+
+ render_tot_time += data->render_time;
+ sprintf(time_to_txt, "%.2fms", data->render_time);
+ draw_stat_5row(rect, u++, v, time_to_txt, sizeof(time_to_txt));
+
+ tot_time += data->init_time + data->background_time + data->render_time;
+ sprintf(time_to_txt, "%.2fms", data->init_time + data->background_time + data->render_time);
+ draw_stat_5row(rect, u++, v, time_to_txt, sizeof(time_to_txt));
+ v++;
+ }
+
+ /* Totals row */
+ u = 0;
+ sprintf(col_label, "Sub Total");
+ draw_stat_5row(rect, u++, v, col_label, sizeof(col_label));
+ sprintf(time_to_txt, "%.2fms", init_tot_time);
+ draw_stat_5row(rect, u++, v, time_to_txt, sizeof(time_to_txt));
+ sprintf(time_to_txt, "%.2fms", background_tot_time);
+ draw_stat_5row(rect, u++, v, time_to_txt, sizeof(time_to_txt));
+ sprintf(time_to_txt, "%.2fms", render_tot_time);
+ draw_stat_5row(rect, u++, v, time_to_txt, sizeof(time_to_txt));
+ sprintf(time_to_txt, "%.2fms", tot_time);
+ draw_stat_5row(rect, u++, v, time_to_txt, sizeof(time_to_txt));
+ v += 2;
+
+ u = 0;
+ double *cache_time = GPU_viewport_cache_time_get(DST.viewport);
+ sprintf(col_label, "Cache Time");
+ draw_stat_5row(rect, u++, v, col_label, sizeof(col_label));
+ sprintf(time_to_txt, "%.2fms", *cache_time);
+ draw_stat_5row(rect, u++, v, time_to_txt, sizeof(time_to_txt));
+ v += 2;
+
+ /* ------------------------------------------ */
+ /* ---------------- GPU stats --------------- */
+ /* ------------------------------------------ */
+
+ /* Memory Stats */
+ uint tex_mem = GPU_texture_memory_usage_get();
+ uint vbo_mem = GPU_vertbuf_get_memory_usage();
+
+ sprintf(stat_string, "GPU Memory");
+ draw_stat(rect, 0, v, stat_string, sizeof(stat_string));
+ sprintf(stat_string, "%.2fMB", (double)(tex_mem + vbo_mem) / 1000000.0);
+ draw_stat_5row(rect, 1, v++, stat_string, sizeof(stat_string));
+ sprintf(stat_string, "Textures");
+ draw_stat(rect, 1, v, stat_string, sizeof(stat_string));
+ sprintf(stat_string, "%.2fMB", (double)tex_mem / 1000000.0);
+ draw_stat_5row(rect, 1, v++, stat_string, sizeof(stat_string));
+ sprintf(stat_string, "Meshes");
+ draw_stat(rect, 1, v, stat_string, sizeof(stat_string));
+ sprintf(stat_string, "%.2fMB", (double)vbo_mem / 1000000.0);
+ draw_stat_5row(rect, 1, v++, stat_string, sizeof(stat_string));
+ v += 1;
+
+ /* GPU Timings */
+ BLI_strncpy(stat_string, "GPU Render Timings", sizeof(stat_string));
+ draw_stat(rect, 0, v++, stat_string, sizeof(stat_string));
+
+ for (int i = 0; i < DTP.timer_increment; ++i) {
+ double time_ms, time_percent;
+ DRWTimer *timer = &DTP.timers[i];
+ DRWTimer *timer_parent = (timer->lvl > 0) ? &DTP.timers[lvl_index[timer->lvl - 1]] : NULL;
+
+ /* Only display a limited number of levels at a time. */
+ if ((G.debug_value - 21) < timer->lvl) {
+ continue;
+ }
+
+ BLI_assert(timer->lvl < MAX_NESTED_TIMER);
+ lvl_index[timer->lvl] = i;
+
+ time_ms = timer->time_average / 1000000.0;
+
+ if (timer_parent != NULL) {
+ time_percent = ((double)timer->time_average / (double)timer_parent->time_average) * 100.0;
+ }
+ else {
+ time_percent = 100.0;
+ }
+
+ /* avoid very long numbers */
+ time_ms = MIN2(time_ms, 999.0);
+ time_percent = MIN2(time_percent, 100.0);
+
+ BLI_snprintf(stat_string, sizeof(stat_string), "%s", timer->name);
+ draw_stat(rect, 0 + timer->lvl, v, stat_string, sizeof(stat_string));
+ BLI_snprintf(stat_string, sizeof(stat_string), "%.2fms", time_ms);
+ draw_stat(rect, 12 + timer->lvl, v, stat_string, sizeof(stat_string));
+ BLI_snprintf(stat_string, sizeof(stat_string), "%.0f", time_percent);
+ draw_stat(rect, 16 + timer->lvl, v, stat_string, sizeof(stat_string));
+ v++;
+ }
+
+ BLF_batch_draw_end();
+ BLF_disable(fontid, BLF_SHADOW);
}
diff --git a/source/blender/draw/intern/draw_manager_shader.c b/source/blender/draw/intern/draw_manager_shader.c
index 1fc6b61b87a..9cb3c1bf226 100644
--- a/source/blender/draw/intern/draw_manager_shader.c
+++ b/source/blender/draw/intern/draw_manager_shader.c
@@ -57,339 +57,395 @@ extern char datatoc_common_fullscreen_vert_glsl[];
* \{ */
typedef struct DRWDeferredShader {
- struct DRWDeferredShader *prev, *next;
+ struct DRWDeferredShader *prev, *next;
- GPUMaterial *mat;
+ GPUMaterial *mat;
} DRWDeferredShader;
typedef struct DRWShaderCompiler {
- ListBase queue; /* DRWDeferredShader */
- SpinLock list_lock;
+ ListBase queue; /* DRWDeferredShader */
+ SpinLock list_lock;
- DRWDeferredShader *mat_compiling;
- ThreadMutex compilation_lock;
+ DRWDeferredShader *mat_compiling;
+ ThreadMutex compilation_lock;
- void *gl_context;
- bool own_context;
+ void *gl_context;
+ bool own_context;
- int shaders_done; /* To compute progress. */
+ int shaders_done; /* To compute progress. */
} DRWShaderCompiler;
static void drw_deferred_shader_free(DRWDeferredShader *dsh)
{
- /* Make sure it is not queued before freeing. */
- MEM_freeN(dsh);
+ /* Make sure it is not queued before freeing. */
+ MEM_freeN(dsh);
}
static void drw_deferred_shader_queue_free(ListBase *queue)
{
- DRWDeferredShader *dsh;
- while ((dsh = BLI_pophead(queue))) {
- drw_deferred_shader_free(dsh);
- }
+ DRWDeferredShader *dsh;
+ while ((dsh = BLI_pophead(queue))) {
+ drw_deferred_shader_free(dsh);
+ }
}
-static void drw_deferred_shader_compilation_exec(void *custom_data, short *stop, short *do_update, float *progress)
+static void drw_deferred_shader_compilation_exec(void *custom_data,
+ short *stop,
+ short *do_update,
+ float *progress)
{
- DRWShaderCompiler *comp = (DRWShaderCompiler *)custom_data;
- void *gl_context = comp->gl_context;
+ DRWShaderCompiler *comp = (DRWShaderCompiler *)custom_data;
+ void *gl_context = comp->gl_context;
- WM_opengl_context_activate(gl_context);
+ WM_opengl_context_activate(gl_context);
- while (true) {
- BLI_spin_lock(&comp->list_lock);
+ while (true) {
+ BLI_spin_lock(&comp->list_lock);
- if (*stop != 0) {
- /* We don't want the user to be able to cancel the compilation,
- * but the WM can kill the task if Blender is closing. */
- BLI_spin_unlock(&comp->list_lock);
- break;
- }
+ if (*stop != 0) {
+ /* We don't want the user to be able to cancel the compilation,
+ * but the WM can kill the task if Blender is closing. */
+ BLI_spin_unlock(&comp->list_lock);
+ break;
+ }
- /* Pop tail because it will be less likely to lock the main thread
- * if all GPUMaterials are to be freed (see DRW_deferred_shader_remove()). */
- comp->mat_compiling = BLI_poptail(&comp->queue);
- if (comp->mat_compiling == NULL) {
- /* No more shaders to compile. */
- BLI_spin_unlock(&comp->list_lock);
- break;
- }
+ /* Pop tail because it will be less likely to lock the main thread
+ * if all GPUMaterials are to be freed (see DRW_deferred_shader_remove()). */
+ comp->mat_compiling = BLI_poptail(&comp->queue);
+ if (comp->mat_compiling == NULL) {
+ /* No more shaders to compile. */
+ BLI_spin_unlock(&comp->list_lock);
+ break;
+ }
- comp->shaders_done++;
- int total = BLI_listbase_count(&comp->queue) + comp->shaders_done;
+ comp->shaders_done++;
+ int total = BLI_listbase_count(&comp->queue) + comp->shaders_done;
- BLI_mutex_lock(&comp->compilation_lock);
- BLI_spin_unlock(&comp->list_lock);
+ BLI_mutex_lock(&comp->compilation_lock);
+ BLI_spin_unlock(&comp->list_lock);
- /* Do the compilation. */
- GPU_material_compile(comp->mat_compiling->mat);
+ /* Do the compilation. */
+ GPU_material_compile(comp->mat_compiling->mat);
- *progress = (float)comp->shaders_done / (float)total;
- *do_update = true;
+ *progress = (float)comp->shaders_done / (float)total;
+ *do_update = true;
- GPU_flush();
- BLI_mutex_unlock(&comp->compilation_lock);
+ GPU_flush();
+ BLI_mutex_unlock(&comp->compilation_lock);
- BLI_spin_lock(&comp->list_lock);
- drw_deferred_shader_free(comp->mat_compiling);
- comp->mat_compiling = NULL;
- BLI_spin_unlock(&comp->list_lock);
- }
+ BLI_spin_lock(&comp->list_lock);
+ drw_deferred_shader_free(comp->mat_compiling);
+ comp->mat_compiling = NULL;
+ BLI_spin_unlock(&comp->list_lock);
+ }
- WM_opengl_context_release(gl_context);
+ WM_opengl_context_release(gl_context);
}
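
/* A simplified model of the worker loop above, using raw pthreads in place
 * of the BLI_spin/BLI_mutex wrappers (Job, worker_run and both locks are
 * illustrative): the queue is popped under a cheap spin lock, while the
 * expensive compile runs under a separate mutex that is acquired *before*
 * the spin lock is released, so a waiter can block on exactly the
 * in-flight item. */
#include <pthread.h>
#include <stdlib.h>

typedef struct Job {
  struct Job *next; /* ... plus the material to compile ... */
} Job;

static pthread_spinlock_t list_lock; /* pthread_spin_init() done elsewhere */
static pthread_mutex_t work_lock = PTHREAD_MUTEX_INITIALIZER;
static Job *queue_head = NULL;

static void worker_run(void)
{
  for (;;) {
    pthread_spin_lock(&list_lock);
    Job *job = queue_head;
    if (job == NULL) {
      pthread_spin_unlock(&list_lock); /* queue drained, worker exits */
      break;
    }
    queue_head = job->next;
    pthread_mutex_lock(&work_lock); /* taken before the list lock drops */
    pthread_spin_unlock(&list_lock);

    /* ... the long-running compile on `job` goes here ... */

    pthread_mutex_unlock(&work_lock);
    free(job);
  }
}
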
static void drw_deferred_shader_compilation_free(void *custom_data)
{
- DRWShaderCompiler *comp = (DRWShaderCompiler *)custom_data;
+ DRWShaderCompiler *comp = (DRWShaderCompiler *)custom_data;
- drw_deferred_shader_queue_free(&comp->queue);
+ drw_deferred_shader_queue_free(&comp->queue);
- BLI_spin_end(&comp->list_lock);
- BLI_mutex_end(&comp->compilation_lock);
+ BLI_spin_end(&comp->list_lock);
+ BLI_mutex_end(&comp->compilation_lock);
- if (comp->own_context) {
- /* Only destroy if the job owns the context. */
- WM_opengl_context_dispose(comp->gl_context);
- }
+ if (comp->own_context) {
+ /* Only destroy if the job owns the context. */
+ WM_opengl_context_dispose(comp->gl_context);
+ }
- MEM_freeN(comp);
+ MEM_freeN(comp);
}
static void drw_deferred_shader_add(GPUMaterial *mat, bool deferred)
{
- /* Do not defer the compilation if we are rendering an image. */
- if (DRW_state_is_image_render() || !USE_DEFERRED_COMPILATION || !deferred) {
- /* Double checking that this GPUMaterial is not going to be
- * compiled by another thread. */
- DRW_deferred_shader_remove(mat);
- GPU_material_compile(mat);
- return;
- }
-
- DRWDeferredShader *dsh = MEM_callocN(sizeof(DRWDeferredShader), "Deferred Shader");
-
- dsh->mat = mat;
-
- BLI_assert(DST.draw_ctx.evil_C);
- wmWindowManager *wm = CTX_wm_manager(DST.draw_ctx.evil_C);
- wmWindow *win = CTX_wm_window(DST.draw_ctx.evil_C);
-
- /* Use original scene ID since this is what the jobs template tests for. */
- Scene *scene = (Scene *)DEG_get_original_id(&DST.draw_ctx.scene->id);
-
- /* Get the running job or a new one if none is running. Can only have one job per type & owner. */
- wmJob *wm_job = WM_jobs_get(wm, win, scene, "Shaders Compilation",
- WM_JOB_PROGRESS | WM_JOB_SUSPEND, WM_JOB_TYPE_SHADER_COMPILATION);
-
- DRWShaderCompiler *old_comp = (DRWShaderCompiler *)WM_jobs_customdata_get(wm_job);
-
- DRWShaderCompiler *comp = MEM_callocN(sizeof(DRWShaderCompiler), "DRWShaderCompiler");
- BLI_spin_init(&comp->list_lock);
- BLI_mutex_init(&comp->compilation_lock);
-
- if (old_comp) {
- BLI_spin_lock(&old_comp->list_lock);
- BLI_movelisttolist(&comp->queue, &old_comp->queue);
- BLI_spin_unlock(&old_comp->list_lock);
- /* Do not recreate context, just pass ownership. */
- if (old_comp->gl_context) {
- comp->gl_context = old_comp->gl_context;
- old_comp->own_context = false;
- comp->own_context = true;
- }
- }
-
- BLI_addtail(&comp->queue, dsh);
-
- /* Create only one context. */
- if (comp->gl_context == NULL) {
- comp->gl_context = WM_opengl_context_create();
- WM_opengl_context_activate(DST.gl_context);
- comp->own_context = true;
- }
-
- WM_jobs_customdata_set(wm_job, comp, drw_deferred_shader_compilation_free);
- WM_jobs_timer(wm_job, 0.1, NC_MATERIAL | ND_SHADING_DRAW, 0);
- WM_jobs_callbacks(wm_job, drw_deferred_shader_compilation_exec, NULL, NULL, NULL);
- WM_jobs_start(wm, wm_job);
+ /* Do not defer the compilation if we are rendering an image. */
+ if (DRW_state_is_image_render() || !USE_DEFERRED_COMPILATION || !deferred) {
+ /* Double checking that this GPUMaterial is not going to be
+ * compiled by another thread. */
+ DRW_deferred_shader_remove(mat);
+ GPU_material_compile(mat);
+ return;
+ }
+
+ DRWDeferredShader *dsh = MEM_callocN(sizeof(DRWDeferredShader), "Deferred Shader");
+
+ dsh->mat = mat;
+
+ BLI_assert(DST.draw_ctx.evil_C);
+ wmWindowManager *wm = CTX_wm_manager(DST.draw_ctx.evil_C);
+ wmWindow *win = CTX_wm_window(DST.draw_ctx.evil_C);
+
+ /* Use original scene ID since this is what the jobs template tests for. */
+ Scene *scene = (Scene *)DEG_get_original_id(&DST.draw_ctx.scene->id);
+
+ /* Get the running job or a new one if none is running. Can only have one job per type & owner. */
+ wmJob *wm_job = WM_jobs_get(wm,
+ win,
+ scene,
+ "Shaders Compilation",
+ WM_JOB_PROGRESS | WM_JOB_SUSPEND,
+ WM_JOB_TYPE_SHADER_COMPILATION);
+
+ DRWShaderCompiler *old_comp = (DRWShaderCompiler *)WM_jobs_customdata_get(wm_job);
+
+ DRWShaderCompiler *comp = MEM_callocN(sizeof(DRWShaderCompiler), "DRWShaderCompiler");
+ BLI_spin_init(&comp->list_lock);
+ BLI_mutex_init(&comp->compilation_lock);
+
+ if (old_comp) {
+ BLI_spin_lock(&old_comp->list_lock);
+ BLI_movelisttolist(&comp->queue, &old_comp->queue);
+ BLI_spin_unlock(&old_comp->list_lock);
+ /* Do not recreate context, just pass ownership. */
+ if (old_comp->gl_context) {
+ comp->gl_context = old_comp->gl_context;
+ old_comp->own_context = false;
+ comp->own_context = true;
+ }
+ }
+
+ BLI_addtail(&comp->queue, dsh);
+
+ /* Create only one context. */
+ if (comp->gl_context == NULL) {
+ comp->gl_context = WM_opengl_context_create();
+ WM_opengl_context_activate(DST.gl_context);
+ comp->own_context = true;
+ }
+
+ WM_jobs_customdata_set(wm_job, comp, drw_deferred_shader_compilation_free);
+ WM_jobs_timer(wm_job, 0.1, NC_MATERIAL | ND_SHADING_DRAW, 0);
+ WM_jobs_callbacks(wm_job, drw_deferred_shader_compilation_exec, NULL, NULL, NULL);
+ WM_jobs_start(wm, wm_job);
}
void DRW_deferred_shader_remove(GPUMaterial *mat)
{
- Scene *scene = GPU_material_scene(mat);
-
- for (wmWindowManager *wm = G_MAIN->wm.first; wm; wm = wm->id.next) {
- if (WM_jobs_test(wm, scene, WM_JOB_TYPE_SHADER_COMPILATION) == false) {
- /* No job running, do not create a new one by calling WM_jobs_get. */
- continue;
- }
- for (wmWindow *win = wm->windows.first; win; win = win->next) {
- wmJob *wm_job = WM_jobs_get(wm, win, scene, "Shaders Compilation",
- WM_JOB_PROGRESS | WM_JOB_SUSPEND, WM_JOB_TYPE_SHADER_COMPILATION);
-
- DRWShaderCompiler *comp = (DRWShaderCompiler *)WM_jobs_customdata_get(wm_job);
- if (comp != NULL) {
- BLI_spin_lock(&comp->list_lock);
- DRWDeferredShader *dsh;
- dsh = (DRWDeferredShader *)BLI_findptr(&comp->queue, mat, offsetof(DRWDeferredShader, mat));
- if (dsh) {
- BLI_remlink(&comp->queue, dsh);
- }
-
- /* Wait for compilation to finish */
- if ((comp->mat_compiling != NULL) && (comp->mat_compiling->mat == mat)) {
- BLI_mutex_lock(&comp->compilation_lock);
- BLI_mutex_unlock(&comp->compilation_lock);
- }
-
- BLI_spin_unlock(&comp->list_lock);
-
- if (dsh) {
- drw_deferred_shader_free(dsh);
- }
- }
- }
- }
+ Scene *scene = GPU_material_scene(mat);
+
+ for (wmWindowManager *wm = G_MAIN->wm.first; wm; wm = wm->id.next) {
+ if (WM_jobs_test(wm, scene, WM_JOB_TYPE_SHADER_COMPILATION) == false) {
+ /* No job running, do not create a new one by calling WM_jobs_get. */
+ continue;
+ }
+ for (wmWindow *win = wm->windows.first; win; win = win->next) {
+ wmJob *wm_job = WM_jobs_get(wm,
+ win,
+ scene,
+ "Shaders Compilation",
+ WM_JOB_PROGRESS | WM_JOB_SUSPEND,
+ WM_JOB_TYPE_SHADER_COMPILATION);
+
+ DRWShaderCompiler *comp = (DRWShaderCompiler *)WM_jobs_customdata_get(wm_job);
+ if (comp != NULL) {
+ BLI_spin_lock(&comp->list_lock);
+ DRWDeferredShader *dsh;
+ dsh = (DRWDeferredShader *)BLI_findptr(
+ &comp->queue, mat, offsetof(DRWDeferredShader, mat));
+ if (dsh) {
+ BLI_remlink(&comp->queue, dsh);
+ }
+
+ /* Wait for compilation to finish */
+ if ((comp->mat_compiling != NULL) && (comp->mat_compiling->mat == mat)) {
+ BLI_mutex_lock(&comp->compilation_lock);
+ BLI_mutex_unlock(&comp->compilation_lock);
+ }
+
+ BLI_spin_unlock(&comp->list_lock);
+
+ if (dsh) {
+ drw_deferred_shader_free(dsh);
+ }
+ }
+ }
+ }
}
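
/* The "wait for compilation to finish" step above deserves a note: there is
 * no condition variable. Because the worker holds compilation_lock for the
 * whole compile, simply acquiring and releasing that mutex blocks the
 * caller until the in-flight compile ends. A standalone sketch (raw
 * pthreads, illustrative names): */
#include <pthread.h>

extern pthread_mutex_t compile_lock; /* held by the worker while compiling */

static void wait_for_inflight_compile(void)
{
  pthread_mutex_lock(&compile_lock); /* blocks while the worker compiles */
  pthread_mutex_unlock(&compile_lock);
}
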
/** \} */
/* -------------------------------------------------------------------- */
-GPUShader *DRW_shader_create(const char *vert, const char *geom, const char *frag, const char *defines)
+GPUShader *DRW_shader_create(const char *vert,
+ const char *geom,
+ const char *frag,
+ const char *defines)
{
- return GPU_shader_create(vert, frag, geom, NULL, defines, __func__);
+ return GPU_shader_create(vert, frag, geom, NULL, defines, __func__);
}
GPUShader *DRW_shader_create_with_lib(
- const char *vert, const char *geom, const char *frag, const char *lib, const char *defines)
+ const char *vert, const char *geom, const char *frag, const char *lib, const char *defines)
{
- GPUShader *sh;
- char *vert_with_lib = NULL;
- char *frag_with_lib = NULL;
- char *geom_with_lib = NULL;
-
- vert_with_lib = BLI_string_joinN(lib, vert);
- frag_with_lib = BLI_string_joinN(lib, frag);
- if (geom) {
- geom_with_lib = BLI_string_joinN(lib, geom);
- }
-
- sh = GPU_shader_create(vert_with_lib, frag_with_lib, geom_with_lib, NULL, defines, __func__);
-
- MEM_freeN(vert_with_lib);
- MEM_freeN(frag_with_lib);
- if (geom) {
- MEM_freeN(geom_with_lib);
- }
-
- return sh;
+ GPUShader *sh;
+ char *vert_with_lib = NULL;
+ char *frag_with_lib = NULL;
+ char *geom_with_lib = NULL;
+
+ vert_with_lib = BLI_string_joinN(lib, vert);
+ frag_with_lib = BLI_string_joinN(lib, frag);
+ if (geom) {
+ geom_with_lib = BLI_string_joinN(lib, geom);
+ }
+
+ sh = GPU_shader_create(vert_with_lib, frag_with_lib, geom_with_lib, NULL, defines, __func__);
+
+ MEM_freeN(vert_with_lib);
+ MEM_freeN(frag_with_lib);
+ if (geom) {
+ MEM_freeN(geom_with_lib);
+ }
+
+ return sh;
}
-GPUShader *DRW_shader_create_with_transform_feedback(
- const char *vert, const char *geom, const char *defines,
- const eGPUShaderTFBType prim_type, const char **varying_names, const int varying_count)
+GPUShader *DRW_shader_create_with_transform_feedback(const char *vert,
+ const char *geom,
+ const char *defines,
+ const eGPUShaderTFBType prim_type,
+ const char **varying_names,
+ const int varying_count)
{
- return GPU_shader_create_ex(vert,
- datatoc_gpu_shader_depth_only_frag_glsl,
- geom, NULL, defines,
- prim_type, varying_names, varying_count, __func__);
+ return GPU_shader_create_ex(vert,
+ datatoc_gpu_shader_depth_only_frag_glsl,
+ geom,
+ NULL,
+ defines,
+ prim_type,
+ varying_names,
+ varying_count,
+ __func__);
}
GPUShader *DRW_shader_create_2d(const char *frag, const char *defines)
{
- return GPU_shader_create(datatoc_gpu_shader_2D_vert_glsl, frag, NULL, NULL, defines, __func__);
+ return GPU_shader_create(datatoc_gpu_shader_2D_vert_glsl, frag, NULL, NULL, defines, __func__);
}
GPUShader *DRW_shader_create_3d(const char *frag, const char *defines)
{
- return GPU_shader_create(datatoc_gpu_shader_3D_vert_glsl, frag, NULL, NULL, defines, __func__);
+ return GPU_shader_create(datatoc_gpu_shader_3D_vert_glsl, frag, NULL, NULL, defines, __func__);
}
GPUShader *DRW_shader_create_fullscreen(const char *frag, const char *defines)
{
- return GPU_shader_create(datatoc_common_fullscreen_vert_glsl, frag, NULL, NULL, defines, __func__);
+ return GPU_shader_create(
+ datatoc_common_fullscreen_vert_glsl, frag, NULL, NULL, defines, __func__);
}
GPUShader *DRW_shader_create_3d_depth_only(eGPUShaderConfig sh_cfg)
{
- return GPU_shader_get_builtin_shader_with_config(GPU_SHADER_3D_DEPTH_ONLY, sh_cfg);
+ return GPU_shader_get_builtin_shader_with_config(GPU_SHADER_3D_DEPTH_ONLY, sh_cfg);
}
-GPUMaterial *DRW_shader_find_from_world(World *wo, const void *engine_type, int options, bool deferred)
+GPUMaterial *DRW_shader_find_from_world(World *wo,
+ const void *engine_type,
+ int options,
+ bool deferred)
{
- GPUMaterial *mat = GPU_material_from_nodetree_find(&wo->gpumaterial, engine_type, options);
- if (DRW_state_is_image_render() || !deferred) {
- if (mat != NULL && GPU_material_status(mat) == GPU_MAT_QUEUED) {
- /* XXX Hack: we return NULL so that the engine will call DRW_shader_create_from_XXX
- * with the shader code and we will resume the compilation from there. */
- return NULL;
- }
- }
- return mat;
+ GPUMaterial *mat = GPU_material_from_nodetree_find(&wo->gpumaterial, engine_type, options);
+ if (DRW_state_is_image_render() || !deferred) {
+ if (mat != NULL && GPU_material_status(mat) == GPU_MAT_QUEUED) {
+ /* XXX Hack: we return NULL so that the engine will call DRW_shader_create_from_XXX
+ * with the shader code and we will resume the compilation from there. */
+ return NULL;
+ }
+ }
+ return mat;
}
-GPUMaterial *DRW_shader_find_from_material(Material *ma, const void *engine_type, int options, bool deferred)
+GPUMaterial *DRW_shader_find_from_material(Material *ma,
+ const void *engine_type,
+ int options,
+ bool deferred)
{
- GPUMaterial *mat = GPU_material_from_nodetree_find(&ma->gpumaterial, engine_type, options);
- if (DRW_state_is_image_render() || !deferred) {
- if (mat != NULL && GPU_material_status(mat) == GPU_MAT_QUEUED) {
- /* XXX Hack: we return NULL so that the engine will call DRW_shader_create_from_XXX
- * with the shader code and we will resume the compilation from there. */
- return NULL;
- }
- }
- return mat;
+ GPUMaterial *mat = GPU_material_from_nodetree_find(&ma->gpumaterial, engine_type, options);
+ if (DRW_state_is_image_render() || !deferred) {
+ if (mat != NULL && GPU_material_status(mat) == GPU_MAT_QUEUED) {
+ /* XXX Hack: we return NULL so that the engine will call DRW_shader_create_from_XXX
+ * with the shader code and we will resume the compilation from there. */
+ return NULL;
+ }
+ }
+ return mat;
}
-GPUMaterial *DRW_shader_create_from_world(
- struct Scene *scene, World *wo, const void *engine_type, int options,
- const char *vert, const char *geom, const char *frag_lib, const char *defines, bool deferred)
+GPUMaterial *DRW_shader_create_from_world(struct Scene *scene,
+ World *wo,
+ const void *engine_type,
+ int options,
+ const char *vert,
+ const char *geom,
+ const char *frag_lib,
+ const char *defines,
+ bool deferred)
{
- GPUMaterial *mat = NULL;
- if (DRW_state_is_image_render()) {
- mat = GPU_material_from_nodetree_find(&wo->gpumaterial, engine_type, options);
- }
-
- if (mat == NULL) {
- scene = (Scene *)DEG_get_original_id(&DST.draw_ctx.scene->id);
- mat = GPU_material_from_nodetree(
- scene, wo->nodetree, &wo->gpumaterial, engine_type, options,
- vert, geom, frag_lib, defines, wo->id.name);
- }
-
- if (GPU_material_status(mat) == GPU_MAT_QUEUED) {
- drw_deferred_shader_add(mat, deferred);
- }
-
- return mat;
+ GPUMaterial *mat = NULL;
+ if (DRW_state_is_image_render()) {
+ mat = GPU_material_from_nodetree_find(&wo->gpumaterial, engine_type, options);
+ }
+
+ if (mat == NULL) {
+ scene = (Scene *)DEG_get_original_id(&DST.draw_ctx.scene->id);
+ mat = GPU_material_from_nodetree(scene,
+ wo->nodetree,
+ &wo->gpumaterial,
+ engine_type,
+ options,
+ vert,
+ geom,
+ frag_lib,
+ defines,
+ wo->id.name);
+ }
+
+ if (GPU_material_status(mat) == GPU_MAT_QUEUED) {
+ drw_deferred_shader_add(mat, deferred);
+ }
+
+ return mat;
}
-GPUMaterial *DRW_shader_create_from_material(
- struct Scene *scene, Material *ma, const void *engine_type, int options,
- const char *vert, const char *geom, const char *frag_lib, const char *defines, bool deferred)
+GPUMaterial *DRW_shader_create_from_material(struct Scene *scene,
+ Material *ma,
+ const void *engine_type,
+ int options,
+ const char *vert,
+ const char *geom,
+ const char *frag_lib,
+ const char *defines,
+ bool deferred)
{
- GPUMaterial *mat = NULL;
- if (DRW_state_is_image_render()) {
- mat = GPU_material_from_nodetree_find(&ma->gpumaterial, engine_type, options);
- }
-
- if (mat == NULL) {
- scene = (Scene *)DEG_get_original_id(&DST.draw_ctx.scene->id);
- mat = GPU_material_from_nodetree(
- scene, ma->nodetree, &ma->gpumaterial, engine_type, options,
- vert, geom, frag_lib, defines, ma->id.name);
- }
-
- if (GPU_material_status(mat) == GPU_MAT_QUEUED) {
- drw_deferred_shader_add(mat, deferred);
- }
-
- return mat;
+ GPUMaterial *mat = NULL;
+ if (DRW_state_is_image_render()) {
+ mat = GPU_material_from_nodetree_find(&ma->gpumaterial, engine_type, options);
+ }
+
+ if (mat == NULL) {
+ scene = (Scene *)DEG_get_original_id(&DST.draw_ctx.scene->id);
+ mat = GPU_material_from_nodetree(scene,
+ ma->nodetree,
+ &ma->gpumaterial,
+ engine_type,
+ options,
+ vert,
+ geom,
+ frag_lib,
+ defines,
+ ma->id.name);
+ }
+
+ if (GPU_material_status(mat) == GPU_MAT_QUEUED) {
+ drw_deferred_shader_add(mat, deferred);
+ }
+
+ return mat;
}
void DRW_shader_free(GPUShader *shader)
{
- GPU_shader_free(shader);
+ GPU_shader_free(shader);
}
diff --git a/source/blender/draw/intern/draw_manager_text.c b/source/blender/draw/intern/draw_manager_text.c
index eff3688589c..6e06998f1cc 100644
--- a/source/blender/draw/intern/draw_manager_text.c
+++ b/source/blender/draw/intern/draw_manager_text.c
@@ -38,143 +38,143 @@
#include "draw_manager_text.h"
typedef struct ViewCachedString {
- float vec[3];
- union {
- uchar ub[4];
- int pack;
- } col;
- short sco[2];
- short xoffs, yoffs;
- short flag;
- int str_len;
-
- /* str is allocated past the end */
- char str[0];
+ float vec[3];
+ union {
+ uchar ub[4];
+ int pack;
+ } col;
+ short sco[2];
+ short xoffs, yoffs;
+ short flag;
+ int str_len;
+
+ /* str is allocated past the end */
+ char str[0];
} ViewCachedString;
typedef struct DRWTextStore {
- BLI_memiter *cache_strings;
+ BLI_memiter *cache_strings;
} DRWTextStore;
DRWTextStore *DRW_text_cache_create(void)
{
- DRWTextStore *dt = MEM_callocN(sizeof(*dt), __func__);
- dt->cache_strings = BLI_memiter_create(1 << 14); /* 16 KiB */
- return dt;
+ DRWTextStore *dt = MEM_callocN(sizeof(*dt), __func__);
+ dt->cache_strings = BLI_memiter_create(1 << 14); /* 16 KiB */
+ return dt;
}
void DRW_text_cache_destroy(struct DRWTextStore *dt)
{
- BLI_memiter_destroy(dt->cache_strings);
- MEM_freeN(dt);
+ BLI_memiter_destroy(dt->cache_strings);
+ MEM_freeN(dt);
}
-void DRW_text_cache_add(
- DRWTextStore *dt,
- const float co[3],
- const char *str, const int str_len,
- short xoffs, short yoffs, short flag,
- const uchar col[4])
+void DRW_text_cache_add(DRWTextStore *dt,
+ const float co[3],
+ const char *str,
+ const int str_len,
+ short xoffs,
+ short yoffs,
+ short flag,
+ const uchar col[4])
{
- int alloc_len;
- ViewCachedString *vos;
-
- if (flag & DRW_TEXT_CACHE_STRING_PTR) {
- BLI_assert(str_len == strlen(str));
- alloc_len = sizeof(void *);
- }
- else {
- alloc_len = str_len + 1;
- }
-
- vos = BLI_memiter_alloc(dt->cache_strings, sizeof(ViewCachedString) + alloc_len);
-
- copy_v3_v3(vos->vec, co);
- copy_v4_v4_uchar(vos->col.ub, col);
- vos->xoffs = xoffs;
- vos->yoffs = yoffs;
- vos->flag = flag;
- vos->str_len = str_len;
-
- /* allocate past the end */
- if (flag & DRW_TEXT_CACHE_STRING_PTR) {
- memcpy(vos->str, &str, alloc_len);
- }
- else {
- memcpy(vos->str, str, alloc_len);
- }
+ int alloc_len;
+ ViewCachedString *vos;
+
+ if (flag & DRW_TEXT_CACHE_STRING_PTR) {
+ BLI_assert(str_len == strlen(str));
+ alloc_len = sizeof(void *);
+ }
+ else {
+ alloc_len = str_len + 1;
+ }
+
+ vos = BLI_memiter_alloc(dt->cache_strings, sizeof(ViewCachedString) + alloc_len);
+
+ copy_v3_v3(vos->vec, co);
+ copy_v4_v4_uchar(vos->col.ub, col);
+ vos->xoffs = xoffs;
+ vos->yoffs = yoffs;
+ vos->flag = flag;
+ vos->str_len = str_len;
+
+ /* allocate past the end */
+ if (flag & DRW_TEXT_CACHE_STRING_PTR) {
+ memcpy(vos->str, &str, alloc_len);
+ }
+ else {
+ memcpy(vos->str, str, alloc_len);
+ }
}
void DRW_text_cache_draw(DRWTextStore *dt, ARegion *ar)
{
- RegionView3D *rv3d = ar->regiondata;
- ViewCachedString *vos;
- int tot = 0;
-
- /* project first and test */
- BLI_memiter_handle it;
- BLI_memiter_iter_init(dt->cache_strings, &it);
- while ((vos = BLI_memiter_iter_step(&it))) {
- if (ED_view3d_project_short_ex(
- ar,
- (vos->flag & DRW_TEXT_CACHE_GLOBALSPACE) ? rv3d->persmat : rv3d->persmatob,
- (vos->flag & DRW_TEXT_CACHE_LOCALCLIP) != 0,
- vos->vec, vos->sco,
- V3D_PROJ_TEST_CLIP_BB | V3D_PROJ_TEST_CLIP_WIN | V3D_PROJ_TEST_CLIP_NEAR) == V3D_PROJ_RET_OK)
- {
- tot++;
- }
- else {
- vos->sco[0] = IS_CLIPPED;
- }
- }
-
- if (tot) {
- int col_pack_prev = 0;
-
- if (rv3d->rflag & RV3D_CLIPPING) {
- ED_view3d_clipping_disable();
- }
-
- float original_proj[4][4];
- GPU_matrix_projection_get(original_proj);
- wmOrtho2_region_pixelspace(ar);
-
- GPU_matrix_push();
- GPU_matrix_identity_set();
-
- const int font_id = BLF_default();
-
- const uiStyle *style = UI_style_get();
-
- BLF_size(font_id, style->widget.points * U.pixelsize, U.dpi);
-
- BLI_memiter_iter_init(dt->cache_strings, &it);
- while ((vos = BLI_memiter_iter_step(&it))) {
- if (vos->sco[0] != IS_CLIPPED) {
- if (col_pack_prev != vos->col.pack) {
- BLF_color4ubv(font_id, vos->col.ub);
- col_pack_prev = vos->col.pack;
- }
-
- BLF_position(
- font_id,
- (float)(vos->sco[0] + vos->xoffs), (float)(vos->sco[1] + vos->yoffs), 2.0f);
-
- ((vos->flag & DRW_TEXT_CACHE_ASCII) ?
- BLF_draw_ascii :
- BLF_draw
- )(font_id,
- (vos->flag & DRW_TEXT_CACHE_STRING_PTR) ? *((const char **)vos->str) : vos->str,
- vos->str_len);
- }
- }
-
- GPU_matrix_pop();
- GPU_matrix_projection_set(original_proj);
-
- if (rv3d->rflag & RV3D_CLIPPING) {
- ED_view3d_clipping_enable();
- }
- }
+ RegionView3D *rv3d = ar->regiondata;
+ ViewCachedString *vos;
+ int tot = 0;
+
+ /* project first and test */
+ BLI_memiter_handle it;
+ BLI_memiter_iter_init(dt->cache_strings, &it);
+ while ((vos = BLI_memiter_iter_step(&it))) {
+ if (ED_view3d_project_short_ex(
+ ar,
+ (vos->flag & DRW_TEXT_CACHE_GLOBALSPACE) ? rv3d->persmat : rv3d->persmatob,
+ (vos->flag & DRW_TEXT_CACHE_LOCALCLIP) != 0,
+ vos->vec,
+ vos->sco,
+ V3D_PROJ_TEST_CLIP_BB | V3D_PROJ_TEST_CLIP_WIN | V3D_PROJ_TEST_CLIP_NEAR) ==
+ V3D_PROJ_RET_OK) {
+ tot++;
+ }
+ else {
+ vos->sco[0] = IS_CLIPPED;
+ }
+ }
+
+ if (tot) {
+ int col_pack_prev = 0;
+
+ if (rv3d->rflag & RV3D_CLIPPING) {
+ ED_view3d_clipping_disable();
+ }
+
+ float original_proj[4][4];
+ GPU_matrix_projection_get(original_proj);
+ wmOrtho2_region_pixelspace(ar);
+
+ GPU_matrix_push();
+ GPU_matrix_identity_set();
+
+ const int font_id = BLF_default();
+
+ const uiStyle *style = UI_style_get();
+
+ BLF_size(font_id, style->widget.points * U.pixelsize, U.dpi);
+
+ BLI_memiter_iter_init(dt->cache_strings, &it);
+ while ((vos = BLI_memiter_iter_step(&it))) {
+ if (vos->sco[0] != IS_CLIPPED) {
+ if (col_pack_prev != vos->col.pack) {
+ BLF_color4ubv(font_id, vos->col.ub);
+ col_pack_prev = vos->col.pack;
+ }
+
+ BLF_position(
+ font_id, (float)(vos->sco[0] + vos->xoffs), (float)(vos->sco[1] + vos->yoffs), 2.0f);
+
+ ((vos->flag & DRW_TEXT_CACHE_ASCII) ? BLF_draw_ascii : BLF_draw)(
+ font_id,
+ (vos->flag & DRW_TEXT_CACHE_STRING_PTR) ? *((const char **)vos->str) : vos->str,
+ vos->str_len);
+ }
+ }
+
+ GPU_matrix_pop();
+ GPU_matrix_projection_set(original_proj);
+
+ if (rv3d->rflag & RV3D_CLIPPING) {
+ ED_view3d_clipping_enable();
+ }
+ }
}
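
Taken together, the functions above form a simple lifecycle: create the store, add strings while building the frame, project and draw them in region pixel space, then destroy the store. A condensed sketch, with a caller-owned store created and destroyed around a single draw for simplicity:

#include <string.h> /* strlen */

/* Hedged sketch of the text-cache lifecycle; co is a world-space position. */
static void example_draw_label(ARegion *ar, const float co[3], const char *name)
{
  struct DRWTextStore *dt = DRW_text_cache_create();
  const uchar col[4] = {255, 255, 255, 255};

  /* Without DRW_TEXT_CACHE_STRING_PTR the string is copied into the store. */
  DRW_text_cache_add(dt, co, name, (int)strlen(name), 0, -12, DRW_TEXT_CACHE_GLOBALSPACE, col);

  DRW_text_cache_draw(dt, ar); /* projects with persmat, then draws via BLF */
  DRW_text_cache_destroy(dt);
}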
diff --git a/source/blender/draw/intern/draw_manager_text.h b/source/blender/draw/intern/draw_manager_text.h
index 308d569faa9..9f5dd1d4beb 100644
--- a/source/blender/draw/intern/draw_manager_text.h
+++ b/source/blender/draw/intern/draw_manager_text.h
@@ -28,21 +28,23 @@ struct DRWTextStore;
struct DRWTextStore *DRW_text_cache_create(void);
void DRW_text_cache_destroy(struct DRWTextStore *dt);
-void DRW_text_cache_add(
- struct DRWTextStore *dt,
- const float co[3],
- const char *str, const int str_len,
- short xoffs, short yoffs, short flag,
- const uchar col[4]);
+void DRW_text_cache_add(struct DRWTextStore *dt,
+ const float co[3],
+ const char *str,
+ const int str_len,
+ short xoffs,
+ short yoffs,
+ short flag,
+ const uchar col[4]);
void DRW_text_cache_draw(struct DRWTextStore *dt, struct ARegion *ar);
enum {
- DRW_TEXT_CACHE_ASCII = (1 << 0),
- DRW_TEXT_CACHE_GLOBALSPACE = (1 << 1),
- DRW_TEXT_CACHE_LOCALCLIP = (1 << 2),
- /* reference the string by pointer */
- DRW_TEXT_CACHE_STRING_PTR = (1 << 3),
+ DRW_TEXT_CACHE_ASCII = (1 << 0),
+ DRW_TEXT_CACHE_GLOBALSPACE = (1 << 1),
+ DRW_TEXT_CACHE_LOCALCLIP = (1 << 2),
+ /* reference the string by pointer */
+ DRW_TEXT_CACHE_STRING_PTR = (1 << 3),
};
/* draw_manager.c */
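
Of these flags, DRW_TEXT_CACHE_STRING_PTR is the one with a lifetime contract: as the memcpy(vos->str, &str, alloc_len) branch in draw_manager_text.c shows, only the pointer is stored, so the string must stay valid until DRW_text_cache_draw() runs. A hedged sketch of a safe use, with the store and position assumed to come from elsewhere:

/* Hedged sketch: a string with static storage duration outlives the frame,
 * so referencing it by pointer (no copy) is safe. */
static void example_add_static_label(struct DRWTextStore *dt, const float co[3])
{
  static const char label[] = "Example"; /* static: still valid at draw time */
  const uchar col[4] = {255, 220, 100, 255};
  /* str_len must equal strlen(str); see the BLI_assert() in DRW_text_cache_add. */
  DRW_text_cache_add(dt, co, label, sizeof(label) - 1, 0, 0,
                     DRW_TEXT_CACHE_GLOBALSPACE | DRW_TEXT_CACHE_STRING_PTR, col);
}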
diff --git a/source/blender/draw/intern/draw_manager_texture.c b/source/blender/draw/intern/draw_manager_texture.c
index 92fca2d4f04..4750a35d784 100644
--- a/source/blender/draw/intern/draw_manager_texture.c
+++ b/source/blender/draw/intern/draw_manager_texture.c
@@ -26,126 +26,139 @@
/* Maybe gpu_texture.c is a better place for this. */
static bool drw_texture_format_supports_framebuffer(eGPUTextureFormat format)
{
- /* Some formats do not work with framebuffers. */
- switch (format) {
- /* Only add formats that are COMPATIBLE with FB.
- * Generally they are multiples of 16 bits. */
- case GPU_R8:
- case GPU_R8UI:
- case GPU_R16F:
- case GPU_R16I:
- case GPU_R16UI:
- case GPU_R16:
- case GPU_R32F:
- case GPU_R32UI:
- case GPU_RG8:
- case GPU_RG16:
- case GPU_RG16F:
- case GPU_RG16I:
- case GPU_RG32F:
- case GPU_R11F_G11F_B10F:
- case GPU_RGBA8:
- case GPU_RGBA16F:
- case GPU_RGBA32F:
- case GPU_DEPTH_COMPONENT16:
- case GPU_DEPTH_COMPONENT24:
- case GPU_DEPTH24_STENCIL8:
- case GPU_DEPTH_COMPONENT32F:
- return true;
- default:
- return false;
- }
+ /* Some formats do not work with framebuffers. */
+ switch (format) {
+ /* Only add formats that are COMPATIBLE with FB.
+ * Generally they are multiples of 16 bits. */
+ case GPU_R8:
+ case GPU_R8UI:
+ case GPU_R16F:
+ case GPU_R16I:
+ case GPU_R16UI:
+ case GPU_R16:
+ case GPU_R32F:
+ case GPU_R32UI:
+ case GPU_RG8:
+ case GPU_RG16:
+ case GPU_RG16F:
+ case GPU_RG16I:
+ case GPU_RG32F:
+ case GPU_R11F_G11F_B10F:
+ case GPU_RGBA8:
+ case GPU_RGBA16F:
+ case GPU_RGBA32F:
+ case GPU_DEPTH_COMPONENT16:
+ case GPU_DEPTH_COMPONENT24:
+ case GPU_DEPTH24_STENCIL8:
+ case GPU_DEPTH_COMPONENT32F:
+ return true;
+ default:
+ return false;
+ }
}
#endif
void drw_texture_set_parameters(GPUTexture *tex, DRWTextureFlag flags)
{
- GPU_texture_bind(tex, 0);
- if (flags & DRW_TEX_MIPMAP) {
- GPU_texture_mipmap_mode(tex, true, flags & DRW_TEX_FILTER);
- GPU_texture_generate_mipmap(tex);
- }
- else {
- GPU_texture_filter_mode(tex, flags & DRW_TEX_FILTER);
- }
- GPU_texture_wrap_mode(tex, flags & DRW_TEX_WRAP);
- GPU_texture_compare_mode(tex, flags & DRW_TEX_COMPARE);
- GPU_texture_unbind(tex);
+ GPU_texture_bind(tex, 0);
+ if (flags & DRW_TEX_MIPMAP) {
+ GPU_texture_mipmap_mode(tex, true, flags & DRW_TEX_FILTER);
+ GPU_texture_generate_mipmap(tex);
+ }
+ else {
+ GPU_texture_filter_mode(tex, flags & DRW_TEX_FILTER);
+ }
+ GPU_texture_wrap_mode(tex, flags & DRW_TEX_WRAP);
+ GPU_texture_compare_mode(tex, flags & DRW_TEX_COMPARE);
+ GPU_texture_unbind(tex);
}
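
drw_texture_set_parameters() is the single place where DRWTextureFlag bits become GPU sampler state, and every constructor below funnels through it. A hedged sketch of the flag combinations, assuming a caller that owns 256 RGBA float pixels:

/* Hedged sketch: DRW_TEX_FILTER selects linear filtering; adding
 * DRW_TEX_MIPMAP also generates the mip chain at creation time
 * (the mipmap branch above). The pixel pointer is an assumed input. */
static GPUTexture *example_ramp_texture(const float *pixels_rgba /* 256 * 4 floats */)
{
  return DRW_texture_create_1d(256, GPU_RGBA16F, DRW_TEX_FILTER | DRW_TEX_MIPMAP, pixels_rgba);
}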
-GPUTexture *DRW_texture_create_1d(int w, eGPUTextureFormat format, DRWTextureFlag flags, const float *fpixels)
+GPUTexture *DRW_texture_create_1d(int w,
+ eGPUTextureFormat format,
+ DRWTextureFlag flags,
+ const float *fpixels)
{
- GPUTexture *tex = GPU_texture_create_1d(w, format, fpixels, NULL);
- drw_texture_set_parameters(tex, flags);
+ GPUTexture *tex = GPU_texture_create_1d(w, format, fpixels, NULL);
+ drw_texture_set_parameters(tex, flags);
- return tex;
+ return tex;
}
-GPUTexture *DRW_texture_create_2d(int w, int h, eGPUTextureFormat format, DRWTextureFlag flags, const float *fpixels)
+GPUTexture *DRW_texture_create_2d(
+ int w, int h, eGPUTextureFormat format, DRWTextureFlag flags, const float *fpixels)
{
- GPUTexture *tex = GPU_texture_create_2d(w, h, format, fpixels, NULL);
- drw_texture_set_parameters(tex, flags);
+ GPUTexture *tex = GPU_texture_create_2d(w, h, format, fpixels, NULL);
+ drw_texture_set_parameters(tex, flags);
- return tex;
+ return tex;
}
GPUTexture *DRW_texture_create_2d_array(
- int w, int h, int d, eGPUTextureFormat format, DRWTextureFlag flags, const float *fpixels)
+ int w, int h, int d, eGPUTextureFormat format, DRWTextureFlag flags, const float *fpixels)
{
- GPUTexture *tex = GPU_texture_create_2d_array(w, h, d, format, fpixels, NULL);
- drw_texture_set_parameters(tex, flags);
+ GPUTexture *tex = GPU_texture_create_2d_array(w, h, d, format, fpixels, NULL);
+ drw_texture_set_parameters(tex, flags);
- return tex;
+ return tex;
}
GPUTexture *DRW_texture_create_3d(
- int w, int h, int d, eGPUTextureFormat format, DRWTextureFlag flags, const float *fpixels)
+ int w, int h, int d, eGPUTextureFormat format, DRWTextureFlag flags, const float *fpixels)
{
- GPUTexture *tex = GPU_texture_create_3d(w, h, d, format, fpixels, NULL);
- drw_texture_set_parameters(tex, flags);
+ GPUTexture *tex = GPU_texture_create_3d(w, h, d, format, fpixels, NULL);
+ drw_texture_set_parameters(tex, flags);
- return tex;
+ return tex;
}
-GPUTexture *DRW_texture_create_cube(int w, eGPUTextureFormat format, DRWTextureFlag flags, const float *fpixels)
+GPUTexture *DRW_texture_create_cube(int w,
+ eGPUTextureFormat format,
+ DRWTextureFlag flags,
+ const float *fpixels)
{
- GPUTexture *tex = GPU_texture_create_cube(w, format, fpixels, NULL);
- drw_texture_set_parameters(tex, flags);
+ GPUTexture *tex = GPU_texture_create_cube(w, format, fpixels, NULL);
+ drw_texture_set_parameters(tex, flags);
- return tex;
+ return tex;
}
-GPUTexture *DRW_texture_pool_query_2d(int w, int h, eGPUTextureFormat format, DrawEngineType *engine_type)
+GPUTexture *DRW_texture_pool_query_2d(int w,
+ int h,
+ eGPUTextureFormat format,
+ DrawEngineType *engine_type)
{
- BLI_assert(drw_texture_format_supports_framebuffer(format));
- GPUTexture *tex = GPU_viewport_texture_pool_query(DST.viewport, engine_type, w, h, format);
+ BLI_assert(drw_texture_format_supports_framebuffer(format));
+ GPUTexture *tex = GPU_viewport_texture_pool_query(DST.viewport, engine_type, w, h, format);
- return tex;
+ return tex;
}
-void DRW_texture_ensure_fullscreen_2d(GPUTexture **tex, eGPUTextureFormat format, DRWTextureFlag flags)
+void DRW_texture_ensure_fullscreen_2d(GPUTexture **tex,
+ eGPUTextureFormat format,
+ DRWTextureFlag flags)
{
- if (*(tex) == NULL) {
- const float *size = DRW_viewport_size_get();
- *(tex) = DRW_texture_create_2d((int)size[0], (int)size[1], format, flags, NULL);
- }
+ if (*(tex) == NULL) {
+ const float *size = DRW_viewport_size_get();
+ *(tex) = DRW_texture_create_2d((int)size[0], (int)size[1], format, flags, NULL);
+ }
}
-void DRW_texture_ensure_2d(GPUTexture **tex, int w, int h, eGPUTextureFormat format, DRWTextureFlag flags)
+void DRW_texture_ensure_2d(
+ GPUTexture **tex, int w, int h, eGPUTextureFormat format, DRWTextureFlag flags)
{
- if (*(tex) == NULL) {
- *(tex) = DRW_texture_create_2d(w, h, format, flags, NULL);
- }
+ if (*(tex) == NULL) {
+ *(tex) = DRW_texture_create_2d(w, h, format, flags, NULL);
+ }
}
void DRW_texture_generate_mipmaps(GPUTexture *tex)
{
- GPU_texture_bind(tex, 0);
- GPU_texture_generate_mipmap(tex);
- GPU_texture_unbind(tex);
+ GPU_texture_bind(tex, 0);
+ GPU_texture_generate_mipmap(tex);
+ GPU_texture_unbind(tex);
}
void DRW_texture_free(GPUTexture *tex)
{
- GPU_texture_free(tex);
+ GPU_texture_free(tex);
}
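
The ensure variants above implement a lazy-initialization idiom: the texture pointer doubles as the created-already flag, so engines can call them unconditionally each frame. A sketch with an assumed engine-side texture list:

/* Hedged sketch; EXAMPLE_TextureList and its slot are assumed names. */
typedef struct EXAMPLE_TextureList {
  struct GPUTexture *color_copy;
} EXAMPLE_TextureList;

static void example_textures_ensure(EXAMPLE_TextureList *txl)
{
  /* Allocates a viewport-sized texture on the first call only;
   * later calls are no-ops while the pointer stays non-NULL. */
  DRW_texture_ensure_fullscreen_2d(&txl->color_copy, GPU_RGBA16F, DRW_TEX_FILTER);
}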
diff --git a/source/blender/draw/intern/draw_view.c b/source/blender/draw/intern/draw_view.c
index b907452dad2..1543e381d8c 100644
--- a/source/blender/draw/intern/draw_view.c
+++ b/source/blender/draw/intern/draw_view.c
@@ -51,264 +51,263 @@
void DRW_draw_region_info(void)
{
- const DRWContextState *draw_ctx = DRW_context_state_get();
- ARegion *ar = draw_ctx->ar;
+ const DRWContextState *draw_ctx = DRW_context_state_get();
+ ARegion *ar = draw_ctx->ar;
- DRW_draw_cursor();
+ DRW_draw_cursor();
- view3d_draw_region_info(draw_ctx->evil_C, ar);
+ view3d_draw_region_info(draw_ctx->evil_C, ar);
}
/* ************************* Background ************************** */
void DRW_draw_background(void)
{
- /* Just to make sure */
- glDepthMask(GL_TRUE);
- glColorMask(GL_TRUE, GL_TRUE, GL_TRUE, GL_TRUE);
- glStencilMask(0xFF);
+ /* Just to make sure */
+ glDepthMask(GL_TRUE);
+ glColorMask(GL_TRUE, GL_TRUE, GL_TRUE, GL_TRUE);
+ glStencilMask(0xFF);
- if (UI_GetThemeValue(TH_SHOW_BACK_GRAD)) {
- float m[4][4];
- unit_m4(m);
+ if (UI_GetThemeValue(TH_SHOW_BACK_GRAD)) {
+ float m[4][4];
+ unit_m4(m);
- /* Gradient background color */
- glDisable(GL_DEPTH_TEST);
+ /* Gradient background color */
+ glDisable(GL_DEPTH_TEST);
- GPUVertFormat *format = immVertexFormat();
- uint pos = GPU_vertformat_attr_add(format, "pos", GPU_COMP_F32, 2, GPU_FETCH_FLOAT);
- uint color = GPU_vertformat_attr_add(format, "color", GPU_COMP_U8, 3, GPU_FETCH_INT_TO_FLOAT_UNIT);
- uchar col_hi[3], col_lo[3];
+ GPUVertFormat *format = immVertexFormat();
+ uint pos = GPU_vertformat_attr_add(format, "pos", GPU_COMP_F32, 2, GPU_FETCH_FLOAT);
+ uint color = GPU_vertformat_attr_add(
+ format, "color", GPU_COMP_U8, 3, GPU_FETCH_INT_TO_FLOAT_UNIT);
+ uchar col_hi[3], col_lo[3];
- GPU_matrix_push();
- GPU_matrix_identity_set();
- GPU_matrix_projection_set(m);
+ GPU_matrix_push();
+ GPU_matrix_identity_set();
+ GPU_matrix_projection_set(m);
- immBindBuiltinProgram(GPU_SHADER_2D_SMOOTH_COLOR_DITHER);
+ immBindBuiltinProgram(GPU_SHADER_2D_SMOOTH_COLOR_DITHER);
- UI_GetThemeColor3ubv(TH_BACK_GRAD, col_lo);
- UI_GetThemeColor3ubv(TH_BACK, col_hi);
+ UI_GetThemeColor3ubv(TH_BACK_GRAD, col_lo);
+ UI_GetThemeColor3ubv(TH_BACK, col_hi);
- immBegin(GPU_PRIM_TRI_FAN, 4);
- immAttr3ubv(color, col_lo);
- immVertex2f(pos, -1.0f, -1.0f);
- immVertex2f(pos, 1.0f, -1.0f);
+ immBegin(GPU_PRIM_TRI_FAN, 4);
+ immAttr3ubv(color, col_lo);
+ immVertex2f(pos, -1.0f, -1.0f);
+ immVertex2f(pos, 1.0f, -1.0f);
- immAttr3ubv(color, col_hi);
- immVertex2f(pos, 1.0f, 1.0f);
- immVertex2f(pos, -1.0f, 1.0f);
- immEnd();
+ immAttr3ubv(color, col_hi);
+ immVertex2f(pos, 1.0f, 1.0f);
+ immVertex2f(pos, -1.0f, 1.0f);
+ immEnd();
- immUnbindProgram();
+ immUnbindProgram();
- GPU_matrix_pop();
+ GPU_matrix_pop();
- glClear(GL_DEPTH_BUFFER_BIT | GL_STENCIL_BUFFER_BIT);
+ glClear(GL_DEPTH_BUFFER_BIT | GL_STENCIL_BUFFER_BIT);
- glEnable(GL_DEPTH_TEST);
- }
- else {
- /* Solid background color */
- UI_ThemeClearColorAlpha(TH_BACK, 1.0f);
- glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT | GL_STENCIL_BUFFER_BIT);
- }
+ glEnable(GL_DEPTH_TEST);
+ }
+ else {
+ /* Solid background color */
+ UI_ThemeClearColorAlpha(TH_BACK, 1.0f);
+ glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT | GL_STENCIL_BUFFER_BIT);
+ }
}
GPUBatch *DRW_draw_background_clipping_batch_from_rv3d(const RegionView3D *rv3d)
{
- const BoundBox *bb = rv3d->clipbb;
- const uint clipping_index[6][4] = {
- {0, 1, 2, 3},
- {0, 4, 5, 1},
- {4, 7, 6, 5},
- {7, 3, 2, 6},
- {1, 5, 6, 2},
- {7, 4, 0, 3},
- };
- GPUVertBuf *vbo;
- GPUIndexBuf *el;
- GPUIndexBufBuilder elb = {0};
-
- /* Elements */
- GPU_indexbuf_init(&elb, GPU_PRIM_TRIS, ARRAY_SIZE(clipping_index) * 2, ARRAY_SIZE(bb->vec));
- for (int i = 0; i < ARRAY_SIZE(clipping_index); i++) {
- const uint *idx = clipping_index[i];
- GPU_indexbuf_add_tri_verts(&elb, idx[0], idx[1], idx[2]);
- GPU_indexbuf_add_tri_verts(&elb, idx[0], idx[2], idx[3]);
- }
- el = GPU_indexbuf_build(&elb);
-
- GPUVertFormat format = {0};
- uint pos_id = GPU_vertformat_attr_add(&format, "pos", GPU_COMP_F32, 3, GPU_FETCH_FLOAT);
-
- vbo = GPU_vertbuf_create_with_format(&format);
- GPU_vertbuf_data_alloc(vbo, ARRAY_SIZE(bb->vec));
- GPU_vertbuf_attr_fill(vbo, pos_id, bb->vec);
-
- return GPU_batch_create_ex(GPU_PRIM_TRIS, vbo, el, GPU_BATCH_OWNS_VBO | GPU_BATCH_OWNS_INDEX);
+ const BoundBox *bb = rv3d->clipbb;
+ const uint clipping_index[6][4] = {
+ {0, 1, 2, 3},
+ {0, 4, 5, 1},
+ {4, 7, 6, 5},
+ {7, 3, 2, 6},
+ {1, 5, 6, 2},
+ {7, 4, 0, 3},
+ };
+ GPUVertBuf *vbo;
+ GPUIndexBuf *el;
+ GPUIndexBufBuilder elb = {0};
+
+ /* Elements */
+ GPU_indexbuf_init(&elb, GPU_PRIM_TRIS, ARRAY_SIZE(clipping_index) * 2, ARRAY_SIZE(bb->vec));
+ for (int i = 0; i < ARRAY_SIZE(clipping_index); i++) {
+ const uint *idx = clipping_index[i];
+ GPU_indexbuf_add_tri_verts(&elb, idx[0], idx[1], idx[2]);
+ GPU_indexbuf_add_tri_verts(&elb, idx[0], idx[2], idx[3]);
+ }
+ el = GPU_indexbuf_build(&elb);
+
+ GPUVertFormat format = {0};
+ uint pos_id = GPU_vertformat_attr_add(&format, "pos", GPU_COMP_F32, 3, GPU_FETCH_FLOAT);
+
+ vbo = GPU_vertbuf_create_with_format(&format);
+ GPU_vertbuf_data_alloc(vbo, ARRAY_SIZE(bb->vec));
+ GPU_vertbuf_attr_fill(vbo, pos_id, bb->vec);
+
+ return GPU_batch_create_ex(GPU_PRIM_TRIS, vbo, el, GPU_BATCH_OWNS_VBO | GPU_BATCH_OWNS_INDEX);
}
-
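
The index buffer above splits each quad face of the clipping bound box into the two triangles (v0, v1, v2) and (v0, v2, v3). The pattern is independent of the GPU API; a standalone check in plain C:

#include <stdio.h>

/* Split one quad into the two triangles used above. */
static void quad_to_tris(const unsigned int quad[4], unsigned int tris[2][3])
{
  tris[0][0] = quad[0]; tris[0][1] = quad[1]; tris[0][2] = quad[2];
  tris[1][0] = quad[0]; tris[1][1] = quad[2]; tris[1][2] = quad[3];
}

int main(void)
{
  const unsigned int quad[4] = {0, 4, 5, 1}; /* second face from the table above */
  unsigned int tris[2][3];
  quad_to_tris(quad, tris);
  printf("%u %u %u / %u %u %u\n",
         tris[0][0], tris[0][1], tris[0][2],
         tris[1][0], tris[1][1], tris[1][2]); /* prints: 0 4 5 / 0 5 1 */
  return 0;
}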
/* **************************** 3D Cursor ******************************** */
static bool is_cursor_visible(const DRWContextState *draw_ctx, Scene *scene, ViewLayer *view_layer)
{
- View3D *v3d = draw_ctx->v3d;
- if ((v3d->flag2 & V3D_HIDE_OVERLAYS) || (v3d->overlay.flag & V3D_OVERLAY_HIDE_CURSOR)) {
- return false;
- }
-
- /* don't draw cursor in paint modes, but with a few exceptions */
- if (draw_ctx->object_mode & OB_MODE_ALL_PAINT) {
- /* exception: object is in weight paint and has deforming armature in pose mode */
- if (draw_ctx->object_mode & OB_MODE_WEIGHT_PAINT) {
- if (BKE_object_pose_armature_get(draw_ctx->obact) != NULL) {
- return true;
- }
- }
- /* exception: object in texture paint mode, clone brush, use_clone_layer disabled */
- else if (draw_ctx->object_mode & OB_MODE_TEXTURE_PAINT) {
- const Paint *p = BKE_paint_get_active(scene, view_layer);
-
- if (p && p->brush && p->brush->imagepaint_tool == PAINT_TOOL_CLONE) {
- if ((scene->toolsettings->imapaint.flag & IMAGEPAINT_PROJECT_LAYER_CLONE) == 0) {
- return true;
- }
- }
- }
-
- /* no exception met? then don't draw cursor! */
- return false;
- }
- else if (draw_ctx->object_mode & OB_MODE_WEIGHT_GPENCIL) {
- /* grease pencil: the cursor is always hidden in these modes */
- return false;
- }
-
- return true;
+ View3D *v3d = draw_ctx->v3d;
+ if ((v3d->flag2 & V3D_HIDE_OVERLAYS) || (v3d->overlay.flag & V3D_OVERLAY_HIDE_CURSOR)) {
+ return false;
+ }
+
+ /* don't draw cursor in paint modes, but with a few exceptions */
+ if (draw_ctx->object_mode & OB_MODE_ALL_PAINT) {
+ /* exception: object is in weight paint and has deforming armature in pose mode */
+ if (draw_ctx->object_mode & OB_MODE_WEIGHT_PAINT) {
+ if (BKE_object_pose_armature_get(draw_ctx->obact) != NULL) {
+ return true;
+ }
+ }
+ /* exception: object in texture paint mode, clone brush, use_clone_layer disabled */
+ else if (draw_ctx->object_mode & OB_MODE_TEXTURE_PAINT) {
+ const Paint *p = BKE_paint_get_active(scene, view_layer);
+
+ if (p && p->brush && p->brush->imagepaint_tool == PAINT_TOOL_CLONE) {
+ if ((scene->toolsettings->imapaint.flag & IMAGEPAINT_PROJECT_LAYER_CLONE) == 0) {
+ return true;
+ }
+ }
+ }
+
+ /* no exception met? then don't draw cursor! */
+ return false;
+ }
+ else if (draw_ctx->object_mode & OB_MODE_WEIGHT_GPENCIL) {
+ /* grease pencil: the cursor is always hidden in these modes */
+ return false;
+ }
+
+ return true;
}
void DRW_draw_cursor(void)
{
- const DRWContextState *draw_ctx = DRW_context_state_get();
- ARegion *ar = draw_ctx->ar;
- Scene *scene = draw_ctx->scene;
- ViewLayer *view_layer = draw_ctx->view_layer;
-
- glColorMask(GL_TRUE, GL_TRUE, GL_TRUE, GL_TRUE);
- glDepthMask(GL_FALSE);
- glDisable(GL_DEPTH_TEST);
-
- if (is_cursor_visible(draw_ctx, scene, view_layer)) {
- int co[2];
-
- /* Get cursor data into quaternion form */
- const View3DCursor *cursor = &scene->cursor;
-
- if (ED_view3d_project_int_global(
- ar, cursor->location, co, V3D_PROJ_TEST_NOP | V3D_PROJ_TEST_CLIP_NEAR) == V3D_PROJ_RET_OK)
- {
- RegionView3D *rv3d = ar->regiondata;
-
- float cursor_quat[4];
- BKE_scene_cursor_rot_to_quat(cursor, cursor_quat);
-
- /* Draw a nice anti-aliased cursor. */
- GPU_line_width(1.0f);
- GPU_blend(true);
- GPU_line_smooth(true);
-
- float eps = 1e-5f;
- rv3d->viewquat[0] = -rv3d->viewquat[0];
- bool is_aligned = compare_v4v4(cursor_quat, rv3d->viewquat, eps);
- if (is_aligned == false) {
- float tquat[4];
- rotation_between_quats_to_quat(tquat, rv3d->viewquat, cursor_quat);
- is_aligned = tquat[0] - eps < -1.0f;
- }
- rv3d->viewquat[0] = -rv3d->viewquat[0];
-
- /* Draw lines */
- if (is_aligned == false) {
- uint pos = GPU_vertformat_attr_add(immVertexFormat(), "pos", GPU_COMP_F32, 3, GPU_FETCH_FLOAT);
- immBindBuiltinProgram(GPU_SHADER_3D_UNIFORM_COLOR);
- immUniformThemeColor3(TH_VIEW_OVERLAY);
- immBegin(GPU_PRIM_LINES, 12);
-
- const float scale = ED_view3d_pixel_size_no_ui_scale(rv3d, cursor->location) * U.widget_unit;
+ const DRWContextState *draw_ctx = DRW_context_state_get();
+ ARegion *ar = draw_ctx->ar;
+ Scene *scene = draw_ctx->scene;
+ ViewLayer *view_layer = draw_ctx->view_layer;
+
+ glColorMask(GL_TRUE, GL_TRUE, GL_TRUE, GL_TRUE);
+ glDepthMask(GL_FALSE);
+ glDisable(GL_DEPTH_TEST);
+
+ if (is_cursor_visible(draw_ctx, scene, view_layer)) {
+ int co[2];
+
+ /* Get cursor data into quaternion form */
+ const View3DCursor *cursor = &scene->cursor;
+
+ if (ED_view3d_project_int_global(
+ ar, cursor->location, co, V3D_PROJ_TEST_NOP | V3D_PROJ_TEST_CLIP_NEAR) ==
+ V3D_PROJ_RET_OK) {
+ RegionView3D *rv3d = ar->regiondata;
+
+ float cursor_quat[4];
+ BKE_scene_cursor_rot_to_quat(cursor, cursor_quat);
+
+ /* Draw a nice anti-aliased cursor. */
+ GPU_line_width(1.0f);
+ GPU_blend(true);
+ GPU_line_smooth(true);
+
+ float eps = 1e-5f;
+ rv3d->viewquat[0] = -rv3d->viewquat[0];
+ bool is_aligned = compare_v4v4(cursor_quat, rv3d->viewquat, eps);
+ if (is_aligned == false) {
+ float tquat[4];
+ rotation_between_quats_to_quat(tquat, rv3d->viewquat, cursor_quat);
+ is_aligned = tquat[0] - eps < -1.0f;
+ }
+ rv3d->viewquat[0] = -rv3d->viewquat[0];
+
+ /* Draw lines */
+ if (is_aligned == false) {
+ uint pos = GPU_vertformat_attr_add(
+ immVertexFormat(), "pos", GPU_COMP_F32, 3, GPU_FETCH_FLOAT);
+ immBindBuiltinProgram(GPU_SHADER_3D_UNIFORM_COLOR);
+ immUniformThemeColor3(TH_VIEW_OVERLAY);
+ immBegin(GPU_PRIM_LINES, 12);
+
+ const float scale = ED_view3d_pixel_size_no_ui_scale(rv3d, cursor->location) *
+ U.widget_unit;
#define CURSOR_VERT(axis_vec, axis, fac) \
- immVertex3f( \
- pos, \
- cursor->location[0] + axis_vec[0] * (fac), \
- cursor->location[1] + axis_vec[1] * (fac), \
- cursor->location[2] + axis_vec[2] * (fac))
-
-#define CURSOR_EDGE(axis_vec, axis, sign) { \
- CURSOR_VERT(axis_vec, axis, sign 1.0f); \
- CURSOR_VERT(axis_vec, axis, sign 0.25f); \
- } ((void)0)
-
- for (int axis = 0; axis < 3; axis++) {
- float axis_vec[3] = {0};
- axis_vec[axis] = scale;
- mul_qt_v3(cursor_quat, axis_vec);
- CURSOR_EDGE(axis_vec, axis, +);
- CURSOR_EDGE(axis_vec, axis, -);
- }
+ immVertex3f(pos, \
+ cursor->location[0] + axis_vec[0] * (fac), \
+ cursor->location[1] + axis_vec[1] * (fac), \
+ cursor->location[2] + axis_vec[2] * (fac))
+
+#define CURSOR_EDGE(axis_vec, axis, sign) \
+ { \
+ CURSOR_VERT(axis_vec, axis, sign 1.0f); \
+ CURSOR_VERT(axis_vec, axis, sign 0.25f); \
+ } \
+ ((void)0)
+
+ for (int axis = 0; axis < 3; axis++) {
+ float axis_vec[3] = {0};
+ axis_vec[axis] = scale;
+ mul_qt_v3(cursor_quat, axis_vec);
+ CURSOR_EDGE(axis_vec, axis, +);
+ CURSOR_EDGE(axis_vec, axis, -);
+ }
#undef CURSOR_VERT
#undef CURSOR_EDGE
- immEnd();
- immUnbindProgram();
- }
-
- float original_proj[4][4];
- GPU_matrix_projection_get(original_proj);
- GPU_matrix_push();
- ED_region_pixelspace(ar);
- GPU_matrix_translate_2f(co[0] + 0.5f, co[1] + 0.5f);
- GPU_matrix_scale_2f(U.widget_unit, U.widget_unit);
-
- GPUBatch *cursor_batch = DRW_cache_cursor_get(is_aligned);
- GPUShader *shader = GPU_shader_get_builtin_shader(GPU_SHADER_2D_FLAT_COLOR);
- GPU_batch_program_set(cursor_batch, GPU_shader_get_program(shader), GPU_shader_get_interface(shader));
-
- GPU_batch_draw(cursor_batch);
-
- GPU_blend(false);
- GPU_line_smooth(false);
- GPU_matrix_pop();
- GPU_matrix_projection_set(original_proj);
- }
- }
+ immEnd();
+ immUnbindProgram();
+ }
+
+ float original_proj[4][4];
+ GPU_matrix_projection_get(original_proj);
+ GPU_matrix_push();
+ ED_region_pixelspace(ar);
+ GPU_matrix_translate_2f(co[0] + 0.5f, co[1] + 0.5f);
+ GPU_matrix_scale_2f(U.widget_unit, U.widget_unit);
+
+ GPUBatch *cursor_batch = DRW_cache_cursor_get(is_aligned);
+ GPUShader *shader = GPU_shader_get_builtin_shader(GPU_SHADER_2D_FLAT_COLOR);
+ GPU_batch_program_set(
+ cursor_batch, GPU_shader_get_program(shader), GPU_shader_get_interface(shader));
+
+ GPU_batch_draw(cursor_batch);
+
+ GPU_blend(false);
+ GPU_line_smooth(false);
+ GPU_matrix_pop();
+ GPU_matrix_projection_set(original_proj);
+ }
+ }
}
/* **************************** 3D Gizmo ******************************** */
void DRW_draw_gizmo_3d(void)
{
- const DRWContextState *draw_ctx = DRW_context_state_get();
- ARegion *ar = draw_ctx->ar;
-
- /* draw depth-culled gizmos - gizmos need to be updated *after* the view matrix was set up */
- /* TODO depth culling of gizmos is not yet supported, just drawing _3D here, should
- * later become _IN_SCENE (and draw _3D separately) */
- WM_gizmomap_draw(
- ar->gizmo_map, draw_ctx->evil_C,
- WM_GIZMOMAP_DRAWSTEP_3D);
+ const DRWContextState *draw_ctx = DRW_context_state_get();
+ ARegion *ar = draw_ctx->ar;
+ /* draw depth-culled gizmos - gizmos need to be updated *after* the view matrix was set up */
+ /* TODO depth culling of gizmos is not yet supported, just drawing _3D here, should
+ * later become _IN_SCENE (and draw _3D separately) */
+ WM_gizmomap_draw(ar->gizmo_map, draw_ctx->evil_C, WM_GIZMOMAP_DRAWSTEP_3D);
}
void DRW_draw_gizmo_2d(void)
{
- const DRWContextState *draw_ctx = DRW_context_state_get();
- ARegion *ar = draw_ctx->ar;
+ const DRWContextState *draw_ctx = DRW_context_state_get();
+ ARegion *ar = draw_ctx->ar;
- WM_gizmomap_draw(
- ar->gizmo_map, draw_ctx->evil_C,
- WM_GIZMOMAP_DRAWSTEP_2D);
+ WM_gizmomap_draw(ar->gizmo_map, draw_ctx->evil_C, WM_GIZMOMAP_DRAWSTEP_2D);
- glDepthMask(GL_TRUE);
+ glDepthMask(GL_TRUE);
}