git.blender.org/blender.git
author    Clément Foucault <foucault.clem@gmail.com>  2019-05-13 18:56:20 +0300
committer Clément Foucault <foucault.clem@gmail.com>  2019-05-14 11:57:03 +0300
commit    8bc8a62c57f91326ab3f8850785dce5452b5d703 (patch)
tree      c578b9786bc3e519f36f782cd74abd77ad52b344 /source
parent    20d9cd3a1fbd763dbe002e9baf2e3ba7fbb66f2f (diff)
DRW: Refactor: Use DRWCall to accumulate per instance attributes
This is a big change that cleans up a lot of confusing code:
- The instancing/batching data buffer distribution in draw_instance_data.c.
- The selection & drawing code in draw_manager_exec.c.
- Pretty much all non-mesh object drawing (object_mode.c).

Most of the changes are just renaming, but there is still a chance a typo might have sneaked through.

The Batching/Instancing shading groups are replaced by DRWCallBuffers. This is cleaner and conceptually more in line with what a DRWShadingGroup should be. There is still a little confusion in draw_common.c, where some functions take a shgroup as input and some don't.
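For orientation while reading the diff, the caller-side migration looks roughly like the sketch below. This is not part of the commit; the shader, pass, geometry and per-instance values are hypothetical placeholders, and only the DRW_* entry points are the ones added or kept by this patch.

/* Illustrative sketch (not part of the patch): migrating an instanced drawing helper
 * from the removed DRW_shgroup_instance_create() API to the new DRWCallBuffer API. */

static struct GPUVertFormat *g_example_format = NULL; /* hypothetical cached format */

static void example_cache_init(struct GPUShader *sh, DRWPass *pass, struct GPUBatch *geom)
{
  /* Per-instance attribute declaration is unchanged. */
  DRW_shgroup_instance_format(g_example_format,
                              {
                                  {"color", DRW_ATTR_FLOAT, 4},
                                  {"InstanceModelMatrix", DRW_ATTR_FLOAT, 16},
                              });

  /* Before: DRW_shgroup_instance_create(sh, pass, geom, g_example_format);
   * After:  a regular shading group plus a call buffer attached to it. */
  DRWShadingGroup *grp = DRW_shgroup_create(sh, pass);
  DRWCallBuffer *buf = DRW_shgroup_call_buffer_instance_add(grp, g_example_format, geom);

  /* Before: DRW_shgroup_call_dynamic_add(grp, color, obmat);
   * After:  per-instance attributes accumulate on the buffer, not the shgroup. */
  const float color[4] = {1.0f, 0.5f, 0.0f, 1.0f};
  const float obmat[4][4] = {{1, 0, 0, 0}, {0, 1, 0, 0}, {0, 0, 1, 0}, {0, 0, 0, 1}};
  DRW_buffer_add_entry(buf, color, obmat);
}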
Diffstat (limited to 'source')
-rw-r--r--  source/blender/draw/engines/eevee/eevee_lightprobes.c | 14
-rw-r--r--  source/blender/draw/engines/eevee/eevee_private.h | 3
-rw-r--r--  source/blender/draw/intern/DRW_render.h | 75
-rw-r--r--  source/blender/draw/intern/draw_armature.c | 204
-rw-r--r--  source/blender/draw/intern/draw_common.c | 310
-rw-r--r--  source/blender/draw/intern/draw_common.h | 120
-rw-r--r--  source/blender/draw/intern/draw_instance_data.c | 343
-rw-r--r--  source/blender/draw/intern/draw_instance_data.h | 23
-rw-r--r--  source/blender/draw/intern/draw_manager.c | 1
-rw-r--r--  source/blender/draw/intern/draw_manager.h | 51
-rw-r--r--  source/blender/draw/intern/draw_manager_data.c | 332
-rw-r--r--  source/blender/draw/intern/draw_manager_exec.c | 150
-rw-r--r--  source/blender/draw/modes/edit_metaball_mode.c | 10
-rw-r--r--  source/blender/draw/modes/edit_text_mode.c | 28
-rw-r--r--  source/blender/draw/modes/object_mode.c | 689
15 files changed, 1011 insertions(+), 1342 deletions(-)
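The same pattern covers the non-instanced dynamic buffers that previously went through DRW_shgroup_point_batch_create() / DRW_shgroup_line_batch_create(): the primitive type is now passed when attaching the buffer instead of being baked into a dedicated shgroup constructor. A sketch under the same assumptions as above, mirroring buffer_dynlines_flat_color() in draw_common.c:

/* Illustrative sketch (not part of the patch): a dynamic flat-color line buffer. */
static struct GPUVertFormat *g_example_line_format = NULL; /* hypothetical cached format */

static DRWCallBuffer *example_dynlines(struct GPUShader *sh, DRWPass *pass)
{
  DRW_shgroup_instance_format(g_example_line_format,
                              {
                                  {"pos", DRW_ATTR_FLOAT, 3},
                                  {"color", DRW_ATTR_FLOAT, 4},
                              });
  DRWShadingGroup *grp = DRW_shgroup_create(sh, pass);
  /* The primitive type replaces the dedicated *_line_batch_create() entry points. */
  return DRW_shgroup_call_buffer_add(grp, g_example_line_format, GPU_PRIM_LINES);
}

/* Each segment is then two entries, one per vertex:
 *   DRW_buffer_add_entry(buf, head, head_color);
 *   DRW_buffer_add_entry(buf, tail, tail_color); */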
diff --git a/source/blender/draw/engines/eevee/eevee_lightprobes.c b/source/blender/draw/engines/eevee/eevee_lightprobes.c
index 5976a30232e..a45a29ce9cb 100644
--- a/source/blender/draw/engines/eevee/eevee_lightprobes.c
+++ b/source/blender/draw/engines/eevee/eevee_lightprobes.c
@@ -436,12 +436,12 @@ void EEVEE_lightprobes_cache_init(EEVEE_ViewLayerData *sldata, EEVEE_Data *vedat
{"probe_mat", DRW_ATTR_FLOAT, 16},
});
- DRWShadingGroup *grp = DRW_shgroup_instance_create(EEVEE_shaders_probe_planar_display_sh_get(),
- psl->probe_display,
- DRW_cache_quad_get(),
- e_data.format_probe_display_planar);
- stl->g_data->planar_display_shgrp = grp;
+ DRWShadingGroup *grp = DRW_shgroup_create(EEVEE_shaders_probe_planar_display_sh_get(),
+ psl->probe_display);
DRW_shgroup_uniform_texture_ref(grp, "probePlanars", &txl->planar_pool);
+
+ stl->g_data->planar_display_shgrp = DRW_shgroup_call_buffer_instance_add(
+ grp, e_data.format_probe_display_planar, DRW_cache_quad_get());
}
else {
stl->g_data->planar_display_shgrp = NULL;
@@ -499,9 +499,9 @@ void EEVEE_lightprobes_cache_add(EEVEE_ViewLayerData *sldata, EEVEE_Data *vedata
EEVEE_lightprobes_planar_data_from_object(
ob, &pinfo->planar_data[pinfo->num_planar], &pinfo->planar_vis_tests[pinfo->num_planar]);
/* Debug Display */
- DRWShadingGroup *grp = vedata->stl->g_data->planar_display_shgrp;
+ DRWCallBuffer *grp = vedata->stl->g_data->planar_display_shgrp;
if (grp && (probe->flag & LIGHTPROBE_FLAG_SHOW_DATA)) {
- DRW_shgroup_call_dynamic_add(grp, &pinfo->num_planar, ob->obmat);
+ DRW_buffer_add_entry(grp, &pinfo->num_planar, ob->obmat);
}
pinfo->num_planar++;
diff --git a/source/blender/draw/engines/eevee/eevee_private.h b/source/blender/draw/engines/eevee/eevee_private.h
index 3d243b70bd1..ca9314daa95 100644
--- a/source/blender/draw/engines/eevee/eevee_private.h
+++ b/source/blender/draw/engines/eevee/eevee_private.h
@@ -816,8 +816,7 @@ typedef struct EEVEE_PrivateData {
struct DRWShadingGroup *refract_depth_shgrp_cull;
struct DRWShadingGroup *refract_depth_shgrp_clip;
struct DRWShadingGroup *refract_depth_shgrp_clip_cull;
- struct DRWShadingGroup *cube_display_shgrp;
- struct DRWShadingGroup *planar_display_shgrp;
+ struct DRWCallBuffer *planar_display_shgrp;
struct GHash *material_hash;
float background_alpha; /* TODO find a better place for this. */
/* Chosen lightcache: can come from Lookdev or the viewlayer. */
diff --git a/source/blender/draw/intern/DRW_render.h b/source/blender/draw/intern/DRW_render.h
index 1c943c18ed3..401ed50c1dc 100644
--- a/source/blender/draw/intern/DRW_render.h
+++ b/source/blender/draw/intern/DRW_render.h
@@ -44,6 +44,7 @@
#include "DNA_world_types.h"
#include "GPU_framebuffer.h"
+#include "GPU_primitive.h"
#include "GPU_texture.h"
#include "GPU_shader.h"
@@ -83,6 +84,9 @@ typedef struct DRWPass DRWPass;
typedef struct DRWShadingGroup DRWShadingGroup;
typedef struct DRWUniform DRWUniform;
+/* Opaque type to avoid usage as a DRWCall but it is exactly the same thing. */
+typedef struct DRWCallBuffer DRWCallBuffer;
+
/* TODO Put it somewhere else? */
typedef struct BoundSphere {
float center[3], radius;
@@ -319,8 +323,8 @@ typedef enum {
DRW_STATE_DEPTH_GREATER_EQUAL = (1 << 7),
DRW_STATE_CULL_BACK = (1 << 8),
DRW_STATE_CULL_FRONT = (1 << 9),
- DRW_STATE_WIRE = (1 << 10),
- DRW_STATE_POINT = (1 << 11),
+ DRW_STATE_WIRE = (1 << 10), /* TODO remove */
+ DRW_STATE_POINT = (1 << 11), /* TODO remove */
/** Polygon offset. Does not work with lines and points. */
DRW_STATE_OFFSET_POSITIVE = (1 << 12),
/** Polygon offset. Does not work with lines and points. */
@@ -374,19 +378,11 @@ struct GPUVertFormat *DRW_shgroup_instance_format_array(const DRWInstanceAttrFor
} \
} while (0)
+/* TODO(fclem): Remove the _create suffix. */
DRWShadingGroup *DRW_shgroup_create(struct GPUShader *shader, DRWPass *pass);
DRWShadingGroup *DRW_shgroup_create_sub(DRWShadingGroup *shgroup);
DRWShadingGroup *DRW_shgroup_material_create(struct GPUMaterial *material, DRWPass *pass);
-DRWShadingGroup *DRW_shgroup_instance_create(struct GPUShader *shader,
- DRWPass *pass,
- struct GPUBatch *geom,
- struct GPUVertFormat *format);
-DRWShadingGroup *DRW_shgroup_point_batch_create(struct GPUShader *shader, DRWPass *pass);
-DRWShadingGroup *DRW_shgroup_line_batch_create_with_format(struct GPUShader *shader,
- DRWPass *pass,
- struct GPUVertFormat *format);
-DRWShadingGroup *DRW_shgroup_line_batch_create(struct GPUShader *shader, DRWPass *pass);
DRWShadingGroup *DRW_shgroup_transform_feedback_create(struct GPUShader *shader,
DRWPass *pass,
struct GPUVertBuf *tf_target);
@@ -394,20 +390,17 @@ DRWShadingGroup *DRW_shgroup_transform_feedback_create(struct GPUShader *shader,
/* return final visibility */
typedef bool(DRWCallVisibilityFn)(bool vis_in, void *user_data);
-void DRW_shgroup_instance_batch(DRWShadingGroup *shgroup, struct GPUBatch *batch);
-
-void DRW_shgroup_call_add(DRWShadingGroup *shgroup, struct GPUBatch *geom, float (*obmat)[4]);
+/* TODO(fclem): Remove the _add suffix. */
+void DRW_shgroup_call_add(DRWShadingGroup *sh, struct GPUBatch *geom, float (*obmat)[4]);
void DRW_shgroup_call_range_add(
- DRWShadingGroup *shgroup, struct GPUBatch *geom, float (*obmat)[4], uint v_sta, uint v_count);
-void DRW_shgroup_call_procedural_points_add(DRWShadingGroup *shgroup,
- uint point_len,
- float (*obmat)[4]);
-void DRW_shgroup_call_procedural_lines_add(DRWShadingGroup *shgroup,
- uint line_count,
- float (*obmat)[4]);
-void DRW_shgroup_call_procedural_triangles_add(DRWShadingGroup *shgroup,
- uint tria_count,
+ DRWShadingGroup *sh, struct GPUBatch *geom, float (*obmat)[4], uint v_sta, uint v_ct);
+
+void DRW_shgroup_call_procedural_points_add(DRWShadingGroup *sh, uint point_ct, float (*obmat)[4]);
+void DRW_shgroup_call_procedural_lines_add(DRWShadingGroup *sh, uint line_ct, float (*obmat)[4]);
+void DRW_shgroup_call_procedural_triangles_add(DRWShadingGroup *sh,
+ uint tri_ct,
float (*obmat)[4]);
+
void DRW_shgroup_call_object_add_ex(DRWShadingGroup *shgroup,
struct GPUBatch *geom,
struct Object *ob,
@@ -422,31 +415,33 @@ void DRW_shgroup_call_object_add_with_callback(DRWShadingGroup *shgroup,
DRWCallVisibilityFn *callback,
void *user_data);
-void DRW_shgroup_call_sculpt_add(DRWShadingGroup *shading_group,
- Object *object,
- bool use_wire,
- bool use_mask,
- bool use_vert_color);
-void DRW_shgroup_call_sculpt_with_materials_add(DRWShadingGroup **shgroups,
- Object *ob,
- bool use_vcol);
-
-/* Used for drawing a batch with instancing without instance attributes. */
void DRW_shgroup_call_instances_add(DRWShadingGroup *shgroup,
struct GPUBatch *geom,
float (*obmat)[4],
uint count);
-void DRW_shgroup_call_dynamic_add_array(DRWShadingGroup *shgroup,
- const void *attr[],
- uint attr_len);
-#define DRW_shgroup_call_dynamic_add(shgroup, ...) \
+void DRW_shgroup_call_instances_with_attribs_add(DRWShadingGroup *shgroup,
+ struct GPUBatch *geom,
+ float (*obmat)[4],
+ struct GPUBatch *inst_attributes);
+
+void DRW_shgroup_call_sculpt_add(DRWShadingGroup *sh, Object *ob, bool wire, bool mask, bool vcol);
+void DRW_shgroup_call_sculpt_with_materials_add(DRWShadingGroup **sh, Object *ob, bool vcol);
+
+DRWCallBuffer *DRW_shgroup_call_buffer_add(DRWShadingGroup *shading_group,
+ struct GPUVertFormat *format,
+ GPUPrimType prim_type);
+DRWCallBuffer *DRW_shgroup_call_buffer_instance_add(DRWShadingGroup *shading_group,
+ struct GPUVertFormat *format,
+ struct GPUBatch *geom);
+
+void DRW_buffer_add_entry_array(DRWCallBuffer *buffer, const void *attr[], uint attr_len);
+
+#define DRW_buffer_add_entry(buffer, ...) \
do { \
const void *array[] = {__VA_ARGS__}; \
- DRW_shgroup_call_dynamic_add_array(shgroup, array, (sizeof(array) / sizeof(*array))); \
+ DRW_buffer_add_entry_array(buffer, array, (sizeof(array) / sizeof(*array))); \
} while (0)
-uint DRW_shgroup_get_instance_count(const DRWShadingGroup *shgroup);
-
void DRW_shgroup_state_enable(DRWShadingGroup *shgroup, DRWState state);
void DRW_shgroup_state_disable(DRWShadingGroup *shgroup, DRWState state);
void DRW_shgroup_stencil_mask(DRWShadingGroup *shgroup, uint mask);
diff --git a/source/blender/draw/intern/draw_armature.c b/source/blender/draw/intern/draw_armature.c
index 6361ff63fd4..3f651b27dd0 100644
--- a/source/blender/draw/intern/draw_armature.c
+++ b/source/blender/draw/intern/draw_armature.c
@@ -59,26 +59,26 @@ static struct {
/* Current armature object */
Object *ob;
/* Reset when changing current_armature */
- DRWShadingGroup *bone_octahedral_solid;
- DRWShadingGroup *bone_octahedral_wire;
- DRWShadingGroup *bone_octahedral_outline;
- DRWShadingGroup *bone_box_solid;
- DRWShadingGroup *bone_box_wire;
- DRWShadingGroup *bone_box_outline;
- DRWShadingGroup *bone_wire;
- DRWShadingGroup *bone_stick;
- DRWShadingGroup *bone_dof_sphere;
- DRWShadingGroup *bone_dof_lines;
- DRWShadingGroup *bone_envelope_solid;
- DRWShadingGroup *bone_envelope_distance;
- DRWShadingGroup *bone_envelope_wire;
- DRWShadingGroup *bone_point_solid;
- DRWShadingGroup *bone_point_wire;
- DRWShadingGroup *bone_axes;
- DRWShadingGroup *lines_relationship;
- DRWShadingGroup *lines_ik;
- DRWShadingGroup *lines_ik_no_target;
- DRWShadingGroup *lines_ik_spline;
+ DRWCallBuffer *bone_octahedral_solid;
+ DRWCallBuffer *bone_octahedral_wire;
+ DRWCallBuffer *bone_octahedral_outline;
+ DRWCallBuffer *bone_box_solid;
+ DRWCallBuffer *bone_box_wire;
+ DRWCallBuffer *bone_box_outline;
+ DRWCallBuffer *bone_wire;
+ DRWCallBuffer *bone_stick;
+ DRWCallBuffer *bone_dof_sphere;
+ DRWCallBuffer *bone_dof_lines;
+ DRWCallBuffer *bone_envelope_solid;
+ DRWCallBuffer *bone_envelope_distance;
+ DRWCallBuffer *bone_envelope_wire;
+ DRWCallBuffer *bone_point_solid;
+ DRWCallBuffer *bone_point_wire;
+ DRWCallBuffer *bone_axes;
+ DRWCallBuffer *lines_relationship;
+ DRWCallBuffer *lines_ik;
+ DRWCallBuffer *lines_ik_no_target;
+ DRWCallBuffer *lines_ik_spline;
DRWArmaturePasses passes;
@@ -122,22 +122,21 @@ static void drw_shgroup_bone_octahedral(const float (*bone_mat)[4],
{
if (g_data.bone_octahedral_outline == NULL) {
struct GPUBatch *geom = DRW_cache_bone_octahedral_wire_get();
- g_data.bone_octahedral_outline = shgroup_instance_bone_shape_outline(
+ g_data.bone_octahedral_outline = buffer_instance_bone_shape_outline(
g_data.passes.bone_outline, geom, sh_cfg);
}
if (g_data.bone_octahedral_solid == NULL && g_data.passes.bone_solid != NULL) {
struct GPUBatch *geom = DRW_cache_bone_octahedral_get();
- g_data.bone_octahedral_solid = shgroup_instance_bone_shape_solid(
+ g_data.bone_octahedral_solid = buffer_instance_bone_shape_solid(
g_data.passes.bone_solid, geom, g_data.transparent, sh_cfg);
}
float final_bonemat[4][4];
mul_m4_m4m4(final_bonemat, g_data.ob->obmat, bone_mat);
if (g_data.bone_octahedral_solid != NULL) {
- DRW_shgroup_call_dynamic_add(
- g_data.bone_octahedral_solid, final_bonemat, bone_color, hint_color);
+ DRW_buffer_add_entry(g_data.bone_octahedral_solid, final_bonemat, bone_color, hint_color);
}
if (outline_color[3] > 0.0f) {
- DRW_shgroup_call_dynamic_add(g_data.bone_octahedral_outline, final_bonemat, outline_color);
+ DRW_buffer_add_entry(g_data.bone_octahedral_outline, final_bonemat, outline_color);
}
}
@@ -150,21 +149,21 @@ static void drw_shgroup_bone_box(const float (*bone_mat)[4],
{
if (g_data.bone_box_wire == NULL) {
struct GPUBatch *geom = DRW_cache_bone_box_wire_get();
- g_data.bone_box_outline = shgroup_instance_bone_shape_outline(
+ g_data.bone_box_outline = buffer_instance_bone_shape_outline(
g_data.passes.bone_outline, geom, sh_cfg);
}
if (g_data.bone_box_solid == NULL && g_data.passes.bone_solid != NULL) {
struct GPUBatch *geom = DRW_cache_bone_box_get();
- g_data.bone_box_solid = shgroup_instance_bone_shape_solid(
+ g_data.bone_box_solid = buffer_instance_bone_shape_solid(
g_data.passes.bone_solid, geom, g_data.transparent, sh_cfg);
}
float final_bonemat[4][4];
mul_m4_m4m4(final_bonemat, g_data.ob->obmat, bone_mat);
if (g_data.bone_box_solid != NULL) {
- DRW_shgroup_call_dynamic_add(g_data.bone_box_solid, final_bonemat, bone_color, hint_color);
+ DRW_buffer_add_entry(g_data.bone_box_solid, final_bonemat, bone_color, hint_color);
}
if (outline_color[3] > 0.0f) {
- DRW_shgroup_call_dynamic_add(g_data.bone_box_outline, final_bonemat, outline_color);
+ DRW_buffer_add_entry(g_data.bone_box_outline, final_bonemat, outline_color);
}
}
@@ -174,15 +173,15 @@ static void drw_shgroup_bone_wire(const float (*bone_mat)[4],
const eGPUShaderConfig sh_cfg)
{
if (g_data.bone_wire == NULL) {
- g_data.bone_wire = shgroup_dynlines_flat_color(g_data.passes.bone_wire, sh_cfg);
+ g_data.bone_wire = buffer_dynlines_flat_color(g_data.passes.bone_wire, sh_cfg);
}
float head[3], tail[3];
mul_v3_m4v3(head, g_data.ob->obmat, bone_mat[3]);
- DRW_shgroup_call_dynamic_add(g_data.bone_wire, head, color);
+ DRW_buffer_add_entry(g_data.bone_wire, head, color);
add_v3_v3v3(tail, bone_mat[3], bone_mat[1]);
mul_m4_v3(g_data.ob->obmat, tail);
- DRW_shgroup_call_dynamic_add(g_data.bone_wire, tail, color);
+ DRW_buffer_add_entry(g_data.bone_wire, tail, color);
}
/* Stick */
@@ -194,12 +193,12 @@ static void drw_shgroup_bone_stick(const float (*bone_mat)[4],
const eGPUShaderConfig sh_cfg)
{
if (g_data.bone_stick == NULL) {
- g_data.bone_stick = shgroup_instance_bone_stick(g_data.passes.bone_wire, sh_cfg);
+ g_data.bone_stick = buffer_instance_bone_stick(g_data.passes.bone_wire, sh_cfg);
}
float final_bonemat[4][4], tail[4];
mul_m4_m4m4(final_bonemat, g_data.ob->obmat, bone_mat);
add_v3_v3v3(tail, final_bonemat[3], final_bonemat[1]);
- DRW_shgroup_call_dynamic_add(
+ DRW_buffer_add_entry(
g_data.bone_stick, final_bonemat[3], tail, col_wire, col_bone, col_head, col_tail);
}
@@ -212,7 +211,7 @@ static void drw_shgroup_bone_envelope_distance(const float (*bone_mat)[4],
{
if (g_data.passes.bone_envelope != NULL) {
if (g_data.bone_envelope_distance == NULL) {
- g_data.bone_envelope_distance = shgroup_instance_bone_envelope_distance(
+ g_data.bone_envelope_distance = buffer_instance_bone_envelope_distance(
g_data.passes.bone_envelope, sh_cfg);
/* passes.bone_envelope should have the DRW_STATE_CULL_FRONT state enabled. */
}
@@ -227,7 +226,7 @@ static void drw_shgroup_bone_envelope_distance(const float (*bone_mat)[4],
head_sphere[3] += *distance;
tail_sphere[3] = *radius_tail;
tail_sphere[3] += *distance;
- DRW_shgroup_call_dynamic_add(
+ DRW_buffer_add_entry(
g_data.bone_envelope_distance, head_sphere, tail_sphere, final_bonemat[0]);
}
}
@@ -241,22 +240,19 @@ static void drw_shgroup_bone_envelope(const float (*bone_mat)[4],
const eGPUShaderConfig sh_cfg)
{
if (g_data.bone_point_wire == NULL) {
- g_data.bone_point_wire = shgroup_instance_bone_sphere_outline(g_data.passes.bone_wire, sh_cfg);
+ g_data.bone_point_wire = buffer_instance_bone_sphere_outline(g_data.passes.bone_wire, sh_cfg);
}
if (g_data.bone_point_solid == NULL && g_data.passes.bone_solid != NULL) {
- g_data.bone_point_solid = shgroup_instance_bone_sphere_solid(
+ g_data.bone_point_solid = buffer_instance_bone_sphere_solid(
g_data.passes.bone_solid, g_data.transparent, sh_cfg);
}
if (g_data.bone_envelope_wire == NULL) {
- g_data.bone_envelope_wire = shgroup_instance_bone_envelope_outline(g_data.passes.bone_wire,
- sh_cfg);
+ g_data.bone_envelope_wire = buffer_instance_bone_envelope_outline(g_data.passes.bone_wire,
+ sh_cfg);
}
if (g_data.bone_envelope_solid == NULL && g_data.passes.bone_solid != NULL) {
- g_data.bone_envelope_solid = shgroup_instance_bone_envelope_solid(
+ g_data.bone_envelope_solid = buffer_instance_bone_envelope_solid(
g_data.passes.bone_solid, g_data.transparent, sh_cfg);
- /* We can have a lot of overdraw if we don't do this. Also envelope are not subject to
- * inverted matrix. */
- DRW_shgroup_state_enable(g_data.bone_envelope_solid, DRW_STATE_CULL_BACK);
}
float head_sphere[4] = {0.0f, 0.0f, 0.0f, 1.0f}, tail_sphere[4] = {0.0f, 1.0f, 0.0f, 1.0f};
@@ -274,10 +270,10 @@ static void drw_shgroup_bone_envelope(const float (*bone_mat)[4],
tmp[3][3] = 1.0f;
copy_v3_v3(tmp[3], tail_sphere);
if (g_data.bone_point_solid != NULL) {
- DRW_shgroup_call_dynamic_add(g_data.bone_point_solid, tmp, bone_color, hint_color);
+ DRW_buffer_add_entry(g_data.bone_point_solid, tmp, bone_color, hint_color);
}
if (outline_color[3] > 0.0f) {
- DRW_shgroup_call_dynamic_add(g_data.bone_point_wire, tmp, outline_color);
+ DRW_buffer_add_entry(g_data.bone_point_wire, tmp, outline_color);
}
}
else if (tail_sphere[3] < 0.0f) {
@@ -287,10 +283,10 @@ static void drw_shgroup_bone_envelope(const float (*bone_mat)[4],
tmp[3][3] = 1.0f;
copy_v3_v3(tmp[3], head_sphere);
if (g_data.bone_point_solid != NULL) {
- DRW_shgroup_call_dynamic_add(g_data.bone_point_solid, tmp, bone_color, hint_color);
+ DRW_buffer_add_entry(g_data.bone_point_solid, tmp, bone_color, hint_color);
}
if (outline_color[3] > 0.0f) {
- DRW_shgroup_call_dynamic_add(g_data.bone_point_wire, tmp, outline_color);
+ DRW_buffer_add_entry(g_data.bone_point_wire, tmp, outline_color);
}
}
else {
@@ -307,15 +303,15 @@ static void drw_shgroup_bone_envelope(const float (*bone_mat)[4],
interp_v4_v4v4(head_sphere, tail_sphere, head_sphere, fac_head);
interp_v4_v4v4(tail_sphere, tmp_sphere, tail_sphere, fac_tail);
if (g_data.bone_envelope_solid != NULL) {
- DRW_shgroup_call_dynamic_add(g_data.bone_envelope_solid,
- head_sphere,
- tail_sphere,
- bone_color,
- hint_color,
- final_bonemat[0]);
+ DRW_buffer_add_entry(g_data.bone_envelope_solid,
+ head_sphere,
+ tail_sphere,
+ bone_color,
+ hint_color,
+ final_bonemat[0]);
}
if (outline_color[3] > 0.0f) {
- DRW_shgroup_call_dynamic_add(
+ DRW_buffer_add_entry(
g_data.bone_envelope_wire, head_sphere, tail_sphere, outline_color, final_bonemat[0]);
}
}
@@ -327,10 +323,10 @@ static void drw_shgroup_bone_envelope(const float (*bone_mat)[4],
tmp[3][3] = 1.0f;
copy_v3_v3(tmp[3], tmp_sphere);
if (g_data.bone_point_solid != NULL) {
- DRW_shgroup_call_dynamic_add(g_data.bone_point_solid, tmp, bone_color, hint_color);
+ DRW_buffer_add_entry(g_data.bone_point_solid, tmp, bone_color, hint_color);
}
if (outline_color[3] > 0.0f) {
- DRW_shgroup_call_dynamic_add(g_data.bone_point_wire, tmp, outline_color);
+ DRW_buffer_add_entry(g_data.bone_point_wire, tmp, outline_color);
}
}
}
@@ -364,50 +360,50 @@ static void drw_shgroup_bone_custom_solid(const float (*bone_mat)[4],
BLI_assert(g_data.passes.custom_shapes != NULL);
if (surf && g_data.passes.bone_solid != NULL) {
- DRWShadingGroup *shgrp_geom_solid = BLI_ghash_lookup(g_data.passes.custom_shapes, surf);
+ DRWCallBuffer *buf_geom_solid = BLI_ghash_lookup(g_data.passes.custom_shapes, surf);
- if (shgrp_geom_solid == NULL) {
+ if (buf_geom_solid == NULL) {
/* TODO(fclem) needs to be moved elsewhere. */
drw_batch_cache_generate_requested(custom);
/* NOTE! g_data.transparent require a separate shading group if the
* object is transparent. This is done by passing a different ghash
* for transparent armature in pose mode. */
- shgrp_geom_solid = shgroup_instance_bone_shape_solid(
+ buf_geom_solid = buffer_instance_bone_shape_solid(
g_data.passes.bone_solid, surf, g_data.transparent, sh_cfg);
- BLI_ghash_insert(g_data.passes.custom_shapes, surf, shgrp_geom_solid);
+ BLI_ghash_insert(g_data.passes.custom_shapes, surf, buf_geom_solid);
}
- DRW_shgroup_call_dynamic_add(shgrp_geom_solid, final_bonemat, bone_color, hint_color);
+ DRW_buffer_add_entry(buf_geom_solid, final_bonemat, bone_color, hint_color);
}
if (edges && outline_color[3] > 0.0f) {
- DRWShadingGroup *shgrp_geom_wire = BLI_ghash_lookup(g_data.passes.custom_shapes, edges);
+ DRWCallBuffer *buf_geom_wire = BLI_ghash_lookup(g_data.passes.custom_shapes, edges);
- if (shgrp_geom_wire == NULL) {
+ if (buf_geom_wire == NULL) {
/* TODO(fclem) needs to be moved elsewhere. */
drw_batch_cache_generate_requested(custom);
- shgrp_geom_wire = shgroup_instance_bone_shape_outline(
+ buf_geom_wire = buffer_instance_bone_shape_outline(
g_data.passes.bone_outline, edges, sh_cfg);
- BLI_ghash_insert(g_data.passes.custom_shapes, edges, shgrp_geom_wire);
+ BLI_ghash_insert(g_data.passes.custom_shapes, edges, buf_geom_wire);
}
- DRW_shgroup_call_dynamic_add(shgrp_geom_wire, final_bonemat, outline_color);
+ DRW_buffer_add_entry(buf_geom_wire, final_bonemat, outline_color);
}
if (ledges) {
- DRWShadingGroup *shgrp_geom_ledges = BLI_ghash_lookup(g_data.passes.custom_shapes, ledges);
+ DRWCallBuffer *buf_geom_ledges = BLI_ghash_lookup(g_data.passes.custom_shapes, ledges);
- if (shgrp_geom_ledges == NULL) {
+ if (buf_geom_ledges == NULL) {
/* TODO(fclem) needs to be moved elsewhere. */
drw_batch_cache_generate_requested(custom);
- shgrp_geom_ledges = shgroup_instance_wire(g_data.passes.bone_wire, ledges);
+ buf_geom_ledges = buffer_instance_wire(g_data.passes.bone_wire, ledges);
- BLI_ghash_insert(g_data.passes.custom_shapes, ledges, shgrp_geom_ledges);
+ BLI_ghash_insert(g_data.passes.custom_shapes, ledges, buf_geom_ledges);
}
float final_color[4] = {outline_color[0], outline_color[1], outline_color[2], 1.0f};
- DRW_shgroup_call_dynamic_add(shgrp_geom_ledges, final_bonemat, final_color);
+ DRW_buffer_add_entry(buf_geom_ledges, final_bonemat, final_color);
}
}
@@ -422,20 +418,20 @@ static void drw_shgroup_bone_custom_wire(const float (*bone_mat)[4],
struct GPUBatch *geom = DRW_cache_object_all_edges_get(custom);
if (geom) {
- DRWShadingGroup *shgrp_geom_wire = BLI_ghash_lookup(g_data.passes.custom_shapes, geom);
+ DRWCallBuffer *buf_geom_wire = BLI_ghash_lookup(g_data.passes.custom_shapes, geom);
- if (shgrp_geom_wire == NULL) {
+ if (buf_geom_wire == NULL) {
/* TODO(fclem) needs to be moved elsewhere. */
drw_batch_cache_generate_requested(custom);
- shgrp_geom_wire = shgroup_instance_wire(g_data.passes.bone_wire, geom);
+ buf_geom_wire = buffer_instance_wire(g_data.passes.bone_wire, geom);
- BLI_ghash_insert(g_data.passes.custom_shapes, geom, shgrp_geom_wire);
+ BLI_ghash_insert(g_data.passes.custom_shapes, geom, buf_geom_wire);
}
float final_color[4] = {color[0], color[1], color[2], 1.0f};
float final_bonemat[4][4];
mul_m4_m4m4(final_bonemat, g_data.ob->obmat, bone_mat);
- DRW_shgroup_call_dynamic_add(shgrp_geom_wire, final_bonemat, final_color);
+ DRW_buffer_add_entry(buf_geom_wire, final_bonemat, final_color);
}
}
@@ -447,19 +443,19 @@ static void drw_shgroup_bone_point(const float (*bone_mat)[4],
const eGPUShaderConfig sh_cfg)
{
if (g_data.bone_point_wire == NULL) {
- g_data.bone_point_wire = shgroup_instance_bone_sphere_outline(g_data.passes.bone_wire, sh_cfg);
+ g_data.bone_point_wire = buffer_instance_bone_sphere_outline(g_data.passes.bone_wire, sh_cfg);
}
if (g_data.bone_point_solid == NULL && g_data.passes.bone_solid != NULL) {
- g_data.bone_point_solid = shgroup_instance_bone_sphere_solid(
+ g_data.bone_point_solid = buffer_instance_bone_sphere_solid(
g_data.passes.bone_solid, g_data.transparent, sh_cfg);
}
float final_bonemat[4][4];
mul_m4_m4m4(final_bonemat, g_data.ob->obmat, bone_mat);
if (g_data.bone_point_solid != NULL) {
- DRW_shgroup_call_dynamic_add(g_data.bone_point_solid, final_bonemat, bone_color, hint_color);
+ DRW_buffer_add_entry(g_data.bone_point_solid, final_bonemat, bone_color, hint_color);
}
if (outline_color[3] > 0.0f) {
- DRW_shgroup_call_dynamic_add(g_data.bone_point_wire, final_bonemat, outline_color);
+ DRW_buffer_add_entry(g_data.bone_point_wire, final_bonemat, outline_color);
}
}
@@ -469,11 +465,11 @@ static void drw_shgroup_bone_axes(const float (*bone_mat)[4],
const eGPUShaderConfig sh_cfg)
{
if (g_data.bone_axes == NULL) {
- g_data.bone_axes = shgroup_instance_bone_axes(g_data.passes.bone_axes, sh_cfg);
+ g_data.bone_axes = buffer_instance_bone_axes(g_data.passes.bone_axes, sh_cfg);
}
float final_bonemat[4][4];
mul_m4_m4m4(final_bonemat, g_data.ob->obmat, bone_mat);
- DRW_shgroup_call_dynamic_add(g_data.bone_axes, final_bonemat, color);
+ DRW_buffer_add_entry(g_data.bone_axes, final_bonemat, color);
}
/* Relationship lines */
@@ -482,15 +478,15 @@ static void drw_shgroup_bone_relationship_lines(const float start[3],
const eGPUShaderConfig sh_cfg)
{
if (g_data.lines_relationship == NULL) {
- g_data.lines_relationship = shgroup_dynlines_dashed_uniform_color(
+ g_data.lines_relationship = buffer_dynlines_dashed_uniform_color(
g_data.passes.relationship_lines, g_theme.wire_color, sh_cfg);
}
/* reverse order to have less stipple overlap */
float v[3];
mul_v3_m4v3(v, g_data.ob->obmat, end);
- DRW_shgroup_call_dynamic_add(g_data.lines_relationship, v);
+ DRW_buffer_add_entry(g_data.lines_relationship, v);
mul_v3_m4v3(v, g_data.ob->obmat, start);
- DRW_shgroup_call_dynamic_add(g_data.lines_relationship, v);
+ DRW_buffer_add_entry(g_data.lines_relationship, v);
}
static void drw_shgroup_bone_ik_lines(const float start[3],
@@ -499,15 +495,15 @@ static void drw_shgroup_bone_ik_lines(const float start[3],
{
if (g_data.lines_ik == NULL) {
static float fcolor[4] = {0.8f, 0.5f, 0.0f, 1.0f}; /* add theme! */
- g_data.lines_ik = shgroup_dynlines_dashed_uniform_color(
+ g_data.lines_ik = buffer_dynlines_dashed_uniform_color(
g_data.passes.relationship_lines, fcolor, sh_cfg);
}
/* reverse order to have less stipple overlap */
float v[3];
mul_v3_m4v3(v, g_data.ob->obmat, end);
- DRW_shgroup_call_dynamic_add(g_data.lines_ik, v);
+ DRW_buffer_add_entry(g_data.lines_ik, v);
mul_v3_m4v3(v, g_data.ob->obmat, start);
- DRW_shgroup_call_dynamic_add(g_data.lines_ik, v);
+ DRW_buffer_add_entry(g_data.lines_ik, v);
}
static void drw_shgroup_bone_ik_no_target_lines(const float start[3],
@@ -516,15 +512,15 @@ static void drw_shgroup_bone_ik_no_target_lines(const float start[3],
{
if (g_data.lines_ik_no_target == NULL) {
static float fcolor[4] = {0.8f, 0.8f, 0.2f, 1.0f}; /* add theme! */
- g_data.lines_ik_no_target = shgroup_dynlines_dashed_uniform_color(
+ g_data.lines_ik_no_target = buffer_dynlines_dashed_uniform_color(
g_data.passes.relationship_lines, fcolor, sh_cfg);
}
/* reverse order to have less stipple overlap */
float v[3];
mul_v3_m4v3(v, g_data.ob->obmat, end);
- DRW_shgroup_call_dynamic_add(g_data.lines_ik_no_target, v);
+ DRW_buffer_add_entry(g_data.lines_ik_no_target, v);
mul_v3_m4v3(v, g_data.ob->obmat, start);
- DRW_shgroup_call_dynamic_add(g_data.lines_ik_no_target, v);
+ DRW_buffer_add_entry(g_data.lines_ik_no_target, v);
}
static void drw_shgroup_bone_ik_spline_lines(const float start[3],
@@ -533,15 +529,15 @@ static void drw_shgroup_bone_ik_spline_lines(const float start[3],
{
if (g_data.lines_ik_spline == NULL) {
static float fcolor[4] = {0.8f, 0.8f, 0.2f, 1.0f}; /* add theme! */
- g_data.lines_ik_spline = shgroup_dynlines_dashed_uniform_color(
+ g_data.lines_ik_spline = buffer_dynlines_dashed_uniform_color(
g_data.passes.relationship_lines, fcolor, sh_cfg);
}
/* reverse order to have less stipple overlap */
float v[3];
mul_v3_m4v3(v, g_data.ob->obmat, end);
- DRW_shgroup_call_dynamic_add(g_data.lines_ik_spline, v);
+ DRW_buffer_add_entry(g_data.lines_ik_spline, v);
mul_v3_m4v3(v, g_data.ob->obmat, start);
- DRW_shgroup_call_dynamic_add(g_data.lines_ik_spline, v);
+ DRW_buffer_add_entry(g_data.lines_ik_spline, v);
}
/** \} */
@@ -1645,12 +1641,10 @@ static void draw_bone_dofs(bPoseChannel *pchan)
}
if (g_data.bone_dof_sphere == NULL) {
- g_data.bone_dof_lines = shgroup_instance_bone_dof(g_data.passes.bone_wire,
- DRW_cache_bone_dof_lines_get());
- g_data.bone_dof_sphere = shgroup_instance_bone_dof(g_data.passes.bone_envelope,
- DRW_cache_bone_dof_sphere_get());
- DRW_shgroup_state_enable(g_data.bone_dof_sphere, DRW_STATE_BLEND);
- DRW_shgroup_state_disable(g_data.bone_dof_sphere, DRW_STATE_CULL_FRONT);
+ g_data.bone_dof_lines = buffer_instance_bone_dof(
+ g_data.passes.bone_wire, DRW_cache_bone_dof_lines_get(), false);
+ g_data.bone_dof_sphere = buffer_instance_bone_dof(
+ g_data.passes.bone_envelope, DRW_cache_bone_dof_sphere_get(), true);
}
/* *0.5f here comes from M_PI/360.0f when rotations were still in degrees */
@@ -1683,20 +1677,20 @@ static void draw_bone_dofs(bPoseChannel *pchan)
amax[0] = xminmax[1];
amin[1] = zminmax[0];
amax[1] = zminmax[1];
- DRW_shgroup_call_dynamic_add(g_data.bone_dof_sphere, final_bonemat, col_sphere, amin, amax);
- DRW_shgroup_call_dynamic_add(g_data.bone_dof_lines, final_bonemat, col_lines, amin, amax);
+ DRW_buffer_add_entry(g_data.bone_dof_sphere, final_bonemat, col_sphere, amin, amax);
+ DRW_buffer_add_entry(g_data.bone_dof_lines, final_bonemat, col_lines, amin, amax);
}
if (pchan->ikflag & BONE_IK_XLIMIT) {
amin[0] = xminmax[0];
amax[0] = xminmax[1];
amin[1] = amax[1] = 0.0f;
- DRW_shgroup_call_dynamic_add(g_data.bone_dof_lines, final_bonemat, col_xaxis, amin, amax);
+ DRW_buffer_add_entry(g_data.bone_dof_lines, final_bonemat, col_xaxis, amin, amax);
}
if (pchan->ikflag & BONE_IK_ZLIMIT) {
amin[1] = zminmax[0];
amax[1] = zminmax[1];
amin[0] = amax[0] = 0.0f;
- DRW_shgroup_call_dynamic_add(g_data.bone_dof_lines, final_bonemat, col_zaxis, amin, amax);
+ DRW_buffer_add_entry(g_data.bone_dof_lines, final_bonemat, col_zaxis, amin, amax);
}
}
diff --git a/source/blender/draw/intern/draw_common.c b/source/blender/draw/intern/draw_common.c
index 9abc7ec6c6e..f2351885962 100644
--- a/source/blender/draw/intern/draw_common.c
+++ b/source/blender/draw/intern/draw_common.c
@@ -285,7 +285,8 @@ static struct {
struct GPUVertFormat *instance_bone_envelope_distance;
struct GPUVertFormat *instance_bone_envelope_outline;
struct GPUVertFormat *instance_mball_handles;
- struct GPUVertFormat *dynlines_color;
+ struct GPUVertFormat *pos_color;
+ struct GPUVertFormat *pos;
} g_formats = {NULL};
void DRW_globals_free(void)
@@ -310,34 +311,36 @@ void DRW_shgroup_world_clip_planes_from_rv3d(DRWShadingGroup *shgrp, const Regio
DRW_shgroup_state_enable(shgrp, DRW_STATE_CLIP_PLANES);
}
-DRWShadingGroup *shgroup_dynlines_flat_color(DRWPass *pass, eGPUShaderConfig sh_cfg)
+struct DRWCallBuffer *buffer_dynlines_flat_color(DRWPass *pass, eGPUShaderConfig sh_cfg)
{
GPUShader *sh = GPU_shader_get_builtin_shader_with_config(GPU_SHADER_3D_FLAT_COLOR, sh_cfg);
- DRW_shgroup_instance_format(g_formats.dynlines_color,
+ DRW_shgroup_instance_format(g_formats.pos_color,
{
{"pos", DRW_ATTR_FLOAT, 3},
{"color", DRW_ATTR_FLOAT, 4},
});
- DRWShadingGroup *grp = DRW_shgroup_line_batch_create_with_format(
- sh, pass, g_formats.dynlines_color);
+ DRWShadingGroup *grp = DRW_shgroup_create(sh, pass);
if (sh_cfg == GPU_SHADER_CFG_CLIPPED) {
DRW_shgroup_world_clip_planes_from_rv3d(grp, DRW_context_state_get()->rv3d);
}
- return grp;
+ return DRW_shgroup_call_buffer_add(grp, g_formats.pos_color, GPU_PRIM_LINES);
}
-DRWShadingGroup *shgroup_dynlines_dashed_uniform_color(DRWPass *pass,
- const float color[4],
- eGPUShaderConfig sh_cfg)
+struct DRWCallBuffer *buffer_dynlines_dashed_uniform_color(DRWPass *pass,
+ const float color[4],
+ eGPUShaderConfig sh_cfg)
{
GPUShader *sh = GPU_shader_get_builtin_shader_with_config(
GPU_SHADER_3D_LINE_DASHED_UNIFORM_COLOR, sh_cfg);
static float dash_width = 6.0f;
static float dash_factor = 0.5f;
- DRWShadingGroup *grp = DRW_shgroup_line_batch_create(sh, pass);
+
+ DRW_shgroup_instance_format(g_formats.pos, {{"pos", DRW_ATTR_FLOAT, 3}});
+
+ DRWShadingGroup *grp = DRW_shgroup_create(sh, pass);
DRW_shgroup_uniform_vec4(grp, "color", color, 1);
DRW_shgroup_uniform_vec2(grp, "viewport_size", DRW_viewport_size_get(), 1);
DRW_shgroup_uniform_float(grp, "dash_width", &dash_width, 1);
@@ -346,60 +349,53 @@ DRWShadingGroup *shgroup_dynlines_dashed_uniform_color(DRWPass *pass,
if (sh_cfg == GPU_SHADER_CFG_CLIPPED) {
DRW_shgroup_world_clip_planes_from_rv3d(grp, DRW_context_state_get()->rv3d);
}
- return grp;
+ return DRW_shgroup_call_buffer_add(grp, g_formats.pos, GPU_PRIM_LINES);
}
-DRWShadingGroup *shgroup_dynpoints_uniform_color(DRWPass *pass,
- const float color[4],
- const float *size,
- eGPUShaderConfig sh_cfg)
+struct DRWCallBuffer *buffer_dynpoints_uniform_color(DRWShadingGroup *grp)
{
- GPUShader *sh = GPU_shader_get_builtin_shader_with_config(
- GPU_SHADER_3D_POINT_UNIFORM_SIZE_UNIFORM_COLOR_AA, sh_cfg);
+ DRW_shgroup_instance_format(g_formats.pos, {{"pos", DRW_ATTR_FLOAT, 3}});
- DRWShadingGroup *grp = DRW_shgroup_point_batch_create(sh, pass);
- DRW_shgroup_uniform_vec4(grp, "color", color, 1);
- DRW_shgroup_uniform_float(grp, "size", size, 1);
- DRW_shgroup_state_enable(grp, DRW_STATE_POINT);
- if (sh_cfg == GPU_SHADER_CFG_CLIPPED) {
- DRW_shgroup_world_clip_planes_from_rv3d(grp, DRW_context_state_get()->rv3d);
- }
- return grp;
+ return DRW_shgroup_call_buffer_add(grp, g_formats.pos, GPU_PRIM_POINTS);
}
-DRWShadingGroup *shgroup_groundlines_uniform_color(DRWPass *pass,
- const float color[4],
- eGPUShaderConfig sh_cfg)
+struct DRWCallBuffer *buffer_groundlines_uniform_color(DRWPass *pass,
+ const float color[4],
+ eGPUShaderConfig sh_cfg)
{
GPUShader *sh = GPU_shader_get_builtin_shader_with_config(GPU_SHADER_3D_GROUNDLINE, sh_cfg);
- DRWShadingGroup *grp = DRW_shgroup_point_batch_create(sh, pass);
+ DRW_shgroup_instance_format(g_formats.pos, {{"pos", DRW_ATTR_FLOAT, 3}});
+
+ DRWShadingGroup *grp = DRW_shgroup_create(sh, pass);
DRW_shgroup_uniform_vec4(grp, "color", color, 1);
if (sh_cfg == GPU_SHADER_CFG_CLIPPED) {
DRW_shgroup_world_clip_planes_from_rv3d(grp, DRW_context_state_get()->rv3d);
}
- return grp;
+ return DRW_shgroup_call_buffer_add(grp, g_formats.pos, GPU_PRIM_POINTS);
}
-DRWShadingGroup *shgroup_groundpoints_uniform_color(DRWPass *pass,
- const float color[4],
- eGPUShaderConfig sh_cfg)
+struct DRWCallBuffer *buffer_groundpoints_uniform_color(DRWPass *pass,
+ const float color[4],
+ eGPUShaderConfig sh_cfg)
{
GPUShader *sh = GPU_shader_get_builtin_shader_with_config(GPU_SHADER_3D_GROUNDPOINT, sh_cfg);
- DRWShadingGroup *grp = DRW_shgroup_point_batch_create(sh, pass);
+ DRW_shgroup_instance_format(g_formats.pos, {{"pos", DRW_ATTR_FLOAT, 3}});
+
+ DRWShadingGroup *grp = DRW_shgroup_create(sh, pass);
DRW_shgroup_uniform_vec4(grp, "color", color, 1);
DRW_shgroup_state_enable(grp, DRW_STATE_POINT);
if (sh_cfg == GPU_SHADER_CFG_CLIPPED) {
DRW_shgroup_world_clip_planes_from_rv3d(grp, DRW_context_state_get()->rv3d);
}
- return grp;
+ return DRW_shgroup_call_buffer_add(grp, g_formats.pos, GPU_PRIM_POINTS);
}
-DRWShadingGroup *shgroup_instance_screenspace(DRWPass *pass,
- struct GPUBatch *geom,
- const float *size,
- eGPUShaderConfig sh_cfg)
+struct DRWCallBuffer *buffer_instance_screenspace(DRWPass *pass,
+ struct GPUBatch *geom,
+ const float *size,
+ eGPUShaderConfig sh_cfg)
{
GPUShader *sh = GPU_shader_get_builtin_shader_with_config(
GPU_SHADER_3D_SCREENSPACE_VARIYING_COLOR, sh_cfg);
@@ -410,18 +406,17 @@ DRWShadingGroup *shgroup_instance_screenspace(DRWPass *pass,
{"color", DRW_ATTR_FLOAT, 3},
});
- DRWShadingGroup *grp = DRW_shgroup_instance_create(
- sh, pass, geom, g_formats.instance_screenspace);
+ DRWShadingGroup *grp = DRW_shgroup_create(sh, pass);
DRW_shgroup_uniform_float(grp, "size", size, 1);
DRW_shgroup_uniform_float(grp, "pixel_size", DRW_viewport_pixelsize_get(), 1);
DRW_shgroup_uniform_vec3(grp, "screen_vecs[0]", DRW_viewport_screenvecs_get(), 2);
if (sh_cfg == GPU_SHADER_CFG_CLIPPED) {
DRW_shgroup_world_clip_planes_from_rv3d(grp, DRW_context_state_get()->rv3d);
}
- return grp;
+ return DRW_shgroup_call_buffer_instance_add(grp, g_formats.instance_screenspace, geom);
}
-DRWShadingGroup *shgroup_instance_solid(DRWPass *pass, struct GPUBatch *geom)
+struct DRWCallBuffer *buffer_instance_solid(DRWPass *pass, struct GPUBatch *geom)
{
static float light[3] = {0.0f, 0.0f, 1.0f};
GPUShader *sh = GPU_shader_get_builtin_shader(
@@ -433,13 +428,13 @@ DRWShadingGroup *shgroup_instance_solid(DRWPass *pass, struct GPUBatch *geom)
{"color", DRW_ATTR_FLOAT, 4},
});
- DRWShadingGroup *grp = DRW_shgroup_instance_create(sh, pass, geom, g_formats.instance_color);
+ DRWShadingGroup *grp = DRW_shgroup_create(sh, pass);
DRW_shgroup_uniform_vec3(grp, "light", light, 1);
- return grp;
+ return DRW_shgroup_call_buffer_instance_add(grp, g_formats.instance_color, geom);
}
-DRWShadingGroup *shgroup_instance_wire(DRWPass *pass, struct GPUBatch *geom)
+struct DRWCallBuffer *buffer_instance_wire(DRWPass *pass, struct GPUBatch *geom)
{
GPUShader *sh = GPU_shader_get_builtin_shader(GPU_SHADER_3D_OBJECTSPACE_VARIYING_COLOR);
@@ -449,14 +444,14 @@ DRWShadingGroup *shgroup_instance_wire(DRWPass *pass, struct GPUBatch *geom)
{"color", DRW_ATTR_FLOAT, 4},
});
- DRWShadingGroup *grp = DRW_shgroup_instance_create(sh, pass, geom, g_formats.instance_color);
+ DRWShadingGroup *grp = DRW_shgroup_create(sh, pass);
- return grp;
+ return DRW_shgroup_call_buffer_instance_add(grp, g_formats.instance_color, geom);
}
-DRWShadingGroup *shgroup_instance_screen_aligned(DRWPass *pass,
- struct GPUBatch *geom,
- eGPUShaderConfig sh_cfg)
+struct DRWCallBuffer *buffer_instance_screen_aligned(DRWPass *pass,
+ struct GPUBatch *geom,
+ eGPUShaderConfig sh_cfg)
{
GPUShader *sh = GPU_shader_get_builtin_shader_with_config(GPU_SHADER_3D_INSTANCE_SCREEN_ALIGNED,
sh_cfg);
@@ -468,18 +463,17 @@ DRWShadingGroup *shgroup_instance_screen_aligned(DRWPass *pass,
{"InstanceModelMatrix", DRW_ATTR_FLOAT, 16},
});
- DRWShadingGroup *grp = DRW_shgroup_instance_create(
- sh, pass, geom, g_formats.instance_screen_aligned);
+ DRWShadingGroup *grp = DRW_shgroup_create(sh, pass);
DRW_shgroup_uniform_vec3(grp, "screen_vecs[0]", DRW_viewport_screenvecs_get(), 2);
if (sh_cfg == GPU_SHADER_CFG_CLIPPED) {
DRW_shgroup_world_clip_planes_from_rv3d(grp, DRW_context_state_get()->rv3d);
}
- return grp;
+ return DRW_shgroup_call_buffer_instance_add(grp, g_formats.instance_screen_aligned, geom);
}
-DRWShadingGroup *shgroup_instance_scaled(DRWPass *pass,
- struct GPUBatch *geom,
- eGPUShaderConfig sh_cfg)
+struct DRWCallBuffer *buffer_instance_scaled(DRWPass *pass,
+ struct GPUBatch *geom,
+ eGPUShaderConfig sh_cfg)
{
GPUShader *sh_inst = GPU_shader_get_builtin_shader_with_config(
GPU_SHADER_INSTANCE_VARIYING_COLOR_VARIYING_SCALE, sh_cfg);
@@ -491,15 +485,16 @@ DRWShadingGroup *shgroup_instance_scaled(DRWPass *pass,
{"InstanceModelMatrix", DRW_ATTR_FLOAT, 16},
});
- DRWShadingGroup *grp = DRW_shgroup_instance_create(
- sh_inst, pass, geom, g_formats.instance_scaled);
+ DRWShadingGroup *grp = DRW_shgroup_create(sh_inst, pass);
if (sh_cfg == GPU_SHADER_CFG_CLIPPED) {
DRW_shgroup_world_clip_planes_from_rv3d(grp, DRW_context_state_get()->rv3d);
}
- return grp;
+ return DRW_shgroup_call_buffer_instance_add(grp, g_formats.instance_scaled, geom);
}
-DRWShadingGroup *shgroup_instance(DRWPass *pass, struct GPUBatch *geom, eGPUShaderConfig sh_cfg)
+struct DRWCallBuffer *buffer_instance(DRWPass *pass,
+ struct GPUBatch *geom,
+ eGPUShaderConfig sh_cfg)
{
GPUShader *sh_inst = GPU_shader_get_builtin_shader_with_config(
GPU_SHADER_INSTANCE_VARIYING_COLOR_VARIYING_SIZE, sh_cfg);
@@ -511,22 +506,16 @@ DRWShadingGroup *shgroup_instance(DRWPass *pass, struct GPUBatch *geom, eGPUShad
{"InstanceModelMatrix", DRW_ATTR_FLOAT, 16},
});
- DRWShadingGroup *grp = DRW_shgroup_instance_create(
- sh_inst, pass, geom, g_formats.instance_sized);
+ DRWShadingGroup *grp = DRW_shgroup_create(sh_inst, pass);
DRW_shgroup_state_disable(grp, DRW_STATE_BLEND);
if (sh_cfg == GPU_SHADER_CFG_CLIPPED) {
DRW_shgroup_world_clip_planes_from_rv3d(grp, DRW_context_state_get()->rv3d);
}
- return grp;
+ return DRW_shgroup_call_buffer_instance_add(grp, g_formats.instance_sized, geom);
}
-DRWShadingGroup *shgroup_instance_alpha(DRWPass *pass,
- struct GPUBatch *geom,
- eGPUShaderConfig sh_cfg)
+struct DRWCallBuffer *buffer_instance_alpha(DRWShadingGroup *grp, struct GPUBatch *geom)
{
- GPUShader *sh_inst = GPU_shader_get_builtin_shader_with_config(
- GPU_SHADER_INSTANCE_VARIYING_COLOR_VARIYING_SIZE, sh_cfg);
-
DRW_shgroup_instance_format(g_formats.instance_sized,
{
{"color", DRW_ATTR_FLOAT, 4},
@@ -534,17 +523,12 @@ DRWShadingGroup *shgroup_instance_alpha(DRWPass *pass,
{"InstanceModelMatrix", DRW_ATTR_FLOAT, 16},
});
- DRWShadingGroup *grp = DRW_shgroup_instance_create(
- sh_inst, pass, geom, g_formats.instance_sized);
- if (sh_cfg == GPU_SHADER_CFG_CLIPPED) {
- DRW_shgroup_world_clip_planes_from_rv3d(grp, DRW_context_state_get()->rv3d);
- }
- return grp;
+ return DRW_shgroup_call_buffer_instance_add(grp, g_formats.instance_sized, geom);
}
-DRWShadingGroup *shgroup_instance_empty_axes(DRWPass *pass,
- struct GPUBatch *geom,
- eGPUShaderConfig sh_cfg)
+struct DRWCallBuffer *buffer_instance_empty_axes(DRWPass *pass,
+ struct GPUBatch *geom,
+ eGPUShaderConfig sh_cfg)
{
COMMON_Shaders *sh_data = &g_shaders[sh_cfg];
if (sh_data->empty_axes_sh == NULL) {
@@ -563,16 +547,15 @@ DRWShadingGroup *shgroup_instance_empty_axes(DRWPass *pass,
{"InstanceModelMatrix", DRW_ATTR_FLOAT, 16},
});
- DRWShadingGroup *grp = DRW_shgroup_instance_create(
- sh_data->empty_axes_sh, pass, geom, g_formats.instance_sized);
+ DRWShadingGroup *grp = DRW_shgroup_create(sh_data->empty_axes_sh, pass);
DRW_shgroup_uniform_vec3(grp, "screenVecs[0]", DRW_viewport_screenvecs_get(), 2);
if (sh_cfg == GPU_SHADER_CFG_CLIPPED) {
DRW_shgroup_world_clip_planes_from_rv3d(grp, DRW_context_state_get()->rv3d);
}
- return grp;
+ return DRW_shgroup_call_buffer_instance_add(grp, g_formats.instance_sized, geom);
}
-DRWShadingGroup *shgroup_instance_outline(DRWPass *pass, struct GPUBatch *geom, int *baseid)
+struct DRWCallBuffer *buffer_instance_outline(DRWPass *pass, struct GPUBatch *geom, int *baseid)
{
GPUShader *sh_inst = GPU_shader_get_builtin_shader(
GPU_SHADER_INSTANCE_VARIYING_ID_VARIYING_SIZE);
@@ -584,16 +567,15 @@ DRWShadingGroup *shgroup_instance_outline(DRWPass *pass, struct GPUBatch *geom,
{"InstanceModelMatrix", DRW_ATTR_FLOAT, 16},
});
- DRWShadingGroup *grp = DRW_shgroup_instance_create(
- sh_inst, pass, geom, g_formats.instance_outline);
+ DRWShadingGroup *grp = DRW_shgroup_create(sh_inst, pass);
DRW_shgroup_uniform_int(grp, "baseId", baseid, 1);
- return grp;
+ return DRW_shgroup_call_buffer_instance_add(grp, g_formats.instance_outline, geom);
}
-DRWShadingGroup *shgroup_camera_instance(DRWPass *pass,
- struct GPUBatch *geom,
- eGPUShaderConfig sh_cfg)
+struct DRWCallBuffer *buffer_camera_instance(DRWPass *pass,
+ struct GPUBatch *geom,
+ eGPUShaderConfig sh_cfg)
{
GPUShader *sh_inst = GPU_shader_get_builtin_shader_with_config(GPU_SHADER_CAMERA, sh_cfg);
@@ -606,17 +588,16 @@ DRWShadingGroup *shgroup_camera_instance(DRWPass *pass,
{"InstanceModelMatrix", DRW_ATTR_FLOAT, 16},
});
- DRWShadingGroup *grp = DRW_shgroup_instance_create(
- sh_inst, pass, geom, g_formats.instance_camera);
+ DRWShadingGroup *grp = DRW_shgroup_create(sh_inst, pass);
if (sh_cfg == GPU_SHADER_CFG_CLIPPED) {
DRW_shgroup_world_clip_planes_from_rv3d(grp, DRW_context_state_get()->rv3d);
}
- return grp;
+ return DRW_shgroup_call_buffer_instance_add(grp, g_formats.instance_camera, geom);
}
-DRWShadingGroup *shgroup_distance_lines_instance(DRWPass *pass,
- struct GPUBatch *geom,
- eGPUShaderConfig sh_cfg)
+struct DRWCallBuffer *buffer_distance_lines_instance(DRWPass *pass,
+ struct GPUBatch *geom,
+ eGPUShaderConfig sh_cfg)
{
GPUShader *sh_inst = GPU_shader_get_builtin_shader_with_config(GPU_SHADER_DISTANCE_LINES,
sh_cfg);
@@ -630,18 +611,17 @@ DRWShadingGroup *shgroup_distance_lines_instance(DRWPass *pass,
{"InstanceModelMatrix", DRW_ATTR_FLOAT, 16},
});
- DRWShadingGroup *grp = DRW_shgroup_instance_create(
- sh_inst, pass, geom, g_formats.instance_distance_lines);
+ DRWShadingGroup *grp = DRW_shgroup_create(sh_inst, pass);
DRW_shgroup_uniform_float(grp, "size", &point_size, 1);
if (sh_cfg == GPU_SHADER_CFG_CLIPPED) {
DRW_shgroup_world_clip_planes_from_rv3d(grp, DRW_context_state_get()->rv3d);
}
- return grp;
+ return DRW_shgroup_call_buffer_instance_add(grp, g_formats.instance_distance_lines, geom);
}
-DRWShadingGroup *shgroup_spot_instance(DRWPass *pass,
- struct GPUBatch *geom,
- eGPUShaderConfig sh_cfg)
+struct DRWCallBuffer *buffer_spot_instance(DRWPass *pass,
+ struct GPUBatch *geom,
+ eGPUShaderConfig sh_cfg)
{
GPUShader *sh_inst = GPU_shader_get_builtin_shader_with_config(
GPU_SHADER_INSTANCE_EDGES_VARIYING_COLOR, sh_cfg);
@@ -654,17 +634,17 @@ DRWShadingGroup *shgroup_spot_instance(DRWPass *pass,
{"InstanceModelMatrix", DRW_ATTR_FLOAT, 16},
});
- DRWShadingGroup *grp = DRW_shgroup_instance_create(sh_inst, pass, geom, g_formats.instance_spot);
+ DRWShadingGroup *grp = DRW_shgroup_create(sh_inst, pass);
DRW_shgroup_uniform_bool(grp, "drawFront", &False, 1);
DRW_shgroup_uniform_bool(grp, "drawBack", &False, 1);
DRW_shgroup_uniform_bool(grp, "drawSilhouette", &True, 1);
if (sh_cfg == GPU_SHADER_CFG_CLIPPED) {
DRW_shgroup_world_clip_planes_from_rv3d(grp, DRW_context_state_get()->rv3d);
}
- return grp;
+ return DRW_shgroup_call_buffer_instance_add(grp, g_formats.instance_spot, geom);
}
-DRWShadingGroup *shgroup_instance_bone_axes(DRWPass *pass, eGPUShaderConfig sh_cfg)
+struct DRWCallBuffer *buffer_instance_bone_axes(DRWPass *pass, eGPUShaderConfig sh_cfg)
{
COMMON_Shaders *sh_data = &g_shaders[sh_cfg];
if (sh_data->bone_axes == NULL) {
@@ -682,16 +662,16 @@ DRWShadingGroup *shgroup_instance_bone_axes(DRWPass *pass, eGPUShaderConfig sh_c
{"color", DRW_ATTR_FLOAT, 4},
});
- DRWShadingGroup *grp = DRW_shgroup_instance_create(
- sh_data->bone_axes, pass, DRW_cache_bone_arrows_get(), g_formats.instance_color);
+ DRWShadingGroup *grp = DRW_shgroup_create(sh_data->bone_axes, pass);
DRW_shgroup_uniform_vec3(grp, "screenVecs[0]", DRW_viewport_screenvecs_get(), 2);
if (sh_cfg == GPU_SHADER_CFG_CLIPPED) {
DRW_shgroup_world_clip_planes_from_rv3d(grp, DRW_context_state_get()->rv3d);
}
- return grp;
+ return DRW_shgroup_call_buffer_instance_add(
+ grp, g_formats.instance_color, DRW_cache_bone_arrows_get());
}
-DRWShadingGroup *shgroup_instance_bone_envelope_outline(DRWPass *pass, eGPUShaderConfig sh_cfg)
+struct DRWCallBuffer *buffer_instance_bone_envelope_outline(DRWPass *pass, eGPUShaderConfig sh_cfg)
{
COMMON_Shaders *sh_data = &g_shaders[sh_cfg];
if (sh_data->bone_envelope_outline == NULL) {
@@ -712,18 +692,17 @@ DRWShadingGroup *shgroup_instance_bone_envelope_outline(DRWPass *pass, eGPUShade
{"xAxis", DRW_ATTR_FLOAT, 3},
});
- DRWShadingGroup *grp = DRW_shgroup_instance_create(sh_data->bone_envelope_outline,
- pass,
- DRW_cache_bone_envelope_outline_get(),
- g_formats.instance_bone_envelope_outline);
+ DRWShadingGroup *grp = DRW_shgroup_create(sh_data->bone_envelope_outline, pass);
DRW_shgroup_uniform_vec2(grp, "viewportSize", DRW_viewport_size_get(), 1);
if (sh_cfg == GPU_SHADER_CFG_CLIPPED) {
DRW_shgroup_world_clip_planes_from_rv3d(grp, DRW_context_state_get()->rv3d);
}
- return grp;
+ return DRW_shgroup_call_buffer_instance_add(
+ grp, g_formats.instance_bone_envelope_outline, DRW_cache_bone_envelope_outline_get());
}
-DRWShadingGroup *shgroup_instance_bone_envelope_distance(DRWPass *pass, eGPUShaderConfig sh_cfg)
+struct DRWCallBuffer *buffer_instance_bone_envelope_distance(DRWPass *pass,
+ eGPUShaderConfig sh_cfg)
{
COMMON_Shaders *sh_data = &g_shaders[sh_cfg];
if (sh_data->bone_envelope_distance == NULL) {
@@ -743,19 +722,17 @@ DRWShadingGroup *shgroup_instance_bone_envelope_distance(DRWPass *pass, eGPUShad
{"xAxis", DRW_ATTR_FLOAT, 3},
});
- DRWShadingGroup *grp = DRW_shgroup_instance_create(sh_data->bone_envelope_distance,
- pass,
- DRW_cache_bone_envelope_solid_get(),
- g_formats.instance_bone_envelope_distance);
+ DRWShadingGroup *grp = DRW_shgroup_create(sh_data->bone_envelope_distance, pass);
if (sh_cfg == GPU_SHADER_CFG_CLIPPED) {
DRW_shgroup_world_clip_planes_from_rv3d(grp, DRW_context_state_get()->rv3d);
}
- return grp;
+ return DRW_shgroup_call_buffer_instance_add(
+ grp, g_formats.instance_bone_envelope_distance, DRW_cache_bone_envelope_solid_get());
}
-DRWShadingGroup *shgroup_instance_bone_envelope_solid(DRWPass *pass,
- bool transp,
- eGPUShaderConfig sh_cfg)
+struct DRWCallBuffer *buffer_instance_bone_envelope_solid(DRWPass *pass,
+ bool transp,
+ eGPUShaderConfig sh_cfg)
{
COMMON_Shaders *sh_data = &g_shaders[sh_cfg];
if (sh_data->bone_envelope == NULL) {
@@ -777,18 +754,19 @@ DRWShadingGroup *shgroup_instance_bone_envelope_solid(DRWPass *pass,
{"xAxis", DRW_ATTR_FLOAT, 3},
});
- DRWShadingGroup *grp = DRW_shgroup_instance_create(sh_data->bone_envelope,
- pass,
- DRW_cache_bone_envelope_solid_get(),
- g_formats.instance_bone_envelope);
+ DRWShadingGroup *grp = DRW_shgroup_create(sh_data->bone_envelope, pass);
+ /* We can have a lot of overdraw if we don't do this. Also envelope are not subject to
+ * inverted matrix. */
+ DRW_shgroup_state_enable(grp, DRW_STATE_CULL_BACK);
DRW_shgroup_uniform_float_copy(grp, "alpha", transp ? 0.6f : 1.0f);
if (sh_cfg == GPU_SHADER_CFG_CLIPPED) {
DRW_shgroup_world_clip_planes_from_rv3d(grp, DRW_context_state_get()->rv3d);
}
- return grp;
+ return DRW_shgroup_call_buffer_instance_add(
+ grp, g_formats.instance_bone_envelope, DRW_cache_bone_envelope_solid_get());
}
-DRWShadingGroup *shgroup_instance_mball_handles(DRWPass *pass, eGPUShaderConfig sh_cfg)
+struct DRWCallBuffer *buffer_instance_mball_handles(DRWPass *pass, eGPUShaderConfig sh_cfg)
{
COMMON_Shaders *sh_data = &g_shaders[sh_cfg];
if (sh_data->mball_handles == NULL) {
@@ -807,21 +785,19 @@ DRWShadingGroup *shgroup_instance_mball_handles(DRWPass *pass, eGPUShaderConfig
{"color", DRW_ATTR_FLOAT, 3},
});
- DRWShadingGroup *grp = DRW_shgroup_instance_create(sh_data->mball_handles,
- pass,
- DRW_cache_screenspace_circle_get(),
- g_formats.instance_mball_handles);
+ DRWShadingGroup *grp = DRW_shgroup_create(sh_data->mball_handles, pass);
DRW_shgroup_uniform_vec3(grp, "screen_vecs[0]", DRW_viewport_screenvecs_get(), 2);
if (sh_cfg == GPU_SHADER_CFG_CLIPPED) {
DRW_shgroup_world_clip_planes_from_rv3d(grp, DRW_context_state_get()->rv3d);
}
- return grp;
+ return DRW_shgroup_call_buffer_instance_add(
+ grp, g_formats.instance_mball_handles, DRW_cache_screenspace_circle_get());
}
/* Only works with batches with adjacency infos. */
-DRWShadingGroup *shgroup_instance_bone_shape_outline(DRWPass *pass,
- struct GPUBatch *geom,
- eGPUShaderConfig sh_cfg)
+struct DRWCallBuffer *buffer_instance_bone_shape_outline(DRWPass *pass,
+ struct GPUBatch *geom,
+ eGPUShaderConfig sh_cfg)
{
COMMON_Shaders *sh_data = &g_shaders[sh_cfg];
if (sh_data->shape_outline == NULL) {
@@ -846,19 +822,18 @@ DRWShadingGroup *shgroup_instance_bone_shape_outline(DRWPass *pass,
{"outlineColorSize", DRW_ATTR_FLOAT, 4},
});
- DRWShadingGroup *grp = DRW_shgroup_instance_create(
- sh_data->shape_outline, pass, geom, g_formats.instance_bone_outline);
+ DRWShadingGroup *grp = DRW_shgroup_create(sh_data->shape_outline, pass);
DRW_shgroup_uniform_vec2(grp, "viewportSize", DRW_viewport_size_get(), 1);
if (sh_cfg == GPU_SHADER_CFG_CLIPPED) {
DRW_shgroup_world_clip_planes_from_rv3d(grp, DRW_context_state_get()->rv3d);
}
- return grp;
+ return DRW_shgroup_call_buffer_instance_add(grp, g_formats.instance_bone_outline, geom);
}
-DRWShadingGroup *shgroup_instance_bone_shape_solid(DRWPass *pass,
- struct GPUBatch *geom,
- bool transp,
- eGPUShaderConfig sh_cfg)
+struct DRWCallBuffer *buffer_instance_bone_shape_solid(DRWPass *pass,
+ struct GPUBatch *geom,
+ bool transp,
+ eGPUShaderConfig sh_cfg)
{
COMMON_Shaders *sh_data = &g_shaders[sh_cfg];
if (sh_data->shape_solid == NULL) {
@@ -880,18 +855,17 @@ DRWShadingGroup *shgroup_instance_bone_shape_solid(DRWPass *pass,
{"stateColor", DRW_ATTR_FLOAT, 3},
});
- DRWShadingGroup *grp = DRW_shgroup_instance_create(
- sh_data->shape_solid, pass, geom, g_formats.instance_bone);
+ DRWShadingGroup *grp = DRW_shgroup_create(sh_data->shape_solid, pass);
DRW_shgroup_uniform_float_copy(grp, "alpha", transp ? 0.6f : 1.0f);
if (sh_cfg == GPU_SHADER_CFG_CLIPPED) {
DRW_shgroup_world_clip_planes_from_rv3d(grp, DRW_context_state_get()->rv3d);
}
- return grp;
+ return DRW_shgroup_call_buffer_instance_add(grp, g_formats.instance_bone, geom);
}
-DRWShadingGroup *shgroup_instance_bone_sphere_solid(DRWPass *pass,
- bool transp,
- eGPUShaderConfig sh_cfg)
+struct DRWCallBuffer *buffer_instance_bone_sphere_solid(DRWPass *pass,
+ bool transp,
+ eGPUShaderConfig sh_cfg)
{
COMMON_Shaders *sh_data = &g_shaders[sh_cfg];
if (sh_data->bone_sphere == NULL) {
@@ -910,17 +884,17 @@ DRWShadingGroup *shgroup_instance_bone_sphere_solid(DRWPass *pass,
{"stateColor", DRW_ATTR_FLOAT, 3},
});
- DRWShadingGroup *grp = DRW_shgroup_instance_create(
- sh_data->bone_sphere, pass, DRW_cache_bone_point_get(), g_formats.instance_bone);
+ DRWShadingGroup *grp = DRW_shgroup_create(sh_data->bone_sphere, pass);
/* More transparent than the shape to be less distractive. */
DRW_shgroup_uniform_float_copy(grp, "alpha", transp ? 0.4f : 1.0f);
if (sh_cfg == GPU_SHADER_CFG_CLIPPED) {
DRW_shgroup_world_clip_planes_from_rv3d(grp, DRW_context_state_get()->rv3d);
}
- return grp;
+ return DRW_shgroup_call_buffer_instance_add(
+ grp, g_formats.instance_bone, DRW_cache_bone_point_get());
}
-DRWShadingGroup *shgroup_instance_bone_sphere_outline(DRWPass *pass, eGPUShaderConfig sh_cfg)
+struct DRWCallBuffer *buffer_instance_bone_sphere_outline(DRWPass *pass, eGPUShaderConfig sh_cfg)
{
COMMON_Shaders *sh_data = &g_shaders[sh_cfg];
if (sh_data->bone_sphere_outline == NULL) {
@@ -939,18 +913,16 @@ DRWShadingGroup *shgroup_instance_bone_sphere_outline(DRWPass *pass, eGPUShaderC
{"outlineColorSize", DRW_ATTR_FLOAT, 4},
});
- DRWShadingGroup *grp = DRW_shgroup_instance_create(sh_data->bone_sphere_outline,
- pass,
- DRW_cache_bone_point_wire_outline_get(),
- g_formats.instance_bone_outline);
+ DRWShadingGroup *grp = DRW_shgroup_create(sh_data->bone_sphere_outline, pass);
DRW_shgroup_uniform_vec2(grp, "viewportSize", DRW_viewport_size_get(), 1);
if (sh_cfg == GPU_SHADER_CFG_CLIPPED) {
DRW_shgroup_world_clip_planes_from_rv3d(grp, DRW_context_state_get()->rv3d);
}
- return grp;
+ return DRW_shgroup_call_buffer_instance_add(
+ grp, g_formats.instance_bone_outline, DRW_cache_bone_point_wire_outline_get());
}
-DRWShadingGroup *shgroup_instance_bone_stick(DRWPass *pass, eGPUShaderConfig sh_cfg)
+struct DRWCallBuffer *buffer_instance_bone_stick(DRWPass *pass, eGPUShaderConfig sh_cfg)
{
COMMON_Shaders *sh_data = &g_shaders[sh_cfg];
if (sh_data->bone_stick == NULL) {
@@ -973,17 +945,19 @@ DRWShadingGroup *shgroup_instance_bone_stick(DRWPass *pass, eGPUShaderConfig sh_
{"tailColor", DRW_ATTR_FLOAT, 4},
});
- DRWShadingGroup *grp = DRW_shgroup_instance_create(
- sh_data->bone_stick, pass, DRW_cache_bone_stick_get(), g_formats.instance_bone_stick);
+ DRWShadingGroup *grp = DRW_shgroup_create(sh_data->bone_stick, pass);
DRW_shgroup_uniform_vec2(grp, "viewportSize", DRW_viewport_size_get(), 1);
DRW_shgroup_uniform_float_copy(grp, "stickSize", 5.0f * U.pixelsize);
if (sh_cfg == GPU_SHADER_CFG_CLIPPED) {
DRW_shgroup_world_clip_planes_from_rv3d(grp, DRW_context_state_get()->rv3d);
}
- return grp;
+ return DRW_shgroup_call_buffer_instance_add(
+ grp, g_formats.instance_bone_stick, DRW_cache_bone_stick_get());
}
-struct DRWShadingGroup *shgroup_instance_bone_dof(struct DRWPass *pass, struct GPUBatch *geom)
+struct DRWCallBuffer *buffer_instance_bone_dof(struct DRWPass *pass,
+ struct GPUBatch *geom,
+ bool blend)
{
COMMON_Shaders *sh_data = &g_shaders[GPU_SHADER_CFG_DEFAULT];
if (sh_data->bone_dofs == NULL) {
@@ -999,10 +973,12 @@ struct DRWShadingGroup *shgroup_instance_bone_dof(struct DRWPass *pass, struct G
{"amax", DRW_ATTR_FLOAT, 2},
});
- DRWShadingGroup *grp = DRW_shgroup_instance_create(
- sh_data->bone_dofs, pass, geom, g_formats.instance_bone_dof);
-
- return grp;
+ DRWShadingGroup *grp = DRW_shgroup_create(sh_data->bone_dofs, pass);
+ if (blend) {
+ DRW_shgroup_state_enable(grp, DRW_STATE_BLEND);
+ DRW_shgroup_state_disable(grp, DRW_STATE_CULL_FRONT);
+ }
+ return DRW_shgroup_call_buffer_instance_add(grp, g_formats.instance_bone_dof, geom);
}
struct GPUShader *mpath_line_shader_get(void)
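The helpers above all reduce to the same refactored pattern: declare a static vertex format, create a plain shading group, and return the DRWCallBuffer that will accumulate per-instance attributes for the supplied geometry. A minimal sketch of a helper written against this API; buffer_instance_example, g_formats.instance_example, the attribute layout and the shader parameter are placeholders for illustration, not part of this codebase:

struct DRWCallBuffer *buffer_instance_example(struct GPUShader *shader,
                                              struct DRWPass *pass,
                                              struct GPUBatch *geom)
{
  /* Hypothetical helper. The format must be static because its pointer is the
   * identifier used to reuse the underlying buffer between redraws. */
  DRW_shgroup_instance_format(g_formats.instance_example,
                              {
                                  {"pos", DRW_ATTR_FLOAT, 3},
                                  {"color", DRW_ATTR_FLOAT, 4},
                              });

  DRWShadingGroup *grp = DRW_shgroup_create(shader, pass);
  /* Per-instance attributes accumulate in the returned call buffer. */
  return DRW_shgroup_call_buffer_instance_add(grp, g_formats.instance_example, geom);
}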
diff --git a/source/blender/draw/intern/draw_common.h b/source/blender/draw/intern/draw_common.h
index 489bc7459df..df7220c0d2a 100644
--- a/source/blender/draw/intern/draw_common.h
+++ b/source/blender/draw/intern/draw_common.h
@@ -23,6 +23,7 @@
#ifndef __DRAW_COMMON_H__
#define __DRAW_COMMON_H__
+struct DRWCallBuffer;
struct DRWPass;
struct DRWShadingGroup;
struct GPUBatch;
@@ -125,77 +126,74 @@ void DRW_globals_free(void);
void DRW_shgroup_world_clip_planes_from_rv3d(struct DRWShadingGroup *shgrp,
const RegionView3D *rv3d);
-struct DRWShadingGroup *shgroup_dynlines_flat_color(struct DRWPass *pass, eGPUShaderConfig sh_cfg);
-struct DRWShadingGroup *shgroup_dynlines_dashed_uniform_color(struct DRWPass *pass,
- const float color[4],
- eGPUShaderConfig sh_cfg);
-struct DRWShadingGroup *shgroup_dynpoints_uniform_color(struct DRWPass *pass,
- const float color[4],
- const float *size,
- eGPUShaderConfig sh_cfg);
-struct DRWShadingGroup *shgroup_groundlines_uniform_color(struct DRWPass *pass,
- const float color[4],
- eGPUShaderConfig sh_cfg);
-struct DRWShadingGroup *shgroup_groundpoints_uniform_color(struct DRWPass *pass,
+/* TODO(fclem) ideally, most of the DRWCallBuffer functions shouldn't create a shgroup. */
+struct DRWCallBuffer *buffer_dynlines_flat_color(struct DRWPass *pass, eGPUShaderConfig sh_cfg);
+struct DRWCallBuffer *buffer_dynlines_dashed_uniform_color(struct DRWPass *pass,
const float color[4],
eGPUShaderConfig sh_cfg);
-struct DRWShadingGroup *shgroup_instance_screenspace(struct DRWPass *pass,
+struct DRWCallBuffer *buffer_dynpoints_uniform_color(struct DRWShadingGroup *grp);
+struct DRWCallBuffer *buffer_groundlines_uniform_color(struct DRWPass *pass,
+ const float color[4],
+ eGPUShaderConfig sh_cfg);
+struct DRWCallBuffer *buffer_groundpoints_uniform_color(struct DRWPass *pass,
+ const float color[4],
+ eGPUShaderConfig sh_cfg);
+struct DRWCallBuffer *buffer_instance_screenspace(struct DRWPass *pass,
+ struct GPUBatch *geom,
+ const float *size,
+ eGPUShaderConfig sh_cfg);
+struct DRWCallBuffer *buffer_instance_solid(struct DRWPass *pass, struct GPUBatch *geom);
+struct DRWCallBuffer *buffer_instance_wire(struct DRWPass *pass, struct GPUBatch *geom);
+struct DRWCallBuffer *buffer_instance_screen_aligned(struct DRWPass *pass,
struct GPUBatch *geom,
- const float *size,
eGPUShaderConfig sh_cfg);
-struct DRWShadingGroup *shgroup_instance_solid(struct DRWPass *pass, struct GPUBatch *geom);
-struct DRWShadingGroup *shgroup_instance_wire(struct DRWPass *pass, struct GPUBatch *geom);
-struct DRWShadingGroup *shgroup_instance_screen_aligned(struct DRWPass *pass,
- struct GPUBatch *geom,
- eGPUShaderConfig sh_cfg);
-struct DRWShadingGroup *shgroup_instance_empty_axes(struct DRWPass *pass,
- struct GPUBatch *geom,
- eGPUShaderConfig sh_cfg);
-struct DRWShadingGroup *shgroup_instance_scaled(struct DRWPass *pass,
- struct GPUBatch *geom,
- eGPUShaderConfig sh_cfg);
-struct DRWShadingGroup *shgroup_instance(struct DRWPass *pass,
- struct GPUBatch *geom,
- eGPUShaderConfig sh_cfg);
-struct DRWShadingGroup *shgroup_instance_alpha(struct DRWPass *pass,
- struct GPUBatch *geom,
- eGPUShaderConfig sh_cfg);
-struct DRWShadingGroup *shgroup_instance_outline(struct DRWPass *pass,
+struct DRWCallBuffer *buffer_instance_empty_axes(struct DRWPass *pass,
struct GPUBatch *geom,
- int *baseid);
-struct DRWShadingGroup *shgroup_camera_instance(struct DRWPass *pass,
- struct GPUBatch *geom,
- eGPUShaderConfig sh_cfg);
-struct DRWShadingGroup *shgroup_distance_lines_instance(struct DRWPass *pass,
- struct GPUBatch *geom,
- eGPUShaderConfig sh_cfg);
-struct DRWShadingGroup *shgroup_spot_instance(struct DRWPass *pass,
+ eGPUShaderConfig sh_cfg);
+struct DRWCallBuffer *buffer_instance_scaled(struct DRWPass *pass,
+ struct GPUBatch *geom,
+ eGPUShaderConfig sh_cfg);
+struct DRWCallBuffer *buffer_instance(struct DRWPass *pass,
+ struct GPUBatch *geom,
+ eGPUShaderConfig sh_cfg);
+struct DRWCallBuffer *buffer_instance_alpha(struct DRWShadingGroup *grp, struct GPUBatch *geom);
+struct DRWCallBuffer *buffer_instance_outline(struct DRWPass *pass,
struct GPUBatch *geom,
- eGPUShaderConfig sh_cfg);
-struct DRWShadingGroup *shgroup_instance_mball_handles(struct DRWPass *pass,
- eGPUShaderConfig sh_cfg);
-struct DRWShadingGroup *shgroup_instance_bone_axes(struct DRWPass *pass, eGPUShaderConfig sh_cfg);
-struct DRWShadingGroup *shgroup_instance_bone_envelope_distance(struct DRWPass *pass,
- eGPUShaderConfig sh_cfg);
-struct DRWShadingGroup *shgroup_instance_bone_envelope_outline(struct DRWPass *pass,
- eGPUShaderConfig sh_cfg);
-struct DRWShadingGroup *shgroup_instance_bone_envelope_solid(struct DRWPass *pass,
- bool transp,
+ int *baseid);
+struct DRWCallBuffer *buffer_camera_instance(struct DRWPass *pass,
+ struct GPUBatch *geom,
+ eGPUShaderConfig sh_cfg);
+struct DRWCallBuffer *buffer_distance_lines_instance(struct DRWPass *pass,
+ struct GPUBatch *geom,
+ eGPUShaderConfig sh_cfg);
+struct DRWCallBuffer *buffer_spot_instance(struct DRWPass *pass,
+ struct GPUBatch *geom,
+ eGPUShaderConfig sh_cfg);
+struct DRWCallBuffer *buffer_instance_mball_handles(struct DRWPass *pass, eGPUShaderConfig sh_cfg);
+struct DRWCallBuffer *buffer_instance_bone_axes(struct DRWPass *pass, eGPUShaderConfig sh_cfg);
+struct DRWCallBuffer *buffer_instance_bone_envelope_distance(struct DRWPass *pass,
eGPUShaderConfig sh_cfg);
-struct DRWShadingGroup *shgroup_instance_bone_shape_outline(struct DRWPass *pass,
- struct GPUBatch *geom,
+struct DRWCallBuffer *buffer_instance_bone_envelope_outline(struct DRWPass *pass,
eGPUShaderConfig sh_cfg);
-struct DRWShadingGroup *shgroup_instance_bone_shape_solid(struct DRWPass *pass,
- struct GPUBatch *geom,
+struct DRWCallBuffer *buffer_instance_bone_envelope_solid(struct DRWPass *pass,
bool transp,
eGPUShaderConfig sh_cfg);
-struct DRWShadingGroup *shgroup_instance_bone_sphere_outline(struct DRWPass *pass,
- eGPUShaderConfig sh_cfg);
-struct DRWShadingGroup *shgroup_instance_bone_sphere_solid(struct DRWPass *pass,
- bool transp,
- eGPUShaderConfig sh_cfg);
-struct DRWShadingGroup *shgroup_instance_bone_stick(struct DRWPass *pass, eGPUShaderConfig sh_cfg);
-struct DRWShadingGroup *shgroup_instance_bone_dof(struct DRWPass *pass, struct GPUBatch *geom);
+struct DRWCallBuffer *buffer_instance_bone_shape_outline(struct DRWPass *pass,
+ struct GPUBatch *geom,
+ eGPUShaderConfig sh_cfg);
+struct DRWCallBuffer *buffer_instance_bone_shape_solid(struct DRWPass *pass,
+ struct GPUBatch *geom,
+ bool transp,
+ eGPUShaderConfig sh_cfg);
+struct DRWCallBuffer *buffer_instance_bone_sphere_outline(struct DRWPass *pass,
+ eGPUShaderConfig sh_cfg);
+struct DRWCallBuffer *buffer_instance_bone_sphere_solid(struct DRWPass *pass,
+ bool transp,
+ eGPUShaderConfig sh_cfg);
+struct DRWCallBuffer *buffer_instance_bone_stick(struct DRWPass *pass, eGPUShaderConfig sh_cfg);
+struct DRWCallBuffer *buffer_instance_bone_dof(struct DRWPass *pass,
+ struct GPUBatch *geom,
+ bool blend);
struct GPUShader *mpath_line_shader_get(void);
struct GPUShader *mpath_points_shader_get(void);
diff --git a/source/blender/draw/intern/draw_instance_data.c b/source/blender/draw/intern/draw_instance_data.c
index e8d91309e06..b88ad936c28 100644
--- a/source/blender/draw/intern/draw_instance_data.c
+++ b/source/blender/draw/intern/draw_instance_data.c
@@ -36,33 +36,9 @@
#include "MEM_guardedalloc.h"
#include "BLI_utildefines.h"
#include "BLI_mempool.h"
+#include "BLI_memblock.h"
-#define BUFFER_CHUNK_SIZE 32
-#define BUFFER_VERTS_CHUNK 32
-
-typedef struct DRWBatchingBuffer {
- struct DRWShadingGroup *shgroup; /* Link back to the owning shGroup. Also tells if it's used */
- GPUVertFormat *format; /* Identifier. */
- GPUVertBuf *vert; /* GPUVertBuf contained in the GPUBatch. */
- GPUBatch *batch; /* GPUBatch containing the GPUVertBuf. */
-} DRWBatchingBuffer;
-
-typedef struct DRWInstancingBuffer {
- struct DRWShadingGroup *shgroup; /* Link back to the owning shGroup. Also tells if it's used */
- GPUVertFormat *format; /* Identifier. */
- GPUBatch *instance; /* Identifier. */
- GPUVertBuf *vert; /* GPUVertBuf contained in the GPUBatch. */
- GPUBatch *batch; /* GPUBatch containing the GPUVertBuf. */
-} DRWInstancingBuffer;
-
-typedef struct DRWInstanceChunk {
- size_t cursor; /* Offset to the next instance data. */
- size_t alloc_size; /* Number of DRWBatchingBuffer/Batches alloc'd in ibufs/btchs. */
- union {
- DRWBatchingBuffer *bbufs;
- DRWInstancingBuffer *ibufs;
- };
-} DRWInstanceChunk;
+#include "intern/gpu_primitive_private.h"
struct DRWInstanceData {
struct DRWInstanceData *next;
@@ -77,212 +53,167 @@ struct DRWInstanceDataList {
DRWInstanceData *idata_head[MAX_INSTANCE_DATA_SIZE];
DRWInstanceData *idata_tail[MAX_INSTANCE_DATA_SIZE];
- DRWInstanceChunk instancing;
- DRWInstanceChunk batching;
+ BLI_memblock *pool_instancing;
+ BLI_memblock *pool_batching;
+ BLI_memblock *pool_buffers;
};
+typedef struct DRWTempBufferHandle {
+ /** Must be first for casting. */
+ GPUVertBuf buf;
+ /** Format pointer for reuse. */
+ GPUVertFormat *format;
+ /** Touched vertex length for resize. */
+ uint *vert_len;
+} DRWTempBufferHandle;
+
static ListBase g_idatalists = {NULL, NULL};
/* -------------------------------------------------------------------- */
/** \name Instance Buffer Management
* \{ */
-/**
- * This manager allows to distribute existing batches for instancing
- * attributes. This reduce the number of batches creation.
- * Querying a batch is done with a vertex format. This format should
- * be static so that it's pointer never changes (because we are using
- * this pointer as identifier [we don't want to check the full format
- * that would be too slow]).
- */
-static void instance_batch_free(GPUBatch *batch, void *UNUSED(user_data))
+static void instance_batch_free(GPUBatch *geom, void *UNUSED(user_data))
{
- if (batch->verts[0] == NULL) {
+ if (geom->verts[0] == NULL) {
/** XXX This is a false positive case.
* The batch has been requested but not init yet
* and there is a chance that it might become init.
*/
return;
}
- /* Free all batches that have the same key before they are reused. */
+
+ /* Free all batches that use the same vbos before they are reused. */
/* TODO: Make it thread safe! Batch freeing can happen from another thread. */
- /* XXX we need to iterate over all idatalists unless we make some smart
- * data structure to store the locations to update. */
- for (DRWInstanceDataList *idatalist = g_idatalists.first; idatalist;
- idatalist = idatalist->next) {
- DRWInstancingBuffer *ibuf = idatalist->instancing.ibufs;
- for (int i = 0; i < idatalist->instancing.alloc_size; i++, ibuf++) {
- if (ibuf->instance == batch) {
- BLI_assert(ibuf->shgroup == NULL); /* Make sure it has no other users. */
- GPU_VERTBUF_DISCARD_SAFE(ibuf->vert);
- GPU_BATCH_DISCARD_SAFE(ibuf->batch);
- /* Tag as non alloced. */
- ibuf->format = NULL;
+ /* FIXME: This is not really correct. The correct way would be to check based on
+ * the vertex buffers. We assume the batch containing the VBO is being freed when it should. */
+ /* PERF: This is doing a linear search. This can be very costly. */
+ LISTBASE_FOREACH (DRWInstanceDataList *, data_list, &g_idatalists) {
+ BLI_memblock *memblock = data_list->pool_instancing;
+ BLI_memblock_iter iter;
+ BLI_memblock_iternew(memblock, &iter);
+ GPUBatch *batch;
+ while ((batch = (GPUBatch *)BLI_memblock_iterstep(&iter))) {
+ /* Only check verts[0] that's enough. */
+ if (batch->verts[0] == geom->verts[0]) {
+ GPU_batch_clear(batch);
}
}
}
}
-void DRW_batching_buffer_request(DRWInstanceDataList *idatalist,
- GPUVertFormat *format,
- GPUPrimType type,
- struct DRWShadingGroup *shgroup,
- GPUBatch **r_batch,
- GPUVertBuf **r_vert)
+/**
+ * This manager allows distributing existing batches for instancing
+ * attributes. This reduces the number of batch creations.
+ * Querying a batch is done with a vertex format. This format should
+ * be static so that its pointer never changes (because we are using
+ * this pointer as an identifier; we don't want to check the full format,
+ * which would be too slow).
+ */
+GPUVertBuf *DRW_temp_buffer_request(DRWInstanceDataList *idatalist,
+ GPUVertFormat *format,
+ uint *vert_len)
{
- DRWInstanceChunk *chunk = &idatalist->batching;
- DRWBatchingBuffer *bbuf = idatalist->batching.bbufs;
- BLI_assert(format);
- /* Search for an unused batch. */
- for (int i = 0; i < idatalist->batching.alloc_size; i++, bbuf++) {
- if (bbuf->shgroup == NULL) {
- if (bbuf->format == format) {
- bbuf->shgroup = shgroup;
- *r_batch = bbuf->batch;
- *r_vert = bbuf->vert;
- return;
- }
- }
- }
- int new_id = 0; /* Find insertion point. */
- for (; new_id < chunk->alloc_size; ++new_id) {
- if (chunk->bbufs[new_id].format == NULL) {
- break;
- }
+ BLI_assert(format != NULL);
+ BLI_assert(vert_len != NULL);
+
+ DRWTempBufferHandle *handle = BLI_memblock_alloc(idatalist->pool_buffers);
+ GPUVertBuf *vert = &handle->buf;
+ handle->vert_len = vert_len;
+
+ if (handle->format != format) {
+ handle->format = format;
+ /* TODO/PERF: Keep the allocated data instead of freeing it, to avoid reallocation. */
+ GPU_vertbuf_clear(vert);
+ GPU_vertbuf_init_with_format_ex(vert, format, GPU_USAGE_DYNAMIC);
+ GPU_vertbuf_data_alloc(vert, DRW_BUFFER_VERTS_CHUNK);
}
- /* If there is no batch left. Allocate more. */
- if (new_id == chunk->alloc_size) {
- new_id = chunk->alloc_size;
- chunk->alloc_size += BUFFER_CHUNK_SIZE;
- chunk->bbufs = MEM_reallocN(chunk->bbufs, chunk->alloc_size * sizeof(DRWBatchingBuffer));
- memset(chunk->bbufs + new_id, 0, sizeof(DRWBatchingBuffer) * BUFFER_CHUNK_SIZE);
- }
- /* Create the batch. */
- bbuf = chunk->bbufs + new_id;
- bbuf->vert = *r_vert = GPU_vertbuf_create_with_format_ex(format, GPU_USAGE_DYNAMIC);
- bbuf->batch = *r_batch = GPU_batch_create_ex(type, bbuf->vert, NULL, 0);
- bbuf->format = format;
- bbuf->shgroup = shgroup;
- GPU_vertbuf_data_alloc(*r_vert, BUFFER_VERTS_CHUNK);
+ return vert;
}
-void DRW_instancing_buffer_request(DRWInstanceDataList *idatalist,
- GPUVertFormat *format,
- GPUBatch *instance,
- struct DRWShadingGroup *shgroup,
- GPUBatch **r_batch,
- GPUVertBuf **r_vert)
+/* NOTE: Does not return a valid drawable batch until DRW_instance_buffer_finish has run. */
+GPUBatch *DRW_temp_batch_instance_request(DRWInstanceDataList *idatalist,
+ GPUVertBuf *buf,
+ GPUBatch *geom)
{
- DRWInstanceChunk *chunk = &idatalist->instancing;
- DRWInstancingBuffer *ibuf = idatalist->instancing.ibufs;
- BLI_assert(format);
- /* Search for an unused batch. */
- for (int i = 0; i < idatalist->instancing.alloc_size; i++, ibuf++) {
- if (ibuf->shgroup == NULL) {
- if (ibuf->format == format) {
- if (ibuf->instance == instance) {
- ibuf->shgroup = shgroup;
- *r_batch = ibuf->batch;
- *r_vert = ibuf->vert;
- return;
- }
- }
+ /* Do not call this with a batch that is already an instancing batch. */
+ BLI_assert(geom->inst == NULL);
+
+ GPUBatch *batch = BLI_memblock_alloc(idatalist->pool_instancing);
+ bool is_compatible = (batch->gl_prim_type == geom->gl_prim_type) && (batch->inst == buf) &&
+ (batch->phase == GPU_BATCH_READY_TO_DRAW);
+ for (int i = 0; i < GPU_BATCH_VBO_MAX_LEN && is_compatible; i++) {
+ if (batch->verts[i] != geom->verts[i]) {
+ is_compatible = false;
}
}
- int new_id = 0; /* Find insertion point. */
- for (; new_id < chunk->alloc_size; ++new_id) {
- if (chunk->ibufs[new_id].format == NULL) {
- break;
- }
+
+ if (!is_compatible) {
+ GPU_batch_clear(batch);
+ /* Save args and init later */
+ batch->inst = buf;
+ batch->phase = GPU_BATCH_READY_TO_BUILD;
+ batch->verts[0] = (void *)geom; /* HACK to save the pointer without other alloc. */
+
+ /* Make sure to free this batch if the instance geom gets freed. */
+ GPU_batch_callback_free_set(geom, &instance_batch_free, NULL);
}
- /* If there is no batch left. Allocate more. */
- if (new_id == chunk->alloc_size) {
- new_id = chunk->alloc_size;
- chunk->alloc_size += BUFFER_CHUNK_SIZE;
- chunk->ibufs = MEM_reallocN(chunk->ibufs, chunk->alloc_size * sizeof(DRWInstancingBuffer));
- memset(chunk->ibufs + new_id, 0, sizeof(DRWInstancingBuffer) * BUFFER_CHUNK_SIZE);
+ return batch;
+}
+
+/* NOTE: Use only with buf allocated via DRW_temp_buffer_request. */
+GPUBatch *DRW_temp_batch_request(DRWInstanceDataList *idatalist,
+ GPUVertBuf *buf,
+ GPUPrimType prim_type)
+{
+ GPUBatch *batch = BLI_memblock_alloc(idatalist->pool_batching);
+ bool is_compatible = (batch->verts[0] == buf) &&
+ (batch->gl_prim_type == convert_prim_type_to_gl(prim_type));
+ if (!is_compatible) {
+ GPU_batch_clear(batch);
+ GPU_batch_init(batch, prim_type, buf, NULL);
}
- /* Create the batch. */
- ibuf = chunk->ibufs + new_id;
- ibuf->vert = *r_vert = GPU_vertbuf_create_with_format_ex(format, GPU_USAGE_DYNAMIC);
- ibuf->batch = *r_batch = MEM_callocN(sizeof(GPUBatch), "GPUBatch");
- ibuf->format = format;
- ibuf->shgroup = shgroup;
- ibuf->instance = instance;
- GPU_vertbuf_data_alloc(*r_vert, BUFFER_VERTS_CHUNK);
- /* Make sure to free this ibuf if the instance batch gets free. */
- GPU_batch_callback_free_set(instance, &instance_batch_free, NULL);
+ return batch;
+}
+
+static void temp_buffer_handle_free(DRWTempBufferHandle *handle)
+{
+ handle->format = NULL;
+ GPU_vertbuf_clear(&handle->buf);
}
void DRW_instance_buffer_finish(DRWInstanceDataList *idatalist)
{
- size_t realloc_size = 1; /* Avoid 0 size realloc. */
- /* Resize down buffers in use and send data to GPU & free unused buffers. */
- DRWInstanceChunk *batching = &idatalist->batching;
- DRWBatchingBuffer *bbuf = batching->bbufs;
- for (int i = 0; i < batching->alloc_size; i++, bbuf++) {
- if (bbuf->shgroup != NULL) {
- realloc_size = i + 1;
- uint vert_len = DRW_shgroup_get_instance_count(bbuf->shgroup);
- vert_len += (vert_len == 0) ? 1 : 0; /* Do not realloc to 0 size buffer */
- if (vert_len + BUFFER_VERTS_CHUNK <= bbuf->vert->vertex_len) {
- uint size = vert_len + BUFFER_VERTS_CHUNK - 1;
- size = size - size % BUFFER_VERTS_CHUNK;
- GPU_vertbuf_data_resize(bbuf->vert, size);
+ /* Resize down buffers in use and send data to GPU. */
+ BLI_memblock_iter iter;
+ DRWTempBufferHandle *handle;
+ BLI_memblock_iternew(idatalist->pool_buffers, &iter);
+ while ((handle = BLI_memblock_iterstep(&iter))) {
+ if (handle->vert_len != NULL) {
+ uint vert_len = *(handle->vert_len);
+ uint target_buf_size = ((vert_len / DRW_BUFFER_VERTS_CHUNK) + 1) * DRW_BUFFER_VERTS_CHUNK;
+ if (target_buf_size < handle->buf.vertex_alloc) {
+ GPU_vertbuf_data_resize(&handle->buf, target_buf_size);
}
- GPU_vertbuf_use(bbuf->vert); /* Send data. */
- bbuf->shgroup = NULL; /* Set as non used for the next round. */
- }
- else {
- GPU_VERTBUF_DISCARD_SAFE(bbuf->vert);
- GPU_BATCH_DISCARD_SAFE(bbuf->batch);
- bbuf->format = NULL; /* Tag as non alloced. */
+ GPU_vertbuf_data_len_set(&handle->buf, vert_len);
+ GPU_vertbuf_use(&handle->buf); /* Send data. */
}
}
- /* Rounding up to nearest chunk size. */
- realloc_size += BUFFER_CHUNK_SIZE - 1;
- realloc_size -= realloc_size % BUFFER_CHUNK_SIZE;
- /* Resize down if necessary. */
- if (realloc_size < batching->alloc_size) {
- batching->alloc_size = realloc_size;
- batching->ibufs = MEM_reallocN(batching->ibufs, realloc_size * sizeof(DRWBatchingBuffer));
- }
-
- realloc_size = 1;
- /* Resize down buffers in use and send data to GPU & free unused buffers. */
- DRWInstanceChunk *instancing = &idatalist->instancing;
- DRWInstancingBuffer *ibuf = instancing->ibufs;
- for (int i = 0; i < instancing->alloc_size; i++, ibuf++) {
- if (ibuf->shgroup != NULL) {
- realloc_size = i + 1;
- uint vert_len = DRW_shgroup_get_instance_count(ibuf->shgroup);
- vert_len += (vert_len == 0) ? 1 : 0; /* Do not realloc to 0 size buffer */
- if (vert_len + BUFFER_VERTS_CHUNK <= ibuf->vert->vertex_len) {
- uint size = vert_len + BUFFER_VERTS_CHUNK - 1;
- size = size - size % BUFFER_VERTS_CHUNK;
- GPU_vertbuf_data_resize(ibuf->vert, size);
- }
- GPU_vertbuf_use(ibuf->vert); /* Send data. */
- /* Setup batch now that we are sure ibuf->instance is setup. */
- GPU_batch_copy(ibuf->batch, ibuf->instance);
- GPU_batch_instbuf_set(ibuf->batch, ibuf->vert, false);
- ibuf->shgroup = NULL; /* Set as non used for the next round. */
- }
- else {
- GPU_VERTBUF_DISCARD_SAFE(ibuf->vert);
- GPU_BATCH_DISCARD_SAFE(ibuf->batch);
- ibuf->format = NULL; /* Tag as non alloced. */
+ /* Finish pending instancing batches. */
+ GPUBatch *batch;
+ BLI_memblock_iternew(idatalist->pool_instancing, &iter);
+ while ((batch = BLI_memblock_iterstep(&iter))) {
+ if (batch->phase == GPU_BATCH_READY_TO_BUILD) {
+ GPUVertBuf *inst = batch->inst;
+ GPUBatch *geom = (void *)batch->verts[0]; /* HACK see DRW_temp_batch_instance_request. */
+ GPU_batch_copy(batch, geom);
+ GPU_batch_instbuf_set(batch, inst, false);
}
}
- /* Rounding up to nearest chunk size. */
- realloc_size += BUFFER_CHUNK_SIZE - 1;
- realloc_size -= realloc_size % BUFFER_CHUNK_SIZE;
- /* Resize down if necessary. */
- if (realloc_size < instancing->alloc_size) {
- instancing->alloc_size = realloc_size;
- instancing->ibufs = MEM_reallocN(instancing->ibufs,
- realloc_size * sizeof(DRWInstancingBuffer));
- }
+ /* Resize pools and free unused. */
+ BLI_memblock_clear(idatalist->pool_buffers, (MemblockValFreeFP)temp_buffer_handle_free);
+ BLI_memblock_clear(idatalist->pool_instancing, (MemblockValFreeFP)GPU_batch_clear);
+ BLI_memblock_clear(idatalist->pool_batching, (MemblockValFreeFP)GPU_batch_clear);
}
/** \} */
@@ -352,12 +283,10 @@ DRWInstanceData *DRW_instance_data_request(DRWInstanceDataList *idatalist, uint
DRWInstanceDataList *DRW_instance_data_list_create(void)
{
DRWInstanceDataList *idatalist = MEM_callocN(sizeof(DRWInstanceDataList), "DRWInstanceDataList");
- idatalist->batching.bbufs = MEM_callocN(sizeof(DRWBatchingBuffer) * BUFFER_CHUNK_SIZE,
- "DRWBatchingBuffers");
- idatalist->batching.alloc_size = BUFFER_CHUNK_SIZE;
- idatalist->instancing.ibufs = MEM_callocN(sizeof(DRWInstancingBuffer) * BUFFER_CHUNK_SIZE,
- "DRWInstancingBuffers");
- idatalist->instancing.alloc_size = BUFFER_CHUNK_SIZE;
+
+ idatalist->pool_batching = BLI_memblock_create(sizeof(GPUBatch), true);
+ idatalist->pool_instancing = BLI_memblock_create(sizeof(GPUBatch), true);
+ idatalist->pool_buffers = BLI_memblock_create(sizeof(DRWTempBufferHandle), true);
BLI_addtail(&g_idatalists, idatalist);
@@ -378,19 +307,9 @@ void DRW_instance_data_list_free(DRWInstanceDataList *idatalist)
idatalist->idata_tail[i] = NULL;
}
- DRWBatchingBuffer *bbuf = idatalist->batching.bbufs;
- for (int i = 0; i < idatalist->batching.alloc_size; i++, bbuf++) {
- GPU_VERTBUF_DISCARD_SAFE(bbuf->vert);
- GPU_BATCH_DISCARD_SAFE(bbuf->batch);
- }
- MEM_freeN(idatalist->batching.bbufs);
-
- DRWInstancingBuffer *ibuf = idatalist->instancing.ibufs;
- for (int i = 0; i < idatalist->instancing.alloc_size; i++, ibuf++) {
- GPU_VERTBUF_DISCARD_SAFE(ibuf->vert);
- GPU_BATCH_DISCARD_SAFE(ibuf->batch);
- }
- MEM_freeN(idatalist->instancing.ibufs);
+ BLI_memblock_destroy(idatalist->pool_buffers, (MemblockValFreeFP)temp_buffer_handle_free);
+ BLI_memblock_destroy(idatalist->pool_instancing, (MemblockValFreeFP)GPU_batch_clear);
+ BLI_memblock_destroy(idatalist->pool_batching, (MemblockValFreeFP)GPU_batch_clear);
BLI_remlink(&g_idatalists, idatalist);
}
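On the caller side, the request/finish cycle above replaces the old per-shgroup buffer bookkeeping: buffers and batches are handed out from the memblock pools during cache population and only become drawable once DRW_instance_buffer_finish() has run. A condensed sketch of that lifecycle, assuming idatalist, format and geom already exist and eliding the attribute writes:

/* Cache population: request a pooled vertex buffer for a static format and an
 * instancing batch that pairs it with the shape geometry. */
uint inst_count = 0;
GPUVertBuf *buf = DRW_temp_buffer_request(idatalist, format, &inst_count);
GPUBatch *batch = DRW_temp_batch_instance_request(idatalist, buf, geom);

/* ... per-instance attributes are written into buf and inst_count is bumped
 * (see DRW_buffer_add_entry_array in draw_manager_data.c) ... */

/* End of cache population: shrink each buffer to its touched length, upload it,
 * and build the pending instancing batches. Only now is batch drawable. */
DRW_instance_buffer_finish(idatalist);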
diff --git a/source/blender/draw/intern/draw_instance_data.h b/source/blender/draw/intern/draw_instance_data.h
index ea5c6ac7bb2..d88de1a58e2 100644
--- a/source/blender/draw/intern/draw_instance_data.h
+++ b/source/blender/draw/intern/draw_instance_data.h
@@ -30,6 +30,8 @@
#define MAX_INSTANCE_DATA_SIZE 64 /* Can be adjusted for more */
+#define DRW_BUFFER_VERTS_CHUNK 128
+
typedef struct DRWInstanceData DRWInstanceData;
typedef struct DRWInstanceDataList DRWInstanceDataList;
@@ -38,18 +40,15 @@ struct DRWShadingGroup;
void *DRW_instance_data_next(DRWInstanceData *idata);
DRWInstanceData *DRW_instance_data_request(DRWInstanceDataList *idatalist, uint attr_size);
-void DRW_batching_buffer_request(DRWInstanceDataList *idatalist,
- GPUVertFormat *format,
- GPUPrimType type,
- struct DRWShadingGroup *shgroup,
- GPUBatch **r_batch,
- GPUVertBuf **r_vert);
-void DRW_instancing_buffer_request(DRWInstanceDataList *idatalist,
- GPUVertFormat *format,
- GPUBatch *instance,
- struct DRWShadingGroup *shgroup,
- GPUBatch **r_batch,
- GPUVertBuf **r_vert);
+GPUVertBuf *DRW_temp_buffer_request(DRWInstanceDataList *idatalist,
+ GPUVertFormat *format,
+ uint *vert_len);
+GPUBatch *DRW_temp_batch_instance_request(DRWInstanceDataList *idatalist,
+ GPUVertBuf *buf,
+ GPUBatch *geom);
+GPUBatch *DRW_temp_batch_request(DRWInstanceDataList *idatalist,
+ GPUVertBuf *buf,
+ GPUPrimType type);
/* Upload all instance data to the GPU as soon as possible. */
void DRW_instance_buffer_finish(DRWInstanceDataList *idatalist);
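DRW_temp_batch_request is the non-instancing counterpart: it wraps a temp buffer in a pooled batch of the given primitive type, again only drawable after DRW_instance_buffer_finish(). A short sketch, reusing the same assumed idatalist and format:

uint vert_len = 0;
GPUVertBuf *buf = DRW_temp_buffer_request(idatalist, format, &vert_len);
/* Pooled batch drawing buf as line primitives once the data is uploaded. */
GPUBatch *lines = DRW_temp_batch_request(idatalist, buf, GPU_PRIM_LINES);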
diff --git a/source/blender/draw/intern/draw_manager.c b/source/blender/draw/intern/draw_manager.c
index f4830916ecf..355046ae277 100644
--- a/source/blender/draw/intern/draw_manager.c
+++ b/source/blender/draw/intern/draw_manager.c
@@ -2953,7 +2953,6 @@ void DRW_engines_free(void)
DRW_UBO_FREE_SAFE(G_draw.view_ubo);
DRW_TEXTURE_FREE_SAFE(G_draw.ramp);
DRW_TEXTURE_FREE_SAFE(G_draw.weight_ramp);
- MEM_SAFE_FREE(g_pos_format);
MEM_SAFE_FREE(DST.uniform_names.buffer);
diff --git a/source/blender/draw/intern/draw_manager.h b/source/blender/draw/intern/draw_manager.h
index b814000673d..c2287acf8e9 100644
--- a/source/blender/draw/intern/draw_manager.h
+++ b/source/blender/draw/intern/draw_manager.h
@@ -134,7 +134,9 @@ typedef struct DRWCall {
uint inst_count;
#ifdef USE_GPU_SELECT
+ /* TODO(fclem) remove once we have a dedicated selection engine. */
int select_id;
+ GPUVertBuf *inst_selectid;
#endif
} DRWCall;
@@ -171,45 +173,18 @@ struct DRWUniform {
char arraysize; /* cannot be more than 16 too */
};
-typedef enum {
- DRW_SHG_NORMAL,
-
- DRW_SHG_POINT_BATCH,
- DRW_SHG_LINE_BATCH,
- DRW_SHG_TRIANGLE_BATCH,
-
- DRW_SHG_INSTANCE,
- DRW_SHG_INSTANCE_EXTERNAL,
- DRW_SHG_FEEDBACK_TRANSFORM,
-} DRWShadingGroupType;
-
struct DRWShadingGroup {
DRWShadingGroup *next;
GPUShader *shader; /* Shader to bind */
DRWUniform *uniforms; /* Uniforms pointers */
- /* Watch this! Can be nasty for debugging. */
- union {
- struct { /* DRW_SHG_NORMAL */
- DRWCall *first, *last; /* Linked list of DRWCall */
- } calls;
- struct { /* DRW_SHG_FEEDBACK_TRANSFORM */
- DRWCall *first, *last; /* Linked list of DRWCall. */
- struct GPUVertBuf *tfeedback_target; /* Transform Feedback target. */
- };
- struct { /* DRW_SHG_***_BATCH */
- struct GPUBatch *batch_geom; /* Result of call batching */
- struct GPUVertBuf *batch_vbo;
- uint primitive_count;
- };
- struct { /* DRW_SHG_INSTANCE[_EXTERNAL] */
- struct GPUBatch *instance_geom;
- struct GPUVertBuf *instance_vbo;
- uint instance_count;
- float instance_orcofac[2][3]; /* TODO find a better place. */
- };
- };
+ struct {
+ DRWCall *first, *last; /* Linked list of DRWCall */
+ } calls;
+
+ /** TODO Maybe remove from here */
+ struct GPUVertBuf *tfeedback_target;
/** State changes for this batch only (or'd with the pass's state) */
DRWState state_extra;
@@ -217,7 +192,6 @@ struct DRWShadingGroup {
DRWState state_extra_disable;
/** Stencil mask to use for stencil test / write operations */
uint stencil_mask;
- DRWShadingGroupType type;
/* Builtin matrices locations */
int model;
@@ -229,13 +203,6 @@ struct DRWShadingGroup {
uchar matflag; /* Matrices needed, same as DRWCall.flag */
DRWPass *pass_parent; /* backlink to pass we're in */
-#ifndef NDEBUG
- char attrs_count;
-#endif
-#ifdef USE_GPU_SELECT
- GPUVertBuf *inst_selectid;
- int override_selectid; /* Override for single object instances. */
-#endif
};
#define MAX_PASS_NAME 32
@@ -420,6 +387,4 @@ GPUBatch *drw_cache_procedural_points_get(void);
GPUBatch *drw_cache_procedural_lines_get(void);
GPUBatch *drw_cache_procedural_triangles_get(void);
-extern struct GPUVertFormat *g_pos_format;
-
#endif /* __DRAW_MANAGER_H__ */
diff --git a/source/blender/draw/intern/draw_manager_data.c b/source/blender/draw/intern/draw_manager_data.c
index 3a10543c8ef..151ab469e59 100644
--- a/source/blender/draw/intern/draw_manager_data.c
+++ b/source/blender/draw/intern/draw_manager_data.c
@@ -43,8 +43,6 @@
#include "intern/gpu_codegen.h"
-struct GPUVertFormat *g_pos_format = NULL;
-
/* -------------------------------------------------------------------- */
/** \name Uniform Buffer Object (DRW_uniformbuffer)
* \{ */
@@ -453,7 +451,6 @@ static DRWCallState *drw_call_state_object(DRWShadingGroup *shgroup, float (*obm
void DRW_shgroup_call_add(DRWShadingGroup *shgroup, GPUBatch *geom, float (*obmat)[4])
{
BLI_assert(geom != NULL);
- BLI_assert(ELEM(shgroup->type, DRW_SHG_NORMAL, DRW_SHG_FEEDBACK_TRANSFORM));
DRWCall *call = BLI_memblock_alloc(DST.vmempool->calls);
BLI_LINKS_APPEND(&shgroup->calls, call);
@@ -465,6 +462,7 @@ void DRW_shgroup_call_add(DRWShadingGroup *shgroup, GPUBatch *geom, float (*obma
call->inst_count = 0;
#ifdef USE_GPU_SELECT
call->select_id = DST.select_id;
+ call->inst_selectid = NULL;
#endif
}
@@ -472,7 +470,6 @@ void DRW_shgroup_call_range_add(
DRWShadingGroup *shgroup, GPUBatch *geom, float (*obmat)[4], uint v_sta, uint v_count)
{
BLI_assert(geom != NULL);
- BLI_assert(ELEM(shgroup->type, DRW_SHG_NORMAL, DRW_SHG_FEEDBACK_TRANSFORM));
BLI_assert(v_count);
DRWCall *call = BLI_memblock_alloc(DST.vmempool->calls);
@@ -485,6 +482,7 @@ void DRW_shgroup_call_range_add(
call->inst_count = 0;
#ifdef USE_GPU_SELECT
call->select_id = DST.select_id;
+ call->inst_selectid = NULL;
#endif
}
@@ -493,7 +491,6 @@ static void drw_shgroup_call_procedural_add_ex(DRWShadingGroup *shgroup,
uint vert_count,
float (*obmat)[4])
{
- BLI_assert(ELEM(shgroup->type, DRW_SHG_NORMAL, DRW_SHG_FEEDBACK_TRANSFORM));
DRWCall *call = BLI_memblock_alloc(DST.vmempool->calls);
BLI_LINKS_APPEND(&shgroup->calls, call);
@@ -505,6 +502,7 @@ static void drw_shgroup_call_procedural_add_ex(DRWShadingGroup *shgroup,
call->inst_count = 0;
#ifdef USE_GPU_SELECT
call->select_id = DST.select_id;
+ call->inst_selectid = NULL;
#endif
}
@@ -539,7 +537,6 @@ void DRW_shgroup_call_object_add_ex(DRWShadingGroup *shgroup,
bool bypass_culling)
{
BLI_assert(geom != NULL);
- BLI_assert(ELEM(shgroup->type, DRW_SHG_NORMAL, DRW_SHG_FEEDBACK_TRANSFORM));
DRWCall *call = BLI_memblock_alloc(DST.vmempool->calls);
BLI_LINKS_APPEND(&shgroup->calls, call);
@@ -553,6 +550,7 @@ void DRW_shgroup_call_object_add_ex(DRWShadingGroup *shgroup,
call->inst_count = 0;
#ifdef USE_GPU_SELECT
call->select_id = DST.select_id;
+ call->inst_selectid = NULL;
#endif
}
@@ -563,7 +561,6 @@ void DRW_shgroup_call_object_add_with_callback(DRWShadingGroup *shgroup,
void *user_data)
{
BLI_assert(geom != NULL);
- BLI_assert(ELEM(shgroup->type, DRW_SHG_NORMAL, DRW_SHG_FEEDBACK_TRANSFORM));
DRWCall *call = BLI_memblock_alloc(DST.vmempool->calls);
BLI_LINKS_APPEND(&shgroup->calls, call);
@@ -577,6 +574,7 @@ void DRW_shgroup_call_object_add_with_callback(DRWShadingGroup *shgroup,
call->inst_count = 0;
#ifdef USE_GPU_SELECT
call->select_id = DST.select_id;
+ call->inst_selectid = NULL;
#endif
}
@@ -586,7 +584,6 @@ void DRW_shgroup_call_instances_add(DRWShadingGroup *shgroup,
uint count)
{
BLI_assert(geom != NULL);
- BLI_assert(ELEM(shgroup->type, DRW_SHG_NORMAL, DRW_SHG_FEEDBACK_TRANSFORM));
DRWCall *call = BLI_memblock_alloc(DST.vmempool->calls);
BLI_LINKS_APPEND(&shgroup->calls, call);
@@ -598,6 +595,31 @@ void DRW_shgroup_call_instances_add(DRWShadingGroup *shgroup,
call->inst_count = count;
#ifdef USE_GPU_SELECT
call->select_id = DST.select_id;
+ call->inst_selectid = NULL;
+#endif
+}
+
+void DRW_shgroup_call_instances_with_attribs_add(DRWShadingGroup *shgroup,
+ struct GPUBatch *geom,
+ float (*obmat)[4],
+ struct GPUBatch *inst_attributes)
+{
+ BLI_assert(geom != NULL);
+ BLI_assert(inst_attributes->verts[0] != NULL);
+
+ GPUVertBuf *buf_inst = inst_attributes->verts[0];
+
+ DRWCall *call = BLI_memblock_alloc(DST.vmempool->calls);
+ BLI_LINKS_APPEND(&shgroup->calls, call);
+
+ call->state = drw_call_state_create(shgroup, obmat, NULL);
+ call->batch = DRW_temp_batch_instance_request(DST.idatalist, buf_inst, geom);
+ call->vert_first = 0;
+ call->vert_count = 0; /* Auto from batch. */
+ call->inst_count = buf_inst->vertex_len;
+#ifdef USE_GPU_SELECT
+ call->select_id = DST.select_id;
+ call->inst_selectid = NULL;
#endif
}
@@ -741,30 +763,95 @@ void DRW_shgroup_call_sculpt_with_materials_add(DRWShadingGroup **shgroups,
drw_sculpt_generate_calls(&scd, use_vcol);
}
-void DRW_shgroup_call_dynamic_add_array(DRWShadingGroup *shgroup,
- const void *attr[],
- uint attr_len)
+static GPUVertFormat inst_select_format = {0};
+
+DRWCallBuffer *DRW_shgroup_call_buffer_add(DRWShadingGroup *shgroup,
+ struct GPUVertFormat *format,
+ GPUPrimType prim_type)
+{
+ BLI_assert(ELEM(prim_type, GPU_PRIM_POINTS, GPU_PRIM_LINES, GPU_PRIM_TRI_FAN));
+ BLI_assert(format != NULL);
+
+ DRWCall *call = BLI_memblock_alloc(DST.vmempool->calls);
+ BLI_LINKS_APPEND(&shgroup->calls, call);
+
+ call->state = drw_call_state_create(shgroup, NULL, NULL);
+ GPUVertBuf *buf = DRW_temp_buffer_request(DST.idatalist, format, &call->vert_count);
+ call->batch = DRW_temp_batch_request(DST.idatalist, buf, prim_type);
+ call->vert_first = 0;
+ call->vert_count = 0;
+ call->inst_count = 0;
+
+#ifdef USE_GPU_SELECT
+ if (G.f & G_FLAG_PICKSEL) {
+ /* Not actually used for rendering but alloced in one chunk. */
+ if (inst_select_format.attr_len == 0) {
+ GPU_vertformat_attr_add(&inst_select_format, "selectId", GPU_COMP_I32, 1, GPU_FETCH_INT);
+ }
+ call->inst_selectid = DRW_temp_buffer_request(
+ DST.idatalist, &inst_select_format, &call->vert_count);
+ }
+#endif
+ return (DRWCallBuffer *)call;
+}
+
+DRWCallBuffer *DRW_shgroup_call_buffer_instance_add(DRWShadingGroup *shgroup,
+ struct GPUVertFormat *format,
+ GPUBatch *geom)
{
+ BLI_assert(geom != NULL);
+ BLI_assert(format != NULL);
+
+ DRWCall *call = BLI_memblock_alloc(DST.vmempool->calls);
+ BLI_LINKS_APPEND(&shgroup->calls, call);
+
+ call->state = drw_call_state_create(shgroup, NULL, NULL);
+ GPUVertBuf *buf = DRW_temp_buffer_request(DST.idatalist, format, &call->inst_count);
+ call->batch = DRW_temp_batch_instance_request(DST.idatalist, buf, geom);
+ call->vert_first = 0;
+ call->vert_count = 0; /* Auto from batch. */
+ call->inst_count = 0;
+
#ifdef USE_GPU_SELECT
if (G.f & G_FLAG_PICKSEL) {
- if (shgroup->instance_count == shgroup->inst_selectid->vertex_len) {
- GPU_vertbuf_data_resize(shgroup->inst_selectid, shgroup->instance_count + 32);
+ /* Not actually used for rendering but alloced in one chunk. */
+ if (inst_select_format.attr_len == 0) {
+ GPU_vertformat_attr_add(&inst_select_format, "selectId", GPU_COMP_I32, 1, GPU_FETCH_INT);
}
- GPU_vertbuf_attr_set(shgroup->inst_selectid, 0, shgroup->instance_count, &DST.select_id);
+ call->inst_selectid = DRW_temp_buffer_request(
+ DST.idatalist, &inst_select_format, &call->inst_count);
}
#endif
+ return (DRWCallBuffer *)call;
+}
- BLI_assert(attr_len == shgroup->attrs_count);
+void DRW_buffer_add_entry_array(DRWCallBuffer *callbuf, const void *attr[], uint attr_len)
+{
+ DRWCall *call = (DRWCall *)callbuf;
+ const bool is_instance = call->batch->inst != NULL;
+ GPUVertBuf *buf = is_instance ? call->batch->inst : call->batch->verts[0];
+ uint count = is_instance ? call->inst_count++ : call->vert_count++;
+ const bool resize = (count == buf->vertex_alloc);
+
+ BLI_assert(attr_len == buf->format.attr_len);
UNUSED_VARS_NDEBUG(attr_len);
+ if (UNLIKELY(resize)) {
+ GPU_vertbuf_data_resize(buf, count + DRW_BUFFER_VERTS_CHUNK);
+ }
+
for (int i = 0; i < attr_len; ++i) {
- if (shgroup->instance_count == shgroup->instance_vbo->vertex_len) {
- GPU_vertbuf_data_resize(shgroup->instance_vbo, shgroup->instance_count + 32);
- }
- GPU_vertbuf_attr_set(shgroup->instance_vbo, i, shgroup->instance_count, attr[i]);
+ GPU_vertbuf_attr_set(buf, i, count, attr[i]);
}
- shgroup->instance_count += 1;
+#ifdef USE_GPU_SELECT
+ if (G.f & G_FLAG_PICKSEL) {
+ if (UNLIKELY(resize)) {
+ GPU_vertbuf_data_resize(call->inst_selectid, count + DRW_BUFFER_VERTS_CHUNK);
+ }
+ GPU_vertbuf_attr_set(call->inst_selectid, 0, count, &DST.select_id);
+ }
+#endif
}
/** \} */
@@ -775,17 +862,7 @@ void DRW_shgroup_call_dynamic_add_array(DRWShadingGroup *shgroup,
static void drw_shgroup_init(DRWShadingGroup *shgroup, GPUShader *shader)
{
- shgroup->instance_geom = NULL;
- shgroup->instance_vbo = NULL;
- shgroup->instance_count = 0;
shgroup->uniforms = NULL;
-#ifdef USE_GPU_SELECT
- shgroup->inst_selectid = NULL;
- shgroup->override_selectid = -1;
-#endif
-#ifndef NDEBUG
- shgroup->attrs_count = 0;
-#endif
int view_ubo_location = GPU_shader_get_uniform_block(shader, "viewBlock");
@@ -817,6 +894,7 @@ static void drw_shgroup_init(DRWShadingGroup *shgroup, GPUShader *shader)
/* Not supported. */
BLI_assert(GPU_shader_get_builtin_uniform(shader, GPU_UNIFORM_MODELVIEW_INV) == -1);
BLI_assert(GPU_shader_get_builtin_uniform(shader, GPU_UNIFORM_MODELVIEW) == -1);
+ BLI_assert(GPU_shader_get_builtin_uniform(shader, GPU_UNIFORM_NORMAL) == -1);
shgroup->model = GPU_shader_get_builtin_uniform(shader, GPU_UNIFORM_MODEL);
shgroup->modelinverse = GPU_shader_get_builtin_uniform(shader, GPU_UNIFORM_MODEL_INV);
@@ -825,9 +903,6 @@ static void drw_shgroup_init(DRWShadingGroup *shgroup, GPUShader *shader)
shgroup->objectinfo = GPU_shader_get_builtin_uniform(shader, GPU_UNIFORM_OBJECT_INFO);
shgroup->callid = GPU_shader_get_builtin_uniform(shader, GPU_UNIFORM_CALLID);
- /* We do not support normal matrix anymore. */
- BLI_assert(GPU_shader_get_builtin_uniform(shader, GPU_UNIFORM_NORMAL) == -1);
-
shgroup->matflag = 0;
if (shgroup->modelinverse > -1) {
shgroup->matflag |= DRW_CALL_MODELINVERSE;
@@ -843,113 +918,19 @@ static void drw_shgroup_init(DRWShadingGroup *shgroup, GPUShader *shader)
}
}
-static void drw_shgroup_instance_init(DRWShadingGroup *shgroup,
- GPUShader *shader,
- GPUBatch *batch,
- GPUVertFormat *format)
-{
- BLI_assert(shgroup->type == DRW_SHG_INSTANCE);
- BLI_assert(batch != NULL);
- BLI_assert(format != NULL);
-
- drw_shgroup_init(shgroup, shader);
-
- shgroup->instance_geom = batch;
-#ifndef NDEBUG
- shgroup->attrs_count = format->attr_len;
-#endif
-
- DRW_instancing_buffer_request(
- DST.idatalist, format, batch, shgroup, &shgroup->instance_geom, &shgroup->instance_vbo);
-
-#ifdef USE_GPU_SELECT
- if (G.f & G_FLAG_PICKSEL) {
- /* Not actually used for rendering but alloced in one chunk.
- * Plus we don't have to care about ownership. */
- static GPUVertFormat inst_select_format = {0};
- if (inst_select_format.attr_len == 0) {
- GPU_vertformat_attr_add(&inst_select_format, "selectId", GPU_COMP_I32, 1, GPU_FETCH_INT);
- }
- GPUBatch *batch_dummy; /* Not used */
- DRW_batching_buffer_request(DST.idatalist,
- &inst_select_format,
- GPU_PRIM_POINTS,
- shgroup,
- &batch_dummy,
- &shgroup->inst_selectid);
- }
-#endif
-}
-
-static void drw_shgroup_batching_init(DRWShadingGroup *shgroup,
- GPUShader *shader,
- GPUVertFormat *format)
-{
- drw_shgroup_init(shgroup, shader);
-
-#ifndef NDEBUG
- shgroup->attrs_count = (format != NULL) ? format->attr_len : 0;
-#endif
- BLI_assert(format != NULL);
-
- GPUPrimType type;
- switch (shgroup->type) {
- case DRW_SHG_POINT_BATCH:
- type = GPU_PRIM_POINTS;
- break;
- case DRW_SHG_LINE_BATCH:
- type = GPU_PRIM_LINES;
- break;
- case DRW_SHG_TRIANGLE_BATCH:
- type = GPU_PRIM_TRIS;
- break;
- default:
- type = GPU_PRIM_NONE;
- BLI_assert(0);
- break;
- }
-
- DRW_batching_buffer_request(
- DST.idatalist, format, type, shgroup, &shgroup->batch_geom, &shgroup->batch_vbo);
-
-#ifdef USE_GPU_SELECT
- if (G.f & G_FLAG_PICKSEL) {
- /* Not actually used for rendering but alloced in one chunk. */
- static GPUVertFormat inst_select_format = {0};
- if (inst_select_format.attr_len == 0) {
- GPU_vertformat_attr_add(&inst_select_format, "selectId", GPU_COMP_I32, 1, GPU_FETCH_INT);
- }
- GPUBatch *batch; /* Not used */
- DRW_batching_buffer_request(DST.idatalist,
- &inst_select_format,
- GPU_PRIM_POINTS,
- shgroup,
- &batch,
- &shgroup->inst_selectid);
- }
-#endif
-}
-
static DRWShadingGroup *drw_shgroup_create_ex(struct GPUShader *shader, DRWPass *pass)
{
DRWShadingGroup *shgroup = BLI_memblock_alloc(DST.vmempool->shgroups);
BLI_LINKS_APPEND(&pass->shgroups, shgroup);
- shgroup->type = DRW_SHG_NORMAL;
shgroup->shader = shader;
shgroup->state_extra = 0;
shgroup->state_extra_disable = ~0x0;
shgroup->stencil_mask = 0;
shgroup->calls.first = NULL;
shgroup->calls.last = NULL;
-#if 0 /* All the same in the union! */
- shgroup->batch_geom = NULL;
- shgroup->batch_vbo = NULL;
-
- shgroup->instance_geom = NULL;
- shgroup->instance_vbo = NULL;
-#endif
+ shgroup->tfeedback_target = NULL;
shgroup->pass_parent = pass;
return shgroup;
@@ -1034,7 +1015,6 @@ DRWShadingGroup *DRW_shgroup_material_create(struct GPUMaterial *material, DRWPa
drw_shgroup_init(shgroup, GPU_pass_shader_get(gpupass));
drw_shgroup_material_inputs(shgroup, material);
}
-
return shgroup;
}
@@ -1045,96 +1025,18 @@ DRWShadingGroup *DRW_shgroup_create(struct GPUShader *shader, DRWPass *pass)
return shgroup;
}
-DRWShadingGroup *DRW_shgroup_instance_create(struct GPUShader *shader,
- DRWPass *pass,
- GPUBatch *geom,
- GPUVertFormat *format)
-{
- DRWShadingGroup *shgroup = drw_shgroup_create_ex(shader, pass);
- shgroup->type = DRW_SHG_INSTANCE;
- shgroup->instance_geom = geom;
- drw_call_calc_orco(NULL, shgroup->instance_orcofac);
- drw_shgroup_instance_init(shgroup, shader, geom, format);
-
- return shgroup;
-}
-
-DRWShadingGroup *DRW_shgroup_point_batch_create(struct GPUShader *shader, DRWPass *pass)
-{
- DRW_shgroup_instance_format(g_pos_format, {{"pos", DRW_ATTR_FLOAT, 3}});
-
- DRWShadingGroup *shgroup = drw_shgroup_create_ex(shader, pass);
- shgroup->type = DRW_SHG_POINT_BATCH;
-
- drw_shgroup_batching_init(shgroup, shader, g_pos_format);
-
- return shgroup;
-}
-
-DRWShadingGroup *DRW_shgroup_line_batch_create_with_format(struct GPUShader *shader,
- DRWPass *pass,
- GPUVertFormat *format)
-{
- DRWShadingGroup *shgroup = drw_shgroup_create_ex(shader, pass);
- shgroup->type = DRW_SHG_LINE_BATCH;
-
- drw_shgroup_batching_init(shgroup, shader, format);
-
- return shgroup;
-}
-
-DRWShadingGroup *DRW_shgroup_line_batch_create(struct GPUShader *shader, DRWPass *pass)
-{
- DRW_shgroup_instance_format(g_pos_format, {{"pos", DRW_ATTR_FLOAT, 3}});
-
- return DRW_shgroup_line_batch_create_with_format(shader, pass, g_pos_format);
-}
-
DRWShadingGroup *DRW_shgroup_transform_feedback_create(struct GPUShader *shader,
DRWPass *pass,
GPUVertBuf *tf_target)
{
BLI_assert(tf_target != NULL);
DRWShadingGroup *shgroup = drw_shgroup_create_ex(shader, pass);
- shgroup->type = DRW_SHG_FEEDBACK_TRANSFORM;
-
drw_shgroup_init(shgroup, shader);
-
shgroup->tfeedback_target = tf_target;
-
return shgroup;
}
/**
- * Specify an external batch instead of adding each attribute one by one.
- */
-void DRW_shgroup_instance_batch(DRWShadingGroup *shgroup, struct GPUBatch *batch)
-{
- BLI_assert(shgroup->type == DRW_SHG_INSTANCE);
- BLI_assert(shgroup->instance_count == 0);
- /* You cannot use external instancing batch without a dummy format. */
- BLI_assert(shgroup->attrs_count != 0);
-
- shgroup->type = DRW_SHG_INSTANCE_EXTERNAL;
- drw_call_calc_orco(NULL, shgroup->instance_orcofac);
- /* PERF : This destroys the vaos cache so better check if it's necessary. */
- /* Note: This WILL break if batch->verts[0] is destroyed and reallocated
- * at the same address. Bindings/VAOs would remain obsolete. */
- // if (shgroup->instancing_geom->inst != batch->verts[0])
- /* XXX FIXME: THIS IS BROKEN BECAUSE OVEWRITTEN BY DRW_instance_buffer_finish(). */
- GPU_batch_instbuf_set(shgroup->instance_geom, batch->verts[0], false);
-
-#ifdef USE_GPU_SELECT
- shgroup->override_selectid = DST.select_id;
-#endif
-}
-
-uint DRW_shgroup_get_instance_count(const DRWShadingGroup *shgroup)
-{
- return shgroup->instance_count;
-}
-
-/**
* State is added to #Pass.state while drawing.
* Use to temporarily enable draw options.
*/
@@ -1156,26 +1058,12 @@ void DRW_shgroup_stencil_mask(DRWShadingGroup *shgroup, uint mask)
bool DRW_shgroup_is_empty(DRWShadingGroup *shgroup)
{
- switch (shgroup->type) {
- case DRW_SHG_NORMAL:
- case DRW_SHG_FEEDBACK_TRANSFORM:
- return shgroup->calls.first == NULL;
- case DRW_SHG_POINT_BATCH:
- case DRW_SHG_LINE_BATCH:
- case DRW_SHG_TRIANGLE_BATCH:
- case DRW_SHG_INSTANCE:
- case DRW_SHG_INSTANCE_EXTERNAL:
- return shgroup->instance_count == 0;
- }
- BLI_assert(!"Shading Group type not supported");
- return true;
+ return shgroup->calls.first == NULL;
}
DRWShadingGroup *DRW_shgroup_create_sub(DRWShadingGroup *shgroup)
{
/* Remove this assertion if needed but implement the other cases first! */
- BLI_assert(shgroup->type == DRW_SHG_NORMAL);
-
DRWShadingGroup *shgroup_new = BLI_memblock_alloc(DST.vmempool->shgroups);
*shgroup_new = *shgroup;
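Together, DRW_shgroup_call_buffer_add and DRW_shgroup_call_buffer_instance_add added above replace the old point/line/triangle-batch and instance shading-group types: the former accumulates raw vertices for a given primitive type, the latter accumulates per-instance attributes for an existing batch, and DRW_buffer_add_entry_array feeds either one. A minimal sketch for a dynamic line buffer, assuming shader, pass and a static one-attribute format ("pos", 3 floats) already exist:

DRWShadingGroup *grp = DRW_shgroup_create(shader, pass);
DRWCallBuffer *callbuf = DRW_shgroup_call_buffer_add(grp, format, GPU_PRIM_LINES);

/* Each entry appends one vertex; attr_len must match the format's attr_len (1 here). */
const float line_start[3] = {0.0f, 0.0f, 0.0f};
const float line_end[3] = {0.0f, 0.0f, 1.0f};
const void *attr[1];

attr[0] = line_start;
DRW_buffer_add_entry_array(callbuf, attr, 1);
attr[0] = line_end;
DRW_buffer_add_entry_array(callbuf, attr, 1);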
diff --git a/source/blender/draw/intern/draw_manager_exec.c b/source/blender/draw/intern/draw_manager_exec.c
index 0a4df550044..e1ef2e81b8d 100644
--- a/source/blender/draw/intern/draw_manager_exec.c
+++ b/source/blender/draw/intern/draw_manager_exec.c
@@ -30,6 +30,7 @@
#include "GPU_draw.h"
#include "GPU_extensions.h"
#include "intern/gpu_shader_private.h"
+#include "intern/gpu_primitive_private.h"
#ifdef USE_GPU_SELECT
# include "GPU_select.h"
@@ -852,8 +853,8 @@ static void draw_geometry_prepare(DRWShadingGroup *shgroup, DRWCall *call)
GPU_shader_uniform_vector(shgroup->shader, shgroup->objectinfo, 4, 1, (float *)unitmat);
}
if (shgroup->orcotexfac != -1) {
- GPU_shader_uniform_vector(
- shgroup->shader, shgroup->orcotexfac, 3, 2, (float *)shgroup->instance_orcofac);
+ float orcofacs[2][3] = {{0.0f, 0.0f, 0.0f}, {1.0f, 1.0f, 1.0f}};
+ GPU_shader_uniform_vector(shgroup->shader, shgroup->orcotexfac, 3, 2, (float *)orcofacs);
}
}
}
@@ -1040,6 +1041,49 @@ static void release_ubo_slots(bool with_persist)
}
}
+BLI_INLINE bool draw_select_do_call(DRWShadingGroup *shgroup, DRWCall *call)
+{
+#ifdef USE_GPU_SELECT
+ if ((G.f & G_FLAG_PICKSEL) == 0) {
+ return false;
+ }
+ if (call->inst_selectid != NULL) {
+ const bool is_instancing = (call->inst_count != 0);
+ uint start = 0;
+ uint count = 1;
+ uint tot = is_instancing ? call->inst_count : call->vert_count;
+ /* Hack : get vbo data without actually drawing. */
+ GPUVertBufRaw raw;
+ GPU_vertbuf_attr_get_raw_data(call->inst_selectid, 0, &raw);
+ int *select_id = GPU_vertbuf_raw_step(&raw);
+
+ /* Batching */
+ if (!is_instancing) {
+ /* FIXME: Meh a bit nasty. */
+ if (call->batch->gl_prim_type == convert_prim_type_to_gl(GPU_PRIM_TRIS)) {
+ count = 3;
+ }
+ else if (call->batch->gl_prim_type == convert_prim_type_to_gl(GPU_PRIM_LINES)) {
+ count = 2;
+ }
+ }
+
+ while (start < tot) {
+ GPU_select_load_id(select_id[start]);
+ draw_geometry_execute(shgroup, call->batch, start, count, is_instancing);
+ start += count;
+ }
+ return true;
+ }
+ else {
+ GPU_select_load_id(call->select_id);
+ return false;
+ }
+#else
+ return false;
+#endif
+}
+
static void draw_shgroup(DRWShadingGroup *shgroup, DRWState pass_state)
{
BLI_assert(shgroup->shader);
@@ -1059,8 +1103,7 @@ static void draw_shgroup(DRWShadingGroup *shgroup, DRWState pass_state)
DST.shader = shgroup->shader;
}
- if ((pass_state & DRW_STATE_TRANS_FEEDBACK) != 0 &&
- (shgroup->type == DRW_SHG_FEEDBACK_TRANSFORM)) {
+ if ((pass_state & DRW_STATE_TRANS_FEEDBACK) != 0 && (shgroup->tfeedback_target != NULL)) {
use_tfeedback = GPU_shader_transform_feedback_enable(shgroup->shader,
shgroup->tfeedback_target->vbo_id);
}
@@ -1140,102 +1183,10 @@ static void draw_shgroup(DRWShadingGroup *shgroup, DRWState pass_state)
}
}
-#ifdef USE_GPU_SELECT
-# define GPU_SELECT_LOAD_IF_PICKSEL(_select_id) \
- if (G.f & G_FLAG_PICKSEL) { \
- GPU_select_load_id(_select_id); \
- } \
- ((void)0)
-
-# define GPU_SELECT_LOAD_IF_PICKSEL_CALL(_call) \
- if ((G.f & G_FLAG_PICKSEL) && (_call)) { \
- GPU_select_load_id((_call)->select_id); \
- } \
- ((void)0)
-
-# define GPU_SELECT_LOAD_IF_PICKSEL_LIST(_shgroup, _start, _count) \
- _start = 0; \
- _count = _shgroup->instance_count; \
- int *select_id = NULL; \
- if (G.f & G_FLAG_PICKSEL) { \
- if (_shgroup->override_selectid == -1) { \
- /* Hack : get vbo data without actually drawing. */ \
- GPUVertBufRaw raw; \
- GPU_vertbuf_attr_get_raw_data(_shgroup->inst_selectid, 0, &raw); \
- select_id = GPU_vertbuf_raw_step(&raw); \
- switch (_shgroup->type) { \
- case DRW_SHG_TRIANGLE_BATCH: \
- _count = 3; \
- break; \
- case DRW_SHG_LINE_BATCH: \
- _count = 2; \
- break; \
- default: \
- _count = 1; \
- break; \
- } \
- } \
- else { \
- GPU_select_load_id(_shgroup->override_selectid); \
- } \
- } \
- while (_start < _shgroup->instance_count) { \
- if (select_id) { \
- GPU_select_load_id(select_id[_start]); \
- }
-
-# define GPU_SELECT_LOAD_IF_PICKSEL_LIST_END(_start, _count) \
- _start += _count; \
- } \
- ((void)0)
-
-#else
-# define GPU_SELECT_LOAD_IF_PICKSEL(select_id)
-# define GPU_SELECT_LOAD_IF_PICKSEL_CALL(call)
-# define GPU_SELECT_LOAD_IF_PICKSEL_LIST_END(start, count) ((void)0)
-# define GPU_SELECT_LOAD_IF_PICKSEL_LIST(_shgroup, _start, _count) \
- _start = 0; \
- _count = _shgroup->instance_count;
-
-#endif
-
BLI_assert(ubo_bindings_validate(shgroup));
/* Rendering Calls */
- if (!ELEM(shgroup->type, DRW_SHG_NORMAL, DRW_SHG_FEEDBACK_TRANSFORM)) {
- /* Replacing multiple calls with only one */
- if (ELEM(shgroup->type, DRW_SHG_INSTANCE, DRW_SHG_INSTANCE_EXTERNAL)) {
- if (shgroup->type == DRW_SHG_INSTANCE_EXTERNAL) {
- if (shgroup->instance_geom != NULL) {
- GPU_SELECT_LOAD_IF_PICKSEL(shgroup->override_selectid);
- draw_geometry_prepare(shgroup, NULL);
- draw_geometry_execute(shgroup, shgroup->instance_geom, 0, 0, true);
- }
- }
- else {
- if (shgroup->instance_count > 0) {
- uint count, start;
- draw_geometry_prepare(shgroup, NULL);
- GPU_SELECT_LOAD_IF_PICKSEL_LIST (shgroup, start, count) {
- draw_geometry_execute(shgroup, shgroup->instance_geom, start, count, true);
- }
- GPU_SELECT_LOAD_IF_PICKSEL_LIST_END(start, count);
- }
- }
- }
- else { /* DRW_SHG_***_BATCH */
- /* Some dynamic batch can have no geom (no call to aggregate) */
- if (shgroup->instance_count > 0) {
- uint count, start;
- draw_geometry_prepare(shgroup, NULL);
- GPU_SELECT_LOAD_IF_PICKSEL_LIST (shgroup, start, count) {
- draw_geometry_execute(shgroup, shgroup->batch_geom, start, count, false);
- }
- GPU_SELECT_LOAD_IF_PICKSEL_LIST_END(start, count);
- }
- }
- }
- else {
+ {
bool prev_neg_scale = false;
int callid = 0;
for (DRWCall *call = shgroup->calls.first; call; call = call->next) {
@@ -1262,9 +1213,12 @@ static void draw_shgroup(DRWShadingGroup *shgroup, DRWState pass_state)
prev_neg_scale = neg_scale;
}
- GPU_SELECT_LOAD_IF_PICKSEL_CALL(call);
draw_geometry_prepare(shgroup, call);
+ if (draw_select_do_call(shgroup, call)) {
+ continue;
+ }
+
/* TODO revisit when DRW_SHG_INSTANCE and the like is gone. */
if (call->inst_count == 0) {
draw_geometry_execute(shgroup, call->batch, call->vert_first, call->vert_count, false);
diff --git a/source/blender/draw/modes/edit_metaball_mode.c b/source/blender/draw/modes/edit_metaball_mode.c
index e94d394b98f..11ff95a212b 100644
--- a/source/blender/draw/modes/edit_metaball_mode.c
+++ b/source/blender/draw/modes/edit_metaball_mode.c
@@ -88,7 +88,7 @@ typedef struct EDIT_METABALL_Data {
typedef struct EDIT_METABALL_PrivateData {
/* This keeps the references of the shading groups for
* easy access in EDIT_METABALL_cache_populate() */
- DRWShadingGroup *group;
+ DRWCallBuffer *group;
} EDIT_METABALL_PrivateData; /* Transient data */
/* *********** FUNCTIONS *********** */
@@ -121,7 +121,7 @@ static void EDIT_METABALL_cache_init(void *vedata)
psl->pass = DRW_pass_create("My Pass", state);
/* Create a shadingGroup using a function in draw_common.c or custom one */
- stl->g_data->group = shgroup_instance_mball_handles(psl->pass, draw_ctx->sh_cfg);
+ stl->g_data->group = buffer_instance_mball_handles(psl->pass, draw_ctx->sh_cfg);
}
}
@@ -133,7 +133,7 @@ static void EDIT_METABALL_cache_populate(void *vedata, Object *ob)
if (ob->type == OB_MBALL) {
const DRWContextState *draw_ctx = DRW_context_state_get();
- DRWShadingGroup *group = stl->g_data->group;
+ DRWCallBuffer *group = stl->g_data->group;
if ((ob == draw_ctx->object_edit) || BKE_object_is_in_editmode(ob)) {
MetaBall *mb = ob->data;
@@ -184,7 +184,7 @@ static void EDIT_METABALL_cache_populate(void *vedata, Object *ob)
DRW_select_load_id(select_id | MBALLSEL_RADIUS);
}
- DRW_shgroup_call_dynamic_add(group, draw_scale_xform, &ml->rad, color);
+ DRW_buffer_add_entry(group, draw_scale_xform, &ml->rad, color);
if ((ml->flag & SELECT) && !(ml->flag & MB_SCALE_RAD)) {
color = col_stiffness_select;
@@ -197,7 +197,7 @@ static void EDIT_METABALL_cache_populate(void *vedata, Object *ob)
DRW_select_load_id(select_id | MBALLSEL_STIFF);
}
- DRW_shgroup_call_dynamic_add(group, draw_scale_xform, &draw_stiffness_radius, color);
+ DRW_buffer_add_entry(group, draw_scale_xform, &draw_stiffness_radius, color);
}
}
}
diff --git a/source/blender/draw/modes/edit_text_mode.c b/source/blender/draw/modes/edit_text_mode.c
index 5f44a74b24e..0b3bd1779f0 100644
--- a/source/blender/draw/modes/edit_text_mode.c
+++ b/source/blender/draw/modes/edit_text_mode.c
@@ -102,8 +102,8 @@ typedef struct EDIT_TEXT_PrivateData {
DRWShadingGroup *wire_shgrp;
DRWShadingGroup *overlay_select_shgrp;
DRWShadingGroup *overlay_cursor_shgrp;
- DRWShadingGroup *box_shgrp;
- DRWShadingGroup *box_active_shgrp;
+ DRWCallBuffer *box_shgrp;
+ DRWCallBuffer *box_active_shgrp;
} EDIT_TEXT_PrivateData; /* Transient data */
/* *********** FUNCTIONS *********** */
@@ -179,9 +179,9 @@ static void EDIT_TEXT_cache_init(void *vedata)
psl->text_box_pass = DRW_pass_create("Font Text Boxes",
DRW_STATE_WRITE_COLOR | DRW_STATE_WRITE_DEPTH);
- stl->g_data->box_shgrp = shgroup_dynlines_dashed_uniform_color(
+ stl->g_data->box_shgrp = buffer_dynlines_dashed_uniform_color(
psl->text_box_pass, G_draw.block.colorWire, draw_ctx->sh_cfg);
- stl->g_data->box_active_shgrp = shgroup_dynlines_dashed_uniform_color(
+ stl->g_data->box_active_shgrp = buffer_dynlines_dashed_uniform_color(
psl->text_box_pass, G_draw.block.colorActive, draw_ctx->sh_cfg);
}
}
@@ -265,7 +265,7 @@ static void edit_text_cache_populate_boxes(void *vedata, Object *ob)
EDIT_TEXT_StorageList *stl = ((EDIT_TEXT_Data *)vedata)->stl;
const Curve *cu = ob->data;
- DRWShadingGroup *shading_groups[] = {
+ DRWCallBuffer *callbufs[] = {
stl->g_data->box_active_shgrp,
stl->g_data->box_shgrp,
};
@@ -279,7 +279,7 @@ static void edit_text_cache_populate_boxes(void *vedata, Object *ob)
}
const bool is_active = i == (cu->actbox - 1);
- DRWShadingGroup *shading_group = shading_groups[is_active ? 0 : 1];
+ DRWCallBuffer *callbuf = callbufs[is_active ? 0 : 1];
vec[0] = cu->xof + tb->x;
vec[1] = cu->yof + tb->y + cu->fsize_realtime;
@@ -289,29 +289,29 @@ static void edit_text_cache_populate_boxes(void *vedata, Object *ob)
vec[0] += tb->w;
mul_v3_m4v3(vec2, ob->obmat, vec);
- DRW_shgroup_call_dynamic_add(shading_group, vec1);
- DRW_shgroup_call_dynamic_add(shading_group, vec2);
+ DRW_buffer_add_entry(callbuf, vec1);
+ DRW_buffer_add_entry(callbuf, vec2);
vec[1] -= tb->h;
copy_v3_v3(vec1, vec2);
mul_v3_m4v3(vec2, ob->obmat, vec);
- DRW_shgroup_call_dynamic_add(shading_group, vec1);
- DRW_shgroup_call_dynamic_add(shading_group, vec2);
+ DRW_buffer_add_entry(callbuf, vec1);
+ DRW_buffer_add_entry(callbuf, vec2);
vec[0] -= tb->w;
copy_v3_v3(vec1, vec2);
mul_v3_m4v3(vec2, ob->obmat, vec);
- DRW_shgroup_call_dynamic_add(shading_group, vec1);
- DRW_shgroup_call_dynamic_add(shading_group, vec2);
+ DRW_buffer_add_entry(callbuf, vec1);
+ DRW_buffer_add_entry(callbuf, vec2);
vec[1] += tb->h;
copy_v3_v3(vec1, vec2);
mul_v3_m4v3(vec2, ob->obmat, vec);
- DRW_shgroup_call_dynamic_add(shading_group, vec1);
- DRW_shgroup_call_dynamic_add(shading_group, vec2);
+ DRW_buffer_add_entry(callbuf, vec1);
+ DRW_buffer_add_entry(callbuf, vec2);
}
}
diff --git a/source/blender/draw/modes/object_mode.c b/source/blender/draw/modes/object_mode.c
index 9d6732fbcab..55f04c6218c 100644
--- a/source/blender/draw/modes/object_mode.c
+++ b/source/blender/draw/modes/object_mode.c
@@ -172,82 +172,82 @@ typedef struct OBJECT_ShadingGroupList {
struct DRWPass *bone_axes;
/* Empties */
- DRWShadingGroup *plain_axes;
- DRWShadingGroup *cube;
- DRWShadingGroup *circle;
- DRWShadingGroup *sphere;
- DRWShadingGroup *sphere_solid;
- DRWShadingGroup *cylinder;
- DRWShadingGroup *capsule_cap;
- DRWShadingGroup *capsule_body;
- DRWShadingGroup *cone;
- DRWShadingGroup *single_arrow;
- DRWShadingGroup *single_arrow_line;
- DRWShadingGroup *empty_axes;
+ DRWCallBuffer *plain_axes;
+ DRWCallBuffer *cube;
+ DRWCallBuffer *circle;
+ DRWCallBuffer *sphere;
+ DRWCallBuffer *sphere_solid;
+ DRWCallBuffer *cylinder;
+ DRWCallBuffer *capsule_cap;
+ DRWCallBuffer *capsule_body;
+ DRWCallBuffer *cone;
+ DRWCallBuffer *single_arrow;
+ DRWCallBuffer *single_arrow_line;
+ DRWCallBuffer *empty_axes;
/* Force Field */
- DRWShadingGroup *field_wind;
- DRWShadingGroup *field_force;
- DRWShadingGroup *field_vortex;
- DRWShadingGroup *field_curve_sta;
- DRWShadingGroup *field_curve_end;
- DRWShadingGroup *field_tube_limit;
- DRWShadingGroup *field_cone_limit;
+ DRWCallBuffer *field_wind;
+ DRWCallBuffer *field_force;
+ DRWCallBuffer *field_vortex;
+ DRWCallBuffer *field_curve_sta;
+ DRWCallBuffer *field_curve_end;
+ DRWCallBuffer *field_tube_limit;
+ DRWCallBuffer *field_cone_limit;
/* Grease Pencil */
- DRWShadingGroup *gpencil_axes;
+ DRWCallBuffer *gpencil_axes;
/* Speaker */
- DRWShadingGroup *speaker;
+ DRWCallBuffer *speaker;
/* Probe */
- DRWShadingGroup *probe_cube;
- DRWShadingGroup *probe_planar;
- DRWShadingGroup *probe_grid;
+ DRWCallBuffer *probe_cube;
+ DRWCallBuffer *probe_planar;
+ DRWCallBuffer *probe_grid;
/* MetaBalls */
- DRWShadingGroup *mball_handle;
+ DRWCallBuffer *mball_handle;
/* Lights */
- DRWShadingGroup *light_center;
- DRWShadingGroup *light_groundpoint;
- DRWShadingGroup *light_groundline;
- DRWShadingGroup *light_circle;
- DRWShadingGroup *light_circle_shadow;
- DRWShadingGroup *light_sunrays;
- DRWShadingGroup *light_distance;
- DRWShadingGroup *light_buflimit;
- DRWShadingGroup *light_buflimit_points;
- DRWShadingGroup *light_area_sphere;
- DRWShadingGroup *light_area_square;
- DRWShadingGroup *light_area_disk;
- DRWShadingGroup *light_hemi;
- DRWShadingGroup *light_spot_cone;
- DRWShadingGroup *light_spot_blend;
- DRWShadingGroup *light_spot_pyramid;
- DRWShadingGroup *light_spot_blend_rect;
- DRWShadingGroup *light_spot_volume;
- DRWShadingGroup *light_spot_volume_rect;
- DRWShadingGroup *light_spot_volume_outside;
- DRWShadingGroup *light_spot_volume_rect_outside;
+ DRWCallBuffer *light_center;
+ DRWCallBuffer *light_groundpoint;
+ DRWCallBuffer *light_groundline;
+ DRWCallBuffer *light_circle;
+ DRWCallBuffer *light_circle_shadow;
+ DRWCallBuffer *light_sunrays;
+ DRWCallBuffer *light_distance;
+ DRWCallBuffer *light_buflimit;
+ DRWCallBuffer *light_buflimit_points;
+ DRWCallBuffer *light_area_sphere;
+ DRWCallBuffer *light_area_square;
+ DRWCallBuffer *light_area_disk;
+ DRWCallBuffer *light_hemi;
+ DRWCallBuffer *light_spot_cone;
+ DRWCallBuffer *light_spot_blend;
+ DRWCallBuffer *light_spot_pyramid;
+ DRWCallBuffer *light_spot_blend_rect;
+ DRWCallBuffer *light_spot_volume;
+ DRWCallBuffer *light_spot_volume_rect;
+ DRWCallBuffer *light_spot_volume_outside;
+ DRWCallBuffer *light_spot_volume_rect_outside;
/* Helpers */
- DRWShadingGroup *relationship_lines;
- DRWShadingGroup *constraint_lines;
+ DRWCallBuffer *relationship_lines;
+ DRWCallBuffer *constraint_lines;
/* Camera */
- DRWShadingGroup *camera;
- DRWShadingGroup *camera_frame;
- DRWShadingGroup *camera_tria;
- DRWShadingGroup *camera_focus;
- DRWShadingGroup *camera_clip;
- DRWShadingGroup *camera_clip_points;
- DRWShadingGroup *camera_mist;
- DRWShadingGroup *camera_mist_points;
- DRWShadingGroup *camera_stereo_plane;
- DRWShadingGroup *camera_stereo_plane_wires;
- DRWShadingGroup *camera_stereo_volume;
- DRWShadingGroup *camera_stereo_volume_wires;
+ DRWCallBuffer *camera;
+ DRWCallBuffer *camera_frame;
+ DRWCallBuffer *camera_tria;
+ DRWCallBuffer *camera_focus;
+ DRWCallBuffer *camera_clip;
+ DRWCallBuffer *camera_clip_points;
+ DRWCallBuffer *camera_mist;
+ DRWCallBuffer *camera_mist_points;
+ DRWCallBuffer *camera_stereo_plane;
+ DRWCallBuffer *camera_stereo_plane_wires;
+ DRWCallBuffer *camera_stereo_volume;
+ DRWCallBuffer *camera_stereo_volume_wires;
ListBase camera_path;
/* Wire */
@@ -269,7 +269,7 @@ typedef struct OBJECT_ShadingGroupList {
DRWShadingGroup *points_dupli_select;
/* Texture Space */
- DRWShadingGroup *texspace;
+ DRWCallBuffer *texspace;
} OBJECT_ShadingGroupList;
typedef struct OBJECT_PrivateData {
@@ -285,22 +285,22 @@ typedef struct OBJECT_PrivateData {
DRWShadingGroup *outlines_transform;
/* Lightprobes */
- DRWShadingGroup *lightprobes_cube_select;
- DRWShadingGroup *lightprobes_cube_select_dupli;
- DRWShadingGroup *lightprobes_cube_active;
- DRWShadingGroup *lightprobes_cube_transform;
+ DRWCallBuffer *lightprobes_cube_select;
+ DRWCallBuffer *lightprobes_cube_select_dupli;
+ DRWCallBuffer *lightprobes_cube_active;
+ DRWCallBuffer *lightprobes_cube_transform;
- DRWShadingGroup *lightprobes_planar_select;
- DRWShadingGroup *lightprobes_planar_select_dupli;
- DRWShadingGroup *lightprobes_planar_active;
- DRWShadingGroup *lightprobes_planar_transform;
+ DRWCallBuffer *lightprobes_planar_select;
+ DRWCallBuffer *lightprobes_planar_select_dupli;
+ DRWCallBuffer *lightprobes_planar_active;
+ DRWCallBuffer *lightprobes_planar_transform;
/* Objects Centers */
- DRWShadingGroup *center_active;
- DRWShadingGroup *center_selected;
- DRWShadingGroup *center_deselected;
- DRWShadingGroup *center_selected_lib;
- DRWShadingGroup *center_deselected_lib;
+ DRWCallBuffer *center_active;
+ DRWCallBuffer *center_selected;
+ DRWCallBuffer *center_deselected;
+ DRWCallBuffer *center_selected_lib;
+ DRWCallBuffer *center_deselected_lib;
/* Outlines id offset (accessed as an array) */
int id_ofs_active;
@@ -326,7 +326,6 @@ typedef struct OBJECT_DupliData {
static struct {
/* Instance Data format */
- struct GPUVertFormat *particle_format;
struct GPUVertFormat *empty_image_format;
struct GPUVertFormat *empty_image_wire_format;
@@ -688,7 +687,6 @@ static void OBJECT_engine_init(void *vedata)
static void OBJECT_engine_free(void)
{
- MEM_SAFE_FREE(e_data.particle_format);
MEM_SAFE_FREE(e_data.empty_image_format);
MEM_SAFE_FREE(e_data.empty_image_wire_format);
@@ -798,8 +796,8 @@ static int *shgroup_theme_id_to_outline_counter(OBJECT_StorageList *stl,
}
}
-static DRWShadingGroup *shgroup_theme_id_to_probe_planar_outline_shgrp(OBJECT_StorageList *stl,
- int theme_id)
+static DRWCallBuffer *buffer_theme_id_to_probe_planar_outline_shgrp(OBJECT_StorageList *stl,
+ int theme_id)
{
/* does not increment counter */
switch (theme_id) {
@@ -813,9 +811,9 @@ static DRWShadingGroup *shgroup_theme_id_to_probe_planar_outline_shgrp(OBJECT_St
}
}
-static DRWShadingGroup *shgroup_theme_id_to_probe_cube_outline_shgrp(OBJECT_StorageList *stl,
- int theme_id,
- const int base_flag)
+static DRWCallBuffer *buffer_theme_id_to_probe_cube_outline_shgrp(OBJECT_StorageList *stl,
+ int theme_id,
+ const int base_flag)
{
/* does not increment counter */
if (UNLIKELY(base_flag & BASE_FROM_DUPLI)) {
@@ -1108,23 +1106,23 @@ static void OBJECT_cache_init(void *vedata)
struct GPUBatch *quad = DRW_cache_quad_get();
/* Cubemap */
- g_data->lightprobes_cube_select = shgroup_instance_outline(
+ g_data->lightprobes_cube_select = buffer_instance_outline(
pass, sphere, &g_data->id_ofs_prb_select);
- g_data->lightprobes_cube_select_dupli = shgroup_instance_outline(
+ g_data->lightprobes_cube_select_dupli = buffer_instance_outline(
pass, sphere, &g_data->id_ofs_prb_select_dupli);
- g_data->lightprobes_cube_active = shgroup_instance_outline(
+ g_data->lightprobes_cube_active = buffer_instance_outline(
pass, sphere, &g_data->id_ofs_prb_active);
- g_data->lightprobes_cube_transform = shgroup_instance_outline(
+ g_data->lightprobes_cube_transform = buffer_instance_outline(
pass, sphere, &g_data->id_ofs_prb_transform);
/* Planar */
- g_data->lightprobes_planar_select = shgroup_instance_outline(
+ g_data->lightprobes_planar_select = buffer_instance_outline(
pass, quad, &g_data->id_ofs_prb_select);
- g_data->lightprobes_planar_select_dupli = shgroup_instance_outline(
+ g_data->lightprobes_planar_select_dupli = buffer_instance_outline(
pass, quad, &g_data->id_ofs_prb_select_dupli);
- g_data->lightprobes_planar_active = shgroup_instance_outline(
+ g_data->lightprobes_planar_active = buffer_instance_outline(
pass, quad, &g_data->id_ofs_prb_active);
- g_data->lightprobes_planar_transform = shgroup_instance_outline(
+ g_data->lightprobes_planar_transform = buffer_instance_outline(
pass, quad, &g_data->id_ofs_prb_transform);
g_data->id_ofs_prb_select = 0;
@@ -1262,113 +1260,111 @@ static void OBJECT_cache_init(void *vedata)
/* Empties */
geom = DRW_cache_plain_axes_get();
- sgl->plain_axes = shgroup_instance(sgl->non_meshes, geom, draw_ctx->sh_cfg);
+ sgl->plain_axes = buffer_instance(sgl->non_meshes, geom, draw_ctx->sh_cfg);
geom = DRW_cache_empty_cube_get();
- sgl->cube = shgroup_instance(sgl->non_meshes, geom, draw_ctx->sh_cfg);
+ sgl->cube = buffer_instance(sgl->non_meshes, geom, draw_ctx->sh_cfg);
geom = DRW_cache_circle_get();
- sgl->circle = shgroup_instance(sgl->non_meshes, geom, draw_ctx->sh_cfg);
+ sgl->circle = buffer_instance(sgl->non_meshes, geom, draw_ctx->sh_cfg);
geom = DRW_cache_empty_sphere_get();
- sgl->sphere = shgroup_instance(sgl->non_meshes, geom, draw_ctx->sh_cfg);
+ sgl->sphere = buffer_instance(sgl->non_meshes, geom, draw_ctx->sh_cfg);
geom = DRW_cache_sphere_get();
- sgl->sphere_solid = shgroup_instance_solid(sgl->non_meshes, geom);
+ sgl->sphere_solid = buffer_instance_solid(sgl->non_meshes, geom);
geom = DRW_cache_empty_cylinder_get();
- sgl->cylinder = shgroup_instance(sgl->non_meshes, geom, draw_ctx->sh_cfg);
+ sgl->cylinder = buffer_instance(sgl->non_meshes, geom, draw_ctx->sh_cfg);
geom = DRW_cache_empty_capsule_cap_get();
- sgl->capsule_cap = shgroup_instance(sgl->non_meshes, geom, draw_ctx->sh_cfg);
+ sgl->capsule_cap = buffer_instance(sgl->non_meshes, geom, draw_ctx->sh_cfg);
geom = DRW_cache_empty_capsule_body_get();
- sgl->capsule_body = shgroup_instance(sgl->non_meshes, geom, draw_ctx->sh_cfg);
+ sgl->capsule_body = buffer_instance(sgl->non_meshes, geom, draw_ctx->sh_cfg);
geom = DRW_cache_empty_cone_get();
- sgl->cone = shgroup_instance(sgl->non_meshes, geom, draw_ctx->sh_cfg);
+ sgl->cone = buffer_instance(sgl->non_meshes, geom, draw_ctx->sh_cfg);
geom = DRW_cache_single_arrow_get();
- sgl->single_arrow = shgroup_instance(sgl->non_meshes, geom, draw_ctx->sh_cfg);
+ sgl->single_arrow = buffer_instance(sgl->non_meshes, geom, draw_ctx->sh_cfg);
geom = DRW_cache_single_line_get();
- sgl->single_arrow_line = shgroup_instance(sgl->non_meshes, geom, draw_ctx->sh_cfg);
+ sgl->single_arrow_line = buffer_instance(sgl->non_meshes, geom, draw_ctx->sh_cfg);
geom = DRW_cache_bone_arrows_get();
- sgl->empty_axes = shgroup_instance_empty_axes(sgl->non_meshes, geom, draw_ctx->sh_cfg);
+ sgl->empty_axes = buffer_instance_empty_axes(sgl->non_meshes, geom, draw_ctx->sh_cfg);
/* Force Field */
geom = DRW_cache_field_wind_get();
- sgl->field_wind = shgroup_instance_scaled(sgl->non_meshes, geom, draw_ctx->sh_cfg);
+ sgl->field_wind = buffer_instance_scaled(sgl->non_meshes, geom, draw_ctx->sh_cfg);
geom = DRW_cache_field_force_get();
- sgl->field_force = shgroup_instance_screen_aligned(sgl->non_meshes, geom, draw_ctx->sh_cfg);
+ sgl->field_force = buffer_instance_screen_aligned(sgl->non_meshes, geom, draw_ctx->sh_cfg);
geom = DRW_cache_field_vortex_get();
- sgl->field_vortex = shgroup_instance_scaled(sgl->non_meshes, geom, draw_ctx->sh_cfg);
+ sgl->field_vortex = buffer_instance_scaled(sgl->non_meshes, geom, draw_ctx->sh_cfg);
geom = DRW_cache_screenspace_circle_get();
- sgl->field_curve_sta = shgroup_instance_screen_aligned(
- sgl->non_meshes, geom, draw_ctx->sh_cfg);
+ sgl->field_curve_sta = buffer_instance_screen_aligned(sgl->non_meshes, geom, draw_ctx->sh_cfg);
/* Grease Pencil */
geom = DRW_cache_gpencil_axes_get();
- sgl->gpencil_axes = shgroup_instance(sgl->non_meshes, geom, draw_ctx->sh_cfg);
+ sgl->gpencil_axes = buffer_instance(sgl->non_meshes, geom, draw_ctx->sh_cfg);
/* Speaker */
geom = DRW_cache_speaker_get();
- sgl->speaker = shgroup_instance(sgl->non_meshes, geom, draw_ctx->sh_cfg);
+ sgl->speaker = buffer_instance(sgl->non_meshes, geom, draw_ctx->sh_cfg);
/* Probe */
static float probeSize = 14.0f;
geom = DRW_cache_lightprobe_cube_get();
- sgl->probe_cube = shgroup_instance_screenspace(
+ sgl->probe_cube = buffer_instance_screenspace(
sgl->non_meshes, geom, &probeSize, draw_ctx->sh_cfg);
geom = DRW_cache_lightprobe_grid_get();
- sgl->probe_grid = shgroup_instance_screenspace(
+ sgl->probe_grid = buffer_instance_screenspace(
sgl->non_meshes, geom, &probeSize, draw_ctx->sh_cfg);
static float probePlanarSize = 20.0f;
geom = DRW_cache_lightprobe_planar_get();
- sgl->probe_planar = shgroup_instance_screenspace(
+ sgl->probe_planar = buffer_instance_screenspace(
sgl->non_meshes, geom, &probePlanarSize, draw_ctx->sh_cfg);
/* Camera */
geom = DRW_cache_camera_get();
- sgl->camera = shgroup_camera_instance(sgl->non_meshes, geom, draw_ctx->sh_cfg);
+ sgl->camera = buffer_camera_instance(sgl->non_meshes, geom, draw_ctx->sh_cfg);
geom = DRW_cache_camera_frame_get();
- sgl->camera_frame = shgroup_camera_instance(sgl->non_meshes, geom, draw_ctx->sh_cfg);
+ sgl->camera_frame = buffer_camera_instance(sgl->non_meshes, geom, draw_ctx->sh_cfg);
geom = DRW_cache_camera_tria_get();
- sgl->camera_tria = shgroup_camera_instance(sgl->non_meshes, geom, draw_ctx->sh_cfg);
+ sgl->camera_tria = buffer_camera_instance(sgl->non_meshes, geom, draw_ctx->sh_cfg);
geom = DRW_cache_plain_axes_get();
- sgl->camera_focus = shgroup_instance(sgl->non_meshes, geom, draw_ctx->sh_cfg);
+ sgl->camera_focus = buffer_instance(sgl->non_meshes, geom, draw_ctx->sh_cfg);
geom = DRW_cache_single_line_get();
- sgl->camera_clip = shgroup_distance_lines_instance(sgl->non_meshes, geom, draw_ctx->sh_cfg);
- sgl->camera_mist = shgroup_distance_lines_instance(sgl->non_meshes, geom, draw_ctx->sh_cfg);
+ sgl->camera_clip = buffer_distance_lines_instance(sgl->non_meshes, geom, draw_ctx->sh_cfg);
+ sgl->camera_mist = buffer_distance_lines_instance(sgl->non_meshes, geom, draw_ctx->sh_cfg);
geom = DRW_cache_single_line_endpoints_get();
- sgl->camera_clip_points = shgroup_distance_lines_instance(
+ sgl->camera_clip_points = buffer_distance_lines_instance(
sgl->non_meshes, geom, draw_ctx->sh_cfg);
- sgl->camera_mist_points = shgroup_distance_lines_instance(
+ sgl->camera_mist_points = buffer_distance_lines_instance(
sgl->non_meshes, geom, draw_ctx->sh_cfg);
geom = DRW_cache_quad_wires_get();
- sgl->camera_stereo_plane_wires = shgroup_instance(sgl->non_meshes, geom, draw_ctx->sh_cfg);
- DRW_shgroup_state_enable(sgl->camera_stereo_plane_wires, DRW_STATE_WIRE);
+ sgl->camera_stereo_plane_wires = buffer_instance(sgl->non_meshes, geom, draw_ctx->sh_cfg);
geom = DRW_cache_empty_cube_get();
- sgl->camera_stereo_volume_wires = shgroup_instance(sgl->non_meshes, geom, draw_ctx->sh_cfg);
+ sgl->camera_stereo_volume_wires = buffer_instance(sgl->non_meshes, geom, draw_ctx->sh_cfg);
BLI_listbase_clear(&sgl->camera_path);
/* Texture Space */
geom = DRW_cache_empty_cube_get();
- sgl->texspace = shgroup_instance(sgl->non_meshes, geom, draw_ctx->sh_cfg);
+ sgl->texspace = buffer_instance(sgl->non_meshes, geom, draw_ctx->sh_cfg);
/* Wires (for loose edges) */
sh = GPU_shader_get_builtin_shader_with_config(GPU_SHADER_3D_UNIFORM_COLOR, draw_ctx->sh_cfg);
@@ -1400,123 +1396,138 @@ static void OBJECT_cache_init(void *vedata)
DRW_shgroup_state_disable(sgl->points_dupli_select, DRW_STATE_BLEND);
/* Metaballs Handles */
- sgl->mball_handle = shgroup_instance_mball_handles(sgl->non_meshes, draw_ctx->sh_cfg);
+ sgl->mball_handle = buffer_instance_mball_handles(sgl->non_meshes, draw_ctx->sh_cfg);
/* Lights */
/* TODO
* for now we create multiple times the same VBO with only light center coordinates
* but ideally we would only create it once */
- /* start with buflimit because we don't want stipples */
- geom = DRW_cache_single_line_get();
- sgl->light_buflimit = shgroup_distance_lines_instance(sgl->non_meshes, geom, draw_ctx->sh_cfg);
+ sh = GPU_shader_get_builtin_shader_with_config(
+ GPU_SHADER_3D_POINT_UNIFORM_SIZE_UNIFORM_COLOR_AA, draw_ctx->sh_cfg);
+
+ DRWShadingGroup *grp = DRW_shgroup_create(sh, sgl->non_meshes);
+ DRW_shgroup_uniform_vec4(grp, "color", gb->colorLightNoAlpha, 1);
+ DRW_shgroup_uniform_float(grp, "size", &gb->sizeLightCenter, 1);
+ DRW_shgroup_state_enable(grp, DRW_STATE_POINT);
+ if (draw_ctx->sh_cfg == GPU_SHADER_CFG_CLIPPED) {
+ DRW_shgroup_world_clip_planes_from_rv3d(grp, DRW_context_state_get()->rv3d);
+ }
+
+ sgl->light_center = buffer_dynpoints_uniform_color(grp);
- sgl->light_center = shgroup_dynpoints_uniform_color(
- sgl->non_meshes, gb->colorLightNoAlpha, &gb->sizeLightCenter, draw_ctx->sh_cfg);
+ geom = DRW_cache_single_line_get();
+ sgl->light_buflimit = buffer_distance_lines_instance(sgl->non_meshes, geom, draw_ctx->sh_cfg);
geom = DRW_cache_light_get();
- sgl->light_circle = shgroup_instance_screenspace(
+ sgl->light_circle = buffer_instance_screenspace(
sgl->non_meshes, geom, &gb->sizeLightCircle, draw_ctx->sh_cfg);
geom = DRW_cache_light_shadows_get();
- sgl->light_circle_shadow = shgroup_instance_screenspace(
+ sgl->light_circle_shadow = buffer_instance_screenspace(
sgl->non_meshes, geom, &gb->sizeLightCircleShadow, draw_ctx->sh_cfg);
geom = DRW_cache_light_sunrays_get();
- sgl->light_sunrays = shgroup_instance_screenspace(
+ sgl->light_sunrays = buffer_instance_screenspace(
sgl->non_meshes, geom, &gb->sizeLightCircle, draw_ctx->sh_cfg);
- sgl->light_groundline = shgroup_groundlines_uniform_color(
+ sgl->light_groundline = buffer_groundlines_uniform_color(
sgl->non_meshes, gb->colorLight, draw_ctx->sh_cfg);
- sgl->light_groundpoint = shgroup_groundpoints_uniform_color(
+ sgl->light_groundpoint = buffer_groundpoints_uniform_color(
sgl->non_meshes, gb->colorLight, draw_ctx->sh_cfg);
geom = DRW_cache_screenspace_circle_get();
- sgl->light_area_sphere = shgroup_instance_screen_aligned(
+ sgl->light_area_sphere = buffer_instance_screen_aligned(
sgl->non_meshes, geom, draw_ctx->sh_cfg);
geom = DRW_cache_light_area_square_get();
- sgl->light_area_square = shgroup_instance(sgl->non_meshes, geom, draw_ctx->sh_cfg);
+ sgl->light_area_square = buffer_instance(sgl->non_meshes, geom, draw_ctx->sh_cfg);
geom = DRW_cache_light_area_disk_get();
- sgl->light_area_disk = shgroup_instance(sgl->non_meshes, geom, draw_ctx->sh_cfg);
+ sgl->light_area_disk = buffer_instance(sgl->non_meshes, geom, draw_ctx->sh_cfg);
geom = DRW_cache_light_hemi_get();
- sgl->light_hemi = shgroup_instance(sgl->non_meshes, geom, draw_ctx->sh_cfg);
+ sgl->light_hemi = buffer_instance(sgl->non_meshes, geom, draw_ctx->sh_cfg);
geom = DRW_cache_single_line_get();
- sgl->light_distance = shgroup_distance_lines_instance(sgl->non_meshes, geom, draw_ctx->sh_cfg);
+ sgl->light_distance = buffer_distance_lines_instance(sgl->non_meshes, geom, draw_ctx->sh_cfg);
geom = DRW_cache_single_line_endpoints_get();
- sgl->light_buflimit_points = shgroup_distance_lines_instance(
+ sgl->light_buflimit_points = buffer_distance_lines_instance(
sgl->non_meshes, geom, draw_ctx->sh_cfg);
geom = DRW_cache_light_spot_get();
- sgl->light_spot_cone = shgroup_spot_instance(sgl->non_meshes, geom, draw_ctx->sh_cfg);
+ sgl->light_spot_cone = buffer_spot_instance(sgl->non_meshes, geom, draw_ctx->sh_cfg);
geom = DRW_cache_circle_get();
- sgl->light_spot_blend = shgroup_instance(sgl->non_meshes, geom, draw_ctx->sh_cfg);
+ sgl->light_spot_blend = buffer_instance(sgl->non_meshes, geom, draw_ctx->sh_cfg);
geom = DRW_cache_light_spot_square_get();
- sgl->light_spot_pyramid = shgroup_instance(sgl->non_meshes, geom, draw_ctx->sh_cfg);
+ sgl->light_spot_pyramid = buffer_instance(sgl->non_meshes, geom, draw_ctx->sh_cfg);
geom = DRW_cache_square_get();
- sgl->light_spot_blend_rect = shgroup_instance(sgl->non_meshes, geom, draw_ctx->sh_cfg);
+ sgl->light_spot_blend_rect = buffer_instance(sgl->non_meshes, geom, draw_ctx->sh_cfg);
/* -------- STIPPLES ------- */
/* Relationship Lines */
- sgl->relationship_lines = shgroup_dynlines_dashed_uniform_color(
+ sgl->relationship_lines = buffer_dynlines_dashed_uniform_color(
sgl->non_meshes, gb->colorWire, draw_ctx->sh_cfg);
- sgl->constraint_lines = shgroup_dynlines_dashed_uniform_color(
+ sgl->constraint_lines = buffer_dynlines_dashed_uniform_color(
sgl->non_meshes, gb->colorGridAxisZ, draw_ctx->sh_cfg);
/* Force Field Curve Guide End (here because of stipple) */
/* TODO port to shader stipple */
geom = DRW_cache_screenspace_circle_get();
- sgl->field_curve_end = shgroup_instance_screen_aligned(
- sgl->non_meshes, geom, draw_ctx->sh_cfg);
+ sgl->field_curve_end = buffer_instance_screen_aligned(sgl->non_meshes, geom, draw_ctx->sh_cfg);
/* Force Field Limits */
/* TODO port to shader stipple */
geom = DRW_cache_field_tube_limit_get();
- sgl->field_tube_limit = shgroup_instance_scaled(sgl->non_meshes, geom, draw_ctx->sh_cfg);
+ sgl->field_tube_limit = buffer_instance_scaled(sgl->non_meshes, geom, draw_ctx->sh_cfg);
/* TODO port to shader stipple */
geom = DRW_cache_field_cone_limit_get();
- sgl->field_cone_limit = shgroup_instance_scaled(sgl->non_meshes, geom, draw_ctx->sh_cfg);
+ sgl->field_cone_limit = buffer_instance_scaled(sgl->non_meshes, geom, draw_ctx->sh_cfg);
/* Transparent Shapes */
state = DRW_STATE_WRITE_COLOR | DRW_STATE_DEPTH_LESS_EQUAL | DRW_STATE_BLEND |
DRW_STATE_CULL_FRONT;
sgl->transp_shapes = psl->transp_shapes[i] = DRW_pass_create("Transparent Shapes", state);
+ sh = GPU_shader_get_builtin_shader_with_config(
+ GPU_SHADER_INSTANCE_VARIYING_COLOR_VARIYING_SIZE, draw_ctx->sh_cfg);
+
+ DRWShadingGroup *grp_transp = DRW_shgroup_create(sh, sgl->transp_shapes);
+ if (draw_ctx->sh_cfg == GPU_SHADER_CFG_CLIPPED) {
+ DRW_shgroup_world_clip_planes_from_rv3d(grp_transp, DRW_context_state_get()->rv3d);
+ }
+
+ DRWShadingGroup *grp_cull_back = DRW_shgroup_create_sub(grp_transp);
+ DRW_shgroup_state_disable(grp_cull_back, DRW_STATE_CULL_FRONT);
+ DRW_shgroup_state_enable(grp_cull_back, DRW_STATE_CULL_BACK);
+
+ DRWShadingGroup *grp_cull_none = DRW_shgroup_create_sub(grp_transp);
+ DRW_shgroup_state_disable(grp_cull_none, DRW_STATE_CULL_FRONT);
+
/* Spot cones */
geom = DRW_cache_light_spot_volume_get();
- sgl->light_spot_volume = shgroup_instance_alpha(sgl->transp_shapes, geom, draw_ctx->sh_cfg);
+ sgl->light_spot_volume = buffer_instance_alpha(grp_transp, geom);
geom = DRW_cache_light_spot_square_volume_get();
- sgl->light_spot_volume_rect = shgroup_instance_alpha(
- sgl->transp_shapes, geom, draw_ctx->sh_cfg);
+ sgl->light_spot_volume_rect = buffer_instance_alpha(grp_transp, geom);
geom = DRW_cache_light_spot_volume_get();
- sgl->light_spot_volume_outside = shgroup_instance_alpha(
- sgl->transp_shapes, geom, draw_ctx->sh_cfg);
- DRW_shgroup_state_disable(sgl->light_spot_volume_outside, DRW_STATE_CULL_FRONT);
- DRW_shgroup_state_enable(sgl->light_spot_volume_outside, DRW_STATE_CULL_BACK);
+ sgl->light_spot_volume_outside = buffer_instance_alpha(grp_cull_back, geom);
geom = DRW_cache_light_spot_square_volume_get();
- sgl->light_spot_volume_rect_outside = shgroup_instance_alpha(
- sgl->transp_shapes, geom, draw_ctx->sh_cfg);
- DRW_shgroup_state_disable(sgl->light_spot_volume_rect_outside, DRW_STATE_CULL_FRONT);
- DRW_shgroup_state_enable(sgl->light_spot_volume_rect_outside, DRW_STATE_CULL_BACK);
+ sgl->light_spot_volume_rect_outside = buffer_instance_alpha(grp_cull_back, geom);
/* Camera stereo volumes */
geom = DRW_cache_cube_get();
- sgl->camera_stereo_volume = shgroup_instance_alpha(sgl->transp_shapes, geom, draw_ctx->sh_cfg);
+ sgl->camera_stereo_volume = buffer_instance_alpha(grp_transp, geom);
geom = DRW_cache_quad_get();
- sgl->camera_stereo_plane = shgroup_instance_alpha(sgl->transp_shapes, geom, draw_ctx->sh_cfg);
- DRW_shgroup_state_disable(sgl->camera_stereo_plane, DRW_STATE_CULL_FRONT);
+ sgl->camera_stereo_plane = buffer_instance_alpha(grp_cull_none, geom);
}
{
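The transparent-shapes part of the hunk above replaces per-shape state toggling with sub-groups: the parent DRWShadingGroup carries the shader and clip planes, DRW_shgroup_create_sub derives the culling variants, and each shape attaches its own call buffer. A condensed sketch (not part of the patch; "sh", "pass" and "geom" stand for the shader, pass and batch set up in the surrounding cache_init code):

  /* Sketch: state is overridden on the sub-group, geometry buffers hang off
   * whichever group has the wanted culling. */
  DRWShadingGroup *parent = DRW_shgroup_create(sh, pass);
  DRWShadingGroup *back_faces = DRW_shgroup_create_sub(parent);
  DRW_shgroup_state_disable(back_faces, DRW_STATE_CULL_FRONT);
  DRW_shgroup_state_enable(back_faces, DRW_STATE_CULL_BACK);
  DRWCallBuffer *inside = buffer_instance_alpha(parent, geom);
  DRWCallBuffer *outside = buffer_instance_alpha(back_faces, geom);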
@@ -1534,7 +1545,7 @@ static void OBJECT_cache_init(void *vedata)
GPU_SHADER_3D_POINT_UNIFORM_SIZE_UNIFORM_COLOR_OUTLINE_AA, draw_ctx->sh_cfg);
/* Active */
- grp = DRW_shgroup_point_batch_create(sh, psl->ob_center);
+ grp = DRW_shgroup_create(sh, psl->ob_center);
DRW_shgroup_uniform_float(grp, "size", &size, 1);
DRW_shgroup_uniform_float(grp, "outlineWidth", &outlineWidth, 1);
DRW_shgroup_uniform_vec4(grp, "color", gb->colorActive, 1);
@@ -1542,39 +1553,28 @@ static void OBJECT_cache_init(void *vedata)
if (draw_ctx->sh_cfg == GPU_SHADER_CFG_CLIPPED) {
DRW_shgroup_world_clip_planes_from_rv3d(grp, draw_ctx->rv3d);
}
- stl->g_data->center_active = grp;
+ /* TODO find better name. */
+ stl->g_data->center_active = buffer_dynpoints_uniform_color(grp);
/* Select */
- grp = DRW_shgroup_point_batch_create(sh, psl->ob_center);
+ grp = DRW_shgroup_create_sub(grp);
DRW_shgroup_uniform_vec4(grp, "color", gb->colorSelect, 1);
- if (draw_ctx->sh_cfg == GPU_SHADER_CFG_CLIPPED) {
- DRW_shgroup_world_clip_planes_from_rv3d(grp, draw_ctx->rv3d);
- }
- stl->g_data->center_selected = grp;
+ stl->g_data->center_selected = buffer_dynpoints_uniform_color(grp);
/* Deselect */
- grp = DRW_shgroup_point_batch_create(sh, psl->ob_center);
+ grp = DRW_shgroup_create_sub(grp);
DRW_shgroup_uniform_vec4(grp, "color", gb->colorDeselect, 1);
- if (draw_ctx->sh_cfg == GPU_SHADER_CFG_CLIPPED) {
- DRW_shgroup_world_clip_planes_from_rv3d(grp, draw_ctx->rv3d);
- }
- stl->g_data->center_deselected = grp;
+ stl->g_data->center_deselected = buffer_dynpoints_uniform_color(grp);
/* Select (library) */
- grp = DRW_shgroup_point_batch_create(sh, psl->ob_center);
+ grp = DRW_shgroup_create_sub(grp);
DRW_shgroup_uniform_vec4(grp, "color", gb->colorLibrarySelect, 1);
- if (draw_ctx->sh_cfg == GPU_SHADER_CFG_CLIPPED) {
- DRW_shgroup_world_clip_planes_from_rv3d(grp, draw_ctx->rv3d);
- }
- stl->g_data->center_selected_lib = grp;
+ stl->g_data->center_selected_lib = buffer_dynpoints_uniform_color(grp);
/* Deselect (library) */
- grp = DRW_shgroup_point_batch_create(sh, psl->ob_center);
+ grp = DRW_shgroup_create_sub(grp);
DRW_shgroup_uniform_vec4(grp, "color", gb->colorLibrary, 1);
- if (draw_ctx->sh_cfg == GPU_SHADER_CFG_CLIPPED) {
- DRW_shgroup_world_clip_planes_from_rv3d(grp, draw_ctx->rv3d);
- }
- stl->g_data->center_deselected_lib = grp;
+ stl->g_data->center_deselected_lib = buffer_dynpoints_uniform_color(grp);
}
{
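The object-center chain above relies on the same sub-group inheritance: size, outline width and clip planes are declared once on the first group, and each variant only re-declares "color" before wrapping itself in a point buffer (entries are added later in cache_populate with the object location, as in DRW_shgroup_object_center further down). A minimal sketch, assuming "grp" and "gb" are the ones from the hunk above:

  /* Sketch: only the differing uniform is set on the sub-group. */
  DRWShadingGroup *sel = DRW_shgroup_create_sub(grp);
  DRW_shgroup_uniform_vec4(sel, "color", gb->colorSelect, 1);
  DRWCallBuffer *centers_selected = buffer_dynpoints_uniform_color(sel);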
@@ -1619,7 +1619,7 @@ static void DRW_shgroup_mball_handles(OBJECT_ShadingGroupList *sgl,
draw_scale_xform[1][3] = world_pos[1];
draw_scale_xform[2][3] = world_pos[2];
- DRW_shgroup_call_dynamic_add(sgl->mball_handle, draw_scale_xform, &ml->rad, color);
+ DRW_buffer_add_entry(sgl->mball_handle, draw_scale_xform, &ml->rad, color);
}
}
@@ -1645,25 +1645,25 @@ static void DRW_shgroup_light(OBJECT_ShadingGroupList *sgl, Object *ob, ViewLaye
if ((ob->base_flag & (BASE_FROM_SET | BASE_FROM_DUPLI)) == 0) {
/* Don't draw the center if it's selected or active */
if (theme_id == TH_LIGHT) {
- DRW_shgroup_call_dynamic_add(sgl->light_center, ob->obmat[3]);
+ DRW_buffer_add_entry(sgl->light_center, ob->obmat[3]);
}
}
/* First circle */
- DRW_shgroup_call_dynamic_add(sgl->light_circle, ob->obmat[3], color);
+ DRW_buffer_add_entry(sgl->light_circle, ob->obmat[3], color);
/* draw dashed outer circle for shadow */
- DRW_shgroup_call_dynamic_add(sgl->light_circle_shadow, ob->obmat[3], color);
+ DRW_buffer_add_entry(sgl->light_circle_shadow, ob->obmat[3], color);
/* Distance */
if (ELEM(la->type, LA_SUN, LA_AREA)) {
- DRW_shgroup_call_dynamic_add(sgl->light_distance, color, &zero, &la->dist, ob->obmat);
+ DRW_buffer_add_entry(sgl->light_distance, color, &zero, &la->dist, ob->obmat);
}
copy_m4_m4(shapemat, ob->obmat);
if (la->type == LA_SUN) {
- DRW_shgroup_call_dynamic_add(sgl->light_sunrays, ob->obmat[3], color);
+ DRW_buffer_add_entry(sgl->light_sunrays, ob->obmat[3], color);
}
else if (la->type == LA_SPOT) {
float size[3], sizemat[4][4];
@@ -1685,42 +1685,39 @@ static void DRW_shgroup_light(OBJECT_ShadingGroupList *sgl, Object *ob, ViewLaye
mul_m4_m4m4(spotblendmat, shapemat, sizemat);
if (la->mode & LA_SQUARE) {
- DRW_shgroup_call_dynamic_add(sgl->light_spot_pyramid, color, &one, shapemat);
+ DRW_buffer_add_entry(sgl->light_spot_pyramid, color, &one, shapemat);
/* hide line if it is zero size or overlaps with outer border,
* previously it adjusted to always to show it but that seems
* confusing because it doesn't show the actual blend size */
if (blend != 0.0f && blend != 1.0f) {
- DRW_shgroup_call_dynamic_add(sgl->light_spot_blend_rect, color, &one, spotblendmat);
+ DRW_buffer_add_entry(sgl->light_spot_blend_rect, color, &one, spotblendmat);
}
if (la->mode & LA_SHOW_CONE) {
- DRW_shgroup_call_dynamic_add(sgl->light_spot_volume_rect, cone_inside, &one, shapemat);
- DRW_shgroup_call_dynamic_add(
- sgl->light_spot_volume_rect_outside, cone_outside, &one, shapemat);
+ DRW_buffer_add_entry(sgl->light_spot_volume_rect, cone_inside, &one, shapemat);
+ DRW_buffer_add_entry(sgl->light_spot_volume_rect_outside, cone_outside, &one, shapemat);
}
}
else {
- DRW_shgroup_call_dynamic_add(sgl->light_spot_cone, color, shapemat);
+ DRW_buffer_add_entry(sgl->light_spot_cone, color, shapemat);
/* hide line if it is zero size or overlaps with outer border,
* previously it adjusted to always to show it but that seems
* confusing because it doesn't show the actual blend size */
if (blend != 0.0f && blend != 1.0f) {
- DRW_shgroup_call_dynamic_add(sgl->light_spot_blend, color, &one, spotblendmat);
+ DRW_buffer_add_entry(sgl->light_spot_blend, color, &one, spotblendmat);
}
if (la->mode & LA_SHOW_CONE) {
- DRW_shgroup_call_dynamic_add(sgl->light_spot_volume, cone_inside, &one, shapemat);
- DRW_shgroup_call_dynamic_add(sgl->light_spot_volume_outside, cone_outside, &one, shapemat);
+ DRW_buffer_add_entry(sgl->light_spot_volume, cone_inside, &one, shapemat);
+ DRW_buffer_add_entry(sgl->light_spot_volume_outside, cone_outside, &one, shapemat);
}
}
- DRW_shgroup_call_dynamic_add(
- sgl->light_buflimit, color, &la->clipsta, &la->clipend, ob->obmat);
- DRW_shgroup_call_dynamic_add(
- sgl->light_buflimit_points, color, &la->clipsta, &la->clipend, ob->obmat);
+ DRW_buffer_add_entry(sgl->light_buflimit, color, &la->clipsta, &la->clipend, ob->obmat);
+ DRW_buffer_add_entry(sgl->light_buflimit_points, color, &la->clipsta, &la->clipend, ob->obmat);
}
else if (la->type == LA_AREA) {
float size[3] = {1.0f, 1.0f, 1.0f}, sizemat[4][4];
@@ -1732,10 +1729,10 @@ static void DRW_shgroup_light(OBJECT_ShadingGroupList *sgl, Object *ob, ViewLaye
}
if (ELEM(la->area_shape, LA_AREA_DISK, LA_AREA_ELLIPSE)) {
- DRW_shgroup_call_dynamic_add(sgl->light_area_disk, color, &la->area_size, shapemat);
+ DRW_buffer_add_entry(sgl->light_area_disk, color, &la->area_size, shapemat);
}
else {
- DRW_shgroup_call_dynamic_add(sgl->light_area_square, color, &la->area_size, shapemat);
+ DRW_buffer_add_entry(sgl->light_area_square, color, &la->area_size, shapemat);
}
}
@@ -1745,12 +1742,12 @@ static void DRW_shgroup_light(OBJECT_ShadingGroupList *sgl, Object *ob, ViewLaye
shapemat[0][1] = shapemat[0][2] = 0.0f;
shapemat[1][0] = shapemat[1][2] = 0.0f;
shapemat[2][0] = shapemat[2][1] = 0.0f;
- DRW_shgroup_call_dynamic_add(sgl->light_area_sphere, color, &la->area_size, shapemat);
+ DRW_buffer_add_entry(sgl->light_area_sphere, color, &la->area_size, shapemat);
}
/* Line and point going to the ground */
- DRW_shgroup_call_dynamic_add(sgl->light_groundline, ob->obmat[3]);
- DRW_shgroup_call_dynamic_add(sgl->light_groundpoint, ob->obmat[3]);
+ DRW_buffer_add_entry(sgl->light_groundline, ob->obmat[3]);
+ DRW_buffer_add_entry(sgl->light_groundpoint, ob->obmat[3]);
}
static GPUBatch *batch_camera_path_get(ListBase *camera_paths,
@@ -1846,19 +1843,19 @@ static void camera_view3d_stereoscopy_display_extra(OBJECT_ShadingGroupList *sgl
copy_v2_v2(drw_tria_dummy[eye][1], cam->runtime.drw_corners[eye][0]);
if (is_stereo3d_cameras) {
- DRW_shgroup_call_dynamic_add(sgl->camera_frame,
- color,
- cam->runtime.drw_corners[eye],
- &cam->runtime.drw_depth[eye],
- cam->runtime.drw_tria,
- obmat);
+ DRW_buffer_add_entry(sgl->camera_frame,
+ color,
+ cam->runtime.drw_corners[eye],
+ &cam->runtime.drw_depth[eye],
+ cam->runtime.drw_tria,
+ obmat);
- DRW_shgroup_call_dynamic_add(sgl->camera,
- color,
- cam->runtime.drw_corners[eye],
- &cam->runtime.drw_depth[eye],
- drw_tria_dummy[eye],
- obmat);
+ DRW_buffer_add_entry(sgl->camera,
+ color,
+ cam->runtime.drw_corners[eye],
+ &cam->runtime.drw_depth[eye],
+ drw_tria_dummy[eye],
+ obmat);
}
/* Connecting line. */
@@ -1867,8 +1864,8 @@ static void camera_view3d_stereoscopy_display_extra(OBJECT_ShadingGroupList *sgl
/* Draw connecting lines. */
if (is_stereo3d_cameras) {
- DRW_shgroup_call_dynamic_add(sgl->relationship_lines, origin[0]);
- DRW_shgroup_call_dynamic_add(sgl->relationship_lines, origin[1]);
+ DRW_buffer_add_entry(sgl->relationship_lines, origin[0]);
+ DRW_buffer_add_entry(sgl->relationship_lines, origin[1]);
}
/* Draw convergence plane. */
@@ -1905,9 +1902,9 @@ static void camera_view3d_stereoscopy_display_extra(OBJECT_ShadingGroupList *sgl
translate_m4(plane_mat, 2.0f * cam->shiftx, (width / height) * 2.0f * cam->shifty, 0.0f);
if (v3d->stereo3d_convergence_alpha > 0.0f) {
- DRW_shgroup_call_dynamic_add(sgl->camera_stereo_plane, color_plane[0], &one, plane_mat);
+ DRW_buffer_add_entry(sgl->camera_stereo_plane, color_plane[0], &one, plane_mat);
}
- DRW_shgroup_call_dynamic_add(sgl->camera_stereo_plane_wires, color_plane[1], &one, plane_mat);
+ DRW_buffer_add_entry(sgl->camera_stereo_plane_wires, color_plane[1], &one, plane_mat);
}
/* Draw convergence volume. */
@@ -1931,10 +1928,9 @@ static void camera_view3d_stereoscopy_display_extra(OBJECT_ShadingGroupList *sgl
invert_m4_m4(persinv, persmat);
if (v3d->stereo3d_volume_alpha > 0.0f) {
- DRW_shgroup_call_dynamic_add(sgl->camera_stereo_volume, color_volume[eye], &one, persinv);
+ DRW_buffer_add_entry(sgl->camera_stereo_volume, color_volume[eye], &one, persinv);
}
- DRW_shgroup_call_dynamic_add(
- sgl->camera_stereo_volume_wires, color_volume[2], &one, persinv);
+ DRW_buffer_add_entry(sgl->camera_stereo_volume_wires, color_volume[2], &one, persinv);
}
}
}
@@ -2048,7 +2044,7 @@ static void camera_view3d_reconstruction(OBJECT_ShadingGroupList *sgl,
};
mul_m4_m4m4(bundle_mat, bundle_mat, bundle_scale_mat);
- DRW_shgroup_call_dynamic_add(sgl->sphere_solid, bundle_mat, bundle_color_v4);
+ DRW_buffer_add_entry(sgl->sphere_solid, bundle_mat, bundle_color_v4);
}
else {
DRW_shgroup_empty_ex(
@@ -2163,31 +2159,31 @@ static void DRW_shgroup_camera(OBJECT_ShadingGroupList *sgl, Object *ob, ViewLay
copy_m4_m4(mat, ob->obmat);
}
- DRW_shgroup_call_dynamic_add(sgl->camera_frame,
- color,
- cam->runtime.drw_corners[0],
- &cam->runtime.drw_depth[0],
- cam->runtime.drw_tria,
- mat);
+ DRW_buffer_add_entry(sgl->camera_frame,
+ color,
+ cam->runtime.drw_corners[0],
+ &cam->runtime.drw_depth[0],
+ cam->runtime.drw_tria,
+ mat);
}
else {
if (!is_stereo3d_cameras) {
- DRW_shgroup_call_dynamic_add(sgl->camera,
- color,
- cam->runtime.drw_corners[0],
- &cam->runtime.drw_depth[0],
- cam->runtime.drw_tria,
- ob->obmat);
+ DRW_buffer_add_entry(sgl->camera,
+ color,
+ cam->runtime.drw_corners[0],
+ &cam->runtime.drw_depth[0],
+ cam->runtime.drw_tria,
+ ob->obmat);
}
/* Active cam */
if (is_active) {
- DRW_shgroup_call_dynamic_add(sgl->camera_tria,
- color,
- cam->runtime.drw_corners[0],
- &cam->runtime.drw_depth[0],
- cam->runtime.drw_tria,
- ob->obmat);
+ DRW_buffer_add_entry(sgl->camera_tria,
+ color,
+ cam->runtime.drw_corners[0],
+ &cam->runtime.drw_depth[0],
+ cam->runtime.drw_tria,
+ ob->obmat);
}
}
@@ -2204,16 +2200,16 @@ static void DRW_shgroup_camera(OBJECT_ShadingGroupList *sgl, Object *ob, ViewLay
size_to_mat4(sizemat, size);
mul_m4_m4m4(cam->runtime.drw_focusmat, cam->runtime.drw_focusmat, sizemat);
- DRW_shgroup_call_dynamic_add(
+ DRW_buffer_add_entry(
sgl->camera_focus, (is_active ? col_hi : col), &cam->drawsize, cam->runtime.drw_focusmat);
- DRW_shgroup_call_dynamic_add(
+ DRW_buffer_add_entry(
sgl->camera_clip, color, &cam->clip_start, &cam->clip_end, cam->runtime.drw_normalmat);
- DRW_shgroup_call_dynamic_add(sgl->camera_clip_points,
- (is_active ? col_hi : col),
- &cam->clip_start,
- &cam->clip_end,
- cam->runtime.drw_normalmat);
+ DRW_buffer_add_entry(sgl->camera_clip_points,
+ (is_active ? col_hi : col),
+ &cam->clip_start,
+ &cam->clip_end,
+ cam->runtime.drw_normalmat);
}
if (cam->flag & CAM_SHOWMIST) {
@@ -2222,13 +2218,13 @@ static void DRW_shgroup_camera(OBJECT_ShadingGroupList *sgl, Object *ob, ViewLay
if (world) {
static float col[4] = {0.5f, 0.5f, 0.5f, 1.0f}, col_hi[4] = {1.0f, 1.0f, 1.0f, 1.0f};
world->mistend = world->miststa + world->mistdist;
- DRW_shgroup_call_dynamic_add(
+ DRW_buffer_add_entry(
sgl->camera_mist, color, &world->miststa, &world->mistend, cam->runtime.drw_normalmat);
- DRW_shgroup_call_dynamic_add(sgl->camera_mist_points,
- (is_active ? col_hi : col),
- &world->miststa,
- &world->mistend,
- cam->runtime.drw_normalmat);
+ DRW_buffer_add_entry(sgl->camera_mist_points,
+ (is_active ? col_hi : col),
+ &world->miststa,
+ &world->mistend,
+ cam->runtime.drw_normalmat);
}
}
@@ -2250,26 +2246,26 @@ static void DRW_shgroup_empty_ex(OBJECT_ShadingGroupList *sgl,
{
switch (draw_type) {
case OB_PLAINAXES:
- DRW_shgroup_call_dynamic_add(sgl->plain_axes, color, draw_size, mat);
+ DRW_buffer_add_entry(sgl->plain_axes, color, draw_size, mat);
break;
case OB_SINGLE_ARROW:
- DRW_shgroup_call_dynamic_add(sgl->single_arrow, color, draw_size, mat);
- DRW_shgroup_call_dynamic_add(sgl->single_arrow_line, color, draw_size, mat);
+ DRW_buffer_add_entry(sgl->single_arrow, color, draw_size, mat);
+ DRW_buffer_add_entry(sgl->single_arrow_line, color, draw_size, mat);
break;
case OB_CUBE:
- DRW_shgroup_call_dynamic_add(sgl->cube, color, draw_size, mat);
+ DRW_buffer_add_entry(sgl->cube, color, draw_size, mat);
break;
case OB_CIRCLE:
- DRW_shgroup_call_dynamic_add(sgl->circle, color, draw_size, mat);
+ DRW_buffer_add_entry(sgl->circle, color, draw_size, mat);
break;
case OB_EMPTY_SPHERE:
- DRW_shgroup_call_dynamic_add(sgl->sphere, color, draw_size, mat);
+ DRW_buffer_add_entry(sgl->sphere, color, draw_size, mat);
break;
case OB_EMPTY_CONE:
- DRW_shgroup_call_dynamic_add(sgl->cone, color, draw_size, mat);
+ DRW_buffer_add_entry(sgl->cone, color, draw_size, mat);
break;
case OB_ARROWS:
- DRW_shgroup_call_dynamic_add(sgl->empty_axes, color, draw_size, mat);
+ DRW_buffer_add_entry(sgl->empty_axes, color, draw_size, mat);
break;
case OB_EMPTY_IMAGE:
BLI_assert(!"Should never happen, use DRW_shgroup_empty instead.");
@@ -2361,19 +2357,19 @@ static void DRW_shgroup_forcefield(OBJECT_ShadingGroupList *sgl, Object *ob, Vie
switch (pd->forcefield) {
case PFIELD_WIND:
- DRW_shgroup_call_dynamic_add(sgl->field_wind, color, &pd->drawvec1, ob->obmat);
+ DRW_buffer_add_entry(sgl->field_wind, color, &pd->drawvec1, ob->obmat);
break;
case PFIELD_FORCE:
- DRW_shgroup_call_dynamic_add(sgl->field_force, color, &pd->drawvec1, ob->obmat);
+ DRW_buffer_add_entry(sgl->field_force, color, &pd->drawvec1, ob->obmat);
break;
case PFIELD_VORTEX:
- DRW_shgroup_call_dynamic_add(sgl->field_vortex, color, &pd->drawvec1, ob->obmat);
+ DRW_buffer_add_entry(sgl->field_vortex, color, &pd->drawvec1, ob->obmat);
break;
case PFIELD_GUIDE:
if (cu && (cu->flag & CU_PATH) && ob->runtime.curve_cache->path &&
ob->runtime.curve_cache->path->data) {
- DRW_shgroup_call_dynamic_add(sgl->field_curve_sta, color, &pd->f_strength, ob->obmat);
- DRW_shgroup_call_dynamic_add(sgl->field_curve_end, color, &pd->f_strength, ob->obmat);
+ DRW_buffer_add_entry(sgl->field_curve_sta, color, &pd->f_strength, ob->obmat);
+ DRW_buffer_add_entry(sgl->field_curve_end, color, &pd->f_strength, ob->obmat);
}
break;
}
@@ -2381,33 +2377,29 @@ static void DRW_shgroup_forcefield(OBJECT_ShadingGroupList *sgl, Object *ob, Vie
if (pd->falloff == PFIELD_FALL_SPHERE) {
/* as last, guide curve alters it */
if ((pd->flag & PFIELD_USEMAX) != 0) {
- DRW_shgroup_call_dynamic_add(sgl->field_curve_end, color, &pd->maxdist, ob->obmat);
+ DRW_buffer_add_entry(sgl->field_curve_end, color, &pd->maxdist, ob->obmat);
}
if ((pd->flag & PFIELD_USEMIN) != 0) {
- DRW_shgroup_call_dynamic_add(sgl->field_curve_end, color, &pd->mindist, ob->obmat);
+ DRW_buffer_add_entry(sgl->field_curve_end, color, &pd->mindist, ob->obmat);
}
}
else if (pd->falloff == PFIELD_FALL_TUBE) {
if (pd->flag & (PFIELD_USEMAX | PFIELD_USEMAXR)) {
- DRW_shgroup_call_dynamic_add(
- sgl->field_tube_limit, color, &pd->drawvec_falloff_max, ob->obmat);
+ DRW_buffer_add_entry(sgl->field_tube_limit, color, &pd->drawvec_falloff_max, ob->obmat);
}
if (pd->flag & (PFIELD_USEMIN | PFIELD_USEMINR)) {
- DRW_shgroup_call_dynamic_add(
- sgl->field_tube_limit, color, &pd->drawvec_falloff_min, ob->obmat);
+ DRW_buffer_add_entry(sgl->field_tube_limit, color, &pd->drawvec_falloff_min, ob->obmat);
}
}
else if (pd->falloff == PFIELD_FALL_CONE) {
if (pd->flag & (PFIELD_USEMAX | PFIELD_USEMAXR)) {
- DRW_shgroup_call_dynamic_add(
- sgl->field_cone_limit, color, &pd->drawvec_falloff_max, ob->obmat);
+ DRW_buffer_add_entry(sgl->field_cone_limit, color, &pd->drawvec_falloff_max, ob->obmat);
}
if (pd->flag & (PFIELD_USEMIN | PFIELD_USEMINR)) {
- DRW_shgroup_call_dynamic_add(
- sgl->field_cone_limit, color, &pd->drawvec_falloff_min, ob->obmat);
+ DRW_buffer_add_entry(sgl->field_cone_limit, color, &pd->drawvec_falloff_min, ob->obmat);
}
}
}
@@ -2439,7 +2431,7 @@ static void DRW_shgroup_volume_extra(OBJECT_ShadingGroupList *sgl,
translate_m4(voxel_cubemat, 1.0f, 1.0f, 1.0f);
mul_m4_m4m4(voxel_cubemat, ob->obmat, voxel_cubemat);
- DRW_shgroup_call_dynamic_add(sgl->cube, color, &one, voxel_cubemat);
+ DRW_buffer_add_entry(sgl->cube, color, &one, voxel_cubemat);
/* Don't show smoke before simulation starts, this could be made an option in the future. */
if (!sds->draw_velocity || !sds->fluid || CFRA < sds->point_cache[0]->startframe) {
@@ -2499,7 +2491,7 @@ static void DRW_shgroup_speaker(OBJECT_ShadingGroupList *sgl, Object *ob, ViewLa
static float one = 1.0f;
DRW_object_wire_theme_get(ob, view_layer, &color);
- DRW_shgroup_call_dynamic_add(sgl->speaker, color, &one, ob->obmat);
+ DRW_buffer_add_entry(sgl->speaker, color, &one, ob->obmat);
}
typedef struct OBJECT_LightProbeEngineData {
@@ -2583,17 +2575,17 @@ static void DRW_shgroup_lightprobe(OBJECT_Shaders *sh_data,
// unit_m4(prb_data->probe_cube_mat);
// copy_v3_v3(prb_data->probe_cube_mat[3], ob->obmat[3]);
- DRWShadingGroup *grp = shgroup_theme_id_to_probe_cube_outline_shgrp(
+ DRWCallBuffer *buf = buffer_theme_id_to_probe_cube_outline_shgrp(
stl, theme_id, ob->base_flag);
/* TODO remove or change the drawing of the cube probes. Theses line draws nothing on purpose
* to keep the call ids correct. */
zero_m4(probe_cube_mat);
- DRW_shgroup_call_dynamic_add(grp, call_id, &draw_size, probe_cube_mat);
+ DRW_buffer_add_entry(buf, call_id, &draw_size, probe_cube_mat);
}
else {
float draw_size = 1.0f;
- DRWShadingGroup *grp = shgroup_theme_id_to_probe_planar_outline_shgrp(stl, theme_id);
- DRW_shgroup_call_dynamic_add(grp, call_id, &draw_size, ob->obmat);
+ DRWCallBuffer *buf = buffer_theme_id_to_probe_planar_outline_shgrp(stl, theme_id);
+ DRW_buffer_add_entry(buf, call_id, &draw_size, ob->obmat);
}
*call_id += 1;
@@ -2601,14 +2593,14 @@ static void DRW_shgroup_lightprobe(OBJECT_Shaders *sh_data,
switch (prb->type) {
case LIGHTPROBE_TYPE_PLANAR:
- DRW_shgroup_call_dynamic_add(sgl->probe_planar, ob->obmat[3], color);
+ DRW_buffer_add_entry(sgl->probe_planar, ob->obmat[3], color);
break;
case LIGHTPROBE_TYPE_GRID:
- DRW_shgroup_call_dynamic_add(sgl->probe_grid, ob->obmat[3], color);
+ DRW_buffer_add_entry(sgl->probe_grid, ob->obmat[3], color);
break;
case LIGHTPROBE_TYPE_CUBE:
default:
- DRW_shgroup_call_dynamic_add(sgl->probe_cube, ob->obmat[3], color);
+ DRW_buffer_add_entry(sgl->probe_cube, ob->obmat[3], color);
break;
}
@@ -2617,13 +2609,13 @@ static void DRW_shgroup_lightprobe(OBJECT_Shaders *sh_data,
copy_m4_m4(mat, ob->obmat);
normalize_m4(mat);
- DRW_shgroup_call_dynamic_add(sgl->single_arrow, color, &ob->empty_drawsize, mat);
- DRW_shgroup_call_dynamic_add(sgl->single_arrow_line, color, &ob->empty_drawsize, mat);
+ DRW_buffer_add_entry(sgl->single_arrow, color, &ob->empty_drawsize, mat);
+ DRW_buffer_add_entry(sgl->single_arrow_line, color, &ob->empty_drawsize, mat);
copy_m4_m4(mat, ob->obmat);
zero_v3(mat[2]);
- DRW_shgroup_call_dynamic_add(sgl->cube, color, &one, mat);
+ DRW_buffer_add_entry(sgl->cube, color, &one, mat);
}
if ((prb->flag & LIGHTPROBE_FLAG_SHOW_INFLUENCE) != 0) {
@@ -2637,8 +2629,8 @@ static void DRW_shgroup_lightprobe(OBJECT_Shaders *sh_data,
}
if (prb->type == LIGHTPROBE_TYPE_GRID || prb->attenuation_type == LIGHTPROBE_SHAPE_BOX) {
- DRW_shgroup_call_dynamic_add(sgl->cube, color, &prb->distgridinf, ob->obmat);
- DRW_shgroup_call_dynamic_add(sgl->cube, color, &prb->distfalloff, ob->obmat);
+ DRW_buffer_add_entry(sgl->cube, color, &prb->distgridinf, ob->obmat);
+ DRW_buffer_add_entry(sgl->cube, color, &prb->distfalloff, ob->obmat);
}
else if (prb->type == LIGHTPROBE_TYPE_PLANAR) {
float rangemat[4][4];
@@ -2646,17 +2638,17 @@ static void DRW_shgroup_lightprobe(OBJECT_Shaders *sh_data,
normalize_v3(rangemat[2]);
mul_v3_fl(rangemat[2], prb->distinf);
- DRW_shgroup_call_dynamic_add(sgl->cube, color, &one, rangemat);
+ DRW_buffer_add_entry(sgl->cube, color, &one, rangemat);
copy_m4_m4(rangemat, ob->obmat);
normalize_v3(rangemat[2]);
mul_v3_fl(rangemat[2], prb->distfalloff);
- DRW_shgroup_call_dynamic_add(sgl->cube, color, &one, rangemat);
+ DRW_buffer_add_entry(sgl->cube, color, &one, rangemat);
}
else {
- DRW_shgroup_call_dynamic_add(sgl->sphere, color, &prb->distgridinf, ob->obmat);
- DRW_shgroup_call_dynamic_add(sgl->sphere, color, &prb->distfalloff, ob->obmat);
+ DRW_buffer_add_entry(sgl->sphere, color, &prb->distgridinf, ob->obmat);
+ DRW_buffer_add_entry(sgl->sphere, color, &prb->distfalloff, ob->obmat);
}
}
@@ -2675,10 +2667,10 @@ static void DRW_shgroup_lightprobe(OBJECT_Shaders *sh_data,
}
if (prb->parallax_type == LIGHTPROBE_SHAPE_BOX) {
- DRW_shgroup_call_dynamic_add(sgl->cube, color, dist, obmat);
+ DRW_buffer_add_entry(sgl->cube, color, dist, obmat);
}
else {
- DRW_shgroup_call_dynamic_add(sgl->sphere, color, dist, obmat);
+ DRW_buffer_add_entry(sgl->sphere, color, dist, obmat);
}
}
}
@@ -2717,9 +2709,8 @@ static void DRW_shgroup_lightprobe(OBJECT_Shaders *sh_data,
normalize_m4_m4(clipmat, ob->obmat);
mul_m4_m4m4(clipmat, clipmat, cubefacemat[i]);
- DRW_shgroup_call_dynamic_add(
- sgl->light_buflimit, color, &prb->clipsta, &prb->clipend, clipmat);
- DRW_shgroup_call_dynamic_add(
+ DRW_buffer_add_entry(sgl->light_buflimit, color, &prb->clipsta, &prb->clipend, clipmat);
+ DRW_buffer_add_entry(
sgl->light_buflimit_points, color, &prb->clipsta, &prb->clipend, clipmat);
}
}
@@ -2727,8 +2718,8 @@ static void DRW_shgroup_lightprobe(OBJECT_Shaders *sh_data,
/* Line and point going to the ground */
if (prb->type == LIGHTPROBE_TYPE_CUBE) {
- DRW_shgroup_call_dynamic_add(sgl->light_groundline, ob->obmat[3]);
- DRW_shgroup_call_dynamic_add(sgl->light_groundpoint, ob->obmat[3]);
+ DRW_buffer_add_entry(sgl->light_groundline, ob->obmat[3]);
+ DRW_buffer_add_entry(sgl->light_groundpoint, ob->obmat[3]);
}
}
@@ -2738,20 +2729,20 @@ static void DRW_shgroup_relationship_lines(OBJECT_ShadingGroupList *sgl,
Object *ob)
{
if (ob->parent && (DRW_object_visibility_in_active_context(ob->parent) & OB_VISIBLE_SELF)) {
- DRW_shgroup_call_dynamic_add(sgl->relationship_lines, ob->runtime.parent_display_origin);
- DRW_shgroup_call_dynamic_add(sgl->relationship_lines, ob->obmat[3]);
+ DRW_buffer_add_entry(sgl->relationship_lines, ob->runtime.parent_display_origin);
+ DRW_buffer_add_entry(sgl->relationship_lines, ob->obmat[3]);
}
if (ob->rigidbody_constraint) {
Object *rbc_ob1 = ob->rigidbody_constraint->ob1;
Object *rbc_ob2 = ob->rigidbody_constraint->ob2;
if (rbc_ob1 && (DRW_object_visibility_in_active_context(rbc_ob1) & OB_VISIBLE_SELF)) {
- DRW_shgroup_call_dynamic_add(sgl->relationship_lines, rbc_ob1->obmat[3]);
- DRW_shgroup_call_dynamic_add(sgl->relationship_lines, ob->obmat[3]);
+ DRW_buffer_add_entry(sgl->relationship_lines, rbc_ob1->obmat[3]);
+ DRW_buffer_add_entry(sgl->relationship_lines, ob->obmat[3]);
}
if (rbc_ob2 && (DRW_object_visibility_in_active_context(rbc_ob2) & OB_VISIBLE_SELF)) {
- DRW_shgroup_call_dynamic_add(sgl->relationship_lines, rbc_ob2->obmat[3]);
- DRW_shgroup_call_dynamic_add(sgl->relationship_lines, ob->obmat[3]);
+ DRW_buffer_add_entry(sgl->relationship_lines, rbc_ob2->obmat[3]);
+ DRW_buffer_add_entry(sgl->relationship_lines, ob->obmat[3]);
}
}
@@ -2783,8 +2774,8 @@ static void DRW_shgroup_relationship_lines(OBJECT_ShadingGroupList *sgl,
}
if (camob) {
- DRW_shgroup_call_dynamic_add(sgl->constraint_lines, camob->obmat[3]);
- DRW_shgroup_call_dynamic_add(sgl->constraint_lines, ob->obmat[3]);
+ DRW_buffer_add_entry(sgl->constraint_lines, camob->obmat[3]);
+ DRW_buffer_add_entry(sgl->constraint_lines, ob->obmat[3]);
}
}
else {
@@ -2805,8 +2796,8 @@ static void DRW_shgroup_relationship_lines(OBJECT_ShadingGroupList *sgl,
unit_m4(ct->matrix);
}
- DRW_shgroup_call_dynamic_add(sgl->constraint_lines, ct->matrix[3]);
- DRW_shgroup_call_dynamic_add(sgl->constraint_lines, ob->obmat[3]);
+ DRW_buffer_add_entry(sgl->constraint_lines, ct->matrix[3]);
+ DRW_buffer_add_entry(sgl->constraint_lines, ob->obmat[3]);
}
if (cti->flush_constraint_targets) {
@@ -2829,32 +2820,32 @@ static void DRW_shgroup_object_center(OBJECT_StorageList *stl,
return;
}
const bool is_library = ob->id.us > 1 || ID_IS_LINKED(ob);
- DRWShadingGroup *shgroup;
+ DRWCallBuffer *buf;
if (ob == OBACT(view_layer)) {
- shgroup = stl->g_data->center_active;
+ buf = stl->g_data->center_active;
}
else if (ob->base_flag & BASE_SELECTED) {
if (is_library) {
- shgroup = stl->g_data->center_selected_lib;
+ buf = stl->g_data->center_selected_lib;
}
else {
- shgroup = stl->g_data->center_selected;
+ buf = stl->g_data->center_selected;
}
}
else if (v3d->flag & V3D_DRAW_CENTERS) {
if (is_library) {
- shgroup = stl->g_data->center_deselected_lib;
+ buf = stl->g_data->center_deselected_lib;
}
else {
- shgroup = stl->g_data->center_deselected;
+ buf = stl->g_data->center_deselected;
}
}
else {
return;
}
- DRW_shgroup_call_dynamic_add(shgroup, ob->obmat[3]);
+ DRW_buffer_add_entry(buf, ob->obmat[3]);
}
static void DRW_shgroup_texture_space(OBJECT_ShadingGroupList *sgl, Object *ob, int theme_id)
@@ -2904,7 +2895,7 @@ static void DRW_shgroup_texture_space(OBJECT_ShadingGroupList *sgl, Object *ob,
float color[4];
UI_GetThemeColor4fv(theme_id, color);
- DRW_shgroup_call_dynamic_add(sgl->texspace, color, &one, tmp);
+ DRW_buffer_add_entry(sgl->texspace, color, &one, tmp);
}
static void DRW_shgroup_bounds(OBJECT_ShadingGroupList *sgl, Object *ob, int theme_id)
@@ -2941,7 +2932,7 @@ static void DRW_shgroup_bounds(OBJECT_ShadingGroupList *sgl, Object *ob, int the
size_to_mat4(tmp, size);
copy_v3_v3(tmp[3], center);
mul_m4_m4m4(tmp, ob->obmat, tmp);
- DRW_shgroup_call_dynamic_add(sgl->cube, color, &one, tmp);
+ DRW_buffer_add_entry(sgl->cube, color, &one, tmp);
break;
case OB_BOUND_SPHERE:
size[0] = max_fff(size[0], size[1], size[2]);
@@ -2949,7 +2940,7 @@ static void DRW_shgroup_bounds(OBJECT_ShadingGroupList *sgl, Object *ob, int the
size_to_mat4(tmp, size);
copy_v3_v3(tmp[3], center);
mul_m4_m4m4(tmp, ob->obmat, tmp);
- DRW_shgroup_call_dynamic_add(sgl->sphere, color, &one, tmp);
+ DRW_buffer_add_entry(sgl->sphere, color, &one, tmp);
break;
case OB_BOUND_CYLINDER:
size[0] = max_ff(size[0], size[1]);
@@ -2957,7 +2948,7 @@ static void DRW_shgroup_bounds(OBJECT_ShadingGroupList *sgl, Object *ob, int the
size_to_mat4(tmp, size);
copy_v3_v3(tmp[3], center);
mul_m4_m4m4(tmp, ob->obmat, tmp);
- DRW_shgroup_call_dynamic_add(sgl->cylinder, color, &one, tmp);
+ DRW_buffer_add_entry(sgl->cylinder, color, &one, tmp);
break;
case OB_BOUND_CONE:
size[0] = max_ff(size[0], size[1]);
@@ -2968,7 +2959,7 @@ static void DRW_shgroup_bounds(OBJECT_ShadingGroupList *sgl, Object *ob, int the
swap_v3_v3(tmp[1], tmp[2]);
tmp[3][2] -= size[2];
mul_m4_m4m4(tmp, ob->obmat, tmp);
- DRW_shgroup_call_dynamic_add(sgl->cone, color, &one, tmp);
+ DRW_buffer_add_entry(sgl->cone, color, &one, tmp);
break;
case OB_BOUND_CAPSULE:
size[0] = max_ff(size[0], size[1]);
@@ -2977,14 +2968,14 @@ static void DRW_shgroup_bounds(OBJECT_ShadingGroupList *sgl, Object *ob, int the
copy_v2_v2(tmp[3], center);
tmp[3][2] = center[2] + max_ff(0.0f, size[2] - size[0]);
mul_m4_m4m4(final_mat, ob->obmat, tmp);
- DRW_shgroup_call_dynamic_add(sgl->capsule_cap, color, &one, final_mat);
+ DRW_buffer_add_entry(sgl->capsule_cap, color, &one, final_mat);
negate_v3(tmp[2]);
tmp[3][2] = center[2] - max_ff(0.0f, size[2] - size[0]);
mul_m4_m4m4(final_mat, ob->obmat, tmp);
- DRW_shgroup_call_dynamic_add(sgl->capsule_cap, color, &one, final_mat);
+ DRW_buffer_add_entry(sgl->capsule_cap, color, &one, final_mat);
tmp[2][2] = max_ff(0.0f, size[2] * 2.0f - size[0] * 2.0f);
mul_m4_m4m4(final_mat, ob->obmat, tmp);
- DRW_shgroup_call_dynamic_add(sgl->capsule_body, color, &one, final_mat);
+ DRW_buffer_add_entry(sgl->capsule_body, color, &one, final_mat);
break;
}
}
@@ -3007,12 +2998,10 @@ static void OBJECT_cache_populate_particles(OBJECT_Shaders *sh_data,
if (draw_as != PART_DRAW_PATH) {
struct GPUBatch *geom = DRW_cache_particles_get_dots(ob, psys);
DRWShadingGroup *shgrp = NULL;
+ struct GPUBatch *shape = NULL;
static float def_prim_col[3] = {0.5f, 0.5f, 0.5f};
static float def_sec_col[3] = {1.0f, 1.0f, 1.0f};
- /* Dummy particle format for instancing to work. */
- DRW_shgroup_instance_format(e_data.particle_format, {{"dummy", DRW_ATTR_FLOAT, 1}});
-
Material *ma = give_current_material(ob, part->omat);
switch (draw_as) {
@@ -3026,35 +3015,29 @@ static void OBJECT_cache_populate_particles(OBJECT_Shaders *sh_data,
DRW_shgroup_call_add(shgrp, geom, mat);
break;
case PART_DRAW_CROSS:
- shgrp = DRW_shgroup_instance_create(sh_data->part_prim,
- psl->particle,
- DRW_cache_particles_get_prim(PART_DRAW_CROSS),
- e_data.particle_format);
+ shgrp = DRW_shgroup_create(sh_data->part_prim, psl->particle);
DRW_shgroup_uniform_texture(shgrp, "ramp", G_draw.ramp);
DRW_shgroup_uniform_vec3(shgrp, "color", ma ? &ma->r : def_prim_col, 1);
DRW_shgroup_uniform_float(shgrp, "draw_size", &part->draw_size, 1);
DRW_shgroup_uniform_bool_copy(shgrp, "screen_space", false);
- DRW_shgroup_instance_batch(shgrp, geom);
+ shape = DRW_cache_particles_get_prim(PART_DRAW_CROSS);
+ DRW_shgroup_call_instances_with_attribs_add(shgrp, shape, NULL, geom);
break;
case PART_DRAW_CIRC:
- shgrp = DRW_shgroup_instance_create(sh_data->part_prim,
- psl->particle,
- DRW_cache_particles_get_prim(PART_DRAW_CIRC),
- e_data.particle_format);
+ shape = DRW_cache_particles_get_prim(PART_DRAW_CIRC);
+ shgrp = DRW_shgroup_create(sh_data->part_prim, psl->particle);
DRW_shgroup_uniform_texture(shgrp, "ramp", G_draw.ramp);
DRW_shgroup_uniform_vec3(shgrp, "color", ma ? &ma->r : def_prim_col, 1);
DRW_shgroup_uniform_float(shgrp, "draw_size", &part->draw_size, 1);
DRW_shgroup_uniform_bool_copy(shgrp, "screen_space", true);
- DRW_shgroup_instance_batch(shgrp, geom);
+ DRW_shgroup_call_instances_with_attribs_add(shgrp, shape, NULL, geom);
break;
case PART_DRAW_AXIS:
- shgrp = DRW_shgroup_instance_create(sh_data->part_axis,
- psl->particle,
- DRW_cache_particles_get_prim(PART_DRAW_AXIS),
- e_data.particle_format);
+ shape = DRW_cache_particles_get_prim(PART_DRAW_AXIS);
+ shgrp = DRW_shgroup_create(sh_data->part_axis, psl->particle);
DRW_shgroup_uniform_float(shgrp, "draw_size", &part->draw_size, 1);
DRW_shgroup_uniform_bool_copy(shgrp, "screen_space", false);
- DRW_shgroup_instance_batch(shgrp, geom);
+ DRW_shgroup_call_instances_with_attribs_add(shgrp, shape, NULL, geom);
break;
default:
break;
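With the dummy particle vertex format removed, the per-instance attributes are now read straight from the particle dot batch. Condensed from the PART_DRAW_CROSS case above ("ob", "psys", "sh_data", "psl" and "part" are the variables of the surrounding function; the NULL argument is passed through exactly as in the patch):

  /* Sketch: "shape" is the primitive being instanced, "geom" supplies the
   * per-instance attributes. */
  struct GPUBatch *shape = DRW_cache_particles_get_prim(PART_DRAW_CROSS);
  struct GPUBatch *geom = DRW_cache_particles_get_dots(ob, psys);
  DRWShadingGroup *shgrp = DRW_shgroup_create(sh_data->part_prim, psl->particle);
  DRW_shgroup_uniform_float(shgrp, "draw_size", &part->draw_size, 1);
  DRW_shgroup_call_instances_with_attribs_add(shgrp, shape, NULL, geom);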
@@ -3460,7 +3443,7 @@ static void OBJECT_cache_populate(void *vedata, Object *ob)
float *color, axes_size = 1.0f;
DRW_object_wire_theme_get(ob, view_layer, &color);
- DRW_shgroup_call_dynamic_add(sgl->empty_axes, color, &axes_size, ob->obmat);
+ DRW_buffer_add_entry(sgl->empty_axes, color, &axes_size, ob->obmat);
}
if ((md = modifiers_findByType(ob, eModifierType_Smoke)) &&