git.blender.org/blender.git
author    Clément Foucault <foucault.clem@gmail.com>  2019-05-13 18:56:20 +0300
committer Clément Foucault <foucault.clem@gmail.com>  2019-05-14 11:57:03 +0300
commit    8bc8a62c57f91326ab3f8850785dce5452b5d703 (patch)
tree      c578b9786bc3e519f36f782cd74abd77ad52b344 /source/blender/draw/intern
parent    20d9cd3a1fbd763dbe002e9baf2e3ba7fbb66f2f (diff)
DRW: Refactor: Use DRWCall to accumulate per instance attributes
This is a big change that cleans up a lot of confusing code:
- The instancing/batching data buffer distribution in draw_instance_data.c.
- The selection & drawing code in draw_manager_exec.c.
- Pretty much all non-mesh object drawing (object_mode.c).

Most of the changes are just renaming, but there is still a chance a typo might have sneaked through.

The Batching/Instancing shading groups are replaced by DRWCallBuffers. This is cleaner and conceptually more in line with what a DRWShadingGroup should be. There is still some minor confusion in draw_common.c, where some functions take a shgroup as input and some don't.
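To illustrate the new pattern (a minimal sketch against the API declared in DRW_render.h below, not code from this commit; `shader`, `pass`, `format`, `geom`, `obmat` and `color` are placeholder names): instead of creating a dedicated instancing/batching shading group and feeding it with DRW_shgroup_call_dynamic_add(), callers now create a regular shading group, attach a DRWCallBuffer to it, and accumulate per-instance attributes into that buffer.

    /* Sketch only: the pass, shader, vertex format and batch are assumed to exist. */
    DRWShadingGroup *grp = DRW_shgroup_create(shader, pass);
    DRWCallBuffer *buf = DRW_shgroup_call_buffer_instance_add(grp, format, geom);
    /* One entry per instance; attributes must match the order declared in `format`. */
    DRW_buffer_add_entry(buf, obmat, color);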
Diffstat (limited to 'source/blender/draw/intern')
-rw-r--r-- source/blender/draw/intern/DRW_render.h         |  75
-rw-r--r-- source/blender/draw/intern/draw_armature.c      | 204
-rw-r--r-- source/blender/draw/intern/draw_common.c        | 310
-rw-r--r-- source/blender/draw/intern/draw_common.h        | 120
-rw-r--r-- source/blender/draw/intern/draw_instance_data.c | 343
-rw-r--r-- source/blender/draw/intern/draw_instance_data.h |  23
-rw-r--r-- source/blender/draw/intern/draw_manager.c       |   1
-rw-r--r-- source/blender/draw/intern/draw_manager.h       |  51
-rw-r--r-- source/blender/draw/intern/draw_manager_data.c  | 332
-rw-r--r-- source/blender/draw/intern/draw_manager_exec.c  | 150
10 files changed, 648 insertions(+), 961 deletions(-)
diff --git a/source/blender/draw/intern/DRW_render.h b/source/blender/draw/intern/DRW_render.h
index 1c943c18ed3..401ed50c1dc 100644
--- a/source/blender/draw/intern/DRW_render.h
+++ b/source/blender/draw/intern/DRW_render.h
@@ -44,6 +44,7 @@
#include "DNA_world_types.h"
#include "GPU_framebuffer.h"
+#include "GPU_primitive.h"
#include "GPU_texture.h"
#include "GPU_shader.h"
@@ -83,6 +84,9 @@ typedef struct DRWPass DRWPass;
typedef struct DRWShadingGroup DRWShadingGroup;
typedef struct DRWUniform DRWUniform;
+/* Opaque type to avoid usage as a DRWCall but it is exactly the same thing. */
+typedef struct DRWCallBuffer DRWCallBuffer;
+
/* TODO Put it somewhere else? */
typedef struct BoundSphere {
float center[3], radius;
@@ -319,8 +323,8 @@ typedef enum {
DRW_STATE_DEPTH_GREATER_EQUAL = (1 << 7),
DRW_STATE_CULL_BACK = (1 << 8),
DRW_STATE_CULL_FRONT = (1 << 9),
- DRW_STATE_WIRE = (1 << 10),
- DRW_STATE_POINT = (1 << 11),
+ DRW_STATE_WIRE = (1 << 10), /* TODO remove */
+ DRW_STATE_POINT = (1 << 11), /* TODO remove */
/** Polygon offset. Does not work with lines and points. */
DRW_STATE_OFFSET_POSITIVE = (1 << 12),
/** Polygon offset. Does not work with lines and points. */
@@ -374,19 +378,11 @@ struct GPUVertFormat *DRW_shgroup_instance_format_array(const DRWInstanceAttrFor
} \
} while (0)
+/* TODO(fclem): Remove the _create suffix. */
DRWShadingGroup *DRW_shgroup_create(struct GPUShader *shader, DRWPass *pass);
DRWShadingGroup *DRW_shgroup_create_sub(DRWShadingGroup *shgroup);
DRWShadingGroup *DRW_shgroup_material_create(struct GPUMaterial *material, DRWPass *pass);
-DRWShadingGroup *DRW_shgroup_instance_create(struct GPUShader *shader,
- DRWPass *pass,
- struct GPUBatch *geom,
- struct GPUVertFormat *format);
-DRWShadingGroup *DRW_shgroup_point_batch_create(struct GPUShader *shader, DRWPass *pass);
-DRWShadingGroup *DRW_shgroup_line_batch_create_with_format(struct GPUShader *shader,
- DRWPass *pass,
- struct GPUVertFormat *format);
-DRWShadingGroup *DRW_shgroup_line_batch_create(struct GPUShader *shader, DRWPass *pass);
DRWShadingGroup *DRW_shgroup_transform_feedback_create(struct GPUShader *shader,
DRWPass *pass,
struct GPUVertBuf *tf_target);
@@ -394,20 +390,17 @@ DRWShadingGroup *DRW_shgroup_transform_feedback_create(struct GPUShader *shader,
/* return final visibility */
typedef bool(DRWCallVisibilityFn)(bool vis_in, void *user_data);
-void DRW_shgroup_instance_batch(DRWShadingGroup *shgroup, struct GPUBatch *batch);
-
-void DRW_shgroup_call_add(DRWShadingGroup *shgroup, struct GPUBatch *geom, float (*obmat)[4]);
+/* TODO(fclem): Remove the _add suffix. */
+void DRW_shgroup_call_add(DRWShadingGroup *sh, struct GPUBatch *geom, float (*obmat)[4]);
void DRW_shgroup_call_range_add(
- DRWShadingGroup *shgroup, struct GPUBatch *geom, float (*obmat)[4], uint v_sta, uint v_count);
-void DRW_shgroup_call_procedural_points_add(DRWShadingGroup *shgroup,
- uint point_len,
- float (*obmat)[4]);
-void DRW_shgroup_call_procedural_lines_add(DRWShadingGroup *shgroup,
- uint line_count,
- float (*obmat)[4]);
-void DRW_shgroup_call_procedural_triangles_add(DRWShadingGroup *shgroup,
- uint tria_count,
+ DRWShadingGroup *sh, struct GPUBatch *geom, float (*obmat)[4], uint v_sta, uint v_ct);
+
+void DRW_shgroup_call_procedural_points_add(DRWShadingGroup *sh, uint point_ct, float (*obmat)[4]);
+void DRW_shgroup_call_procedural_lines_add(DRWShadingGroup *sh, uint line_ct, float (*obmat)[4]);
+void DRW_shgroup_call_procedural_triangles_add(DRWShadingGroup *sh,
+ uint tri_ct,
float (*obmat)[4]);
+
void DRW_shgroup_call_object_add_ex(DRWShadingGroup *shgroup,
struct GPUBatch *geom,
struct Object *ob,
@@ -422,31 +415,33 @@ void DRW_shgroup_call_object_add_with_callback(DRWShadingGroup *shgroup,
DRWCallVisibilityFn *callback,
void *user_data);
-void DRW_shgroup_call_sculpt_add(DRWShadingGroup *shading_group,
- Object *object,
- bool use_wire,
- bool use_mask,
- bool use_vert_color);
-void DRW_shgroup_call_sculpt_with_materials_add(DRWShadingGroup **shgroups,
- Object *ob,
- bool use_vcol);
-
-/* Used for drawing a batch with instancing without instance attributes. */
void DRW_shgroup_call_instances_add(DRWShadingGroup *shgroup,
struct GPUBatch *geom,
float (*obmat)[4],
uint count);
-void DRW_shgroup_call_dynamic_add_array(DRWShadingGroup *shgroup,
- const void *attr[],
- uint attr_len);
-#define DRW_shgroup_call_dynamic_add(shgroup, ...) \
+void DRW_shgroup_call_instances_with_attribs_add(DRWShadingGroup *shgroup,
+ struct GPUBatch *geom,
+ float (*obmat)[4],
+ struct GPUBatch *inst_attributes);
+
+void DRW_shgroup_call_sculpt_add(DRWShadingGroup *sh, Object *ob, bool wire, bool mask, bool vcol);
+void DRW_shgroup_call_sculpt_with_materials_add(DRWShadingGroup **sh, Object *ob, bool vcol);
+
+DRWCallBuffer *DRW_shgroup_call_buffer_add(DRWShadingGroup *shading_group,
+ struct GPUVertFormat *format,
+ GPUPrimType prim_type);
+DRWCallBuffer *DRW_shgroup_call_buffer_instance_add(DRWShadingGroup *shading_group,
+ struct GPUVertFormat *format,
+ struct GPUBatch *geom);
+
+void DRW_buffer_add_entry_array(DRWCallBuffer *buffer, const void *attr[], uint attr_len);
+
+#define DRW_buffer_add_entry(buffer, ...) \
do { \
const void *array[] = {__VA_ARGS__}; \
- DRW_shgroup_call_dynamic_add_array(shgroup, array, (sizeof(array) / sizeof(*array))); \
+ DRW_buffer_add_entry_array(buffer, array, (sizeof(array) / sizeof(*array))); \
} while (0)
-uint DRW_shgroup_get_instance_count(const DRWShadingGroup *shgroup);
-
void DRW_shgroup_state_enable(DRWShadingGroup *shgroup, DRWState state);
void DRW_shgroup_state_disable(DRWShadingGroup *shgroup, DRWState state);
void DRW_shgroup_stencil_mask(DRWShadingGroup *shgroup, uint mask);
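For reference, DRW_buffer_add_entry() above is a thin convenience macro: it packs its variadic attribute pointers into a stack array of const void * and forwards the element count, so each entry supplies one pointer per attribute, in the order the GPUVertFormat declares them. Hand-expanded sketch (the attribute names are placeholders):

    /* DRW_buffer_add_entry(buf, final_bonemat, outline_color) expands to roughly: */
    const void *array[] = {final_bonemat, outline_color};
    DRW_buffer_add_entry_array(buf, array, sizeof(array) / sizeof(*array)); /* attr_len == 2 */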
diff --git a/source/blender/draw/intern/draw_armature.c b/source/blender/draw/intern/draw_armature.c
index 6361ff63fd4..3f651b27dd0 100644
--- a/source/blender/draw/intern/draw_armature.c
+++ b/source/blender/draw/intern/draw_armature.c
@@ -59,26 +59,26 @@ static struct {
/* Current armature object */
Object *ob;
/* Reset when changing current_armature */
- DRWShadingGroup *bone_octahedral_solid;
- DRWShadingGroup *bone_octahedral_wire;
- DRWShadingGroup *bone_octahedral_outline;
- DRWShadingGroup *bone_box_solid;
- DRWShadingGroup *bone_box_wire;
- DRWShadingGroup *bone_box_outline;
- DRWShadingGroup *bone_wire;
- DRWShadingGroup *bone_stick;
- DRWShadingGroup *bone_dof_sphere;
- DRWShadingGroup *bone_dof_lines;
- DRWShadingGroup *bone_envelope_solid;
- DRWShadingGroup *bone_envelope_distance;
- DRWShadingGroup *bone_envelope_wire;
- DRWShadingGroup *bone_point_solid;
- DRWShadingGroup *bone_point_wire;
- DRWShadingGroup *bone_axes;
- DRWShadingGroup *lines_relationship;
- DRWShadingGroup *lines_ik;
- DRWShadingGroup *lines_ik_no_target;
- DRWShadingGroup *lines_ik_spline;
+ DRWCallBuffer *bone_octahedral_solid;
+ DRWCallBuffer *bone_octahedral_wire;
+ DRWCallBuffer *bone_octahedral_outline;
+ DRWCallBuffer *bone_box_solid;
+ DRWCallBuffer *bone_box_wire;
+ DRWCallBuffer *bone_box_outline;
+ DRWCallBuffer *bone_wire;
+ DRWCallBuffer *bone_stick;
+ DRWCallBuffer *bone_dof_sphere;
+ DRWCallBuffer *bone_dof_lines;
+ DRWCallBuffer *bone_envelope_solid;
+ DRWCallBuffer *bone_envelope_distance;
+ DRWCallBuffer *bone_envelope_wire;
+ DRWCallBuffer *bone_point_solid;
+ DRWCallBuffer *bone_point_wire;
+ DRWCallBuffer *bone_axes;
+ DRWCallBuffer *lines_relationship;
+ DRWCallBuffer *lines_ik;
+ DRWCallBuffer *lines_ik_no_target;
+ DRWCallBuffer *lines_ik_spline;
DRWArmaturePasses passes;
@@ -122,22 +122,21 @@ static void drw_shgroup_bone_octahedral(const float (*bone_mat)[4],
{
if (g_data.bone_octahedral_outline == NULL) {
struct GPUBatch *geom = DRW_cache_bone_octahedral_wire_get();
- g_data.bone_octahedral_outline = shgroup_instance_bone_shape_outline(
+ g_data.bone_octahedral_outline = buffer_instance_bone_shape_outline(
g_data.passes.bone_outline, geom, sh_cfg);
}
if (g_data.bone_octahedral_solid == NULL && g_data.passes.bone_solid != NULL) {
struct GPUBatch *geom = DRW_cache_bone_octahedral_get();
- g_data.bone_octahedral_solid = shgroup_instance_bone_shape_solid(
+ g_data.bone_octahedral_solid = buffer_instance_bone_shape_solid(
g_data.passes.bone_solid, geom, g_data.transparent, sh_cfg);
}
float final_bonemat[4][4];
mul_m4_m4m4(final_bonemat, g_data.ob->obmat, bone_mat);
if (g_data.bone_octahedral_solid != NULL) {
- DRW_shgroup_call_dynamic_add(
- g_data.bone_octahedral_solid, final_bonemat, bone_color, hint_color);
+ DRW_buffer_add_entry(g_data.bone_octahedral_solid, final_bonemat, bone_color, hint_color);
}
if (outline_color[3] > 0.0f) {
- DRW_shgroup_call_dynamic_add(g_data.bone_octahedral_outline, final_bonemat, outline_color);
+ DRW_buffer_add_entry(g_data.bone_octahedral_outline, final_bonemat, outline_color);
}
}
@@ -150,21 +149,21 @@ static void drw_shgroup_bone_box(const float (*bone_mat)[4],
{
if (g_data.bone_box_wire == NULL) {
struct GPUBatch *geom = DRW_cache_bone_box_wire_get();
- g_data.bone_box_outline = shgroup_instance_bone_shape_outline(
+ g_data.bone_box_outline = buffer_instance_bone_shape_outline(
g_data.passes.bone_outline, geom, sh_cfg);
}
if (g_data.bone_box_solid == NULL && g_data.passes.bone_solid != NULL) {
struct GPUBatch *geom = DRW_cache_bone_box_get();
- g_data.bone_box_solid = shgroup_instance_bone_shape_solid(
+ g_data.bone_box_solid = buffer_instance_bone_shape_solid(
g_data.passes.bone_solid, geom, g_data.transparent, sh_cfg);
}
float final_bonemat[4][4];
mul_m4_m4m4(final_bonemat, g_data.ob->obmat, bone_mat);
if (g_data.bone_box_solid != NULL) {
- DRW_shgroup_call_dynamic_add(g_data.bone_box_solid, final_bonemat, bone_color, hint_color);
+ DRW_buffer_add_entry(g_data.bone_box_solid, final_bonemat, bone_color, hint_color);
}
if (outline_color[3] > 0.0f) {
- DRW_shgroup_call_dynamic_add(g_data.bone_box_outline, final_bonemat, outline_color);
+ DRW_buffer_add_entry(g_data.bone_box_outline, final_bonemat, outline_color);
}
}
@@ -174,15 +173,15 @@ static void drw_shgroup_bone_wire(const float (*bone_mat)[4],
const eGPUShaderConfig sh_cfg)
{
if (g_data.bone_wire == NULL) {
- g_data.bone_wire = shgroup_dynlines_flat_color(g_data.passes.bone_wire, sh_cfg);
+ g_data.bone_wire = buffer_dynlines_flat_color(g_data.passes.bone_wire, sh_cfg);
}
float head[3], tail[3];
mul_v3_m4v3(head, g_data.ob->obmat, bone_mat[3]);
- DRW_shgroup_call_dynamic_add(g_data.bone_wire, head, color);
+ DRW_buffer_add_entry(g_data.bone_wire, head, color);
add_v3_v3v3(tail, bone_mat[3], bone_mat[1]);
mul_m4_v3(g_data.ob->obmat, tail);
- DRW_shgroup_call_dynamic_add(g_data.bone_wire, tail, color);
+ DRW_buffer_add_entry(g_data.bone_wire, tail, color);
}
/* Stick */
@@ -194,12 +193,12 @@ static void drw_shgroup_bone_stick(const float (*bone_mat)[4],
const eGPUShaderConfig sh_cfg)
{
if (g_data.bone_stick == NULL) {
- g_data.bone_stick = shgroup_instance_bone_stick(g_data.passes.bone_wire, sh_cfg);
+ g_data.bone_stick = buffer_instance_bone_stick(g_data.passes.bone_wire, sh_cfg);
}
float final_bonemat[4][4], tail[4];
mul_m4_m4m4(final_bonemat, g_data.ob->obmat, bone_mat);
add_v3_v3v3(tail, final_bonemat[3], final_bonemat[1]);
- DRW_shgroup_call_dynamic_add(
+ DRW_buffer_add_entry(
g_data.bone_stick, final_bonemat[3], tail, col_wire, col_bone, col_head, col_tail);
}
@@ -212,7 +211,7 @@ static void drw_shgroup_bone_envelope_distance(const float (*bone_mat)[4],
{
if (g_data.passes.bone_envelope != NULL) {
if (g_data.bone_envelope_distance == NULL) {
- g_data.bone_envelope_distance = shgroup_instance_bone_envelope_distance(
+ g_data.bone_envelope_distance = buffer_instance_bone_envelope_distance(
g_data.passes.bone_envelope, sh_cfg);
/* passes.bone_envelope should have the DRW_STATE_CULL_FRONT state enabled. */
}
@@ -227,7 +226,7 @@ static void drw_shgroup_bone_envelope_distance(const float (*bone_mat)[4],
head_sphere[3] += *distance;
tail_sphere[3] = *radius_tail;
tail_sphere[3] += *distance;
- DRW_shgroup_call_dynamic_add(
+ DRW_buffer_add_entry(
g_data.bone_envelope_distance, head_sphere, tail_sphere, final_bonemat[0]);
}
}
@@ -241,22 +240,19 @@ static void drw_shgroup_bone_envelope(const float (*bone_mat)[4],
const eGPUShaderConfig sh_cfg)
{
if (g_data.bone_point_wire == NULL) {
- g_data.bone_point_wire = shgroup_instance_bone_sphere_outline(g_data.passes.bone_wire, sh_cfg);
+ g_data.bone_point_wire = buffer_instance_bone_sphere_outline(g_data.passes.bone_wire, sh_cfg);
}
if (g_data.bone_point_solid == NULL && g_data.passes.bone_solid != NULL) {
- g_data.bone_point_solid = shgroup_instance_bone_sphere_solid(
+ g_data.bone_point_solid = buffer_instance_bone_sphere_solid(
g_data.passes.bone_solid, g_data.transparent, sh_cfg);
}
if (g_data.bone_envelope_wire == NULL) {
- g_data.bone_envelope_wire = shgroup_instance_bone_envelope_outline(g_data.passes.bone_wire,
- sh_cfg);
+ g_data.bone_envelope_wire = buffer_instance_bone_envelope_outline(g_data.passes.bone_wire,
+ sh_cfg);
}
if (g_data.bone_envelope_solid == NULL && g_data.passes.bone_solid != NULL) {
- g_data.bone_envelope_solid = shgroup_instance_bone_envelope_solid(
+ g_data.bone_envelope_solid = buffer_instance_bone_envelope_solid(
g_data.passes.bone_solid, g_data.transparent, sh_cfg);
- /* We can have a lot of overdraw if we don't do this. Also envelope are not subject to
- * inverted matrix. */
- DRW_shgroup_state_enable(g_data.bone_envelope_solid, DRW_STATE_CULL_BACK);
}
float head_sphere[4] = {0.0f, 0.0f, 0.0f, 1.0f}, tail_sphere[4] = {0.0f, 1.0f, 0.0f, 1.0f};
@@ -274,10 +270,10 @@ static void drw_shgroup_bone_envelope(const float (*bone_mat)[4],
tmp[3][3] = 1.0f;
copy_v3_v3(tmp[3], tail_sphere);
if (g_data.bone_point_solid != NULL) {
- DRW_shgroup_call_dynamic_add(g_data.bone_point_solid, tmp, bone_color, hint_color);
+ DRW_buffer_add_entry(g_data.bone_point_solid, tmp, bone_color, hint_color);
}
if (outline_color[3] > 0.0f) {
- DRW_shgroup_call_dynamic_add(g_data.bone_point_wire, tmp, outline_color);
+ DRW_buffer_add_entry(g_data.bone_point_wire, tmp, outline_color);
}
}
else if (tail_sphere[3] < 0.0f) {
@@ -287,10 +283,10 @@ static void drw_shgroup_bone_envelope(const float (*bone_mat)[4],
tmp[3][3] = 1.0f;
copy_v3_v3(tmp[3], head_sphere);
if (g_data.bone_point_solid != NULL) {
- DRW_shgroup_call_dynamic_add(g_data.bone_point_solid, tmp, bone_color, hint_color);
+ DRW_buffer_add_entry(g_data.bone_point_solid, tmp, bone_color, hint_color);
}
if (outline_color[3] > 0.0f) {
- DRW_shgroup_call_dynamic_add(g_data.bone_point_wire, tmp, outline_color);
+ DRW_buffer_add_entry(g_data.bone_point_wire, tmp, outline_color);
}
}
else {
@@ -307,15 +303,15 @@ static void drw_shgroup_bone_envelope(const float (*bone_mat)[4],
interp_v4_v4v4(head_sphere, tail_sphere, head_sphere, fac_head);
interp_v4_v4v4(tail_sphere, tmp_sphere, tail_sphere, fac_tail);
if (g_data.bone_envelope_solid != NULL) {
- DRW_shgroup_call_dynamic_add(g_data.bone_envelope_solid,
- head_sphere,
- tail_sphere,
- bone_color,
- hint_color,
- final_bonemat[0]);
+ DRW_buffer_add_entry(g_data.bone_envelope_solid,
+ head_sphere,
+ tail_sphere,
+ bone_color,
+ hint_color,
+ final_bonemat[0]);
}
if (outline_color[3] > 0.0f) {
- DRW_shgroup_call_dynamic_add(
+ DRW_buffer_add_entry(
g_data.bone_envelope_wire, head_sphere, tail_sphere, outline_color, final_bonemat[0]);
}
}
@@ -327,10 +323,10 @@ static void drw_shgroup_bone_envelope(const float (*bone_mat)[4],
tmp[3][3] = 1.0f;
copy_v3_v3(tmp[3], tmp_sphere);
if (g_data.bone_point_solid != NULL) {
- DRW_shgroup_call_dynamic_add(g_data.bone_point_solid, tmp, bone_color, hint_color);
+ DRW_buffer_add_entry(g_data.bone_point_solid, tmp, bone_color, hint_color);
}
if (outline_color[3] > 0.0f) {
- DRW_shgroup_call_dynamic_add(g_data.bone_point_wire, tmp, outline_color);
+ DRW_buffer_add_entry(g_data.bone_point_wire, tmp, outline_color);
}
}
}
@@ -364,50 +360,50 @@ static void drw_shgroup_bone_custom_solid(const float (*bone_mat)[4],
BLI_assert(g_data.passes.custom_shapes != NULL);
if (surf && g_data.passes.bone_solid != NULL) {
- DRWShadingGroup *shgrp_geom_solid = BLI_ghash_lookup(g_data.passes.custom_shapes, surf);
+ DRWCallBuffer *buf_geom_solid = BLI_ghash_lookup(g_data.passes.custom_shapes, surf);
- if (shgrp_geom_solid == NULL) {
+ if (buf_geom_solid == NULL) {
/* TODO(fclem) needs to be moved elsewhere. */
drw_batch_cache_generate_requested(custom);
/* NOTE! g_data.transparent require a separate shading group if the
* object is transparent. This is done by passing a different ghash
* for transparent armature in pose mode. */
- shgrp_geom_solid = shgroup_instance_bone_shape_solid(
+ buf_geom_solid = buffer_instance_bone_shape_solid(
g_data.passes.bone_solid, surf, g_data.transparent, sh_cfg);
- BLI_ghash_insert(g_data.passes.custom_shapes, surf, shgrp_geom_solid);
+ BLI_ghash_insert(g_data.passes.custom_shapes, surf, buf_geom_solid);
}
- DRW_shgroup_call_dynamic_add(shgrp_geom_solid, final_bonemat, bone_color, hint_color);
+ DRW_buffer_add_entry(buf_geom_solid, final_bonemat, bone_color, hint_color);
}
if (edges && outline_color[3] > 0.0f) {
- DRWShadingGroup *shgrp_geom_wire = BLI_ghash_lookup(g_data.passes.custom_shapes, edges);
+ DRWCallBuffer *buf_geom_wire = BLI_ghash_lookup(g_data.passes.custom_shapes, edges);
- if (shgrp_geom_wire == NULL) {
+ if (buf_geom_wire == NULL) {
/* TODO(fclem) needs to be moved elsewhere. */
drw_batch_cache_generate_requested(custom);
- shgrp_geom_wire = shgroup_instance_bone_shape_outline(
+ buf_geom_wire = buffer_instance_bone_shape_outline(
g_data.passes.bone_outline, edges, sh_cfg);
- BLI_ghash_insert(g_data.passes.custom_shapes, edges, shgrp_geom_wire);
+ BLI_ghash_insert(g_data.passes.custom_shapes, edges, buf_geom_wire);
}
- DRW_shgroup_call_dynamic_add(shgrp_geom_wire, final_bonemat, outline_color);
+ DRW_buffer_add_entry(buf_geom_wire, final_bonemat, outline_color);
}
if (ledges) {
- DRWShadingGroup *shgrp_geom_ledges = BLI_ghash_lookup(g_data.passes.custom_shapes, ledges);
+ DRWCallBuffer *buf_geom_ledges = BLI_ghash_lookup(g_data.passes.custom_shapes, ledges);
- if (shgrp_geom_ledges == NULL) {
+ if (buf_geom_ledges == NULL) {
/* TODO(fclem) needs to be moved elsewhere. */
drw_batch_cache_generate_requested(custom);
- shgrp_geom_ledges = shgroup_instance_wire(g_data.passes.bone_wire, ledges);
+ buf_geom_ledges = buffer_instance_wire(g_data.passes.bone_wire, ledges);
- BLI_ghash_insert(g_data.passes.custom_shapes, ledges, shgrp_geom_ledges);
+ BLI_ghash_insert(g_data.passes.custom_shapes, ledges, buf_geom_ledges);
}
float final_color[4] = {outline_color[0], outline_color[1], outline_color[2], 1.0f};
- DRW_shgroup_call_dynamic_add(shgrp_geom_ledges, final_bonemat, final_color);
+ DRW_buffer_add_entry(buf_geom_ledges, final_bonemat, final_color);
}
}
@@ -422,20 +418,20 @@ static void drw_shgroup_bone_custom_wire(const float (*bone_mat)[4],
struct GPUBatch *geom = DRW_cache_object_all_edges_get(custom);
if (geom) {
- DRWShadingGroup *shgrp_geom_wire = BLI_ghash_lookup(g_data.passes.custom_shapes, geom);
+ DRWCallBuffer *buf_geom_wire = BLI_ghash_lookup(g_data.passes.custom_shapes, geom);
- if (shgrp_geom_wire == NULL) {
+ if (buf_geom_wire == NULL) {
/* TODO(fclem) needs to be moved elsewhere. */
drw_batch_cache_generate_requested(custom);
- shgrp_geom_wire = shgroup_instance_wire(g_data.passes.bone_wire, geom);
+ buf_geom_wire = buffer_instance_wire(g_data.passes.bone_wire, geom);
- BLI_ghash_insert(g_data.passes.custom_shapes, geom, shgrp_geom_wire);
+ BLI_ghash_insert(g_data.passes.custom_shapes, geom, buf_geom_wire);
}
float final_color[4] = {color[0], color[1], color[2], 1.0f};
float final_bonemat[4][4];
mul_m4_m4m4(final_bonemat, g_data.ob->obmat, bone_mat);
- DRW_shgroup_call_dynamic_add(shgrp_geom_wire, final_bonemat, final_color);
+ DRW_buffer_add_entry(buf_geom_wire, final_bonemat, final_color);
}
}
@@ -447,19 +443,19 @@ static void drw_shgroup_bone_point(const float (*bone_mat)[4],
const eGPUShaderConfig sh_cfg)
{
if (g_data.bone_point_wire == NULL) {
- g_data.bone_point_wire = shgroup_instance_bone_sphere_outline(g_data.passes.bone_wire, sh_cfg);
+ g_data.bone_point_wire = buffer_instance_bone_sphere_outline(g_data.passes.bone_wire, sh_cfg);
}
if (g_data.bone_point_solid == NULL && g_data.passes.bone_solid != NULL) {
- g_data.bone_point_solid = shgroup_instance_bone_sphere_solid(
+ g_data.bone_point_solid = buffer_instance_bone_sphere_solid(
g_data.passes.bone_solid, g_data.transparent, sh_cfg);
}
float final_bonemat[4][4];
mul_m4_m4m4(final_bonemat, g_data.ob->obmat, bone_mat);
if (g_data.bone_point_solid != NULL) {
- DRW_shgroup_call_dynamic_add(g_data.bone_point_solid, final_bonemat, bone_color, hint_color);
+ DRW_buffer_add_entry(g_data.bone_point_solid, final_bonemat, bone_color, hint_color);
}
if (outline_color[3] > 0.0f) {
- DRW_shgroup_call_dynamic_add(g_data.bone_point_wire, final_bonemat, outline_color);
+ DRW_buffer_add_entry(g_data.bone_point_wire, final_bonemat, outline_color);
}
}
@@ -469,11 +465,11 @@ static void drw_shgroup_bone_axes(const float (*bone_mat)[4],
const eGPUShaderConfig sh_cfg)
{
if (g_data.bone_axes == NULL) {
- g_data.bone_axes = shgroup_instance_bone_axes(g_data.passes.bone_axes, sh_cfg);
+ g_data.bone_axes = buffer_instance_bone_axes(g_data.passes.bone_axes, sh_cfg);
}
float final_bonemat[4][4];
mul_m4_m4m4(final_bonemat, g_data.ob->obmat, bone_mat);
- DRW_shgroup_call_dynamic_add(g_data.bone_axes, final_bonemat, color);
+ DRW_buffer_add_entry(g_data.bone_axes, final_bonemat, color);
}
/* Relationship lines */
@@ -482,15 +478,15 @@ static void drw_shgroup_bone_relationship_lines(const float start[3],
const eGPUShaderConfig sh_cfg)
{
if (g_data.lines_relationship == NULL) {
- g_data.lines_relationship = shgroup_dynlines_dashed_uniform_color(
+ g_data.lines_relationship = buffer_dynlines_dashed_uniform_color(
g_data.passes.relationship_lines, g_theme.wire_color, sh_cfg);
}
/* reverse order to have less stipple overlap */
float v[3];
mul_v3_m4v3(v, g_data.ob->obmat, end);
- DRW_shgroup_call_dynamic_add(g_data.lines_relationship, v);
+ DRW_buffer_add_entry(g_data.lines_relationship, v);
mul_v3_m4v3(v, g_data.ob->obmat, start);
- DRW_shgroup_call_dynamic_add(g_data.lines_relationship, v);
+ DRW_buffer_add_entry(g_data.lines_relationship, v);
}
static void drw_shgroup_bone_ik_lines(const float start[3],
@@ -499,15 +495,15 @@ static void drw_shgroup_bone_ik_lines(const float start[3],
{
if (g_data.lines_ik == NULL) {
static float fcolor[4] = {0.8f, 0.5f, 0.0f, 1.0f}; /* add theme! */
- g_data.lines_ik = shgroup_dynlines_dashed_uniform_color(
+ g_data.lines_ik = buffer_dynlines_dashed_uniform_color(
g_data.passes.relationship_lines, fcolor, sh_cfg);
}
/* reverse order to have less stipple overlap */
float v[3];
mul_v3_m4v3(v, g_data.ob->obmat, end);
- DRW_shgroup_call_dynamic_add(g_data.lines_ik, v);
+ DRW_buffer_add_entry(g_data.lines_ik, v);
mul_v3_m4v3(v, g_data.ob->obmat, start);
- DRW_shgroup_call_dynamic_add(g_data.lines_ik, v);
+ DRW_buffer_add_entry(g_data.lines_ik, v);
}
static void drw_shgroup_bone_ik_no_target_lines(const float start[3],
@@ -516,15 +512,15 @@ static void drw_shgroup_bone_ik_no_target_lines(const float start[3],
{
if (g_data.lines_ik_no_target == NULL) {
static float fcolor[4] = {0.8f, 0.8f, 0.2f, 1.0f}; /* add theme! */
- g_data.lines_ik_no_target = shgroup_dynlines_dashed_uniform_color(
+ g_data.lines_ik_no_target = buffer_dynlines_dashed_uniform_color(
g_data.passes.relationship_lines, fcolor, sh_cfg);
}
/* reverse order to have less stipple overlap */
float v[3];
mul_v3_m4v3(v, g_data.ob->obmat, end);
- DRW_shgroup_call_dynamic_add(g_data.lines_ik_no_target, v);
+ DRW_buffer_add_entry(g_data.lines_ik_no_target, v);
mul_v3_m4v3(v, g_data.ob->obmat, start);
- DRW_shgroup_call_dynamic_add(g_data.lines_ik_no_target, v);
+ DRW_buffer_add_entry(g_data.lines_ik_no_target, v);
}
static void drw_shgroup_bone_ik_spline_lines(const float start[3],
@@ -533,15 +529,15 @@ static void drw_shgroup_bone_ik_spline_lines(const float start[3],
{
if (g_data.lines_ik_spline == NULL) {
static float fcolor[4] = {0.8f, 0.8f, 0.2f, 1.0f}; /* add theme! */
- g_data.lines_ik_spline = shgroup_dynlines_dashed_uniform_color(
+ g_data.lines_ik_spline = buffer_dynlines_dashed_uniform_color(
g_data.passes.relationship_lines, fcolor, sh_cfg);
}
/* reverse order to have less stipple overlap */
float v[3];
mul_v3_m4v3(v, g_data.ob->obmat, end);
- DRW_shgroup_call_dynamic_add(g_data.lines_ik_spline, v);
+ DRW_buffer_add_entry(g_data.lines_ik_spline, v);
mul_v3_m4v3(v, g_data.ob->obmat, start);
- DRW_shgroup_call_dynamic_add(g_data.lines_ik_spline, v);
+ DRW_buffer_add_entry(g_data.lines_ik_spline, v);
}
/** \} */
@@ -1645,12 +1641,10 @@ static void draw_bone_dofs(bPoseChannel *pchan)
}
if (g_data.bone_dof_sphere == NULL) {
- g_data.bone_dof_lines = shgroup_instance_bone_dof(g_data.passes.bone_wire,
- DRW_cache_bone_dof_lines_get());
- g_data.bone_dof_sphere = shgroup_instance_bone_dof(g_data.passes.bone_envelope,
- DRW_cache_bone_dof_sphere_get());
- DRW_shgroup_state_enable(g_data.bone_dof_sphere, DRW_STATE_BLEND);
- DRW_shgroup_state_disable(g_data.bone_dof_sphere, DRW_STATE_CULL_FRONT);
+ g_data.bone_dof_lines = buffer_instance_bone_dof(
+ g_data.passes.bone_wire, DRW_cache_bone_dof_lines_get(), false);
+ g_data.bone_dof_sphere = buffer_instance_bone_dof(
+ g_data.passes.bone_envelope, DRW_cache_bone_dof_sphere_get(), true);
}
/* *0.5f here comes from M_PI/360.0f when rotations were still in degrees */
@@ -1683,20 +1677,20 @@ static void draw_bone_dofs(bPoseChannel *pchan)
amax[0] = xminmax[1];
amin[1] = zminmax[0];
amax[1] = zminmax[1];
- DRW_shgroup_call_dynamic_add(g_data.bone_dof_sphere, final_bonemat, col_sphere, amin, amax);
- DRW_shgroup_call_dynamic_add(g_data.bone_dof_lines, final_bonemat, col_lines, amin, amax);
+ DRW_buffer_add_entry(g_data.bone_dof_sphere, final_bonemat, col_sphere, amin, amax);
+ DRW_buffer_add_entry(g_data.bone_dof_lines, final_bonemat, col_lines, amin, amax);
}
if (pchan->ikflag & BONE_IK_XLIMIT) {
amin[0] = xminmax[0];
amax[0] = xminmax[1];
amin[1] = amax[1] = 0.0f;
- DRW_shgroup_call_dynamic_add(g_data.bone_dof_lines, final_bonemat, col_xaxis, amin, amax);
+ DRW_buffer_add_entry(g_data.bone_dof_lines, final_bonemat, col_xaxis, amin, amax);
}
if (pchan->ikflag & BONE_IK_ZLIMIT) {
amin[1] = zminmax[0];
amax[1] = zminmax[1];
amin[0] = amax[0] = 0.0f;
- DRW_shgroup_call_dynamic_add(g_data.bone_dof_lines, final_bonemat, col_zaxis, amin, amax);
+ DRW_buffer_add_entry(g_data.bone_dof_lines, final_bonemat, col_zaxis, amin, amax);
}
}
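The pattern repeated throughout draw_armature.c above: each DRWCallBuffer is created lazily on first use for the current armature, then filled with one entry per bone. Condensed from the octahedral case (abbreviated from the committed code):

    if (g_data.bone_octahedral_outline == NULL) {
      struct GPUBatch *geom = DRW_cache_bone_octahedral_wire_get();
      g_data.bone_octahedral_outline = buffer_instance_bone_shape_outline(
          g_data.passes.bone_outline, geom, sh_cfg);
    }
    DRW_buffer_add_entry(g_data.bone_octahedral_outline, final_bonemat, outline_color);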
diff --git a/source/blender/draw/intern/draw_common.c b/source/blender/draw/intern/draw_common.c
index 9abc7ec6c6e..f2351885962 100644
--- a/source/blender/draw/intern/draw_common.c
+++ b/source/blender/draw/intern/draw_common.c
@@ -285,7 +285,8 @@ static struct {
struct GPUVertFormat *instance_bone_envelope_distance;
struct GPUVertFormat *instance_bone_envelope_outline;
struct GPUVertFormat *instance_mball_handles;
- struct GPUVertFormat *dynlines_color;
+ struct GPUVertFormat *pos_color;
+ struct GPUVertFormat *pos;
} g_formats = {NULL};
void DRW_globals_free(void)
@@ -310,34 +311,36 @@ void DRW_shgroup_world_clip_planes_from_rv3d(DRWShadingGroup *shgrp, const Regio
DRW_shgroup_state_enable(shgrp, DRW_STATE_CLIP_PLANES);
}
-DRWShadingGroup *shgroup_dynlines_flat_color(DRWPass *pass, eGPUShaderConfig sh_cfg)
+struct DRWCallBuffer *buffer_dynlines_flat_color(DRWPass *pass, eGPUShaderConfig sh_cfg)
{
GPUShader *sh = GPU_shader_get_builtin_shader_with_config(GPU_SHADER_3D_FLAT_COLOR, sh_cfg);
- DRW_shgroup_instance_format(g_formats.dynlines_color,
+ DRW_shgroup_instance_format(g_formats.pos_color,
{
{"pos", DRW_ATTR_FLOAT, 3},
{"color", DRW_ATTR_FLOAT, 4},
});
- DRWShadingGroup *grp = DRW_shgroup_line_batch_create_with_format(
- sh, pass, g_formats.dynlines_color);
+ DRWShadingGroup *grp = DRW_shgroup_create(sh, pass);
if (sh_cfg == GPU_SHADER_CFG_CLIPPED) {
DRW_shgroup_world_clip_planes_from_rv3d(grp, DRW_context_state_get()->rv3d);
}
- return grp;
+ return DRW_shgroup_call_buffer_add(grp, g_formats.pos_color, GPU_PRIM_LINES);
}
-DRWShadingGroup *shgroup_dynlines_dashed_uniform_color(DRWPass *pass,
- const float color[4],
- eGPUShaderConfig sh_cfg)
+struct DRWCallBuffer *buffer_dynlines_dashed_uniform_color(DRWPass *pass,
+ const float color[4],
+ eGPUShaderConfig sh_cfg)
{
GPUShader *sh = GPU_shader_get_builtin_shader_with_config(
GPU_SHADER_3D_LINE_DASHED_UNIFORM_COLOR, sh_cfg);
static float dash_width = 6.0f;
static float dash_factor = 0.5f;
- DRWShadingGroup *grp = DRW_shgroup_line_batch_create(sh, pass);
+
+ DRW_shgroup_instance_format(g_formats.pos, {{"pos", DRW_ATTR_FLOAT, 3}});
+
+ DRWShadingGroup *grp = DRW_shgroup_create(sh, pass);
DRW_shgroup_uniform_vec4(grp, "color", color, 1);
DRW_shgroup_uniform_vec2(grp, "viewport_size", DRW_viewport_size_get(), 1);
DRW_shgroup_uniform_float(grp, "dash_width", &dash_width, 1);
@@ -346,60 +349,53 @@ DRWShadingGroup *shgroup_dynlines_dashed_uniform_color(DRWPass *pass,
if (sh_cfg == GPU_SHADER_CFG_CLIPPED) {
DRW_shgroup_world_clip_planes_from_rv3d(grp, DRW_context_state_get()->rv3d);
}
- return grp;
+ return DRW_shgroup_call_buffer_add(grp, g_formats.pos, GPU_PRIM_LINES);
}
-DRWShadingGroup *shgroup_dynpoints_uniform_color(DRWPass *pass,
- const float color[4],
- const float *size,
- eGPUShaderConfig sh_cfg)
+struct DRWCallBuffer *buffer_dynpoints_uniform_color(DRWShadingGroup *grp)
{
- GPUShader *sh = GPU_shader_get_builtin_shader_with_config(
- GPU_SHADER_3D_POINT_UNIFORM_SIZE_UNIFORM_COLOR_AA, sh_cfg);
+ DRW_shgroup_instance_format(g_formats.pos, {{"pos", DRW_ATTR_FLOAT, 3}});
- DRWShadingGroup *grp = DRW_shgroup_point_batch_create(sh, pass);
- DRW_shgroup_uniform_vec4(grp, "color", color, 1);
- DRW_shgroup_uniform_float(grp, "size", size, 1);
- DRW_shgroup_state_enable(grp, DRW_STATE_POINT);
- if (sh_cfg == GPU_SHADER_CFG_CLIPPED) {
- DRW_shgroup_world_clip_planes_from_rv3d(grp, DRW_context_state_get()->rv3d);
- }
- return grp;
+ return DRW_shgroup_call_buffer_add(grp, g_formats.pos, GPU_PRIM_POINTS);
}
-DRWShadingGroup *shgroup_groundlines_uniform_color(DRWPass *pass,
- const float color[4],
- eGPUShaderConfig sh_cfg)
+struct DRWCallBuffer *buffer_groundlines_uniform_color(DRWPass *pass,
+ const float color[4],
+ eGPUShaderConfig sh_cfg)
{
GPUShader *sh = GPU_shader_get_builtin_shader_with_config(GPU_SHADER_3D_GROUNDLINE, sh_cfg);
- DRWShadingGroup *grp = DRW_shgroup_point_batch_create(sh, pass);
+ DRW_shgroup_instance_format(g_formats.pos, {{"pos", DRW_ATTR_FLOAT, 3}});
+
+ DRWShadingGroup *grp = DRW_shgroup_create(sh, pass);
DRW_shgroup_uniform_vec4(grp, "color", color, 1);
if (sh_cfg == GPU_SHADER_CFG_CLIPPED) {
DRW_shgroup_world_clip_planes_from_rv3d(grp, DRW_context_state_get()->rv3d);
}
- return grp;
+ return DRW_shgroup_call_buffer_add(grp, g_formats.pos, GPU_PRIM_POINTS);
}
-DRWShadingGroup *shgroup_groundpoints_uniform_color(DRWPass *pass,
- const float color[4],
- eGPUShaderConfig sh_cfg)
+struct DRWCallBuffer *buffer_groundpoints_uniform_color(DRWPass *pass,
+ const float color[4],
+ eGPUShaderConfig sh_cfg)
{
GPUShader *sh = GPU_shader_get_builtin_shader_with_config(GPU_SHADER_3D_GROUNDPOINT, sh_cfg);
- DRWShadingGroup *grp = DRW_shgroup_point_batch_create(sh, pass);
+ DRW_shgroup_instance_format(g_formats.pos, {{"pos", DRW_ATTR_FLOAT, 3}});
+
+ DRWShadingGroup *grp = DRW_shgroup_create(sh, pass);
DRW_shgroup_uniform_vec4(grp, "color", color, 1);
DRW_shgroup_state_enable(grp, DRW_STATE_POINT);
if (sh_cfg == GPU_SHADER_CFG_CLIPPED) {
DRW_shgroup_world_clip_planes_from_rv3d(grp, DRW_context_state_get()->rv3d);
}
- return grp;
+ return DRW_shgroup_call_buffer_add(grp, g_formats.pos, GPU_PRIM_POINTS);
}
-DRWShadingGroup *shgroup_instance_screenspace(DRWPass *pass,
- struct GPUBatch *geom,
- const float *size,
- eGPUShaderConfig sh_cfg)
+struct DRWCallBuffer *buffer_instance_screenspace(DRWPass *pass,
+ struct GPUBatch *geom,
+ const float *size,
+ eGPUShaderConfig sh_cfg)
{
GPUShader *sh = GPU_shader_get_builtin_shader_with_config(
GPU_SHADER_3D_SCREENSPACE_VARIYING_COLOR, sh_cfg);
@@ -410,18 +406,17 @@ DRWShadingGroup *shgroup_instance_screenspace(DRWPass *pass,
{"color", DRW_ATTR_FLOAT, 3},
});
- DRWShadingGroup *grp = DRW_shgroup_instance_create(
- sh, pass, geom, g_formats.instance_screenspace);
+ DRWShadingGroup *grp = DRW_shgroup_create(sh, pass);
DRW_shgroup_uniform_float(grp, "size", size, 1);
DRW_shgroup_uniform_float(grp, "pixel_size", DRW_viewport_pixelsize_get(), 1);
DRW_shgroup_uniform_vec3(grp, "screen_vecs[0]", DRW_viewport_screenvecs_get(), 2);
if (sh_cfg == GPU_SHADER_CFG_CLIPPED) {
DRW_shgroup_world_clip_planes_from_rv3d(grp, DRW_context_state_get()->rv3d);
}
- return grp;
+ return DRW_shgroup_call_buffer_instance_add(grp, g_formats.instance_screenspace, geom);
}
-DRWShadingGroup *shgroup_instance_solid(DRWPass *pass, struct GPUBatch *geom)
+struct DRWCallBuffer *buffer_instance_solid(DRWPass *pass, struct GPUBatch *geom)
{
static float light[3] = {0.0f, 0.0f, 1.0f};
GPUShader *sh = GPU_shader_get_builtin_shader(
@@ -433,13 +428,13 @@ DRWShadingGroup *shgroup_instance_solid(DRWPass *pass, struct GPUBatch *geom)
{"color", DRW_ATTR_FLOAT, 4},
});
- DRWShadingGroup *grp = DRW_shgroup_instance_create(sh, pass, geom, g_formats.instance_color);
+ DRWShadingGroup *grp = DRW_shgroup_create(sh, pass);
DRW_shgroup_uniform_vec3(grp, "light", light, 1);
- return grp;
+ return DRW_shgroup_call_buffer_instance_add(grp, g_formats.instance_color, geom);
}
-DRWShadingGroup *shgroup_instance_wire(DRWPass *pass, struct GPUBatch *geom)
+struct DRWCallBuffer *buffer_instance_wire(DRWPass *pass, struct GPUBatch *geom)
{
GPUShader *sh = GPU_shader_get_builtin_shader(GPU_SHADER_3D_OBJECTSPACE_VARIYING_COLOR);
@@ -449,14 +444,14 @@ DRWShadingGroup *shgroup_instance_wire(DRWPass *pass, struct GPUBatch *geom)
{"color", DRW_ATTR_FLOAT, 4},
});
- DRWShadingGroup *grp = DRW_shgroup_instance_create(sh, pass, geom, g_formats.instance_color);
+ DRWShadingGroup *grp = DRW_shgroup_create(sh, pass);
- return grp;
+ return DRW_shgroup_call_buffer_instance_add(grp, g_formats.instance_color, geom);
}
-DRWShadingGroup *shgroup_instance_screen_aligned(DRWPass *pass,
- struct GPUBatch *geom,
- eGPUShaderConfig sh_cfg)
+struct DRWCallBuffer *buffer_instance_screen_aligned(DRWPass *pass,
+ struct GPUBatch *geom,
+ eGPUShaderConfig sh_cfg)
{
GPUShader *sh = GPU_shader_get_builtin_shader_with_config(GPU_SHADER_3D_INSTANCE_SCREEN_ALIGNED,
sh_cfg);
@@ -468,18 +463,17 @@ DRWShadingGroup *shgroup_instance_screen_aligned(DRWPass *pass,
{"InstanceModelMatrix", DRW_ATTR_FLOAT, 16},
});
- DRWShadingGroup *grp = DRW_shgroup_instance_create(
- sh, pass, geom, g_formats.instance_screen_aligned);
+ DRWShadingGroup *grp = DRW_shgroup_create(sh, pass);
DRW_shgroup_uniform_vec3(grp, "screen_vecs[0]", DRW_viewport_screenvecs_get(), 2);
if (sh_cfg == GPU_SHADER_CFG_CLIPPED) {
DRW_shgroup_world_clip_planes_from_rv3d(grp, DRW_context_state_get()->rv3d);
}
- return grp;
+ return DRW_shgroup_call_buffer_instance_add(grp, g_formats.instance_screen_aligned, geom);
}
-DRWShadingGroup *shgroup_instance_scaled(DRWPass *pass,
- struct GPUBatch *geom,
- eGPUShaderConfig sh_cfg)
+struct DRWCallBuffer *buffer_instance_scaled(DRWPass *pass,
+ struct GPUBatch *geom,
+ eGPUShaderConfig sh_cfg)
{
GPUShader *sh_inst = GPU_shader_get_builtin_shader_with_config(
GPU_SHADER_INSTANCE_VARIYING_COLOR_VARIYING_SCALE, sh_cfg);
@@ -491,15 +485,16 @@ DRWShadingGroup *shgroup_instance_scaled(DRWPass *pass,
{"InstanceModelMatrix", DRW_ATTR_FLOAT, 16},
});
- DRWShadingGroup *grp = DRW_shgroup_instance_create(
- sh_inst, pass, geom, g_formats.instance_scaled);
+ DRWShadingGroup *grp = DRW_shgroup_create(sh_inst, pass);
if (sh_cfg == GPU_SHADER_CFG_CLIPPED) {
DRW_shgroup_world_clip_planes_from_rv3d(grp, DRW_context_state_get()->rv3d);
}
- return grp;
+ return DRW_shgroup_call_buffer_instance_add(grp, g_formats.instance_scaled, geom);
}
-DRWShadingGroup *shgroup_instance(DRWPass *pass, struct GPUBatch *geom, eGPUShaderConfig sh_cfg)
+struct DRWCallBuffer *buffer_instance(DRWPass *pass,
+ struct GPUBatch *geom,
+ eGPUShaderConfig sh_cfg)
{
GPUShader *sh_inst = GPU_shader_get_builtin_shader_with_config(
GPU_SHADER_INSTANCE_VARIYING_COLOR_VARIYING_SIZE, sh_cfg);
@@ -511,22 +506,16 @@ DRWShadingGroup *shgroup_instance(DRWPass *pass, struct GPUBatch *geom, eGPUShad
{"InstanceModelMatrix", DRW_ATTR_FLOAT, 16},
});
- DRWShadingGroup *grp = DRW_shgroup_instance_create(
- sh_inst, pass, geom, g_formats.instance_sized);
+ DRWShadingGroup *grp = DRW_shgroup_create(sh_inst, pass);
DRW_shgroup_state_disable(grp, DRW_STATE_BLEND);
if (sh_cfg == GPU_SHADER_CFG_CLIPPED) {
DRW_shgroup_world_clip_planes_from_rv3d(grp, DRW_context_state_get()->rv3d);
}
- return grp;
+ return DRW_shgroup_call_buffer_instance_add(grp, g_formats.instance_sized, geom);
}
-DRWShadingGroup *shgroup_instance_alpha(DRWPass *pass,
- struct GPUBatch *geom,
- eGPUShaderConfig sh_cfg)
+struct DRWCallBuffer *buffer_instance_alpha(DRWShadingGroup *grp, struct GPUBatch *geom)
{
- GPUShader *sh_inst = GPU_shader_get_builtin_shader_with_config(
- GPU_SHADER_INSTANCE_VARIYING_COLOR_VARIYING_SIZE, sh_cfg);
-
DRW_shgroup_instance_format(g_formats.instance_sized,
{
{"color", DRW_ATTR_FLOAT, 4},
@@ -534,17 +523,12 @@ DRWShadingGroup *shgroup_instance_alpha(DRWPass *pass,
{"InstanceModelMatrix", DRW_ATTR_FLOAT, 16},
});
- DRWShadingGroup *grp = DRW_shgroup_instance_create(
- sh_inst, pass, geom, g_formats.instance_sized);
- if (sh_cfg == GPU_SHADER_CFG_CLIPPED) {
- DRW_shgroup_world_clip_planes_from_rv3d(grp, DRW_context_state_get()->rv3d);
- }
- return grp;
+ return DRW_shgroup_call_buffer_instance_add(grp, g_formats.instance_sized, geom);
}
-DRWShadingGroup *shgroup_instance_empty_axes(DRWPass *pass,
- struct GPUBatch *geom,
- eGPUShaderConfig sh_cfg)
+struct DRWCallBuffer *buffer_instance_empty_axes(DRWPass *pass,
+ struct GPUBatch *geom,
+ eGPUShaderConfig sh_cfg)
{
COMMON_Shaders *sh_data = &g_shaders[sh_cfg];
if (sh_data->empty_axes_sh == NULL) {
@@ -563,16 +547,15 @@ DRWShadingGroup *shgroup_instance_empty_axes(DRWPass *pass,
{"InstanceModelMatrix", DRW_ATTR_FLOAT, 16},
});
- DRWShadingGroup *grp = DRW_shgroup_instance_create(
- sh_data->empty_axes_sh, pass, geom, g_formats.instance_sized);
+ DRWShadingGroup *grp = DRW_shgroup_create(sh_data->empty_axes_sh, pass);
DRW_shgroup_uniform_vec3(grp, "screenVecs[0]", DRW_viewport_screenvecs_get(), 2);
if (sh_cfg == GPU_SHADER_CFG_CLIPPED) {
DRW_shgroup_world_clip_planes_from_rv3d(grp, DRW_context_state_get()->rv3d);
}
- return grp;
+ return DRW_shgroup_call_buffer_instance_add(grp, g_formats.instance_sized, geom);
}
-DRWShadingGroup *shgroup_instance_outline(DRWPass *pass, struct GPUBatch *geom, int *baseid)
+struct DRWCallBuffer *buffer_instance_outline(DRWPass *pass, struct GPUBatch *geom, int *baseid)
{
GPUShader *sh_inst = GPU_shader_get_builtin_shader(
GPU_SHADER_INSTANCE_VARIYING_ID_VARIYING_SIZE);
@@ -584,16 +567,15 @@ DRWShadingGroup *shgroup_instance_outline(DRWPass *pass, struct GPUBatch *geom,
{"InstanceModelMatrix", DRW_ATTR_FLOAT, 16},
});
- DRWShadingGroup *grp = DRW_shgroup_instance_create(
- sh_inst, pass, geom, g_formats.instance_outline);
+ DRWShadingGroup *grp = DRW_shgroup_create(sh_inst, pass);
DRW_shgroup_uniform_int(grp, "baseId", baseid, 1);
- return grp;
+ return DRW_shgroup_call_buffer_instance_add(grp, g_formats.instance_outline, geom);
}
-DRWShadingGroup *shgroup_camera_instance(DRWPass *pass,
- struct GPUBatch *geom,
- eGPUShaderConfig sh_cfg)
+struct DRWCallBuffer *buffer_camera_instance(DRWPass *pass,
+ struct GPUBatch *geom,
+ eGPUShaderConfig sh_cfg)
{
GPUShader *sh_inst = GPU_shader_get_builtin_shader_with_config(GPU_SHADER_CAMERA, sh_cfg);
@@ -606,17 +588,16 @@ DRWShadingGroup *shgroup_camera_instance(DRWPass *pass,
{"InstanceModelMatrix", DRW_ATTR_FLOAT, 16},
});
- DRWShadingGroup *grp = DRW_shgroup_instance_create(
- sh_inst, pass, geom, g_formats.instance_camera);
+ DRWShadingGroup *grp = DRW_shgroup_create(sh_inst, pass);
if (sh_cfg == GPU_SHADER_CFG_CLIPPED) {
DRW_shgroup_world_clip_planes_from_rv3d(grp, DRW_context_state_get()->rv3d);
}
- return grp;
+ return DRW_shgroup_call_buffer_instance_add(grp, g_formats.instance_camera, geom);
}
-DRWShadingGroup *shgroup_distance_lines_instance(DRWPass *pass,
- struct GPUBatch *geom,
- eGPUShaderConfig sh_cfg)
+struct DRWCallBuffer *buffer_distance_lines_instance(DRWPass *pass,
+ struct GPUBatch *geom,
+ eGPUShaderConfig sh_cfg)
{
GPUShader *sh_inst = GPU_shader_get_builtin_shader_with_config(GPU_SHADER_DISTANCE_LINES,
sh_cfg);
@@ -630,18 +611,17 @@ DRWShadingGroup *shgroup_distance_lines_instance(DRWPass *pass,
{"InstanceModelMatrix", DRW_ATTR_FLOAT, 16},
});
- DRWShadingGroup *grp = DRW_shgroup_instance_create(
- sh_inst, pass, geom, g_formats.instance_distance_lines);
+ DRWShadingGroup *grp = DRW_shgroup_create(sh_inst, pass);
DRW_shgroup_uniform_float(grp, "size", &point_size, 1);
if (sh_cfg == GPU_SHADER_CFG_CLIPPED) {
DRW_shgroup_world_clip_planes_from_rv3d(grp, DRW_context_state_get()->rv3d);
}
- return grp;
+ return DRW_shgroup_call_buffer_instance_add(grp, g_formats.instance_distance_lines, geom);
}
-DRWShadingGroup *shgroup_spot_instance(DRWPass *pass,
- struct GPUBatch *geom,
- eGPUShaderConfig sh_cfg)
+struct DRWCallBuffer *buffer_spot_instance(DRWPass *pass,
+ struct GPUBatch *geom,
+ eGPUShaderConfig sh_cfg)
{
GPUShader *sh_inst = GPU_shader_get_builtin_shader_with_config(
GPU_SHADER_INSTANCE_EDGES_VARIYING_COLOR, sh_cfg);
@@ -654,17 +634,17 @@ DRWShadingGroup *shgroup_spot_instance(DRWPass *pass,
{"InstanceModelMatrix", DRW_ATTR_FLOAT, 16},
});
- DRWShadingGroup *grp = DRW_shgroup_instance_create(sh_inst, pass, geom, g_formats.instance_spot);
+ DRWShadingGroup *grp = DRW_shgroup_create(sh_inst, pass);
DRW_shgroup_uniform_bool(grp, "drawFront", &False, 1);
DRW_shgroup_uniform_bool(grp, "drawBack", &False, 1);
DRW_shgroup_uniform_bool(grp, "drawSilhouette", &True, 1);
if (sh_cfg == GPU_SHADER_CFG_CLIPPED) {
DRW_shgroup_world_clip_planes_from_rv3d(grp, DRW_context_state_get()->rv3d);
}
- return grp;
+ return DRW_shgroup_call_buffer_instance_add(grp, g_formats.instance_spot, geom);
}
-DRWShadingGroup *shgroup_instance_bone_axes(DRWPass *pass, eGPUShaderConfig sh_cfg)
+struct DRWCallBuffer *buffer_instance_bone_axes(DRWPass *pass, eGPUShaderConfig sh_cfg)
{
COMMON_Shaders *sh_data = &g_shaders[sh_cfg];
if (sh_data->bone_axes == NULL) {
@@ -682,16 +662,16 @@ DRWShadingGroup *shgroup_instance_bone_axes(DRWPass *pass, eGPUShaderConfig sh_c
{"color", DRW_ATTR_FLOAT, 4},
});
- DRWShadingGroup *grp = DRW_shgroup_instance_create(
- sh_data->bone_axes, pass, DRW_cache_bone_arrows_get(), g_formats.instance_color);
+ DRWShadingGroup *grp = DRW_shgroup_create(sh_data->bone_axes, pass);
DRW_shgroup_uniform_vec3(grp, "screenVecs[0]", DRW_viewport_screenvecs_get(), 2);
if (sh_cfg == GPU_SHADER_CFG_CLIPPED) {
DRW_shgroup_world_clip_planes_from_rv3d(grp, DRW_context_state_get()->rv3d);
}
- return grp;
+ return DRW_shgroup_call_buffer_instance_add(
+ grp, g_formats.instance_color, DRW_cache_bone_arrows_get());
}
-DRWShadingGroup *shgroup_instance_bone_envelope_outline(DRWPass *pass, eGPUShaderConfig sh_cfg)
+struct DRWCallBuffer *buffer_instance_bone_envelope_outline(DRWPass *pass, eGPUShaderConfig sh_cfg)
{
COMMON_Shaders *sh_data = &g_shaders[sh_cfg];
if (sh_data->bone_envelope_outline == NULL) {
@@ -712,18 +692,17 @@ DRWShadingGroup *shgroup_instance_bone_envelope_outline(DRWPass *pass, eGPUShade
{"xAxis", DRW_ATTR_FLOAT, 3},
});
- DRWShadingGroup *grp = DRW_shgroup_instance_create(sh_data->bone_envelope_outline,
- pass,
- DRW_cache_bone_envelope_outline_get(),
- g_formats.instance_bone_envelope_outline);
+ DRWShadingGroup *grp = DRW_shgroup_create(sh_data->bone_envelope_outline, pass);
DRW_shgroup_uniform_vec2(grp, "viewportSize", DRW_viewport_size_get(), 1);
if (sh_cfg == GPU_SHADER_CFG_CLIPPED) {
DRW_shgroup_world_clip_planes_from_rv3d(grp, DRW_context_state_get()->rv3d);
}
- return grp;
+ return DRW_shgroup_call_buffer_instance_add(
+ grp, g_formats.instance_bone_envelope_outline, DRW_cache_bone_envelope_outline_get());
}
-DRWShadingGroup *shgroup_instance_bone_envelope_distance(DRWPass *pass, eGPUShaderConfig sh_cfg)
+struct DRWCallBuffer *buffer_instance_bone_envelope_distance(DRWPass *pass,
+ eGPUShaderConfig sh_cfg)
{
COMMON_Shaders *sh_data = &g_shaders[sh_cfg];
if (sh_data->bone_envelope_distance == NULL) {
@@ -743,19 +722,17 @@ DRWShadingGroup *shgroup_instance_bone_envelope_distance(DRWPass *pass, eGPUShad
{"xAxis", DRW_ATTR_FLOAT, 3},
});
- DRWShadingGroup *grp = DRW_shgroup_instance_create(sh_data->bone_envelope_distance,
- pass,
- DRW_cache_bone_envelope_solid_get(),
- g_formats.instance_bone_envelope_distance);
+ DRWShadingGroup *grp = DRW_shgroup_create(sh_data->bone_envelope_distance, pass);
if (sh_cfg == GPU_SHADER_CFG_CLIPPED) {
DRW_shgroup_world_clip_planes_from_rv3d(grp, DRW_context_state_get()->rv3d);
}
- return grp;
+ return DRW_shgroup_call_buffer_instance_add(
+ grp, g_formats.instance_bone_envelope_distance, DRW_cache_bone_envelope_solid_get());
}
-DRWShadingGroup *shgroup_instance_bone_envelope_solid(DRWPass *pass,
- bool transp,
- eGPUShaderConfig sh_cfg)
+struct DRWCallBuffer *buffer_instance_bone_envelope_solid(DRWPass *pass,
+ bool transp,
+ eGPUShaderConfig sh_cfg)
{
COMMON_Shaders *sh_data = &g_shaders[sh_cfg];
if (sh_data->bone_envelope == NULL) {
@@ -777,18 +754,19 @@ DRWShadingGroup *shgroup_instance_bone_envelope_solid(DRWPass *pass,
{"xAxis", DRW_ATTR_FLOAT, 3},
});
- DRWShadingGroup *grp = DRW_shgroup_instance_create(sh_data->bone_envelope,
- pass,
- DRW_cache_bone_envelope_solid_get(),
- g_formats.instance_bone_envelope);
+ DRWShadingGroup *grp = DRW_shgroup_create(sh_data->bone_envelope, pass);
+ /* We can have a lot of overdraw if we don't do this. Also envelope are not subject to
+ * inverted matrix. */
+ DRW_shgroup_state_enable(grp, DRW_STATE_CULL_BACK);
DRW_shgroup_uniform_float_copy(grp, "alpha", transp ? 0.6f : 1.0f);
if (sh_cfg == GPU_SHADER_CFG_CLIPPED) {
DRW_shgroup_world_clip_planes_from_rv3d(grp, DRW_context_state_get()->rv3d);
}
- return grp;
+ return DRW_shgroup_call_buffer_instance_add(
+ grp, g_formats.instance_bone_envelope, DRW_cache_bone_envelope_solid_get());
}
-DRWShadingGroup *shgroup_instance_mball_handles(DRWPass *pass, eGPUShaderConfig sh_cfg)
+struct DRWCallBuffer *buffer_instance_mball_handles(DRWPass *pass, eGPUShaderConfig sh_cfg)
{
COMMON_Shaders *sh_data = &g_shaders[sh_cfg];
if (sh_data->mball_handles == NULL) {
@@ -807,21 +785,19 @@ DRWShadingGroup *shgroup_instance_mball_handles(DRWPass *pass, eGPUShaderConfig
{"color", DRW_ATTR_FLOAT, 3},
});
- DRWShadingGroup *grp = DRW_shgroup_instance_create(sh_data->mball_handles,
- pass,
- DRW_cache_screenspace_circle_get(),
- g_formats.instance_mball_handles);
+ DRWShadingGroup *grp = DRW_shgroup_create(sh_data->mball_handles, pass);
DRW_shgroup_uniform_vec3(grp, "screen_vecs[0]", DRW_viewport_screenvecs_get(), 2);
if (sh_cfg == GPU_SHADER_CFG_CLIPPED) {
DRW_shgroup_world_clip_planes_from_rv3d(grp, DRW_context_state_get()->rv3d);
}
- return grp;
+ return DRW_shgroup_call_buffer_instance_add(
+ grp, g_formats.instance_mball_handles, DRW_cache_screenspace_circle_get());
}
/* Only works with batches with adjacency infos. */
-DRWShadingGroup *shgroup_instance_bone_shape_outline(DRWPass *pass,
- struct GPUBatch *geom,
- eGPUShaderConfig sh_cfg)
+struct DRWCallBuffer *buffer_instance_bone_shape_outline(DRWPass *pass,
+ struct GPUBatch *geom,
+ eGPUShaderConfig sh_cfg)
{
COMMON_Shaders *sh_data = &g_shaders[sh_cfg];
if (sh_data->shape_outline == NULL) {
@@ -846,19 +822,18 @@ DRWShadingGroup *shgroup_instance_bone_shape_outline(DRWPass *pass,
{"outlineColorSize", DRW_ATTR_FLOAT, 4},
});
- DRWShadingGroup *grp = DRW_shgroup_instance_create(
- sh_data->shape_outline, pass, geom, g_formats.instance_bone_outline);
+ DRWShadingGroup *grp = DRW_shgroup_create(sh_data->shape_outline, pass);
DRW_shgroup_uniform_vec2(grp, "viewportSize", DRW_viewport_size_get(), 1);
if (sh_cfg == GPU_SHADER_CFG_CLIPPED) {
DRW_shgroup_world_clip_planes_from_rv3d(grp, DRW_context_state_get()->rv3d);
}
- return grp;
+ return DRW_shgroup_call_buffer_instance_add(grp, g_formats.instance_bone_outline, geom);
}
-DRWShadingGroup *shgroup_instance_bone_shape_solid(DRWPass *pass,
- struct GPUBatch *geom,
- bool transp,
- eGPUShaderConfig sh_cfg)
+struct DRWCallBuffer *buffer_instance_bone_shape_solid(DRWPass *pass,
+ struct GPUBatch *geom,
+ bool transp,
+ eGPUShaderConfig sh_cfg)
{
COMMON_Shaders *sh_data = &g_shaders[sh_cfg];
if (sh_data->shape_solid == NULL) {
@@ -880,18 +855,17 @@ DRWShadingGroup *shgroup_instance_bone_shape_solid(DRWPass *pass,
{"stateColor", DRW_ATTR_FLOAT, 3},
});
- DRWShadingGroup *grp = DRW_shgroup_instance_create(
- sh_data->shape_solid, pass, geom, g_formats.instance_bone);
+ DRWShadingGroup *grp = DRW_shgroup_create(sh_data->shape_solid, pass);
DRW_shgroup_uniform_float_copy(grp, "alpha", transp ? 0.6f : 1.0f);
if (sh_cfg == GPU_SHADER_CFG_CLIPPED) {
DRW_shgroup_world_clip_planes_from_rv3d(grp, DRW_context_state_get()->rv3d);
}
- return grp;
+ return DRW_shgroup_call_buffer_instance_add(grp, g_formats.instance_bone, geom);
}
-DRWShadingGroup *shgroup_instance_bone_sphere_solid(DRWPass *pass,
- bool transp,
- eGPUShaderConfig sh_cfg)
+struct DRWCallBuffer *buffer_instance_bone_sphere_solid(DRWPass *pass,
+ bool transp,
+ eGPUShaderConfig sh_cfg)
{
COMMON_Shaders *sh_data = &g_shaders[sh_cfg];
if (sh_data->bone_sphere == NULL) {
@@ -910,17 +884,17 @@ DRWShadingGroup *shgroup_instance_bone_sphere_solid(DRWPass *pass,
{"stateColor", DRW_ATTR_FLOAT, 3},
});
- DRWShadingGroup *grp = DRW_shgroup_instance_create(
- sh_data->bone_sphere, pass, DRW_cache_bone_point_get(), g_formats.instance_bone);
+ DRWShadingGroup *grp = DRW_shgroup_create(sh_data->bone_sphere, pass);
/* More transparent than the shape to be less distractive. */
DRW_shgroup_uniform_float_copy(grp, "alpha", transp ? 0.4f : 1.0f);
if (sh_cfg == GPU_SHADER_CFG_CLIPPED) {
DRW_shgroup_world_clip_planes_from_rv3d(grp, DRW_context_state_get()->rv3d);
}
- return grp;
+ return DRW_shgroup_call_buffer_instance_add(
+ grp, g_formats.instance_bone, DRW_cache_bone_point_get());
}
-DRWShadingGroup *shgroup_instance_bone_sphere_outline(DRWPass *pass, eGPUShaderConfig sh_cfg)
+struct DRWCallBuffer *buffer_instance_bone_sphere_outline(DRWPass *pass, eGPUShaderConfig sh_cfg)
{
COMMON_Shaders *sh_data = &g_shaders[sh_cfg];
if (sh_data->bone_sphere_outline == NULL) {
@@ -939,18 +913,16 @@ DRWShadingGroup *shgroup_instance_bone_sphere_outline(DRWPass *pass, eGPUShaderC
{"outlineColorSize", DRW_ATTR_FLOAT, 4},
});
- DRWShadingGroup *grp = DRW_shgroup_instance_create(sh_data->bone_sphere_outline,
- pass,
- DRW_cache_bone_point_wire_outline_get(),
- g_formats.instance_bone_outline);
+ DRWShadingGroup *grp = DRW_shgroup_create(sh_data->bone_sphere_outline, pass);
DRW_shgroup_uniform_vec2(grp, "viewportSize", DRW_viewport_size_get(), 1);
if (sh_cfg == GPU_SHADER_CFG_CLIPPED) {
DRW_shgroup_world_clip_planes_from_rv3d(grp, DRW_context_state_get()->rv3d);
}
- return grp;
+ return DRW_shgroup_call_buffer_instance_add(
+ grp, g_formats.instance_bone_outline, DRW_cache_bone_point_wire_outline_get());
}
-DRWShadingGroup *shgroup_instance_bone_stick(DRWPass *pass, eGPUShaderConfig sh_cfg)
+struct DRWCallBuffer *buffer_instance_bone_stick(DRWPass *pass, eGPUShaderConfig sh_cfg)
{
COMMON_Shaders *sh_data = &g_shaders[sh_cfg];
if (sh_data->bone_stick == NULL) {
@@ -973,17 +945,19 @@ DRWShadingGroup *shgroup_instance_bone_stick(DRWPass *pass, eGPUShaderConfig sh_
{"tailColor", DRW_ATTR_FLOAT, 4},
});
- DRWShadingGroup *grp = DRW_shgroup_instance_create(
- sh_data->bone_stick, pass, DRW_cache_bone_stick_get(), g_formats.instance_bone_stick);
+ DRWShadingGroup *grp = DRW_shgroup_create(sh_data->bone_stick, pass);
DRW_shgroup_uniform_vec2(grp, "viewportSize", DRW_viewport_size_get(), 1);
DRW_shgroup_uniform_float_copy(grp, "stickSize", 5.0f * U.pixelsize);
if (sh_cfg == GPU_SHADER_CFG_CLIPPED) {
DRW_shgroup_world_clip_planes_from_rv3d(grp, DRW_context_state_get()->rv3d);
}
- return grp;
+ return DRW_shgroup_call_buffer_instance_add(
+ grp, g_formats.instance_bone_stick, DRW_cache_bone_stick_get());
}
-struct DRWShadingGroup *shgroup_instance_bone_dof(struct DRWPass *pass, struct GPUBatch *geom)
+struct DRWCallBuffer *buffer_instance_bone_dof(struct DRWPass *pass,
+ struct GPUBatch *geom,
+ bool blend)
{
COMMON_Shaders *sh_data = &g_shaders[GPU_SHADER_CFG_DEFAULT];
if (sh_data->bone_dofs == NULL) {
@@ -999,10 +973,12 @@ struct DRWShadingGroup *shgroup_instance_bone_dof(struct DRWPass *pass, struct G
{"amax", DRW_ATTR_FLOAT, 2},
});
- DRWShadingGroup *grp = DRW_shgroup_instance_create(
- sh_data->bone_dofs, pass, geom, g_formats.instance_bone_dof);
-
- return grp;
+ DRWShadingGroup *grp = DRW_shgroup_create(sh_data->bone_dofs, pass);
+ if (blend) {
+ DRW_shgroup_state_enable(grp, DRW_STATE_BLEND);
+ DRW_shgroup_state_disable(grp, DRW_STATE_CULL_FRONT);
+ }
+ return DRW_shgroup_call_buffer_instance_add(grp, g_formats.instance_bone_dof, geom);
}
struct GPUShader *mpath_line_shader_get(void)
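Note how per-buffer GPU state moved from the call sites into the buffer constructors in draw_common.c: buffer_instance_bone_envelope_solid() now enables DRW_STATE_CULL_BACK itself, and buffer_instance_bone_dof() takes a `blend` flag instead of having draw_armature.c toggle DRW_STATE_BLEND afterwards. Mirroring the draw_armature.c call site above (sketch, abbreviated):

    /* Passing `true` enables DRW_STATE_BLEND (and disables front-face culling) internally. */
    g_data.bone_dof_sphere = buffer_instance_bone_dof(
        g_data.passes.bone_envelope, DRW_cache_bone_dof_sphere_get(), true);
    DRW_buffer_add_entry(g_data.bone_dof_sphere, final_bonemat, col_sphere, amin, amax);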
diff --git a/source/blender/draw/intern/draw_common.h b/source/blender/draw/intern/draw_common.h
index 489bc7459df..df7220c0d2a 100644
--- a/source/blender/draw/intern/draw_common.h
+++ b/source/blender/draw/intern/draw_common.h
@@ -23,6 +23,7 @@
#ifndef __DRAW_COMMON_H__
#define __DRAW_COMMON_H__
+struct DRWCallBuffer;
struct DRWPass;
struct DRWShadingGroup;
struct GPUBatch;
@@ -125,77 +126,74 @@ void DRW_globals_free(void);
void DRW_shgroup_world_clip_planes_from_rv3d(struct DRWShadingGroup *shgrp,
const RegionView3D *rv3d);
-struct DRWShadingGroup *shgroup_dynlines_flat_color(struct DRWPass *pass, eGPUShaderConfig sh_cfg);
-struct DRWShadingGroup *shgroup_dynlines_dashed_uniform_color(struct DRWPass *pass,
- const float color[4],
- eGPUShaderConfig sh_cfg);
-struct DRWShadingGroup *shgroup_dynpoints_uniform_color(struct DRWPass *pass,
- const float color[4],
- const float *size,
- eGPUShaderConfig sh_cfg);
-struct DRWShadingGroup *shgroup_groundlines_uniform_color(struct DRWPass *pass,
- const float color[4],
- eGPUShaderConfig sh_cfg);
-struct DRWShadingGroup *shgroup_groundpoints_uniform_color(struct DRWPass *pass,
+/* TODO(fclem) ideally, most of the DRWCallBuffer functions shouldn't create a shgroup. */
+struct DRWCallBuffer *buffer_dynlines_flat_color(struct DRWPass *pass, eGPUShaderConfig sh_cfg);
+struct DRWCallBuffer *buffer_dynlines_dashed_uniform_color(struct DRWPass *pass,
const float color[4],
eGPUShaderConfig sh_cfg);
-struct DRWShadingGroup *shgroup_instance_screenspace(struct DRWPass *pass,
+struct DRWCallBuffer *buffer_dynpoints_uniform_color(struct DRWShadingGroup *grp);
+struct DRWCallBuffer *buffer_groundlines_uniform_color(struct DRWPass *pass,
+ const float color[4],
+ eGPUShaderConfig sh_cfg);
+struct DRWCallBuffer *buffer_groundpoints_uniform_color(struct DRWPass *pass,
+ const float color[4],
+ eGPUShaderConfig sh_cfg);
+struct DRWCallBuffer *buffer_instance_screenspace(struct DRWPass *pass,
+ struct GPUBatch *geom,
+ const float *size,
+ eGPUShaderConfig sh_cfg);
+struct DRWCallBuffer *buffer_instance_solid(struct DRWPass *pass, struct GPUBatch *geom);
+struct DRWCallBuffer *buffer_instance_wire(struct DRWPass *pass, struct GPUBatch *geom);
+struct DRWCallBuffer *buffer_instance_screen_aligned(struct DRWPass *pass,
struct GPUBatch *geom,
- const float *size,
eGPUShaderConfig sh_cfg);
-struct DRWShadingGroup *shgroup_instance_solid(struct DRWPass *pass, struct GPUBatch *geom);
-struct DRWShadingGroup *shgroup_instance_wire(struct DRWPass *pass, struct GPUBatch *geom);
-struct DRWShadingGroup *shgroup_instance_screen_aligned(struct DRWPass *pass,
- struct GPUBatch *geom,
- eGPUShaderConfig sh_cfg);
-struct DRWShadingGroup *shgroup_instance_empty_axes(struct DRWPass *pass,
- struct GPUBatch *geom,
- eGPUShaderConfig sh_cfg);
-struct DRWShadingGroup *shgroup_instance_scaled(struct DRWPass *pass,
- struct GPUBatch *geom,
- eGPUShaderConfig sh_cfg);
-struct DRWShadingGroup *shgroup_instance(struct DRWPass *pass,
- struct GPUBatch *geom,
- eGPUShaderConfig sh_cfg);
-struct DRWShadingGroup *shgroup_instance_alpha(struct DRWPass *pass,
- struct GPUBatch *geom,
- eGPUShaderConfig sh_cfg);
-struct DRWShadingGroup *shgroup_instance_outline(struct DRWPass *pass,
+struct DRWCallBuffer *buffer_instance_empty_axes(struct DRWPass *pass,
struct GPUBatch *geom,
- int *baseid);
-struct DRWShadingGroup *shgroup_camera_instance(struct DRWPass *pass,
- struct GPUBatch *geom,
- eGPUShaderConfig sh_cfg);
-struct DRWShadingGroup *shgroup_distance_lines_instance(struct DRWPass *pass,
- struct GPUBatch *geom,
- eGPUShaderConfig sh_cfg);
-struct DRWShadingGroup *shgroup_spot_instance(struct DRWPass *pass,
+ eGPUShaderConfig sh_cfg);
+struct DRWCallBuffer *buffer_instance_scaled(struct DRWPass *pass,
+ struct GPUBatch *geom,
+ eGPUShaderConfig sh_cfg);
+struct DRWCallBuffer *buffer_instance(struct DRWPass *pass,
+ struct GPUBatch *geom,
+ eGPUShaderConfig sh_cfg);
+struct DRWCallBuffer *buffer_instance_alpha(struct DRWShadingGroup *grp, struct GPUBatch *geom);
+struct DRWCallBuffer *buffer_instance_outline(struct DRWPass *pass,
struct GPUBatch *geom,
- eGPUShaderConfig sh_cfg);
-struct DRWShadingGroup *shgroup_instance_mball_handles(struct DRWPass *pass,
- eGPUShaderConfig sh_cfg);
-struct DRWShadingGroup *shgroup_instance_bone_axes(struct DRWPass *pass, eGPUShaderConfig sh_cfg);
-struct DRWShadingGroup *shgroup_instance_bone_envelope_distance(struct DRWPass *pass,
- eGPUShaderConfig sh_cfg);
-struct DRWShadingGroup *shgroup_instance_bone_envelope_outline(struct DRWPass *pass,
- eGPUShaderConfig sh_cfg);
-struct DRWShadingGroup *shgroup_instance_bone_envelope_solid(struct DRWPass *pass,
- bool transp,
+ int *baseid);
+struct DRWCallBuffer *buffer_camera_instance(struct DRWPass *pass,
+ struct GPUBatch *geom,
+ eGPUShaderConfig sh_cfg);
+struct DRWCallBuffer *buffer_distance_lines_instance(struct DRWPass *pass,
+ struct GPUBatch *geom,
+ eGPUShaderConfig sh_cfg);
+struct DRWCallBuffer *buffer_spot_instance(struct DRWPass *pass,
+ struct GPUBatch *geom,
+ eGPUShaderConfig sh_cfg);
+struct DRWCallBuffer *buffer_instance_mball_handles(struct DRWPass *pass, eGPUShaderConfig sh_cfg);
+struct DRWCallBuffer *buffer_instance_bone_axes(struct DRWPass *pass, eGPUShaderConfig sh_cfg);
+struct DRWCallBuffer *buffer_instance_bone_envelope_distance(struct DRWPass *pass,
eGPUShaderConfig sh_cfg);
-struct DRWShadingGroup *shgroup_instance_bone_shape_outline(struct DRWPass *pass,
- struct GPUBatch *geom,
+struct DRWCallBuffer *buffer_instance_bone_envelope_outline(struct DRWPass *pass,
eGPUShaderConfig sh_cfg);
-struct DRWShadingGroup *shgroup_instance_bone_shape_solid(struct DRWPass *pass,
- struct GPUBatch *geom,
+struct DRWCallBuffer *buffer_instance_bone_envelope_solid(struct DRWPass *pass,
bool transp,
eGPUShaderConfig sh_cfg);
-struct DRWShadingGroup *shgroup_instance_bone_sphere_outline(struct DRWPass *pass,
- eGPUShaderConfig sh_cfg);
-struct DRWShadingGroup *shgroup_instance_bone_sphere_solid(struct DRWPass *pass,
- bool transp,
- eGPUShaderConfig sh_cfg);
-struct DRWShadingGroup *shgroup_instance_bone_stick(struct DRWPass *pass, eGPUShaderConfig sh_cfg);
-struct DRWShadingGroup *shgroup_instance_bone_dof(struct DRWPass *pass, struct GPUBatch *geom);
+struct DRWCallBuffer *buffer_instance_bone_shape_outline(struct DRWPass *pass,
+ struct GPUBatch *geom,
+ eGPUShaderConfig sh_cfg);
+struct DRWCallBuffer *buffer_instance_bone_shape_solid(struct DRWPass *pass,
+ struct GPUBatch *geom,
+ bool transp,
+ eGPUShaderConfig sh_cfg);
+struct DRWCallBuffer *buffer_instance_bone_sphere_outline(struct DRWPass *pass,
+ eGPUShaderConfig sh_cfg);
+struct DRWCallBuffer *buffer_instance_bone_sphere_solid(struct DRWPass *pass,
+ bool transp,
+ eGPUShaderConfig sh_cfg);
+struct DRWCallBuffer *buffer_instance_bone_stick(struct DRWPass *pass, eGPUShaderConfig sh_cfg);
+struct DRWCallBuffer *buffer_instance_bone_dof(struct DRWPass *pass,
+ struct GPUBatch *geom,
+ bool blend);
struct GPUShader *mpath_line_shader_get(void);
struct GPUShader *mpath_points_shader_get(void);
diff --git a/source/blender/draw/intern/draw_instance_data.c b/source/blender/draw/intern/draw_instance_data.c
index e8d91309e06..b88ad936c28 100644
--- a/source/blender/draw/intern/draw_instance_data.c
+++ b/source/blender/draw/intern/draw_instance_data.c
@@ -36,33 +36,9 @@
#include "MEM_guardedalloc.h"
#include "BLI_utildefines.h"
#include "BLI_mempool.h"
+#include "BLI_memblock.h"
-#define BUFFER_CHUNK_SIZE 32
-#define BUFFER_VERTS_CHUNK 32
-
-typedef struct DRWBatchingBuffer {
- struct DRWShadingGroup *shgroup; /* Link back to the owning shGroup. Also tells if it's used */
- GPUVertFormat *format; /* Identifier. */
- GPUVertBuf *vert; /* GPUVertBuf contained in the GPUBatch. */
- GPUBatch *batch; /* GPUBatch containing the GPUVertBuf. */
-} DRWBatchingBuffer;
-
-typedef struct DRWInstancingBuffer {
- struct DRWShadingGroup *shgroup; /* Link back to the owning shGroup. Also tells if it's used */
- GPUVertFormat *format; /* Identifier. */
- GPUBatch *instance; /* Identifier. */
- GPUVertBuf *vert; /* GPUVertBuf contained in the GPUBatch. */
- GPUBatch *batch; /* GPUBatch containing the GPUVertBuf. */
-} DRWInstancingBuffer;
-
-typedef struct DRWInstanceChunk {
- size_t cursor; /* Offset to the next instance data. */
- size_t alloc_size; /* Number of DRWBatchingBuffer/Batches alloc'd in ibufs/btchs. */
- union {
- DRWBatchingBuffer *bbufs;
- DRWInstancingBuffer *ibufs;
- };
-} DRWInstanceChunk;
+#include "intern/gpu_primitive_private.h"
struct DRWInstanceData {
struct DRWInstanceData *next;
@@ -77,212 +53,167 @@ struct DRWInstanceDataList {
DRWInstanceData *idata_head[MAX_INSTANCE_DATA_SIZE];
DRWInstanceData *idata_tail[MAX_INSTANCE_DATA_SIZE];
- DRWInstanceChunk instancing;
- DRWInstanceChunk batching;
+ BLI_memblock *pool_instancing;
+ BLI_memblock *pool_batching;
+ BLI_memblock *pool_buffers;
};
+typedef struct DRWTempBufferHandle {
+ /** Must be first for casting. */
+ GPUVertBuf buf;
+ /** Format pointer for reuse. */
+ GPUVertFormat *format;
+ /** Touched vertex length for resize. */
+ uint *vert_len;
+} DRWTempBufferHandle;
+
static ListBase g_idatalists = {NULL, NULL};
/* -------------------------------------------------------------------- */
/** \name Instance Buffer Management
* \{ */
-/**
- * This manager allows to distribute existing batches for instancing
- * attributes. This reduce the number of batches creation.
- * Querying a batch is done with a vertex format. This format should
- * be static so that it's pointer never changes (because we are using
- * this pointer as identifier [we don't want to check the full format
- * that would be too slow]).
- */
-static void instance_batch_free(GPUBatch *batch, void *UNUSED(user_data))
+static void instance_batch_free(GPUBatch *geom, void *UNUSED(user_data))
{
- if (batch->verts[0] == NULL) {
+ if (geom->verts[0] == NULL) {
      /** XXX This is a false positive case.
       * The batch has been requested but is not initialized yet,
       * and there is a chance that it might still become initialized.
       */
return;
}
- /* Free all batches that have the same key before they are reused. */
+
+  /* Free all batches that use the same VBOs before they are reused. */
/* TODO: Make it thread safe! Batch freeing can happen from another thread. */
- /* XXX we need to iterate over all idatalists unless we make some smart
- * data structure to store the locations to update. */
- for (DRWInstanceDataList *idatalist = g_idatalists.first; idatalist;
- idatalist = idatalist->next) {
- DRWInstancingBuffer *ibuf = idatalist->instancing.ibufs;
- for (int i = 0; i < idatalist->instancing.alloc_size; i++, ibuf++) {
- if (ibuf->instance == batch) {
- BLI_assert(ibuf->shgroup == NULL); /* Make sure it has no other users. */
- GPU_VERTBUF_DISCARD_SAFE(ibuf->vert);
- GPU_BATCH_DISCARD_SAFE(ibuf->batch);
- /* Tag as non alloced. */
- ibuf->format = NULL;
+  /* FIXME: This is not really correct. The correct way would be to check based on
+   * the vertex buffers. We assume the batch containing the VBO is being freed when it should be. */
+ /* PERF: This is doing a linear search. This can be very costly. */
+ LISTBASE_FOREACH (DRWInstanceDataList *, data_list, &g_idatalists) {
+ BLI_memblock *memblock = data_list->pool_instancing;
+ BLI_memblock_iter iter;
+ BLI_memblock_iternew(memblock, &iter);
+ GPUBatch *batch;
+ while ((batch = (GPUBatch *)BLI_memblock_iterstep(&iter))) {
+      /* Only checking verts[0] is enough. */
+ if (batch->verts[0] == geom->verts[0]) {
+ GPU_batch_clear(batch);
}
}
}
}
-void DRW_batching_buffer_request(DRWInstanceDataList *idatalist,
- GPUVertFormat *format,
- GPUPrimType type,
- struct DRWShadingGroup *shgroup,
- GPUBatch **r_batch,
- GPUVertBuf **r_vert)
+/**
+ * This manager allows distributing existing batches for instancing
+ * attributes. This reduces the number of batch creations.
+ * Querying a batch is done with a vertex format. This format should
+ * be static so that its pointer never changes (because we use this
+ * pointer as an identifier [checking the full format would be too
+ * slow]).
+ */
+GPUVertBuf *DRW_temp_buffer_request(DRWInstanceDataList *idatalist,
+ GPUVertFormat *format,
+ uint *vert_len)
{
- DRWInstanceChunk *chunk = &idatalist->batching;
- DRWBatchingBuffer *bbuf = idatalist->batching.bbufs;
- BLI_assert(format);
- /* Search for an unused batch. */
- for (int i = 0; i < idatalist->batching.alloc_size; i++, bbuf++) {
- if (bbuf->shgroup == NULL) {
- if (bbuf->format == format) {
- bbuf->shgroup = shgroup;
- *r_batch = bbuf->batch;
- *r_vert = bbuf->vert;
- return;
- }
- }
- }
- int new_id = 0; /* Find insertion point. */
- for (; new_id < chunk->alloc_size; ++new_id) {
- if (chunk->bbufs[new_id].format == NULL) {
- break;
- }
+ BLI_assert(format != NULL);
+ BLI_assert(vert_len != NULL);
+
+ DRWTempBufferHandle *handle = BLI_memblock_alloc(idatalist->pool_buffers);
+ GPUVertBuf *vert = &handle->buf;
+ handle->vert_len = vert_len;
+
+ if (handle->format != format) {
+ handle->format = format;
+    /* TODO/PERF: Keep the allocated data from being freed to avoid reallocation. */
+ GPU_vertbuf_clear(vert);
+ GPU_vertbuf_init_with_format_ex(vert, format, GPU_USAGE_DYNAMIC);
+ GPU_vertbuf_data_alloc(vert, DRW_BUFFER_VERTS_CHUNK);
}
- /* If there is no batch left. Allocate more. */
- if (new_id == chunk->alloc_size) {
- new_id = chunk->alloc_size;
- chunk->alloc_size += BUFFER_CHUNK_SIZE;
- chunk->bbufs = MEM_reallocN(chunk->bbufs, chunk->alloc_size * sizeof(DRWBatchingBuffer));
- memset(chunk->bbufs + new_id, 0, sizeof(DRWBatchingBuffer) * BUFFER_CHUNK_SIZE);
- }
- /* Create the batch. */
- bbuf = chunk->bbufs + new_id;
- bbuf->vert = *r_vert = GPU_vertbuf_create_with_format_ex(format, GPU_USAGE_DYNAMIC);
- bbuf->batch = *r_batch = GPU_batch_create_ex(type, bbuf->vert, NULL, 0);
- bbuf->format = format;
- bbuf->shgroup = shgroup;
- GPU_vertbuf_data_alloc(*r_vert, BUFFER_VERTS_CHUNK);
+ return vert;
}
-void DRW_instancing_buffer_request(DRWInstanceDataList *idatalist,
- GPUVertFormat *format,
- GPUBatch *instance,
- struct DRWShadingGroup *shgroup,
- GPUBatch **r_batch,
- GPUVertBuf **r_vert)
+/* NOTE: Does not return a valid drawable batch until DRW_instance_buffer_finish has run. */
+GPUBatch *DRW_temp_batch_instance_request(DRWInstanceDataList *idatalist,
+ GPUVertBuf *buf,
+ GPUBatch *geom)
{
- DRWInstanceChunk *chunk = &idatalist->instancing;
- DRWInstancingBuffer *ibuf = idatalist->instancing.ibufs;
- BLI_assert(format);
- /* Search for an unused batch. */
- for (int i = 0; i < idatalist->instancing.alloc_size; i++, ibuf++) {
- if (ibuf->shgroup == NULL) {
- if (ibuf->format == format) {
- if (ibuf->instance == instance) {
- ibuf->shgroup = shgroup;
- *r_batch = ibuf->batch;
- *r_vert = ibuf->vert;
- return;
- }
- }
+ /* Do not call this with a batch that is already an instancing batch. */
+ BLI_assert(geom->inst == NULL);
+
+ GPUBatch *batch = BLI_memblock_alloc(idatalist->pool_instancing);
+ bool is_compatible = (batch->gl_prim_type == geom->gl_prim_type) && (batch->inst == buf) &&
+ (batch->phase == GPU_BATCH_READY_TO_DRAW);
+ for (int i = 0; i < GPU_BATCH_VBO_MAX_LEN && is_compatible; i++) {
+ if (batch->verts[i] != geom->verts[i]) {
+ is_compatible = false;
}
}
- int new_id = 0; /* Find insertion point. */
- for (; new_id < chunk->alloc_size; ++new_id) {
- if (chunk->ibufs[new_id].format == NULL) {
- break;
- }
+
+ if (!is_compatible) {
+ GPU_batch_clear(batch);
+    /* Save args and init later. */
+    batch->inst = buf;
+    batch->phase = GPU_BATCH_READY_TO_BUILD;
+    batch->verts[0] = (void *)geom; /* HACK to save the pointer without another alloc. */
+
+    /* Make sure to free this batch if the instance geom gets freed. */
+ GPU_batch_callback_free_set(geom, &instance_batch_free, NULL);
}
- /* If there is no batch left. Allocate more. */
- if (new_id == chunk->alloc_size) {
- new_id = chunk->alloc_size;
- chunk->alloc_size += BUFFER_CHUNK_SIZE;
- chunk->ibufs = MEM_reallocN(chunk->ibufs, chunk->alloc_size * sizeof(DRWInstancingBuffer));
- memset(chunk->ibufs + new_id, 0, sizeof(DRWInstancingBuffer) * BUFFER_CHUNK_SIZE);
+ return batch;
+}
+
+/* NOTE: Use only with buf allocated via DRW_temp_buffer_request. */
+GPUBatch *DRW_temp_batch_request(DRWInstanceDataList *idatalist,
+ GPUVertBuf *buf,
+ GPUPrimType prim_type)
+{
+ GPUBatch *batch = BLI_memblock_alloc(idatalist->pool_batching);
+ bool is_compatible = (batch->verts[0] == buf) &&
+ (batch->gl_prim_type == convert_prim_type_to_gl(prim_type));
+ if (!is_compatible) {
+ GPU_batch_clear(batch);
+ GPU_batch_init(batch, prim_type, buf, NULL);
}
- /* Create the batch. */
- ibuf = chunk->ibufs + new_id;
- ibuf->vert = *r_vert = GPU_vertbuf_create_with_format_ex(format, GPU_USAGE_DYNAMIC);
- ibuf->batch = *r_batch = MEM_callocN(sizeof(GPUBatch), "GPUBatch");
- ibuf->format = format;
- ibuf->shgroup = shgroup;
- ibuf->instance = instance;
- GPU_vertbuf_data_alloc(*r_vert, BUFFER_VERTS_CHUNK);
- /* Make sure to free this ibuf if the instance batch gets free. */
- GPU_batch_callback_free_set(instance, &instance_batch_free, NULL);
+ return batch;
+}
+
+static void temp_buffer_handle_free(DRWTempBufferHandle *handle)
+{
+ handle->format = NULL;
+ GPU_vertbuf_clear(&handle->buf);
}
void DRW_instance_buffer_finish(DRWInstanceDataList *idatalist)
{
- size_t realloc_size = 1; /* Avoid 0 size realloc. */
- /* Resize down buffers in use and send data to GPU & free unused buffers. */
- DRWInstanceChunk *batching = &idatalist->batching;
- DRWBatchingBuffer *bbuf = batching->bbufs;
- for (int i = 0; i < batching->alloc_size; i++, bbuf++) {
- if (bbuf->shgroup != NULL) {
- realloc_size = i + 1;
- uint vert_len = DRW_shgroup_get_instance_count(bbuf->shgroup);
- vert_len += (vert_len == 0) ? 1 : 0; /* Do not realloc to 0 size buffer */
- if (vert_len + BUFFER_VERTS_CHUNK <= bbuf->vert->vertex_len) {
- uint size = vert_len + BUFFER_VERTS_CHUNK - 1;
- size = size - size % BUFFER_VERTS_CHUNK;
- GPU_vertbuf_data_resize(bbuf->vert, size);
+ /* Resize down buffers in use and send data to GPU. */
+ BLI_memblock_iter iter;
+ DRWTempBufferHandle *handle;
+ BLI_memblock_iternew(idatalist->pool_buffers, &iter);
+ while ((handle = BLI_memblock_iterstep(&iter))) {
+ if (handle->vert_len != NULL) {
+ uint vert_len = *(handle->vert_len);
+ uint target_buf_size = ((vert_len / DRW_BUFFER_VERTS_CHUNK) + 1) * DRW_BUFFER_VERTS_CHUNK;
+ if (target_buf_size < handle->buf.vertex_alloc) {
+ GPU_vertbuf_data_resize(&handle->buf, target_buf_size);
}
- GPU_vertbuf_use(bbuf->vert); /* Send data. */
- bbuf->shgroup = NULL; /* Set as non used for the next round. */
- }
- else {
- GPU_VERTBUF_DISCARD_SAFE(bbuf->vert);
- GPU_BATCH_DISCARD_SAFE(bbuf->batch);
- bbuf->format = NULL; /* Tag as non alloced. */
+ GPU_vertbuf_data_len_set(&handle->buf, vert_len);
+ GPU_vertbuf_use(&handle->buf); /* Send data. */
}
}
- /* Rounding up to nearest chunk size. */
- realloc_size += BUFFER_CHUNK_SIZE - 1;
- realloc_size -= realloc_size % BUFFER_CHUNK_SIZE;
- /* Resize down if necessary. */
- if (realloc_size < batching->alloc_size) {
- batching->alloc_size = realloc_size;
- batching->ibufs = MEM_reallocN(batching->ibufs, realloc_size * sizeof(DRWBatchingBuffer));
- }
-
- realloc_size = 1;
- /* Resize down buffers in use and send data to GPU & free unused buffers. */
- DRWInstanceChunk *instancing = &idatalist->instancing;
- DRWInstancingBuffer *ibuf = instancing->ibufs;
- for (int i = 0; i < instancing->alloc_size; i++, ibuf++) {
- if (ibuf->shgroup != NULL) {
- realloc_size = i + 1;
- uint vert_len = DRW_shgroup_get_instance_count(ibuf->shgroup);
- vert_len += (vert_len == 0) ? 1 : 0; /* Do not realloc to 0 size buffer */
- if (vert_len + BUFFER_VERTS_CHUNK <= ibuf->vert->vertex_len) {
- uint size = vert_len + BUFFER_VERTS_CHUNK - 1;
- size = size - size % BUFFER_VERTS_CHUNK;
- GPU_vertbuf_data_resize(ibuf->vert, size);
- }
- GPU_vertbuf_use(ibuf->vert); /* Send data. */
- /* Setup batch now that we are sure ibuf->instance is setup. */
- GPU_batch_copy(ibuf->batch, ibuf->instance);
- GPU_batch_instbuf_set(ibuf->batch, ibuf->vert, false);
- ibuf->shgroup = NULL; /* Set as non used for the next round. */
- }
- else {
- GPU_VERTBUF_DISCARD_SAFE(ibuf->vert);
- GPU_BATCH_DISCARD_SAFE(ibuf->batch);
- ibuf->format = NULL; /* Tag as non alloced. */
+ /* Finish pending instancing batches. */
+ GPUBatch *batch;
+ BLI_memblock_iternew(idatalist->pool_instancing, &iter);
+ while ((batch = BLI_memblock_iterstep(&iter))) {
+ if (batch->phase == GPU_BATCH_READY_TO_BUILD) {
+ GPUVertBuf *inst = batch->inst;
+      GPUBatch *geom = (void *)batch->verts[0]; /* HACK, see DRW_temp_batch_instance_request. */
+ GPU_batch_copy(batch, geom);
+ GPU_batch_instbuf_set(batch, inst, false);
}
}
- /* Rounding up to nearest chunk size. */
- realloc_size += BUFFER_CHUNK_SIZE - 1;
- realloc_size -= realloc_size % BUFFER_CHUNK_SIZE;
- /* Resize down if necessary. */
- if (realloc_size < instancing->alloc_size) {
- instancing->alloc_size = realloc_size;
- instancing->ibufs = MEM_reallocN(instancing->ibufs,
- realloc_size * sizeof(DRWInstancingBuffer));
- }
+ /* Resize pools and free unused. */
+ BLI_memblock_clear(idatalist->pool_buffers, (MemblockValFreeFP)temp_buffer_handle_free);
+ BLI_memblock_clear(idatalist->pool_instancing, (MemblockValFreeFP)GPU_batch_clear);
+ BLI_memblock_clear(idatalist->pool_batching, (MemblockValFreeFP)GPU_batch_clear);
}
/** \} */
@@ -352,12 +283,10 @@ DRWInstanceData *DRW_instance_data_request(DRWInstanceDataList *idatalist, uint
DRWInstanceDataList *DRW_instance_data_list_create(void)
{
DRWInstanceDataList *idatalist = MEM_callocN(sizeof(DRWInstanceDataList), "DRWInstanceDataList");
- idatalist->batching.bbufs = MEM_callocN(sizeof(DRWBatchingBuffer) * BUFFER_CHUNK_SIZE,
- "DRWBatchingBuffers");
- idatalist->batching.alloc_size = BUFFER_CHUNK_SIZE;
- idatalist->instancing.ibufs = MEM_callocN(sizeof(DRWInstancingBuffer) * BUFFER_CHUNK_SIZE,
- "DRWInstancingBuffers");
- idatalist->instancing.alloc_size = BUFFER_CHUNK_SIZE;
+
+ idatalist->pool_batching = BLI_memblock_create(sizeof(GPUBatch), true);
+ idatalist->pool_instancing = BLI_memblock_create(sizeof(GPUBatch), true);
+ idatalist->pool_buffers = BLI_memblock_create(sizeof(DRWTempBufferHandle), true);
BLI_addtail(&g_idatalists, idatalist);
@@ -378,19 +307,9 @@ void DRW_instance_data_list_free(DRWInstanceDataList *idatalist)
idatalist->idata_tail[i] = NULL;
}
- DRWBatchingBuffer *bbuf = idatalist->batching.bbufs;
- for (int i = 0; i < idatalist->batching.alloc_size; i++, bbuf++) {
- GPU_VERTBUF_DISCARD_SAFE(bbuf->vert);
- GPU_BATCH_DISCARD_SAFE(bbuf->batch);
- }
- MEM_freeN(idatalist->batching.bbufs);
-
- DRWInstancingBuffer *ibuf = idatalist->instancing.ibufs;
- for (int i = 0; i < idatalist->instancing.alloc_size; i++, ibuf++) {
- GPU_VERTBUF_DISCARD_SAFE(ibuf->vert);
- GPU_BATCH_DISCARD_SAFE(ibuf->batch);
- }
- MEM_freeN(idatalist->instancing.ibufs);
+ BLI_memblock_destroy(idatalist->pool_buffers, (MemblockValFreeFP)temp_buffer_handle_free);
+ BLI_memblock_destroy(idatalist->pool_instancing, (MemblockValFreeFP)GPU_batch_clear);
+ BLI_memblock_destroy(idatalist->pool_batching, (MemblockValFreeFP)GPU_batch_clear);
BLI_remlink(&g_idatalists, idatalist);
}
diff --git a/source/blender/draw/intern/draw_instance_data.h b/source/blender/draw/intern/draw_instance_data.h
index ea5c6ac7bb2..d88de1a58e2 100644
--- a/source/blender/draw/intern/draw_instance_data.h
+++ b/source/blender/draw/intern/draw_instance_data.h
@@ -30,6 +30,8 @@
#define MAX_INSTANCE_DATA_SIZE 64 /* Can be adjusted for more */
+#define DRW_BUFFER_VERTS_CHUNK 128
+
typedef struct DRWInstanceData DRWInstanceData;
typedef struct DRWInstanceDataList DRWInstanceDataList;
@@ -38,18 +40,15 @@ struct DRWShadingGroup;
void *DRW_instance_data_next(DRWInstanceData *idata);
DRWInstanceData *DRW_instance_data_request(DRWInstanceDataList *idatalist, uint attr_size);
-void DRW_batching_buffer_request(DRWInstanceDataList *idatalist,
- GPUVertFormat *format,
- GPUPrimType type,
- struct DRWShadingGroup *shgroup,
- GPUBatch **r_batch,
- GPUVertBuf **r_vert);
-void DRW_instancing_buffer_request(DRWInstanceDataList *idatalist,
- GPUVertFormat *format,
- GPUBatch *instance,
- struct DRWShadingGroup *shgroup,
- GPUBatch **r_batch,
- GPUVertBuf **r_vert);
+GPUVertBuf *DRW_temp_buffer_request(DRWInstanceDataList *idatalist,
+ GPUVertFormat *format,
+ uint *vert_len);
+GPUBatch *DRW_temp_batch_instance_request(DRWInstanceDataList *idatalist,
+ GPUVertBuf *buf,
+ GPUBatch *geom);
+GPUBatch *DRW_temp_batch_request(DRWInstanceDataList *idatalist,
+ GPUVertBuf *buf,
+ GPUPrimType type);
/* Upload all instance data to the GPU as soon as possible. */
void DRW_instance_buffer_finish(DRWInstanceDataList *idatalist);
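[Annotation] The three request functions above replace the old DRW_batching_buffer_request / DRW_instancing_buffer_request pair. A minimal sketch of the intended lifecycle, assuming an idatalist is at hand (inside the draw manager this is DST.idatalist); note that the format must be static, since its pointer is the reuse key, and vert_len must stay valid until DRW_instance_buffer_finish runs:

  static GPUVertFormat format = {0};
  if (format.attr_len == 0) {
    /* Static: the format pointer identifies the buffer for reuse. */
    GPU_vertformat_attr_add(&format, "pos", GPU_COMP_F32, 3, GPU_FETCH_FLOAT);
  }

  uint vert_len = 0; /* Bumped by the caller, read back by _finish(). */
  GPUVertBuf *buf = DRW_temp_buffer_request(idatalist, &format, &vert_len);
  GPUBatch *batch = DRW_temp_batch_request(idatalist, buf, GPU_PRIM_LINES);

  /* ... fill buf and increment vert_len for every vertex written ... */

  /* Once per frame: shrinks buffers to their used size and uploads them. */
  DRW_instance_buffer_finish(idatalist);
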
diff --git a/source/blender/draw/intern/draw_manager.c b/source/blender/draw/intern/draw_manager.c
index f4830916ecf..355046ae277 100644
--- a/source/blender/draw/intern/draw_manager.c
+++ b/source/blender/draw/intern/draw_manager.c
@@ -2953,7 +2953,6 @@ void DRW_engines_free(void)
DRW_UBO_FREE_SAFE(G_draw.view_ubo);
DRW_TEXTURE_FREE_SAFE(G_draw.ramp);
DRW_TEXTURE_FREE_SAFE(G_draw.weight_ramp);
- MEM_SAFE_FREE(g_pos_format);
MEM_SAFE_FREE(DST.uniform_names.buffer);
diff --git a/source/blender/draw/intern/draw_manager.h b/source/blender/draw/intern/draw_manager.h
index b814000673d..c2287acf8e9 100644
--- a/source/blender/draw/intern/draw_manager.h
+++ b/source/blender/draw/intern/draw_manager.h
@@ -134,7 +134,9 @@ typedef struct DRWCall {
uint inst_count;
#ifdef USE_GPU_SELECT
+ /* TODO(fclem) remove once we have a dedicated selection engine. */
int select_id;
+ GPUVertBuf *inst_selectid;
#endif
} DRWCall;
@@ -171,45 +173,18 @@ struct DRWUniform {
char arraysize; /* cannot be more than 16 too */
};
-typedef enum {
- DRW_SHG_NORMAL,
-
- DRW_SHG_POINT_BATCH,
- DRW_SHG_LINE_BATCH,
- DRW_SHG_TRIANGLE_BATCH,
-
- DRW_SHG_INSTANCE,
- DRW_SHG_INSTANCE_EXTERNAL,
- DRW_SHG_FEEDBACK_TRANSFORM,
-} DRWShadingGroupType;
-
struct DRWShadingGroup {
DRWShadingGroup *next;
GPUShader *shader; /* Shader to bind */
DRWUniform *uniforms; /* Uniforms pointers */
- /* Watch this! Can be nasty for debugging. */
- union {
- struct { /* DRW_SHG_NORMAL */
- DRWCall *first, *last; /* Linked list of DRWCall */
- } calls;
- struct { /* DRW_SHG_FEEDBACK_TRANSFORM */
- DRWCall *first, *last; /* Linked list of DRWCall. */
- struct GPUVertBuf *tfeedback_target; /* Transform Feedback target. */
- };
- struct { /* DRW_SHG_***_BATCH */
- struct GPUBatch *batch_geom; /* Result of call batching */
- struct GPUVertBuf *batch_vbo;
- uint primitive_count;
- };
- struct { /* DRW_SHG_INSTANCE[_EXTERNAL] */
- struct GPUBatch *instance_geom;
- struct GPUVertBuf *instance_vbo;
- uint instance_count;
- float instance_orcofac[2][3]; /* TODO find a better place. */
- };
- };
+ struct {
+ DRWCall *first, *last; /* Linked list of DRWCall */
+ } calls;
+
+  /** TODO: Maybe remove from here. */
+ struct GPUVertBuf *tfeedback_target;
/** State changes for this batch only (or'd with the pass's state) */
DRWState state_extra;
@@ -217,7 +192,6 @@ struct DRWShadingGroup {
DRWState state_extra_disable;
/** Stencil mask to use for stencil test / write operations */
uint stencil_mask;
- DRWShadingGroupType type;
/* Builtin matrices locations */
int model;
@@ -229,13 +203,6 @@ struct DRWShadingGroup {
uchar matflag; /* Matrices needed, same as DRWCall.flag */
DRWPass *pass_parent; /* backlink to pass we're in */
-#ifndef NDEBUG
- char attrs_count;
-#endif
-#ifdef USE_GPU_SELECT
- GPUVertBuf *inst_selectid;
- int override_selectid; /* Override for single object instances. */
-#endif
};
#define MAX_PASS_NAME 32
@@ -420,6 +387,4 @@ GPUBatch *drw_cache_procedural_points_get(void);
GPUBatch *drw_cache_procedural_lines_get(void);
GPUBatch *drw_cache_procedural_triangles_get(void);
-extern struct GPUVertFormat *g_pos_format;
-
#endif /* __DRAW_MANAGER_H__ */
diff --git a/source/blender/draw/intern/draw_manager_data.c b/source/blender/draw/intern/draw_manager_data.c
index 3a10543c8ef..151ab469e59 100644
--- a/source/blender/draw/intern/draw_manager_data.c
+++ b/source/blender/draw/intern/draw_manager_data.c
@@ -43,8 +43,6 @@
#include "intern/gpu_codegen.h"
-struct GPUVertFormat *g_pos_format = NULL;
-
/* -------------------------------------------------------------------- */
/** \name Uniform Buffer Object (DRW_uniformbuffer)
* \{ */
@@ -453,7 +451,6 @@ static DRWCallState *drw_call_state_object(DRWShadingGroup *shgroup, float (*obm
void DRW_shgroup_call_add(DRWShadingGroup *shgroup, GPUBatch *geom, float (*obmat)[4])
{
BLI_assert(geom != NULL);
- BLI_assert(ELEM(shgroup->type, DRW_SHG_NORMAL, DRW_SHG_FEEDBACK_TRANSFORM));
DRWCall *call = BLI_memblock_alloc(DST.vmempool->calls);
BLI_LINKS_APPEND(&shgroup->calls, call);
@@ -465,6 +462,7 @@ void DRW_shgroup_call_add(DRWShadingGroup *shgroup, GPUBatch *geom, float (*obma
call->inst_count = 0;
#ifdef USE_GPU_SELECT
call->select_id = DST.select_id;
+ call->inst_selectid = NULL;
#endif
}
@@ -472,7 +470,6 @@ void DRW_shgroup_call_range_add(
DRWShadingGroup *shgroup, GPUBatch *geom, float (*obmat)[4], uint v_sta, uint v_count)
{
BLI_assert(geom != NULL);
- BLI_assert(ELEM(shgroup->type, DRW_SHG_NORMAL, DRW_SHG_FEEDBACK_TRANSFORM));
BLI_assert(v_count);
DRWCall *call = BLI_memblock_alloc(DST.vmempool->calls);
@@ -485,6 +482,7 @@ void DRW_shgroup_call_range_add(
call->inst_count = 0;
#ifdef USE_GPU_SELECT
call->select_id = DST.select_id;
+ call->inst_selectid = NULL;
#endif
}
@@ -493,7 +491,6 @@ static void drw_shgroup_call_procedural_add_ex(DRWShadingGroup *shgroup,
uint vert_count,
float (*obmat)[4])
{
- BLI_assert(ELEM(shgroup->type, DRW_SHG_NORMAL, DRW_SHG_FEEDBACK_TRANSFORM));
DRWCall *call = BLI_memblock_alloc(DST.vmempool->calls);
BLI_LINKS_APPEND(&shgroup->calls, call);
@@ -505,6 +502,7 @@ static void drw_shgroup_call_procedural_add_ex(DRWShadingGroup *shgroup,
call->inst_count = 0;
#ifdef USE_GPU_SELECT
call->select_id = DST.select_id;
+ call->inst_selectid = NULL;
#endif
}
@@ -539,7 +537,6 @@ void DRW_shgroup_call_object_add_ex(DRWShadingGroup *shgroup,
bool bypass_culling)
{
BLI_assert(geom != NULL);
- BLI_assert(ELEM(shgroup->type, DRW_SHG_NORMAL, DRW_SHG_FEEDBACK_TRANSFORM));
DRWCall *call = BLI_memblock_alloc(DST.vmempool->calls);
BLI_LINKS_APPEND(&shgroup->calls, call);
@@ -553,6 +550,7 @@ void DRW_shgroup_call_object_add_ex(DRWShadingGroup *shgroup,
call->inst_count = 0;
#ifdef USE_GPU_SELECT
call->select_id = DST.select_id;
+ call->inst_selectid = NULL;
#endif
}
@@ -563,7 +561,6 @@ void DRW_shgroup_call_object_add_with_callback(DRWShadingGroup *shgroup,
void *user_data)
{
BLI_assert(geom != NULL);
- BLI_assert(ELEM(shgroup->type, DRW_SHG_NORMAL, DRW_SHG_FEEDBACK_TRANSFORM));
DRWCall *call = BLI_memblock_alloc(DST.vmempool->calls);
BLI_LINKS_APPEND(&shgroup->calls, call);
@@ -577,6 +574,7 @@ void DRW_shgroup_call_object_add_with_callback(DRWShadingGroup *shgroup,
call->inst_count = 0;
#ifdef USE_GPU_SELECT
call->select_id = DST.select_id;
+ call->inst_selectid = NULL;
#endif
}
@@ -586,7 +584,6 @@ void DRW_shgroup_call_instances_add(DRWShadingGroup *shgroup,
uint count)
{
BLI_assert(geom != NULL);
- BLI_assert(ELEM(shgroup->type, DRW_SHG_NORMAL, DRW_SHG_FEEDBACK_TRANSFORM));
DRWCall *call = BLI_memblock_alloc(DST.vmempool->calls);
BLI_LINKS_APPEND(&shgroup->calls, call);
@@ -598,6 +595,31 @@ void DRW_shgroup_call_instances_add(DRWShadingGroup *shgroup,
call->inst_count = count;
#ifdef USE_GPU_SELECT
call->select_id = DST.select_id;
+ call->inst_selectid = NULL;
+#endif
+}
+
+void DRW_shgroup_call_instances_with_attribs_add(DRWShadingGroup *shgroup,
+ struct GPUBatch *geom,
+ float (*obmat)[4],
+ struct GPUBatch *inst_attributes)
+{
+ BLI_assert(geom != NULL);
+ BLI_assert(inst_attributes->verts[0] != NULL);
+
+ GPUVertBuf *buf_inst = inst_attributes->verts[0];
+
+ DRWCall *call = BLI_memblock_alloc(DST.vmempool->calls);
+ BLI_LINKS_APPEND(&shgroup->calls, call);
+
+ call->state = drw_call_state_create(shgroup, obmat, NULL);
+ call->batch = DRW_temp_batch_instance_request(DST.idatalist, buf_inst, geom);
+ call->vert_first = 0;
+ call->vert_count = 0; /* Auto from batch. */
+ call->inst_count = buf_inst->vertex_len;
+#ifdef USE_GPU_SELECT
+ call->select_id = DST.select_id;
+ call->inst_selectid = NULL;
#endif
}
@@ -741,30 +763,95 @@ void DRW_shgroup_call_sculpt_with_materials_add(DRWShadingGroup **shgroups,
drw_sculpt_generate_calls(&scd, use_vcol);
}
-void DRW_shgroup_call_dynamic_add_array(DRWShadingGroup *shgroup,
- const void *attr[],
- uint attr_len)
+static GPUVertFormat inst_select_format = {0};
+
+DRWCallBuffer *DRW_shgroup_call_buffer_add(DRWShadingGroup *shgroup,
+ struct GPUVertFormat *format,
+ GPUPrimType prim_type)
+{
+ BLI_assert(ELEM(prim_type, GPU_PRIM_POINTS, GPU_PRIM_LINES, GPU_PRIM_TRI_FAN));
+ BLI_assert(format != NULL);
+
+ DRWCall *call = BLI_memblock_alloc(DST.vmempool->calls);
+ BLI_LINKS_APPEND(&shgroup->calls, call);
+
+ call->state = drw_call_state_create(shgroup, NULL, NULL);
+ GPUVertBuf *buf = DRW_temp_buffer_request(DST.idatalist, format, &call->vert_count);
+ call->batch = DRW_temp_batch_request(DST.idatalist, buf, prim_type);
+ call->vert_first = 0;
+ call->vert_count = 0;
+ call->inst_count = 0;
+
+#ifdef USE_GPU_SELECT
+ if (G.f & G_FLAG_PICKSEL) {
+ /* Not actually used for rendering but alloced in one chunk. */
+ if (inst_select_format.attr_len == 0) {
+ GPU_vertformat_attr_add(&inst_select_format, "selectId", GPU_COMP_I32, 1, GPU_FETCH_INT);
+ }
+ call->inst_selectid = DRW_temp_buffer_request(
+ DST.idatalist, &inst_select_format, &call->vert_count);
+ }
+#endif
+ return (DRWCallBuffer *)call;
+}
+
+DRWCallBuffer *DRW_shgroup_call_buffer_instance_add(DRWShadingGroup *shgroup,
+ struct GPUVertFormat *format,
+ GPUBatch *geom)
{
+ BLI_assert(geom != NULL);
+ BLI_assert(format != NULL);
+
+ DRWCall *call = BLI_memblock_alloc(DST.vmempool->calls);
+ BLI_LINKS_APPEND(&shgroup->calls, call);
+
+ call->state = drw_call_state_create(shgroup, NULL, NULL);
+ GPUVertBuf *buf = DRW_temp_buffer_request(DST.idatalist, format, &call->inst_count);
+ call->batch = DRW_temp_batch_instance_request(DST.idatalist, buf, geom);
+ call->vert_first = 0;
+ call->vert_count = 0; /* Auto from batch. */
+ call->inst_count = 0;
+
#ifdef USE_GPU_SELECT
if (G.f & G_FLAG_PICKSEL) {
- if (shgroup->instance_count == shgroup->inst_selectid->vertex_len) {
- GPU_vertbuf_data_resize(shgroup->inst_selectid, shgroup->instance_count + 32);
+ /* Not actually used for rendering but alloced in one chunk. */
+ if (inst_select_format.attr_len == 0) {
+ GPU_vertformat_attr_add(&inst_select_format, "selectId", GPU_COMP_I32, 1, GPU_FETCH_INT);
}
- GPU_vertbuf_attr_set(shgroup->inst_selectid, 0, shgroup->instance_count, &DST.select_id);
+ call->inst_selectid = DRW_temp_buffer_request(
+ DST.idatalist, &inst_select_format, &call->inst_count);
}
#endif
+ return (DRWCallBuffer *)call;
+}
- BLI_assert(attr_len == shgroup->attrs_count);
+void DRW_buffer_add_entry_array(DRWCallBuffer *callbuf, const void *attr[], uint attr_len)
+{
+ DRWCall *call = (DRWCall *)callbuf;
+ const bool is_instance = call->batch->inst != NULL;
+ GPUVertBuf *buf = is_instance ? call->batch->inst : call->batch->verts[0];
+ uint count = is_instance ? call->inst_count++ : call->vert_count++;
+ const bool resize = (count == buf->vertex_alloc);
+
+ BLI_assert(attr_len == buf->format.attr_len);
UNUSED_VARS_NDEBUG(attr_len);
+ if (UNLIKELY(resize)) {
+ GPU_vertbuf_data_resize(buf, count + DRW_BUFFER_VERTS_CHUNK);
+ }
+
for (int i = 0; i < attr_len; ++i) {
- if (shgroup->instance_count == shgroup->instance_vbo->vertex_len) {
- GPU_vertbuf_data_resize(shgroup->instance_vbo, shgroup->instance_count + 32);
- }
- GPU_vertbuf_attr_set(shgroup->instance_vbo, i, shgroup->instance_count, attr[i]);
+ GPU_vertbuf_attr_set(buf, i, count, attr[i]);
}
- shgroup->instance_count += 1;
+#ifdef USE_GPU_SELECT
+ if (G.f & G_FLAG_PICKSEL) {
+ if (UNLIKELY(resize)) {
+ GPU_vertbuf_data_resize(call->inst_selectid, count + DRW_BUFFER_VERTS_CHUNK);
+ }
+ GPU_vertbuf_attr_set(call->inst_selectid, 0, count, &DST.select_id);
+ }
+#endif
}
/** \} */
@@ -775,17 +862,7 @@ void DRW_shgroup_call_dynamic_add_array(DRWShadingGroup *shgroup,
static void drw_shgroup_init(DRWShadingGroup *shgroup, GPUShader *shader)
{
- shgroup->instance_geom = NULL;
- shgroup->instance_vbo = NULL;
- shgroup->instance_count = 0;
shgroup->uniforms = NULL;
-#ifdef USE_GPU_SELECT
- shgroup->inst_selectid = NULL;
- shgroup->override_selectid = -1;
-#endif
-#ifndef NDEBUG
- shgroup->attrs_count = 0;
-#endif
int view_ubo_location = GPU_shader_get_uniform_block(shader, "viewBlock");
@@ -817,6 +894,7 @@ static void drw_shgroup_init(DRWShadingGroup *shgroup, GPUShader *shader)
/* Not supported. */
BLI_assert(GPU_shader_get_builtin_uniform(shader, GPU_UNIFORM_MODELVIEW_INV) == -1);
BLI_assert(GPU_shader_get_builtin_uniform(shader, GPU_UNIFORM_MODELVIEW) == -1);
+ BLI_assert(GPU_shader_get_builtin_uniform(shader, GPU_UNIFORM_NORMAL) == -1);
shgroup->model = GPU_shader_get_builtin_uniform(shader, GPU_UNIFORM_MODEL);
shgroup->modelinverse = GPU_shader_get_builtin_uniform(shader, GPU_UNIFORM_MODEL_INV);
@@ -825,9 +903,6 @@ static void drw_shgroup_init(DRWShadingGroup *shgroup, GPUShader *shader)
shgroup->objectinfo = GPU_shader_get_builtin_uniform(shader, GPU_UNIFORM_OBJECT_INFO);
shgroup->callid = GPU_shader_get_builtin_uniform(shader, GPU_UNIFORM_CALLID);
- /* We do not support normal matrix anymore. */
- BLI_assert(GPU_shader_get_builtin_uniform(shader, GPU_UNIFORM_NORMAL) == -1);
-
shgroup->matflag = 0;
if (shgroup->modelinverse > -1) {
shgroup->matflag |= DRW_CALL_MODELINVERSE;
@@ -843,113 +918,19 @@ static void drw_shgroup_init(DRWShadingGroup *shgroup, GPUShader *shader)
}
}
-static void drw_shgroup_instance_init(DRWShadingGroup *shgroup,
- GPUShader *shader,
- GPUBatch *batch,
- GPUVertFormat *format)
-{
- BLI_assert(shgroup->type == DRW_SHG_INSTANCE);
- BLI_assert(batch != NULL);
- BLI_assert(format != NULL);
-
- drw_shgroup_init(shgroup, shader);
-
- shgroup->instance_geom = batch;
-#ifndef NDEBUG
- shgroup->attrs_count = format->attr_len;
-#endif
-
- DRW_instancing_buffer_request(
- DST.idatalist, format, batch, shgroup, &shgroup->instance_geom, &shgroup->instance_vbo);
-
-#ifdef USE_GPU_SELECT
- if (G.f & G_FLAG_PICKSEL) {
- /* Not actually used for rendering but alloced in one chunk.
- * Plus we don't have to care about ownership. */
- static GPUVertFormat inst_select_format = {0};
- if (inst_select_format.attr_len == 0) {
- GPU_vertformat_attr_add(&inst_select_format, "selectId", GPU_COMP_I32, 1, GPU_FETCH_INT);
- }
- GPUBatch *batch_dummy; /* Not used */
- DRW_batching_buffer_request(DST.idatalist,
- &inst_select_format,
- GPU_PRIM_POINTS,
- shgroup,
- &batch_dummy,
- &shgroup->inst_selectid);
- }
-#endif
-}
-
-static void drw_shgroup_batching_init(DRWShadingGroup *shgroup,
- GPUShader *shader,
- GPUVertFormat *format)
-{
- drw_shgroup_init(shgroup, shader);
-
-#ifndef NDEBUG
- shgroup->attrs_count = (format != NULL) ? format->attr_len : 0;
-#endif
- BLI_assert(format != NULL);
-
- GPUPrimType type;
- switch (shgroup->type) {
- case DRW_SHG_POINT_BATCH:
- type = GPU_PRIM_POINTS;
- break;
- case DRW_SHG_LINE_BATCH:
- type = GPU_PRIM_LINES;
- break;
- case DRW_SHG_TRIANGLE_BATCH:
- type = GPU_PRIM_TRIS;
- break;
- default:
- type = GPU_PRIM_NONE;
- BLI_assert(0);
- break;
- }
-
- DRW_batching_buffer_request(
- DST.idatalist, format, type, shgroup, &shgroup->batch_geom, &shgroup->batch_vbo);
-
-#ifdef USE_GPU_SELECT
- if (G.f & G_FLAG_PICKSEL) {
- /* Not actually used for rendering but alloced in one chunk. */
- static GPUVertFormat inst_select_format = {0};
- if (inst_select_format.attr_len == 0) {
- GPU_vertformat_attr_add(&inst_select_format, "selectId", GPU_COMP_I32, 1, GPU_FETCH_INT);
- }
- GPUBatch *batch; /* Not used */
- DRW_batching_buffer_request(DST.idatalist,
- &inst_select_format,
- GPU_PRIM_POINTS,
- shgroup,
- &batch,
- &shgroup->inst_selectid);
- }
-#endif
-}
-
static DRWShadingGroup *drw_shgroup_create_ex(struct GPUShader *shader, DRWPass *pass)
{
DRWShadingGroup *shgroup = BLI_memblock_alloc(DST.vmempool->shgroups);
BLI_LINKS_APPEND(&pass->shgroups, shgroup);
- shgroup->type = DRW_SHG_NORMAL;
shgroup->shader = shader;
shgroup->state_extra = 0;
shgroup->state_extra_disable = ~0x0;
shgroup->stencil_mask = 0;
shgroup->calls.first = NULL;
shgroup->calls.last = NULL;
-#if 0 /* All the same in the union! */
- shgroup->batch_geom = NULL;
- shgroup->batch_vbo = NULL;
-
- shgroup->instance_geom = NULL;
- shgroup->instance_vbo = NULL;
-#endif
+ shgroup->tfeedback_target = NULL;
shgroup->pass_parent = pass;
return shgroup;
@@ -1034,7 +1015,6 @@ DRWShadingGroup *DRW_shgroup_material_create(struct GPUMaterial *material, DRWPa
drw_shgroup_init(shgroup, GPU_pass_shader_get(gpupass));
drw_shgroup_material_inputs(shgroup, material);
}
-
return shgroup;
}
@@ -1045,96 +1025,18 @@ DRWShadingGroup *DRW_shgroup_create(struct GPUShader *shader, DRWPass *pass)
return shgroup;
}
-DRWShadingGroup *DRW_shgroup_instance_create(struct GPUShader *shader,
- DRWPass *pass,
- GPUBatch *geom,
- GPUVertFormat *format)
-{
- DRWShadingGroup *shgroup = drw_shgroup_create_ex(shader, pass);
- shgroup->type = DRW_SHG_INSTANCE;
- shgroup->instance_geom = geom;
- drw_call_calc_orco(NULL, shgroup->instance_orcofac);
- drw_shgroup_instance_init(shgroup, shader, geom, format);
-
- return shgroup;
-}
-
-DRWShadingGroup *DRW_shgroup_point_batch_create(struct GPUShader *shader, DRWPass *pass)
-{
- DRW_shgroup_instance_format(g_pos_format, {{"pos", DRW_ATTR_FLOAT, 3}});
-
- DRWShadingGroup *shgroup = drw_shgroup_create_ex(shader, pass);
- shgroup->type = DRW_SHG_POINT_BATCH;
-
- drw_shgroup_batching_init(shgroup, shader, g_pos_format);
-
- return shgroup;
-}
-
-DRWShadingGroup *DRW_shgroup_line_batch_create_with_format(struct GPUShader *shader,
- DRWPass *pass,
- GPUVertFormat *format)
-{
- DRWShadingGroup *shgroup = drw_shgroup_create_ex(shader, pass);
- shgroup->type = DRW_SHG_LINE_BATCH;
-
- drw_shgroup_batching_init(shgroup, shader, format);
-
- return shgroup;
-}
-
-DRWShadingGroup *DRW_shgroup_line_batch_create(struct GPUShader *shader, DRWPass *pass)
-{
- DRW_shgroup_instance_format(g_pos_format, {{"pos", DRW_ATTR_FLOAT, 3}});
-
- return DRW_shgroup_line_batch_create_with_format(shader, pass, g_pos_format);
-}
-
DRWShadingGroup *DRW_shgroup_transform_feedback_create(struct GPUShader *shader,
DRWPass *pass,
GPUVertBuf *tf_target)
{
BLI_assert(tf_target != NULL);
DRWShadingGroup *shgroup = drw_shgroup_create_ex(shader, pass);
- shgroup->type = DRW_SHG_FEEDBACK_TRANSFORM;
-
drw_shgroup_init(shgroup, shader);
-
shgroup->tfeedback_target = tf_target;
-
return shgroup;
}
/**
- * Specify an external batch instead of adding each attribute one by one.
- */
-void DRW_shgroup_instance_batch(DRWShadingGroup *shgroup, struct GPUBatch *batch)
-{
- BLI_assert(shgroup->type == DRW_SHG_INSTANCE);
- BLI_assert(shgroup->instance_count == 0);
- /* You cannot use external instancing batch without a dummy format. */
- BLI_assert(shgroup->attrs_count != 0);
-
- shgroup->type = DRW_SHG_INSTANCE_EXTERNAL;
- drw_call_calc_orco(NULL, shgroup->instance_orcofac);
- /* PERF : This destroys the vaos cache so better check if it's necessary. */
- /* Note: This WILL break if batch->verts[0] is destroyed and reallocated
- * at the same address. Bindings/VAOs would remain obsolete. */
- // if (shgroup->instancing_geom->inst != batch->verts[0])
- /* XXX FIXME: THIS IS BROKEN BECAUSE OVEWRITTEN BY DRW_instance_buffer_finish(). */
- GPU_batch_instbuf_set(shgroup->instance_geom, batch->verts[0], false);
-
-#ifdef USE_GPU_SELECT
- shgroup->override_selectid = DST.select_id;
-#endif
-}
-
-uint DRW_shgroup_get_instance_count(const DRWShadingGroup *shgroup)
-{
- return shgroup->instance_count;
-}
-
-/**
* State is added to #Pass.state while drawing.
* Use to temporarily enable draw options.
*/
@@ -1156,26 +1058,12 @@ void DRW_shgroup_stencil_mask(DRWShadingGroup *shgroup, uint mask)
bool DRW_shgroup_is_empty(DRWShadingGroup *shgroup)
{
- switch (shgroup->type) {
- case DRW_SHG_NORMAL:
- case DRW_SHG_FEEDBACK_TRANSFORM:
- return shgroup->calls.first == NULL;
- case DRW_SHG_POINT_BATCH:
- case DRW_SHG_LINE_BATCH:
- case DRW_SHG_TRIANGLE_BATCH:
- case DRW_SHG_INSTANCE:
- case DRW_SHG_INSTANCE_EXTERNAL:
- return shgroup->instance_count == 0;
- }
- BLI_assert(!"Shading Group type not supported");
- return true;
+ return shgroup->calls.first == NULL;
}
DRWShadingGroup *DRW_shgroup_create_sub(DRWShadingGroup *shgroup)
{
/* Remove this assertion if needed but implement the other cases first! */
- BLI_assert(shgroup->type == DRW_SHG_NORMAL);
-
DRWShadingGroup *shgroup_new = BLI_memblock_alloc(DST.vmempool->shgroups);
*shgroup_new = *shgroup;
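[Annotation] Putting the two new entry points together, here is a hedged sketch of a dynamic point buffer on a shading group; shader and pass are assumed to exist, and the chunked resize (by DRW_BUFFER_VERTS_CHUNK) happens inside DRW_buffer_add_entry_array as shown above:

  static GPUVertFormat pos_format = {0};
  if (pos_format.attr_len == 0) {
    GPU_vertformat_attr_add(&pos_format, "pos", GPU_COMP_F32, 3, GPU_FETCH_FLOAT);
  }

  DRWShadingGroup *grp = DRW_shgroup_create(shader, pass);
  DRWCallBuffer *points = DRW_shgroup_call_buffer_add(grp, &pos_format, GPU_PRIM_POINTS);

  /* Append one vertex per entry; attr_len must match the format. */
  const float co[3] = {0.0f, 0.0f, 0.0f};
  const void *attrs[] = {co};
  DRW_buffer_add_entry_array(points, attrs, ARRAY_SIZE(attrs));
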
diff --git a/source/blender/draw/intern/draw_manager_exec.c b/source/blender/draw/intern/draw_manager_exec.c
index 0a4df550044..e1ef2e81b8d 100644
--- a/source/blender/draw/intern/draw_manager_exec.c
+++ b/source/blender/draw/intern/draw_manager_exec.c
@@ -30,6 +30,7 @@
#include "GPU_draw.h"
#include "GPU_extensions.h"
#include "intern/gpu_shader_private.h"
+#include "intern/gpu_primitive_private.h"
#ifdef USE_GPU_SELECT
# include "GPU_select.h"
@@ -852,8 +853,8 @@ static void draw_geometry_prepare(DRWShadingGroup *shgroup, DRWCall *call)
GPU_shader_uniform_vector(shgroup->shader, shgroup->objectinfo, 4, 1, (float *)unitmat);
}
if (shgroup->orcotexfac != -1) {
- GPU_shader_uniform_vector(
- shgroup->shader, shgroup->orcotexfac, 3, 2, (float *)shgroup->instance_orcofac);
+ float orcofacs[2][3] = {{0.0f, 0.0f, 0.0f}, {1.0f, 1.0f, 1.0f}};
+ GPU_shader_uniform_vector(shgroup->shader, shgroup->orcotexfac, 3, 2, (float *)orcofacs);
}
}
}
@@ -1040,6 +1041,49 @@ static void release_ubo_slots(bool with_persist)
}
}
+BLI_INLINE bool draw_select_do_call(DRWShadingGroup *shgroup, DRWCall *call)
+{
+#ifdef USE_GPU_SELECT
+ if ((G.f & G_FLAG_PICKSEL) == 0) {
+ return false;
+ }
+ if (call->inst_selectid != NULL) {
+ const bool is_instancing = (call->inst_count != 0);
+ uint start = 0;
+ uint count = 1;
+ uint tot = is_instancing ? call->inst_count : call->vert_count;
+    /* Hack: get VBO data without actually drawing. */
+ GPUVertBufRaw raw;
+ GPU_vertbuf_attr_get_raw_data(call->inst_selectid, 0, &raw);
+ int *select_id = GPU_vertbuf_raw_step(&raw);
+
+ /* Batching */
+ if (!is_instancing) {
+      /* FIXME: Meh, a bit nasty. */
+ if (call->batch->gl_prim_type == convert_prim_type_to_gl(GPU_PRIM_TRIS)) {
+ count = 3;
+ }
+ else if (call->batch->gl_prim_type == convert_prim_type_to_gl(GPU_PRIM_LINES)) {
+ count = 2;
+ }
+ }
+
+ while (start < tot) {
+ GPU_select_load_id(select_id[start]);
+ draw_geometry_execute(shgroup, call->batch, start, count, is_instancing);
+ start += count;
+ }
+ return true;
+ }
+ else {
+ GPU_select_load_id(call->select_id);
+ return false;
+ }
+#else
+ return false;
+#endif
+}
+
static void draw_shgroup(DRWShadingGroup *shgroup, DRWState pass_state)
{
BLI_assert(shgroup->shader);
@@ -1059,8 +1103,7 @@ static void draw_shgroup(DRWShadingGroup *shgroup, DRWState pass_state)
DST.shader = shgroup->shader;
}
- if ((pass_state & DRW_STATE_TRANS_FEEDBACK) != 0 &&
- (shgroup->type == DRW_SHG_FEEDBACK_TRANSFORM)) {
+ if ((pass_state & DRW_STATE_TRANS_FEEDBACK) != 0 && (shgroup->tfeedback_target != NULL)) {
use_tfeedback = GPU_shader_transform_feedback_enable(shgroup->shader,
shgroup->tfeedback_target->vbo_id);
}
@@ -1140,102 +1183,10 @@ static void draw_shgroup(DRWShadingGroup *shgroup, DRWState pass_state)
}
}
-#ifdef USE_GPU_SELECT
-# define GPU_SELECT_LOAD_IF_PICKSEL(_select_id) \
- if (G.f & G_FLAG_PICKSEL) { \
- GPU_select_load_id(_select_id); \
- } \
- ((void)0)
-
-# define GPU_SELECT_LOAD_IF_PICKSEL_CALL(_call) \
- if ((G.f & G_FLAG_PICKSEL) && (_call)) { \
- GPU_select_load_id((_call)->select_id); \
- } \
- ((void)0)
-
-# define GPU_SELECT_LOAD_IF_PICKSEL_LIST(_shgroup, _start, _count) \
- _start = 0; \
- _count = _shgroup->instance_count; \
- int *select_id = NULL; \
- if (G.f & G_FLAG_PICKSEL) { \
- if (_shgroup->override_selectid == -1) { \
- /* Hack : get vbo data without actually drawing. */ \
- GPUVertBufRaw raw; \
- GPU_vertbuf_attr_get_raw_data(_shgroup->inst_selectid, 0, &raw); \
- select_id = GPU_vertbuf_raw_step(&raw); \
- switch (_shgroup->type) { \
- case DRW_SHG_TRIANGLE_BATCH: \
- _count = 3; \
- break; \
- case DRW_SHG_LINE_BATCH: \
- _count = 2; \
- break; \
- default: \
- _count = 1; \
- break; \
- } \
- } \
- else { \
- GPU_select_load_id(_shgroup->override_selectid); \
- } \
- } \
- while (_start < _shgroup->instance_count) { \
- if (select_id) { \
- GPU_select_load_id(select_id[_start]); \
- }
-
-# define GPU_SELECT_LOAD_IF_PICKSEL_LIST_END(_start, _count) \
- _start += _count; \
- } \
- ((void)0)
-
-#else
-# define GPU_SELECT_LOAD_IF_PICKSEL(select_id)
-# define GPU_SELECT_LOAD_IF_PICKSEL_CALL(call)
-# define GPU_SELECT_LOAD_IF_PICKSEL_LIST_END(start, count) ((void)0)
-# define GPU_SELECT_LOAD_IF_PICKSEL_LIST(_shgroup, _start, _count) \
- _start = 0; \
- _count = _shgroup->instance_count;
-
-#endif
-
BLI_assert(ubo_bindings_validate(shgroup));
/* Rendering Calls */
- if (!ELEM(shgroup->type, DRW_SHG_NORMAL, DRW_SHG_FEEDBACK_TRANSFORM)) {
- /* Replacing multiple calls with only one */
- if (ELEM(shgroup->type, DRW_SHG_INSTANCE, DRW_SHG_INSTANCE_EXTERNAL)) {
- if (shgroup->type == DRW_SHG_INSTANCE_EXTERNAL) {
- if (shgroup->instance_geom != NULL) {
- GPU_SELECT_LOAD_IF_PICKSEL(shgroup->override_selectid);
- draw_geometry_prepare(shgroup, NULL);
- draw_geometry_execute(shgroup, shgroup->instance_geom, 0, 0, true);
- }
- }
- else {
- if (shgroup->instance_count > 0) {
- uint count, start;
- draw_geometry_prepare(shgroup, NULL);
- GPU_SELECT_LOAD_IF_PICKSEL_LIST (shgroup, start, count) {
- draw_geometry_execute(shgroup, shgroup->instance_geom, start, count, true);
- }
- GPU_SELECT_LOAD_IF_PICKSEL_LIST_END(start, count);
- }
- }
- }
- else { /* DRW_SHG_***_BATCH */
- /* Some dynamic batch can have no geom (no call to aggregate) */
- if (shgroup->instance_count > 0) {
- uint count, start;
- draw_geometry_prepare(shgroup, NULL);
- GPU_SELECT_LOAD_IF_PICKSEL_LIST (shgroup, start, count) {
- draw_geometry_execute(shgroup, shgroup->batch_geom, start, count, false);
- }
- GPU_SELECT_LOAD_IF_PICKSEL_LIST_END(start, count);
- }
- }
- }
- else {
+ {
bool prev_neg_scale = false;
int callid = 0;
for (DRWCall *call = shgroup->calls.first; call; call = call->next) {
@@ -1262,9 +1213,12 @@ static void draw_shgroup(DRWShadingGroup *shgroup, DRWState pass_state)
prev_neg_scale = neg_scale;
}
- GPU_SELECT_LOAD_IF_PICKSEL_CALL(call);
draw_geometry_prepare(shgroup, call);
+ if (draw_select_do_call(shgroup, call)) {
+ continue;
+ }
+
      /* TODO: revisit when DRW_SHG_INSTANCE and the like are gone. */
if (call->inst_count == 0) {
draw_geometry_execute(shgroup, call->batch, call->vert_first, call->vert_count, false);