Welcome to mirror list, hosted at ThFree Co, Russian Federation.

git.blender.org/blender.git - Unnamed repository; edit this file 'description' to name the repository.
summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorClément Foucault <foucault.clem@gmail.com>2019-05-13 19:28:36 +0300
committerClément Foucault <foucault.clem@gmail.com>2019-05-14 11:57:03 +0300
commitbe5192bbb9981a8b82ae47e695f084e23c96f519 (patch)
tree1e3c58d7101452936b82f8a1528b982e9df99e0c /source/blender/draw/intern
parent8bc8a62c57f91326ab3f8850785dce5452b5d703 (diff)
Cleanup: DRW: Remove unneeded _add suffix from DRW_shgroup_call_add
Diffstat (limited to 'source/blender/draw/intern')
-rw-r--r--source/blender/draw/intern/DRW_render.h81
-rw-r--r--source/blender/draw/intern/draw_anim_viz.c4
-rw-r--r--source/blender/draw/intern/draw_common.c56
-rw-r--r--source/blender/draw/intern/draw_hair.c4
-rw-r--r--source/blender/draw/intern/draw_manager_data.c78
5 files changed, 109 insertions, 114 deletions
diff --git a/source/blender/draw/intern/DRW_render.h b/source/blender/draw/intern/DRW_render.h
index 401ed50c1dc..722f9117f7d 100644
--- a/source/blender/draw/intern/DRW_render.h
+++ b/source/blender/draw/intern/DRW_render.h
@@ -378,11 +378,9 @@ struct GPUVertFormat *DRW_shgroup_instance_format_array(const DRWInstanceAttrFor
} \
} while (0)
-/* TODO(fclem): Remove the _create suffix. */
DRWShadingGroup *DRW_shgroup_create(struct GPUShader *shader, DRWPass *pass);
DRWShadingGroup *DRW_shgroup_create_sub(DRWShadingGroup *shgroup);
DRWShadingGroup *DRW_shgroup_material_create(struct GPUMaterial *material, DRWPass *pass);
-
DRWShadingGroup *DRW_shgroup_transform_feedback_create(struct GPUShader *shader,
DRWPass *pass,
struct GPUVertBuf *tf_target);
@@ -391,48 +389,47 @@ DRWShadingGroup *DRW_shgroup_transform_feedback_create(struct GPUShader *shader,
typedef bool(DRWCallVisibilityFn)(bool vis_in, void *user_data);
/* TODO(fclem): Remove the _add suffix. */
-void DRW_shgroup_call_add(DRWShadingGroup *sh, struct GPUBatch *geom, float (*obmat)[4]);
-void DRW_shgroup_call_range_add(
+void DRW_shgroup_call(DRWShadingGroup *sh, struct GPUBatch *geom, float (*obmat)[4]);
+void DRW_shgroup_call_range(
DRWShadingGroup *sh, struct GPUBatch *geom, float (*obmat)[4], uint v_sta, uint v_ct);
-void DRW_shgroup_call_procedural_points_add(DRWShadingGroup *sh, uint point_ct, float (*obmat)[4]);
-void DRW_shgroup_call_procedural_lines_add(DRWShadingGroup *sh, uint line_ct, float (*obmat)[4]);
-void DRW_shgroup_call_procedural_triangles_add(DRWShadingGroup *sh,
- uint tri_ct,
- float (*obmat)[4]);
-
-void DRW_shgroup_call_object_add_ex(DRWShadingGroup *shgroup,
- struct GPUBatch *geom,
- struct Object *ob,
- bool bypass_culling);
-#define DRW_shgroup_call_object_add(shgroup, geom, ob) \
- DRW_shgroup_call_object_add_ex(shgroup, geom, ob, false)
-#define DRW_shgroup_call_object_add_no_cull(shgroup, geom, ob) \
- DRW_shgroup_call_object_add_ex(shgroup, geom, ob, true)
-void DRW_shgroup_call_object_add_with_callback(DRWShadingGroup *shgroup,
- struct GPUBatch *geom,
- struct Object *ob,
- DRWCallVisibilityFn *callback,
- void *user_data);
-
-void DRW_shgroup_call_instances_add(DRWShadingGroup *shgroup,
- struct GPUBatch *geom,
- float (*obmat)[4],
- uint count);
-void DRW_shgroup_call_instances_with_attribs_add(DRWShadingGroup *shgroup,
- struct GPUBatch *geom,
- float (*obmat)[4],
- struct GPUBatch *inst_attributes);
-
-void DRW_shgroup_call_sculpt_add(DRWShadingGroup *sh, Object *ob, bool wire, bool mask, bool vcol);
-void DRW_shgroup_call_sculpt_with_materials_add(DRWShadingGroup **sh, Object *ob, bool vcol);
-
-DRWCallBuffer *DRW_shgroup_call_buffer_add(DRWShadingGroup *shading_group,
- struct GPUVertFormat *format,
- GPUPrimType prim_type);
-DRWCallBuffer *DRW_shgroup_call_buffer_instance_add(DRWShadingGroup *shading_group,
- struct GPUVertFormat *format,
- struct GPUBatch *geom);
+void DRW_shgroup_call_procedural_points(DRWShadingGroup *sh, uint point_ct, float (*obmat)[4]);
+void DRW_shgroup_call_procedural_lines(DRWShadingGroup *sh, uint line_ct, float (*obmat)[4]);
+void DRW_shgroup_call_procedural_triangles(DRWShadingGroup *sh, uint tri_ct, float (*obmat)[4]);
+
+void DRW_shgroup_call_object_ex(DRWShadingGroup *shgroup,
+ struct GPUBatch *geom,
+ struct Object *ob,
+ bool bypass_culling);
+#define DRW_shgroup_call_object(shgroup, geom, ob) \
+ DRW_shgroup_call_object_ex(shgroup, geom, ob, false)
+#define DRW_shgroup_call_object_no_cull(shgroup, geom, ob) \
+ DRW_shgroup_call_object_ex(shgroup, geom, ob, true)
+
+void DRW_shgroup_call_object_with_callback(DRWShadingGroup *shgroup,
+ struct GPUBatch *geom,
+ struct Object *ob,
+ DRWCallVisibilityFn *callback,
+ void *user_data);
+
+void DRW_shgroup_call_instances(DRWShadingGroup *shgroup,
+ struct GPUBatch *geom,
+ float (*obmat)[4],
+ uint count);
+void DRW_shgroup_call_instances_with_attribs(DRWShadingGroup *shgroup,
+ struct GPUBatch *geom,
+ float (*obmat)[4],
+ struct GPUBatch *inst_attributes);
+
+void DRW_shgroup_call_sculpt(DRWShadingGroup *sh, Object *ob, bool wire, bool mask, bool vcol);
+void DRW_shgroup_call_sculpt_with_materials(DRWShadingGroup **sh, Object *ob, bool vcol);
+
+DRWCallBuffer *DRW_shgroup_call_buffer(DRWShadingGroup *shading_group,
+ struct GPUVertFormat *format,
+ GPUPrimType prim_type);
+DRWCallBuffer *DRW_shgroup_call_buffer_instance(DRWShadingGroup *shading_group,
+ struct GPUVertFormat *format,
+ struct GPUBatch *geom);
void DRW_buffer_add_entry_array(DRWCallBuffer *buffer, const void *attr[], uint attr_len);
diff --git a/source/blender/draw/intern/draw_anim_viz.c b/source/blender/draw/intern/draw_anim_viz.c
index a6026c9da3a..31d7a45ede9 100644
--- a/source/blender/draw/intern/draw_anim_viz.c
+++ b/source/blender/draw/intern/draw_anim_viz.c
@@ -215,7 +215,7 @@ static void MPATH_cache_motion_path(MPATH_PassList *psl,
DRW_shgroup_uniform_vec3(shgrp, "customColor", mpath->color, 1);
}
/* Only draw the required range. */
- DRW_shgroup_call_range_add(shgrp, mpath_batch_line_get(mpath), NULL, start_index, len);
+ DRW_shgroup_call_range(shgrp, mpath_batch_line_get(mpath), NULL, start_index, len);
}
/* Draw points. */
@@ -231,7 +231,7 @@ static void MPATH_cache_motion_path(MPATH_PassList *psl,
DRW_shgroup_uniform_vec3(shgrp, "customColor", mpath->color, 1);
}
/* Only draw the required range. */
- DRW_shgroup_call_range_add(shgrp, mpath_batch_points_get(mpath), NULL, start_index, len);
+ DRW_shgroup_call_range(shgrp, mpath_batch_points_get(mpath), NULL, start_index, len);
/* Draw frame numbers at each framestep value */
bool show_kf_no = (avs->path_viewflag & MOTIONPATH_VIEW_KFNOS) != 0;
diff --git a/source/blender/draw/intern/draw_common.c b/source/blender/draw/intern/draw_common.c
index f2351885962..51f097e79d8 100644
--- a/source/blender/draw/intern/draw_common.c
+++ b/source/blender/draw/intern/draw_common.c
@@ -325,7 +325,7 @@ struct DRWCallBuffer *buffer_dynlines_flat_color(DRWPass *pass, eGPUShaderConfig
if (sh_cfg == GPU_SHADER_CFG_CLIPPED) {
DRW_shgroup_world_clip_planes_from_rv3d(grp, DRW_context_state_get()->rv3d);
}
- return DRW_shgroup_call_buffer_add(grp, g_formats.pos_color, GPU_PRIM_LINES);
+ return DRW_shgroup_call_buffer(grp, g_formats.pos_color, GPU_PRIM_LINES);
}
struct DRWCallBuffer *buffer_dynlines_dashed_uniform_color(DRWPass *pass,
@@ -349,14 +349,14 @@ struct DRWCallBuffer *buffer_dynlines_dashed_uniform_color(DRWPass *pass,
if (sh_cfg == GPU_SHADER_CFG_CLIPPED) {
DRW_shgroup_world_clip_planes_from_rv3d(grp, DRW_context_state_get()->rv3d);
}
- return DRW_shgroup_call_buffer_add(grp, g_formats.pos, GPU_PRIM_LINES);
+ return DRW_shgroup_call_buffer(grp, g_formats.pos, GPU_PRIM_LINES);
}
struct DRWCallBuffer *buffer_dynpoints_uniform_color(DRWShadingGroup *grp)
{
DRW_shgroup_instance_format(g_formats.pos, {{"pos", DRW_ATTR_FLOAT, 3}});
- return DRW_shgroup_call_buffer_add(grp, g_formats.pos, GPU_PRIM_POINTS);
+ return DRW_shgroup_call_buffer(grp, g_formats.pos, GPU_PRIM_POINTS);
}
struct DRWCallBuffer *buffer_groundlines_uniform_color(DRWPass *pass,
@@ -372,7 +372,7 @@ struct DRWCallBuffer *buffer_groundlines_uniform_color(DRWPass *pass,
if (sh_cfg == GPU_SHADER_CFG_CLIPPED) {
DRW_shgroup_world_clip_planes_from_rv3d(grp, DRW_context_state_get()->rv3d);
}
- return DRW_shgroup_call_buffer_add(grp, g_formats.pos, GPU_PRIM_POINTS);
+ return DRW_shgroup_call_buffer(grp, g_formats.pos, GPU_PRIM_POINTS);
}
struct DRWCallBuffer *buffer_groundpoints_uniform_color(DRWPass *pass,
@@ -389,7 +389,7 @@ struct DRWCallBuffer *buffer_groundpoints_uniform_color(DRWPass *pass,
if (sh_cfg == GPU_SHADER_CFG_CLIPPED) {
DRW_shgroup_world_clip_planes_from_rv3d(grp, DRW_context_state_get()->rv3d);
}
- return DRW_shgroup_call_buffer_add(grp, g_formats.pos, GPU_PRIM_POINTS);
+ return DRW_shgroup_call_buffer(grp, g_formats.pos, GPU_PRIM_POINTS);
}
struct DRWCallBuffer *buffer_instance_screenspace(DRWPass *pass,
@@ -413,7 +413,7 @@ struct DRWCallBuffer *buffer_instance_screenspace(DRWPass *pass,
if (sh_cfg == GPU_SHADER_CFG_CLIPPED) {
DRW_shgroup_world_clip_planes_from_rv3d(grp, DRW_context_state_get()->rv3d);
}
- return DRW_shgroup_call_buffer_instance_add(grp, g_formats.instance_screenspace, geom);
+ return DRW_shgroup_call_buffer_instance(grp, g_formats.instance_screenspace, geom);
}
struct DRWCallBuffer *buffer_instance_solid(DRWPass *pass, struct GPUBatch *geom)
@@ -431,7 +431,7 @@ struct DRWCallBuffer *buffer_instance_solid(DRWPass *pass, struct GPUBatch *geom
DRWShadingGroup *grp = DRW_shgroup_create(sh, pass);
DRW_shgroup_uniform_vec3(grp, "light", light, 1);
- return DRW_shgroup_call_buffer_instance_add(grp, g_formats.instance_color, geom);
+ return DRW_shgroup_call_buffer_instance(grp, g_formats.instance_color, geom);
}
struct DRWCallBuffer *buffer_instance_wire(DRWPass *pass, struct GPUBatch *geom)
@@ -446,7 +446,7 @@ struct DRWCallBuffer *buffer_instance_wire(DRWPass *pass, struct GPUBatch *geom)
DRWShadingGroup *grp = DRW_shgroup_create(sh, pass);
- return DRW_shgroup_call_buffer_instance_add(grp, g_formats.instance_color, geom);
+ return DRW_shgroup_call_buffer_instance(grp, g_formats.instance_color, geom);
}
struct DRWCallBuffer *buffer_instance_screen_aligned(DRWPass *pass,
@@ -468,7 +468,7 @@ struct DRWCallBuffer *buffer_instance_screen_aligned(DRWPass *pass,
if (sh_cfg == GPU_SHADER_CFG_CLIPPED) {
DRW_shgroup_world_clip_planes_from_rv3d(grp, DRW_context_state_get()->rv3d);
}
- return DRW_shgroup_call_buffer_instance_add(grp, g_formats.instance_screen_aligned, geom);
+ return DRW_shgroup_call_buffer_instance(grp, g_formats.instance_screen_aligned, geom);
}
struct DRWCallBuffer *buffer_instance_scaled(DRWPass *pass,
@@ -489,7 +489,7 @@ struct DRWCallBuffer *buffer_instance_scaled(DRWPass *pass,
if (sh_cfg == GPU_SHADER_CFG_CLIPPED) {
DRW_shgroup_world_clip_planes_from_rv3d(grp, DRW_context_state_get()->rv3d);
}
- return DRW_shgroup_call_buffer_instance_add(grp, g_formats.instance_scaled, geom);
+ return DRW_shgroup_call_buffer_instance(grp, g_formats.instance_scaled, geom);
}
struct DRWCallBuffer *buffer_instance(DRWPass *pass,
@@ -511,7 +511,7 @@ struct DRWCallBuffer *buffer_instance(DRWPass *pass,
if (sh_cfg == GPU_SHADER_CFG_CLIPPED) {
DRW_shgroup_world_clip_planes_from_rv3d(grp, DRW_context_state_get()->rv3d);
}
- return DRW_shgroup_call_buffer_instance_add(grp, g_formats.instance_sized, geom);
+ return DRW_shgroup_call_buffer_instance(grp, g_formats.instance_sized, geom);
}
struct DRWCallBuffer *buffer_instance_alpha(DRWShadingGroup *grp, struct GPUBatch *geom)
@@ -523,7 +523,7 @@ struct DRWCallBuffer *buffer_instance_alpha(DRWShadingGroup *grp, struct GPUBatc
{"InstanceModelMatrix", DRW_ATTR_FLOAT, 16},
});
- return DRW_shgroup_call_buffer_instance_add(grp, g_formats.instance_sized, geom);
+ return DRW_shgroup_call_buffer_instance(grp, g_formats.instance_sized, geom);
}
struct DRWCallBuffer *buffer_instance_empty_axes(DRWPass *pass,
@@ -552,7 +552,7 @@ struct DRWCallBuffer *buffer_instance_empty_axes(DRWPass *pass,
if (sh_cfg == GPU_SHADER_CFG_CLIPPED) {
DRW_shgroup_world_clip_planes_from_rv3d(grp, DRW_context_state_get()->rv3d);
}
- return DRW_shgroup_call_buffer_instance_add(grp, g_formats.instance_sized, geom);
+ return DRW_shgroup_call_buffer_instance(grp, g_formats.instance_sized, geom);
}
struct DRWCallBuffer *buffer_instance_outline(DRWPass *pass, struct GPUBatch *geom, int *baseid)
@@ -570,7 +570,7 @@ struct DRWCallBuffer *buffer_instance_outline(DRWPass *pass, struct GPUBatch *ge
DRWShadingGroup *grp = DRW_shgroup_create(sh_inst, pass);
DRW_shgroup_uniform_int(grp, "baseId", baseid, 1);
- return DRW_shgroup_call_buffer_instance_add(grp, g_formats.instance_outline, geom);
+ return DRW_shgroup_call_buffer_instance(grp, g_formats.instance_outline, geom);
}
struct DRWCallBuffer *buffer_camera_instance(DRWPass *pass,
@@ -592,7 +592,7 @@ struct DRWCallBuffer *buffer_camera_instance(DRWPass *pass,
if (sh_cfg == GPU_SHADER_CFG_CLIPPED) {
DRW_shgroup_world_clip_planes_from_rv3d(grp, DRW_context_state_get()->rv3d);
}
- return DRW_shgroup_call_buffer_instance_add(grp, g_formats.instance_camera, geom);
+ return DRW_shgroup_call_buffer_instance(grp, g_formats.instance_camera, geom);
}
struct DRWCallBuffer *buffer_distance_lines_instance(DRWPass *pass,
@@ -616,7 +616,7 @@ struct DRWCallBuffer *buffer_distance_lines_instance(DRWPass *pass,
if (sh_cfg == GPU_SHADER_CFG_CLIPPED) {
DRW_shgroup_world_clip_planes_from_rv3d(grp, DRW_context_state_get()->rv3d);
}
- return DRW_shgroup_call_buffer_instance_add(grp, g_formats.instance_distance_lines, geom);
+ return DRW_shgroup_call_buffer_instance(grp, g_formats.instance_distance_lines, geom);
}
struct DRWCallBuffer *buffer_spot_instance(DRWPass *pass,
@@ -641,7 +641,7 @@ struct DRWCallBuffer *buffer_spot_instance(DRWPass *pass,
if (sh_cfg == GPU_SHADER_CFG_CLIPPED) {
DRW_shgroup_world_clip_planes_from_rv3d(grp, DRW_context_state_get()->rv3d);
}
- return DRW_shgroup_call_buffer_instance_add(grp, g_formats.instance_spot, geom);
+ return DRW_shgroup_call_buffer_instance(grp, g_formats.instance_spot, geom);
}
struct DRWCallBuffer *buffer_instance_bone_axes(DRWPass *pass, eGPUShaderConfig sh_cfg)
@@ -667,7 +667,7 @@ struct DRWCallBuffer *buffer_instance_bone_axes(DRWPass *pass, eGPUShaderConfig
if (sh_cfg == GPU_SHADER_CFG_CLIPPED) {
DRW_shgroup_world_clip_planes_from_rv3d(grp, DRW_context_state_get()->rv3d);
}
- return DRW_shgroup_call_buffer_instance_add(
+ return DRW_shgroup_call_buffer_instance(
grp, g_formats.instance_color, DRW_cache_bone_arrows_get());
}
@@ -697,7 +697,7 @@ struct DRWCallBuffer *buffer_instance_bone_envelope_outline(DRWPass *pass, eGPUS
if (sh_cfg == GPU_SHADER_CFG_CLIPPED) {
DRW_shgroup_world_clip_planes_from_rv3d(grp, DRW_context_state_get()->rv3d);
}
- return DRW_shgroup_call_buffer_instance_add(
+ return DRW_shgroup_call_buffer_instance(
grp, g_formats.instance_bone_envelope_outline, DRW_cache_bone_envelope_outline_get());
}
@@ -726,7 +726,7 @@ struct DRWCallBuffer *buffer_instance_bone_envelope_distance(DRWPass *pass,
if (sh_cfg == GPU_SHADER_CFG_CLIPPED) {
DRW_shgroup_world_clip_planes_from_rv3d(grp, DRW_context_state_get()->rv3d);
}
- return DRW_shgroup_call_buffer_instance_add(
+ return DRW_shgroup_call_buffer_instance(
grp, g_formats.instance_bone_envelope_distance, DRW_cache_bone_envelope_solid_get());
}
@@ -762,7 +762,7 @@ struct DRWCallBuffer *buffer_instance_bone_envelope_solid(DRWPass *pass,
if (sh_cfg == GPU_SHADER_CFG_CLIPPED) {
DRW_shgroup_world_clip_planes_from_rv3d(grp, DRW_context_state_get()->rv3d);
}
- return DRW_shgroup_call_buffer_instance_add(
+ return DRW_shgroup_call_buffer_instance(
grp, g_formats.instance_bone_envelope, DRW_cache_bone_envelope_solid_get());
}
@@ -790,7 +790,7 @@ struct DRWCallBuffer *buffer_instance_mball_handles(DRWPass *pass, eGPUShaderCon
if (sh_cfg == GPU_SHADER_CFG_CLIPPED) {
DRW_shgroup_world_clip_planes_from_rv3d(grp, DRW_context_state_get()->rv3d);
}
- return DRW_shgroup_call_buffer_instance_add(
+ return DRW_shgroup_call_buffer_instance(
grp, g_formats.instance_mball_handles, DRW_cache_screenspace_circle_get());
}
@@ -827,7 +827,7 @@ struct DRWCallBuffer *buffer_instance_bone_shape_outline(DRWPass *pass,
if (sh_cfg == GPU_SHADER_CFG_CLIPPED) {
DRW_shgroup_world_clip_planes_from_rv3d(grp, DRW_context_state_get()->rv3d);
}
- return DRW_shgroup_call_buffer_instance_add(grp, g_formats.instance_bone_outline, geom);
+ return DRW_shgroup_call_buffer_instance(grp, g_formats.instance_bone_outline, geom);
}
struct DRWCallBuffer *buffer_instance_bone_shape_solid(DRWPass *pass,
@@ -860,7 +860,7 @@ struct DRWCallBuffer *buffer_instance_bone_shape_solid(DRWPass *pass,
if (sh_cfg == GPU_SHADER_CFG_CLIPPED) {
DRW_shgroup_world_clip_planes_from_rv3d(grp, DRW_context_state_get()->rv3d);
}
- return DRW_shgroup_call_buffer_instance_add(grp, g_formats.instance_bone, geom);
+ return DRW_shgroup_call_buffer_instance(grp, g_formats.instance_bone, geom);
}
struct DRWCallBuffer *buffer_instance_bone_sphere_solid(DRWPass *pass,
@@ -890,7 +890,7 @@ struct DRWCallBuffer *buffer_instance_bone_sphere_solid(DRWPass *pass,
if (sh_cfg == GPU_SHADER_CFG_CLIPPED) {
DRW_shgroup_world_clip_planes_from_rv3d(grp, DRW_context_state_get()->rv3d);
}
- return DRW_shgroup_call_buffer_instance_add(
+ return DRW_shgroup_call_buffer_instance(
grp, g_formats.instance_bone, DRW_cache_bone_point_get());
}
@@ -918,7 +918,7 @@ struct DRWCallBuffer *buffer_instance_bone_sphere_outline(DRWPass *pass, eGPUSha
if (sh_cfg == GPU_SHADER_CFG_CLIPPED) {
DRW_shgroup_world_clip_planes_from_rv3d(grp, DRW_context_state_get()->rv3d);
}
- return DRW_shgroup_call_buffer_instance_add(
+ return DRW_shgroup_call_buffer_instance(
grp, g_formats.instance_bone_outline, DRW_cache_bone_point_wire_outline_get());
}
@@ -951,7 +951,7 @@ struct DRWCallBuffer *buffer_instance_bone_stick(DRWPass *pass, eGPUShaderConfig
if (sh_cfg == GPU_SHADER_CFG_CLIPPED) {
DRW_shgroup_world_clip_planes_from_rv3d(grp, DRW_context_state_get()->rv3d);
}
- return DRW_shgroup_call_buffer_instance_add(
+ return DRW_shgroup_call_buffer_instance(
grp, g_formats.instance_bone_stick, DRW_cache_bone_stick_get());
}
@@ -978,7 +978,7 @@ struct DRWCallBuffer *buffer_instance_bone_dof(struct DRWPass *pass,
DRW_shgroup_state_enable(grp, DRW_STATE_BLEND);
DRW_shgroup_state_disable(grp, DRW_STATE_CULL_FRONT);
}
- return DRW_shgroup_call_buffer_instance_add(grp, g_formats.instance_bone_dof, geom);
+ return DRW_shgroup_call_buffer_instance(grp, g_formats.instance_bone_dof, geom);
}
struct GPUShader *mpath_line_shader_get(void)
diff --git a/source/blender/draw/intern/draw_hair.c b/source/blender/draw/intern/draw_hair.c
index cb83265195a..6dee25fabae 100644
--- a/source/blender/draw/intern/draw_hair.c
+++ b/source/blender/draw/intern/draw_hair.c
@@ -195,7 +195,7 @@ static DRWShadingGroup *drw_shgroup_create_hair_procedural_ex(Object *object,
shgrp, "hairCloseTip", (part->shape_flag & PART_SHAPE_CLOSE_TIP) != 0);
/* TODO(fclem): Until we have a better way to cull the hair and render with orco, bypass culling
* test. */
- DRW_shgroup_call_object_add_no_cull(
+ DRW_shgroup_call_object_no_cull(
shgrp, hair_cache->final[subdiv].proc_hairs[thickness_res - 1], object);
/* Transform Feedback subdiv. */
@@ -224,7 +224,7 @@ static DRWShadingGroup *drw_shgroup_create_hair_procedural_ex(Object *object,
DRW_shgroup_uniform_texture(tf_shgrp, "hairStrandBuffer", hair_cache->strand_tex);
DRW_shgroup_uniform_texture(tf_shgrp, "hairStrandSegBuffer", hair_cache->strand_seg_tex);
DRW_shgroup_uniform_int(tf_shgrp, "hairStrandsRes", &hair_cache->final[subdiv].strands_res, 1);
- DRW_shgroup_call_procedural_points_add(tf_shgrp, final_points_len, NULL);
+ DRW_shgroup_call_procedural_points(tf_shgrp, final_points_len, NULL);
}
return shgrp;
diff --git a/source/blender/draw/intern/draw_manager_data.c b/source/blender/draw/intern/draw_manager_data.c
index 151ab469e59..1d44ac069f4 100644
--- a/source/blender/draw/intern/draw_manager_data.c
+++ b/source/blender/draw/intern/draw_manager_data.c
@@ -448,7 +448,7 @@ static DRWCallState *drw_call_state_object(DRWShadingGroup *shgroup, float (*obm
return DST.ob_state;
}
-void DRW_shgroup_call_add(DRWShadingGroup *shgroup, GPUBatch *geom, float (*obmat)[4])
+void DRW_shgroup_call(DRWShadingGroup *shgroup, GPUBatch *geom, float (*obmat)[4])
{
BLI_assert(geom != NULL);
@@ -466,7 +466,7 @@ void DRW_shgroup_call_add(DRWShadingGroup *shgroup, GPUBatch *geom, float (*obma
#endif
}
-void DRW_shgroup_call_range_add(
+void DRW_shgroup_call_range(
DRWShadingGroup *shgroup, GPUBatch *geom, float (*obmat)[4], uint v_sta, uint v_count)
{
BLI_assert(geom != NULL);
@@ -506,35 +506,35 @@ static void drw_shgroup_call_procedural_add_ex(DRWShadingGroup *shgroup,
#endif
}
-void DRW_shgroup_call_procedural_points_add(DRWShadingGroup *shgroup,
- uint point_len,
- float (*obmat)[4])
+void DRW_shgroup_call_procedural_points(DRWShadingGroup *shgroup,
+ uint point_len,
+ float (*obmat)[4])
{
struct GPUBatch *geom = drw_cache_procedural_points_get();
drw_shgroup_call_procedural_add_ex(shgroup, geom, point_len, obmat);
}
-void DRW_shgroup_call_procedural_lines_add(DRWShadingGroup *shgroup,
- uint line_count,
- float (*obmat)[4])
+void DRW_shgroup_call_procedural_lines(DRWShadingGroup *shgroup,
+ uint line_count,
+ float (*obmat)[4])
{
struct GPUBatch *geom = drw_cache_procedural_lines_get();
drw_shgroup_call_procedural_add_ex(shgroup, geom, line_count * 2, obmat);
}
-void DRW_shgroup_call_procedural_triangles_add(DRWShadingGroup *shgroup,
- uint tria_count,
- float (*obmat)[4])
+void DRW_shgroup_call_procedural_triangles(DRWShadingGroup *shgroup,
+ uint tria_count,
+ float (*obmat)[4])
{
struct GPUBatch *geom = drw_cache_procedural_triangles_get();
drw_shgroup_call_procedural_add_ex(shgroup, geom, tria_count * 3, obmat);
}
/* These calls can be culled and are optimized for redraw */
-void DRW_shgroup_call_object_add_ex(DRWShadingGroup *shgroup,
- GPUBatch *geom,
- Object *ob,
- bool bypass_culling)
+void DRW_shgroup_call_object_ex(DRWShadingGroup *shgroup,
+ GPUBatch *geom,
+ Object *ob,
+ bool bypass_culling)
{
BLI_assert(geom != NULL);
@@ -554,11 +554,11 @@ void DRW_shgroup_call_object_add_ex(DRWShadingGroup *shgroup,
#endif
}
-void DRW_shgroup_call_object_add_with_callback(DRWShadingGroup *shgroup,
- GPUBatch *geom,
- Object *ob,
- DRWCallVisibilityFn *callback,
- void *user_data)
+void DRW_shgroup_call_object_with_callback(DRWShadingGroup *shgroup,
+ GPUBatch *geom,
+ Object *ob,
+ DRWCallVisibilityFn *callback,
+ void *user_data)
{
BLI_assert(geom != NULL);
@@ -578,10 +578,10 @@ void DRW_shgroup_call_object_add_with_callback(DRWShadingGroup *shgroup,
#endif
}
-void DRW_shgroup_call_instances_add(DRWShadingGroup *shgroup,
- GPUBatch *geom,
- float (*obmat)[4],
- uint count)
+void DRW_shgroup_call_instances(DRWShadingGroup *shgroup,
+ GPUBatch *geom,
+ float (*obmat)[4],
+ uint count)
{
BLI_assert(geom != NULL);
@@ -599,10 +599,10 @@ void DRW_shgroup_call_instances_add(DRWShadingGroup *shgroup,
#endif
}
-void DRW_shgroup_call_instances_with_attribs_add(DRWShadingGroup *shgroup,
- struct GPUBatch *geom,
- float (*obmat)[4],
- struct GPUBatch *inst_attributes)
+void DRW_shgroup_call_instances_with_attribs(DRWShadingGroup *shgroup,
+ struct GPUBatch *geom,
+ float (*obmat)[4],
+ struct GPUBatch *inst_attributes)
{
BLI_assert(geom != NULL);
BLI_assert(inst_attributes->verts[0] != NULL);
@@ -673,9 +673,9 @@ static void sculpt_draw_cb(DRWSculptCallbackData *scd, GPU_PBVH_Buffers *buffers
shgrp = DRW_shgroup_create_sub(shgrp);
DRW_shgroup_uniform_vec3(shgrp, "materialDiffuseColor", SCULPT_DEBUG_COLOR(scd->node_nr++), 1);
#endif
- /* DRW_shgroup_call_object_add_ex reuses matrices calculations for all the drawcalls of this
+ /* DRW_shgroup_call_object_ex reuses matrices calculations for all the drawcalls of this
* object. */
- DRW_shgroup_call_object_add_ex(shgrp, geom, scd->ob, true);
+ DRW_shgroup_call_object_ex(shgrp, geom, scd->ob, true);
}
}
@@ -736,7 +736,7 @@ static void drw_sculpt_generate_calls(DRWSculptCallbackData *scd, bool use_vcol)
#endif
}
-void DRW_shgroup_call_sculpt_add(
+void DRW_shgroup_call_sculpt(
DRWShadingGroup *shgroup, Object *ob, bool use_wire, bool use_mask, bool use_vcol)
{
DRWSculptCallbackData scd = {
@@ -749,9 +749,7 @@ void DRW_shgroup_call_sculpt_add(
drw_sculpt_generate_calls(&scd, use_vcol);
}
-void DRW_shgroup_call_sculpt_with_materials_add(DRWShadingGroup **shgroups,
- Object *ob,
- bool use_vcol)
+void DRW_shgroup_call_sculpt_with_materials(DRWShadingGroup **shgroups, Object *ob, bool use_vcol)
{
DRWSculptCallbackData scd = {
.ob = ob,
@@ -765,9 +763,9 @@ void DRW_shgroup_call_sculpt_with_materials_add(DRWShadingGroup **shgroups,
static GPUVertFormat inst_select_format = {0};
-DRWCallBuffer *DRW_shgroup_call_buffer_add(DRWShadingGroup *shgroup,
- struct GPUVertFormat *format,
- GPUPrimType prim_type)
+DRWCallBuffer *DRW_shgroup_call_buffer(DRWShadingGroup *shgroup,
+ struct GPUVertFormat *format,
+ GPUPrimType prim_type)
{
BLI_assert(ELEM(prim_type, GPU_PRIM_POINTS, GPU_PRIM_LINES, GPU_PRIM_TRI_FAN));
BLI_assert(format != NULL);
@@ -795,9 +793,9 @@ DRWCallBuffer *DRW_shgroup_call_buffer_add(DRWShadingGroup *shgroup,
return (DRWCallBuffer *)call;
}
-DRWCallBuffer *DRW_shgroup_call_buffer_instance_add(DRWShadingGroup *shgroup,
- struct GPUVertFormat *format,
- GPUBatch *geom)
+DRWCallBuffer *DRW_shgroup_call_buffer_instance(DRWShadingGroup *shgroup,
+ struct GPUVertFormat *format,
+ GPUBatch *geom)
{
BLI_assert(geom != NULL);
BLI_assert(format != NULL);