git.blender.org/blender.git
Diffstat (limited to 'source/blender/draw/intern')
-rw-r--r--  source/blender/draw/intern/draw_cache_impl_curves.cc                 |   4
-rw-r--r--  source/blender/draw/intern/draw_cache_impl_gpencil.cc                |   4
-rw-r--r--  source/blender/draw/intern/draw_cache_impl_mesh.cc                   |   8
-rw-r--r--  source/blender/draw/intern/draw_cache_impl_pointcloud.cc             | 120
-rw-r--r--  source/blender/draw/intern/draw_command.cc                           |  22
-rw-r--r--  source/blender/draw/intern/draw_command.hh                           |  37
-rw-r--r--  source/blender/draw/intern/draw_curves.cc                            |   2
-rw-r--r--  source/blender/draw/intern/draw_hair.cc                              |   6
-rw-r--r--  source/blender/draw/intern/draw_manager.c                            |   6
-rw-r--r--  source/blender/draw/intern/draw_manager_data.cc                      |  26
-rw-r--r--  source/blender/draw/intern/draw_manager_shader.c                     |   4
-rw-r--r--  source/blender/draw/intern/draw_manager_text.cc                      |  34
-rw-r--r--  source/blender/draw/intern/draw_pass.hh                              |  26
-rw-r--r--  source/blender/draw/intern/draw_pbvh.cc                              |  17
-rw-r--r--  source/blender/draw/intern/draw_resource.hh                          |   4
-rw-r--r--  source/blender/draw/intern/draw_volume.cc                            |   2
-rw-r--r--  source/blender/draw/intern/mesh_extractors/extract_mesh_vbo_edge_fac.cc |   4
17 files changed, 193 insertions(+), 133 deletions(-)
diff --git a/source/blender/draw/intern/draw_cache_impl_curves.cc b/source/blender/draw/intern/draw_cache_impl_curves.cc
index c36b90ec32e..0322d048fa5 100644
--- a/source/blender/draw/intern/draw_cache_impl_curves.cc
+++ b/source/blender/draw/intern/draw_cache_impl_curves.cc
@@ -252,7 +252,7 @@ static void curves_batch_cache_fill_segments_proc_pos(
static void curves_batch_cache_ensure_procedural_pos(const Curves &curves,
CurvesEvalCache &cache,
- GPUMaterial *gpu_material)
+ GPUMaterial *UNUSED(gpu_material))
{
if (cache.proc_point_buf == nullptr || DRW_vbo_requested(cache.proc_point_buf)) {
/* Initialize vertex format. */
@@ -331,7 +331,7 @@ static void curves_batch_cache_ensure_procedural_final_attr(CurvesEvalCache &cac
const GPUVertFormat *format,
const int subdiv,
const int index,
- const char *name)
+ const char *UNUSED(name))
{
CurvesEvalFinalCache &final_cache = cache.final[subdiv];
final_cache.attributes_buf[index] = GPU_vertbuf_create_with_format_ex(
diff --git a/source/blender/draw/intern/draw_cache_impl_gpencil.cc b/source/blender/draw/intern/draw_cache_impl_gpencil.cc
index 6c38b23f44f..6860fae744b 100644
--- a/source/blender/draw/intern/draw_cache_impl_gpencil.cc
+++ b/source/blender/draw/intern/draw_cache_impl_gpencil.cc
@@ -319,7 +319,7 @@ static void gpencil_buffer_add_point(GPUIndexBufBuilder *ibo,
vert->strength = (round_cap0) ? pt->strength : -pt->strength;
vert->u_stroke = pt->uv_fac;
- vert->stroke_id = gps->runtime.stroke_start;
+ vert->stroke_id = gps->runtime.vertex_start;
vert->point_id = v;
vert->thickness = max_ff(0.0f, gps->thickness * pt->pressure) * (round_cap1 ? 1.0f : -1.0f);
/* Tag endpoint material to -1 so they get discarded by vertex shader. */
@@ -612,7 +612,7 @@ static void gpencil_sbuffer_stroke_ensure(bGPdata *gpd, bool do_fill)
for (int i = 0; i < vert_len; i++) {
ED_gpencil_tpoint_to_point(region, origin, &tpoints[i], &gps->points[i]);
- mul_m4_v3(ob->imat, &gps->points[i].x);
+ mul_m4_v3(ob->world_to_object, &gps->points[i].x);
bGPDspoint *pt = &gps->points[i];
copy_v4_v4(pt->vert_color, tpoints[i].vert_color);
}
diff --git a/source/blender/draw/intern/draw_cache_impl_mesh.cc b/source/blender/draw/intern/draw_cache_impl_mesh.cc
index 5ce658abfe4..031de3e4ef2 100644
--- a/source/blender/draw/intern/draw_cache_impl_mesh.cc
+++ b/source/blender/draw/intern/draw_cache_impl_mesh.cc
@@ -1889,7 +1889,7 @@ void DRW_mesh_batch_cache_create_requested(struct TaskGraph *task_graph,
is_editmode,
is_paint_mode,
is_mode_active,
- ob->obmat,
+ ob->object_to_world,
false,
true,
scene,
@@ -1906,7 +1906,7 @@ void DRW_mesh_batch_cache_create_requested(struct TaskGraph *task_graph,
is_editmode,
is_paint_mode,
is_mode_active,
- ob->obmat,
+ ob->object_to_world,
false,
false,
scene,
@@ -1922,7 +1922,7 @@ void DRW_mesh_batch_cache_create_requested(struct TaskGraph *task_graph,
is_editmode,
is_paint_mode,
is_mode_active,
- ob->obmat,
+ ob->object_to_world,
true,
false,
do_cage,
@@ -1943,7 +1943,7 @@ void DRW_mesh_batch_cache_create_requested(struct TaskGraph *task_graph,
is_editmode,
is_paint_mode,
is_mode_active,
- ob->obmat,
+ ob->object_to_world,
true,
false,
scene,
diff --git a/source/blender/draw/intern/draw_cache_impl_pointcloud.cc b/source/blender/draw/intern/draw_cache_impl_pointcloud.cc
index d64fc581942..ddbfe232361 100644
--- a/source/blender/draw/intern/draw_cache_impl_pointcloud.cc
+++ b/source/blender/draw/intern/draw_cache_impl_pointcloud.cc
@@ -38,7 +38,7 @@ using namespace blender;
/** \name GPUBatch cache management
* \{ */
-struct PointCloudBatchCache {
+struct PointCloudEvalCache {
/* Dot primitive types. */
GPUBatch *dots;
/* Triangle primitive types. */
@@ -69,10 +69,15 @@ struct PointCloudBatchCache {
* user preferences (`U.vbotimeout`) then garbage collection is performed.
*/
int last_attr_matching_time;
- /* settings to determine if cache is invalid */
- bool is_dirty;
int mat_len;
+};
+
+struct PointCloudBatchCache {
+ PointCloudEvalCache eval_cache;
+
+ /* settings to determine if cache is invalid */
+ bool is_dirty;
/**
* The draw cache extraction is currently not multi-threaded for multiple objects, but if it was,
@@ -94,7 +99,7 @@ static bool pointcloud_batch_cache_valid(PointCloud &pointcloud)
if (cache == nullptr) {
return false;
}
- if (cache->mat_len != DRW_pointcloud_material_count_get(&pointcloud)) {
+ if (cache->eval_cache.mat_len != DRW_pointcloud_material_count_get(&pointcloud)) {
return false;
}
return cache->is_dirty == false;
@@ -109,12 +114,12 @@ static void pointcloud_batch_cache_init(PointCloud &pointcloud)
pointcloud.batch_cache = cache;
}
else {
- memset(cache, 0, sizeof(*cache));
+ cache->eval_cache = {};
}
- cache->mat_len = DRW_pointcloud_material_count_get(&pointcloud);
- cache->surface_per_mat = static_cast<GPUBatch **>(
- MEM_callocN(sizeof(GPUBatch *) * cache->mat_len, __func__));
+ cache->eval_cache.mat_len = DRW_pointcloud_material_count_get(&pointcloud);
+ cache->eval_cache.surface_per_mat = static_cast<GPUBatch **>(
+ MEM_callocN(sizeof(GPUBatch *) * cache->eval_cache.mat_len, __func__));
cache->is_dirty = false;
}
@@ -137,10 +142,10 @@ void DRW_pointcloud_batch_cache_dirty_tag(PointCloud *pointcloud, int mode)
static void pointcloud_discard_attributes(PointCloudBatchCache &cache)
{
for (const int j : IndexRange(GPU_MAX_ATTR)) {
- GPU_VERTBUF_DISCARD_SAFE(cache.attributes_buf[j]);
+ GPU_VERTBUF_DISCARD_SAFE(cache.eval_cache.attributes_buf[j]);
}
- drw_attributes_clear(&cache.attr_used);
+ drw_attributes_clear(&cache.eval_cache.attr_used);
}
static void pointcloud_batch_cache_clear(PointCloud &pointcloud)
@@ -150,18 +155,18 @@ static void pointcloud_batch_cache_clear(PointCloud &pointcloud)
return;
}
- GPU_BATCH_DISCARD_SAFE(cache->dots);
- GPU_BATCH_DISCARD_SAFE(cache->surface);
- GPU_VERTBUF_DISCARD_SAFE(cache->pos_rad);
- GPU_VERTBUF_DISCARD_SAFE(cache->attr_viewer);
- GPU_INDEXBUF_DISCARD_SAFE(cache->geom_indices);
+ GPU_BATCH_DISCARD_SAFE(cache->eval_cache.dots);
+ GPU_BATCH_DISCARD_SAFE(cache->eval_cache.surface);
+ GPU_VERTBUF_DISCARD_SAFE(cache->eval_cache.pos_rad);
+ GPU_VERTBUF_DISCARD_SAFE(cache->eval_cache.attr_viewer);
+ GPU_INDEXBUF_DISCARD_SAFE(cache->eval_cache.geom_indices);
- if (cache->surface_per_mat) {
- for (int i = 0; i < cache->mat_len; i++) {
- GPU_BATCH_DISCARD_SAFE(cache->surface_per_mat[i]);
+ if (cache->eval_cache.surface_per_mat) {
+ for (int i = 0; i < cache->eval_cache.mat_len; i++) {
+ GPU_BATCH_DISCARD_SAFE(cache->eval_cache.surface_per_mat[i]);
}
}
- MEM_SAFE_FREE(cache->surface_per_mat);
+ MEM_SAFE_FREE(cache->eval_cache.surface_per_mat);
pointcloud_discard_attributes(*cache);
}
@@ -189,15 +194,16 @@ void DRW_pointcloud_batch_cache_free_old(PointCloud *pointcloud, int ctime)
bool do_discard = false;
- if (drw_attributes_overlap(&cache->attr_used_over_time, &cache->attr_used)) {
- cache->last_attr_matching_time = ctime;
+ if (drw_attributes_overlap(&cache->eval_cache.attr_used_over_time,
+ &cache->eval_cache.attr_used)) {
+ cache->eval_cache.last_attr_matching_time = ctime;
}
- if (ctime - cache->last_attr_matching_time > U.vbotimeout) {
+ if (ctime - cache->eval_cache.last_attr_matching_time > U.vbotimeout) {
do_discard = true;
}
- drw_attributes_clear(&cache->attr_used_over_time);
+ drw_attributes_clear(&cache->eval_cache.attr_used_over_time);
if (do_discard) {
pointcloud_discard_attributes(*cache);
@@ -235,7 +241,7 @@ static void pointcloud_extract_indices(const PointCloud &pointcloud, PointCloudB
}
}
- GPU_indexbuf_build_in_place(&builder, cache.geom_indices);
+ GPU_indexbuf_build_in_place(&builder, cache.eval_cache.geom_indices);
}
static void pointcloud_extract_position_and_radius(const PointCloud &pointcloud,
@@ -252,11 +258,11 @@ static void pointcloud_extract_position_and_radius(const PointCloud &pointcloud,
}
GPUUsageType usage_flag = GPU_USAGE_STATIC | GPU_USAGE_FLAG_BUFFER_TEXTURE_ONLY;
- GPU_vertbuf_init_with_format_ex(cache.pos_rad, &format, usage_flag);
+ GPU_vertbuf_init_with_format_ex(cache.eval_cache.pos_rad, &format, usage_flag);
- GPU_vertbuf_data_alloc(cache.pos_rad, positions.size());
- MutableSpan<float4> vbo_data{static_cast<float4 *>(GPU_vertbuf_get_data(cache.pos_rad)),
- pointcloud.totpoint};
+ GPU_vertbuf_data_alloc(cache.eval_cache.pos_rad, positions.size());
+ MutableSpan<float4> vbo_data{
+ static_cast<float4 *>(GPU_vertbuf_get_data(cache.eval_cache.pos_rad)), pointcloud.totpoint};
if (radii) {
const VArraySpan<float> radii_span(radii);
threading::parallel_for(vbo_data.index_range(), 4096, [&](IndexRange range) {
@@ -288,7 +294,7 @@ static void pointcloud_extract_attribute(const PointCloud &pointcloud,
{
using namespace blender;
- GPUVertBuf *&attr_buf = cache.attributes_buf[index];
+ GPUVertBuf *&attr_buf = cache.eval_cache.attributes_buf[index];
const bke::AttributeAccessor attributes = pointcloud.attributes();
@@ -322,8 +328,8 @@ static void pointcloud_extract_attribute(const PointCloud &pointcloud,
GPUVertBuf *pointcloud_position_and_radius_get(PointCloud *pointcloud)
{
PointCloudBatchCache *cache = pointcloud_batch_cache_get(*pointcloud);
- DRW_vbo_request(nullptr, &cache->pos_rad);
- return cache->pos_rad;
+ DRW_vbo_request(nullptr, &cache->eval_cache.pos_rad);
+ return cache->eval_cache.pos_rad;
}
GPUBatch **pointcloud_surface_shaded_get(PointCloud *pointcloud,
@@ -350,23 +356,23 @@ GPUBatch **pointcloud_surface_shaded_get(PointCloud *pointcloud,
}
}
- if (!drw_attributes_overlap(&cache->attr_used, &attrs_needed)) {
+ if (!drw_attributes_overlap(&cache->eval_cache.attr_used, &attrs_needed)) {
/* Some new attributes have been added, free all and start over. */
for (const int i : IndexRange(GPU_MAX_ATTR)) {
- GPU_VERTBUF_DISCARD_SAFE(cache->attributes_buf[i]);
+ GPU_VERTBUF_DISCARD_SAFE(cache->eval_cache.attributes_buf[i]);
}
- drw_attributes_merge(&cache->attr_used, &attrs_needed, cache->render_mutex);
+ drw_attributes_merge(&cache->eval_cache.attr_used, &attrs_needed, cache->render_mutex);
}
- drw_attributes_merge(&cache->attr_used_over_time, &attrs_needed, cache->render_mutex);
+ drw_attributes_merge(&cache->eval_cache.attr_used_over_time, &attrs_needed, cache->render_mutex);
- DRW_batch_request(&cache->surface_per_mat[0]);
- return cache->surface_per_mat;
+ DRW_batch_request(&cache->eval_cache.surface_per_mat[0]);
+ return cache->eval_cache.surface_per_mat;
}
GPUBatch *pointcloud_surface_get(PointCloud *pointcloud)
{
PointCloudBatchCache *cache = pointcloud_batch_cache_get(*pointcloud);
- return DRW_batch_request(&cache->surface);
+ return DRW_batch_request(&cache->eval_cache.surface);
}
/** \} */
@@ -379,7 +385,7 @@ GPUBatch *DRW_pointcloud_batch_cache_get_dots(Object *ob)
{
PointCloud &pointcloud = *static_cast<PointCloud *>(ob->data);
PointCloudBatchCache *cache = pointcloud_batch_cache_get(pointcloud);
- return DRW_batch_request(&cache->dots);
+ return DRW_batch_request(&cache->eval_cache.dots);
}
GPUVertBuf **DRW_pointcloud_evaluated_attribute(PointCloud *pointcloud, const char *name)
@@ -392,12 +398,12 @@ GPUVertBuf **DRW_pointcloud_evaluated_attribute(PointCloud *pointcloud, const ch
if (drw_custom_data_match_attribute(&pointcloud->pdata, name, &layer_index, &type)) {
DRW_Attributes attributes{};
drw_attributes_add_request(&attributes, name, type, layer_index, domain);
- drw_attributes_merge(&cache.attr_used, &attributes, cache.render_mutex);
+ drw_attributes_merge(&cache.eval_cache.attr_used, &attributes, cache.render_mutex);
}
int request_i = -1;
- for (const int i : IndexRange(cache.attr_used.num_requests)) {
- if (STREQ(cache.attr_used.requests[i].attribute_name, name)) {
+ for (const int i : IndexRange(cache.eval_cache.attr_used.num_requests)) {
+ if (STREQ(cache.eval_cache.attr_used.requests[i].attribute_name, name)) {
request_i = i;
break;
}
@@ -405,7 +411,7 @@ GPUVertBuf **DRW_pointcloud_evaluated_attribute(PointCloud *pointcloud, const ch
if (request_i == -1) {
return nullptr;
}
- return &cache.attributes_buf[request_i];
+ return &cache.eval_cache.attributes_buf[request_i];
}
int DRW_pointcloud_material_count_get(PointCloud *pointcloud)
@@ -418,33 +424,33 @@ void DRW_pointcloud_batch_cache_create_requested(Object *ob)
PointCloud *pointcloud = static_cast<PointCloud *>(ob->data);
PointCloudBatchCache &cache = *pointcloud_batch_cache_get(*pointcloud);
- if (DRW_batch_requested(cache.dots, GPU_PRIM_POINTS)) {
- DRW_vbo_request(cache.dots, &cache.pos_rad);
+ if (DRW_batch_requested(cache.eval_cache.dots, GPU_PRIM_POINTS)) {
+ DRW_vbo_request(cache.eval_cache.dots, &cache.eval_cache.pos_rad);
}
- if (DRW_batch_requested(cache.surface, GPU_PRIM_TRIS)) {
- DRW_ibo_request(cache.surface, &cache.geom_indices);
- DRW_vbo_request(cache.surface, &cache.pos_rad);
+ if (DRW_batch_requested(cache.eval_cache.surface, GPU_PRIM_TRIS)) {
+ DRW_ibo_request(cache.eval_cache.surface, &cache.eval_cache.geom_indices);
+ DRW_vbo_request(cache.eval_cache.surface, &cache.eval_cache.pos_rad);
}
- for (int i = 0; i < cache.mat_len; i++) {
- if (DRW_batch_requested(cache.surface_per_mat[i], GPU_PRIM_TRIS)) {
+ for (int i = 0; i < cache.eval_cache.mat_len; i++) {
+ if (DRW_batch_requested(cache.eval_cache.surface_per_mat[i], GPU_PRIM_TRIS)) {
/* TODO(fclem): Per material ranges. */
- DRW_ibo_request(cache.surface_per_mat[i], &cache.geom_indices);
+ DRW_ibo_request(cache.eval_cache.surface_per_mat[i], &cache.eval_cache.geom_indices);
}
}
- for (int j = 0; j < cache.attr_used.num_requests; j++) {
- DRW_vbo_request(nullptr, &cache.attributes_buf[j]);
+ for (int j = 0; j < cache.eval_cache.attr_used.num_requests; j++) {
+ DRW_vbo_request(nullptr, &cache.eval_cache.attributes_buf[j]);
- if (DRW_vbo_requested(cache.attributes_buf[j])) {
- pointcloud_extract_attribute(*pointcloud, cache, cache.attr_used.requests[j], j);
+ if (DRW_vbo_requested(cache.eval_cache.attributes_buf[j])) {
+ pointcloud_extract_attribute(*pointcloud, cache, cache.eval_cache.attr_used.requests[j], j);
}
}
- if (DRW_ibo_requested(cache.geom_indices)) {
+ if (DRW_ibo_requested(cache.eval_cache.geom_indices)) {
pointcloud_extract_indices(*pointcloud, cache);
}
- if (DRW_vbo_requested(cache.pos_rad)) {
+ if (DRW_vbo_requested(cache.eval_cache.pos_rad)) {
pointcloud_extract_position_and_radius(*pointcloud, cache);
}
}
diff --git a/source/blender/draw/intern/draw_command.cc b/source/blender/draw/intern/draw_command.cc
index 882eda9b31d..6e999815e8d 100644
--- a/source/blender/draw/intern/draw_command.cc
+++ b/source/blender/draw/intern/draw_command.cc
@@ -30,6 +30,11 @@ void ShaderBind::execute(RecordingState &state) const
}
}
+void FramebufferBind::execute() const
+{
+ GPU_framebuffer_bind(framebuffer);
+}
+
void ResourceBind::execute() const
{
if (slot == -1) {
@@ -161,7 +166,10 @@ void StateSet::execute(RecordingState &recording_state) const
*/
BLI_assert(DST.state_lock == 0);
- if (!assign_if_different(recording_state.pipeline_state, new_state)) {
+ bool state_changed = assign_if_different(recording_state.pipeline_state, new_state);
+ bool clip_changed = assign_if_different(recording_state.clip_plane_count, clip_plane_count);
+
+ if (!state_changed && !clip_changed) {
return;
}
@@ -185,12 +193,7 @@ void StateSet::execute(RecordingState &recording_state) const
}
/* TODO: this should be part of shader state. */
- if (new_state & DRW_STATE_CLIP_PLANES) {
- GPU_clip_distances(recording_state.view_clip_plane_count);
- }
- else {
- GPU_clip_distances(0);
- }
+ GPU_clip_distances(recording_state.clip_plane_count);
if (new_state & DRW_STATE_IN_FRONT_SELECT) {
/* XXX `GPU_depth_range` is not a perfect solution
@@ -229,6 +232,11 @@ std::string ShaderBind::serialize() const
return std::string(".shader_bind(") + GPU_shader_get_name(shader) + ")";
}
+std::string FramebufferBind::serialize() const
+{
+ return std::string(".framebuffer_bind(") + GPU_framebuffer_get_name(framebuffer) + ")";
+}
+
std::string ResourceBind::serialize() const
{
switch (type) {
diff --git a/source/blender/draw/intern/draw_command.hh b/source/blender/draw/intern/draw_command.hh
index 5307a242e39..12c9916ee6d 100644
--- a/source/blender/draw/intern/draw_command.hh
+++ b/source/blender/draw/intern/draw_command.hh
@@ -39,7 +39,7 @@ struct RecordingState {
bool front_facing = true;
bool inverted_view = false;
DRWState pipeline_state = DRW_STATE_NO_DRAW;
- int view_clip_plane_count = 0;
+ int clip_plane_count = 0;
/** Used for gl_BaseInstance workaround. */
GPUStorageBuf *resource_id_buf = nullptr;
@@ -88,6 +88,7 @@ enum class Type : uint8_t {
DispatchIndirect,
Draw,
DrawIndirect,
+ FramebufferBind,
PushConstant,
ResourceBind,
ShaderBind,
@@ -118,6 +119,13 @@ struct ShaderBind {
std::string serialize() const;
};
+struct FramebufferBind {
+ GPUFrameBuffer *framebuffer;
+
+ void execute() const;
+ std::string serialize() const;
+};
+
struct ResourceBind {
eGPUSamplerState sampler;
int slot;
@@ -317,6 +325,7 @@ struct Clear {
struct StateSet {
DRWState new_state;
+ int clip_plane_count;
void execute(RecordingState &state) const;
std::string serialize() const;
@@ -473,10 +482,8 @@ class DrawMultiBuf {
uint vertex_first,
ResourceHandle handle)
{
- /* Unsupported for now. Use PassSimple. */
- BLI_assert(vertex_first == 0 || vertex_first == -1);
- BLI_assert(vertex_len == -1);
- UNUSED_VARS_NDEBUG(vertex_len, vertex_first);
+ /* Custom draw-calls cannot be batched and will produce one group per draw. */
+ const bool custom_group = ((vertex_first != 0 && vertex_first != -1) || vertex_len != -1);
instance_len = instance_len != -1 ? instance_len : 1;
@@ -493,8 +500,14 @@ class DrawMultiBuf {
bool inverted = handle.has_inverted_handedness();
- if (group_id == uint(-1)) {
+ DrawPrototype &draw = prototype_buf_.get_or_resize(prototype_count_++);
+ draw.resource_handle = handle.raw;
+ draw.instance_len = instance_len;
+ draw.group_id = group_id;
+
+ if (group_id == uint(-1) || custom_group) {
uint new_group_id = group_count_++;
+ draw.group_id = new_group_id;
DrawGroup &group = group_buf_.get_or_resize(new_group_id);
group.next = cmd.group_first;
@@ -503,11 +516,16 @@ class DrawMultiBuf {
group.gpu_batch = batch;
group.front_proto_len = 0;
group.back_proto_len = 0;
+ group.vertex_len = vertex_len;
+ group.vertex_first = vertex_first;
+ /* Custom group are not to be registered in the group_ids_. */
+ if (!custom_group) {
+ group_id = new_group_id;
+ }
/* For serialization only. */
(inverted ? group.back_proto_len : group.front_proto_len)++;
/* Append to list. */
cmd.group_first = new_group_id;
- group_id = new_group_id;
}
else {
DrawGroup &group = group_buf_[group_id];
@@ -516,11 +534,6 @@ class DrawMultiBuf {
/* For serialization only. */
(inverted ? group.back_proto_len : group.front_proto_len)++;
}
-
- DrawPrototype &draw = prototype_buf_.get_or_resize(prototype_count_++);
- draw.group_id = group_id;
- draw.resource_handle = handle.raw;
- draw.instance_len = instance_len;
}
void bind(RecordingState &state,
diff --git a/source/blender/draw/intern/draw_curves.cc b/source/blender/draw/intern/draw_curves.cc
index ee9045696be..ee9ed4666e0 100644
--- a/source/blender/draw/intern/draw_curves.cc
+++ b/source/blender/draw/intern/draw_curves.cc
@@ -390,7 +390,7 @@ DRWShadingGroup *DRW_shgroup_curves_create_sub(Object *object,
DRW_shgroup_uniform_int(shgrp, "hairStrandsRes", &curves_cache->final[subdiv].strands_res, 1);
DRW_shgroup_uniform_int_copy(shgrp, "hairThicknessRes", thickness_res);
DRW_shgroup_uniform_float_copy(shgrp, "hairRadShape", hair_rad_shape);
- DRW_shgroup_uniform_mat4_copy(shgrp, "hairDupliMatrix", object->obmat);
+ DRW_shgroup_uniform_mat4_copy(shgrp, "hairDupliMatrix", object->object_to_world);
DRW_shgroup_uniform_float_copy(shgrp, "hairRadRoot", hair_rad_root);
DRW_shgroup_uniform_float_copy(shgrp, "hairRadTip", hair_rad_tip);
DRW_shgroup_uniform_bool_copy(shgrp, "hairCloseTip", hair_close_tip);
diff --git a/source/blender/draw/intern/draw_hair.cc b/source/blender/draw/intern/draw_hair.cc
index 08e5d780cba..c5261f26f76 100644
--- a/source/blender/draw/intern/draw_hair.cc
+++ b/source/blender/draw/intern/draw_hair.cc
@@ -216,12 +216,12 @@ void DRW_hair_duplimat_get(Object *object,
if (collection != nullptr) {
sub_v3_v3(dupli_mat[3], collection->instance_offset);
}
- mul_m4_m4m4(dupli_mat, dupli_parent->obmat, dupli_mat);
+ mul_m4_m4m4(dupli_mat, dupli_parent->object_to_world, dupli_mat);
}
else {
- copy_m4_m4(dupli_mat, dupli_object->ob->obmat);
+ copy_m4_m4(dupli_mat, dupli_object->ob->object_to_world);
invert_m4(dupli_mat);
- mul_m4_m4m4(dupli_mat, object->obmat, dupli_mat);
+ mul_m4_m4m4(dupli_mat, object->object_to_world, dupli_mat);
}
}
else {
diff --git a/source/blender/draw/intern/draw_manager.c b/source/blender/draw/intern/draw_manager.c
index d0290426af3..4fcfec833eb 100644
--- a/source/blender/draw/intern/draw_manager.c
+++ b/source/blender/draw/intern/draw_manager.c
@@ -2801,7 +2801,7 @@ void DRW_draw_depth_object(
GPU_matrix_projection_set(rv3d->winmat);
GPU_matrix_set(rv3d->viewmat);
- GPU_matrix_mul(object->obmat);
+ GPU_matrix_mul(object->object_to_world);
/* Setup frame-buffer. */
GPUTexture *depth_tx = GPU_viewport_depth_texture(viewport);
@@ -2821,11 +2821,11 @@ void DRW_draw_depth_object(
const bool use_clipping_planes = RV3D_CLIPPING_ENABLED(v3d, rv3d);
if (use_clipping_planes) {
GPU_clip_distances(6);
- ED_view3d_clipping_local(rv3d, object->obmat);
+ ED_view3d_clipping_local(rv3d, object->object_to_world);
for (int i = 0; i < 6; i++) {
copy_v4_v4(planes.world[i], rv3d->clip_local[i]);
}
- copy_m4_m4(planes.ModelMatrix, object->obmat);
+ copy_m4_m4(planes.ModelMatrix, object->object_to_world);
}
drw_batch_cache_validate(object);
diff --git a/source/blender/draw/intern/draw_manager_data.cc b/source/blender/draw/intern/draw_manager_data.cc
index b9e0db71122..29b1493ec5e 100644
--- a/source/blender/draw/intern/draw_manager_data.cc
+++ b/source/blender/draw/intern/draw_manager_data.cc
@@ -678,7 +678,7 @@ BLI_INLINE void drw_call_matrix_init(DRWObjectMatrix *ob_mats, Object *ob, float
{
copy_m4_m4(ob_mats->model, obmat);
if (ob) {
- copy_m4_m4(ob_mats->modelinverse, ob->imat);
+ copy_m4_m4(ob_mats->modelinverse, ob->world_to_object);
}
else {
/* WATCH: Can be costly. */
@@ -695,7 +695,7 @@ static void drw_call_obinfos_init(DRWObjectInfos *ob_infos, Object *ob)
drw_call_calc_orco(ob, ob_infos->orcotexfac);
/* Random float value. */
uint random = (DST.dupli_source) ?
- DST.dupli_source->random_id :
+ DST.dupli_source->random_id :
/* TODO(fclem): this is rather costly to do at runtime. Maybe we can
* put it in ob->runtime and make depsgraph ensure it is up to date. */
BLI_hash_int_2d(BLI_hash_string(ob->id.name + 2), 0);
@@ -724,8 +724,8 @@ static void drw_call_culling_init(DRWCullingState *cull, Object *ob)
float corner[3];
/* Get BoundSphere center and radius from the BoundBox. */
mid_v3_v3v3(cull->bsphere.center, bbox->vec[0], bbox->vec[6]);
- mul_v3_m4v3(corner, ob->obmat, bbox->vec[0]);
- mul_m4_v3(ob->obmat, cull->bsphere.center);
+ mul_v3_m4v3(corner, ob->object_to_world, bbox->vec[0]);
+ mul_m4_v3(ob->object_to_world, cull->bsphere.center);
cull->bsphere.radius = len_v3v3(cull->bsphere.center, corner);
/* Bypass test for very large objects (see T67319). */
@@ -1017,7 +1017,7 @@ void DRW_shgroup_call_ex(DRWShadingGroup *shgroup,
if (G.f & G_FLAG_PICKSEL) {
drw_command_set_select_id(shgroup, nullptr, DST.select_id);
}
- DRWResourceHandle handle = drw_resource_handle(shgroup, ob ? ob->obmat : obmat, ob);
+ DRWResourceHandle handle = drw_resource_handle(shgroup, ob ? ob->object_to_world : obmat, ob);
drw_command_draw(shgroup, geom, handle);
/* Culling data. */
@@ -1042,7 +1042,7 @@ void DRW_shgroup_call_range(
if (G.f & G_FLAG_PICKSEL) {
drw_command_set_select_id(shgroup, nullptr, DST.select_id);
}
- DRWResourceHandle handle = drw_resource_handle(shgroup, ob ? ob->obmat : nullptr, ob);
+ DRWResourceHandle handle = drw_resource_handle(shgroup, ob ? ob->object_to_world : nullptr, ob);
drw_command_draw_range(shgroup, geom, handle, v_sta, v_num);
}
@@ -1053,7 +1053,7 @@ void DRW_shgroup_call_instance_range(
if (G.f & G_FLAG_PICKSEL) {
drw_command_set_select_id(shgroup, nullptr, DST.select_id);
}
- DRWResourceHandle handle = drw_resource_handle(shgroup, ob ? ob->obmat : nullptr, ob);
+ DRWResourceHandle handle = drw_resource_handle(shgroup, ob ? ob->object_to_world : nullptr, ob);
drw_command_draw_intance_range(shgroup, geom, handle, i_sta, i_num);
}
@@ -1099,7 +1099,7 @@ static void drw_shgroup_call_procedural_add_ex(DRWShadingGroup *shgroup,
if (G.f & G_FLAG_PICKSEL) {
drw_command_set_select_id(shgroup, nullptr, DST.select_id);
}
- DRWResourceHandle handle = drw_resource_handle(shgroup, ob ? ob->obmat : nullptr, ob);
+ DRWResourceHandle handle = drw_resource_handle(shgroup, ob ? ob->object_to_world : nullptr, ob);
drw_command_draw_procedural(shgroup, geom, handle, vert_count);
}
@@ -1149,7 +1149,7 @@ void DRW_shgroup_call_procedural_indirect(DRWShadingGroup *shgroup,
if (G.f & G_FLAG_PICKSEL) {
drw_command_set_select_id(shgroup, nullptr, DST.select_id);
}
- DRWResourceHandle handle = drw_resource_handle(shgroup, ob ? ob->obmat : nullptr, ob);
+ DRWResourceHandle handle = drw_resource_handle(shgroup, ob ? ob->object_to_world : nullptr, ob);
drw_command_draw_indirect(shgroup, geom, handle, indirect_buf);
}
@@ -1159,7 +1159,7 @@ void DRW_shgroup_call_instances(DRWShadingGroup *shgroup, Object *ob, GPUBatch *
if (G.f & G_FLAG_PICKSEL) {
drw_command_set_select_id(shgroup, nullptr, DST.select_id);
}
- DRWResourceHandle handle = drw_resource_handle(shgroup, ob ? ob->obmat : nullptr, ob);
+ DRWResourceHandle handle = drw_resource_handle(shgroup, ob ? ob->object_to_world : nullptr, ob);
drw_command_draw_instance(shgroup, geom, handle, count, false);
}
@@ -1173,7 +1173,7 @@ void DRW_shgroup_call_instances_with_attrs(DRWShadingGroup *shgroup,
if (G.f & G_FLAG_PICKSEL) {
drw_command_set_select_id(shgroup, nullptr, DST.select_id);
}
- DRWResourceHandle handle = drw_resource_handle(shgroup, ob ? ob->obmat : nullptr, ob);
+ DRWResourceHandle handle = drw_resource_handle(shgroup, ob ? ob->object_to_world : nullptr, ob);
GPUBatch *batch = DRW_temp_batch_instance_request(
DST.vmempool->idatalist, nullptr, inst_attributes, geom);
drw_command_draw_instance(shgroup, batch, handle, 0, true);
@@ -1283,7 +1283,7 @@ static void drw_sculpt_get_frustum_planes(Object *ob, float planes[6][4])
* 4x4 matrix is done by multiplying with the transpose inverse.
* The inverse cancels out here since we transform by inverse(obmat). */
float tmat[4][4];
- transpose_m4_m4(tmat, ob->obmat);
+ transpose_m4_m4(tmat, ob->object_to_world);
for (int i = 0; i < 6; i++) {
mul_m4_v4(tmat, planes[i]);
}
@@ -1361,7 +1361,7 @@ static void drw_sculpt_generate_calls(DRWSculptCallbackData *scd)
if (SCULPT_DEBUG_BUFFERS) {
int debug_node_nr = 0;
- DRW_debug_modelmat(scd->ob->obmat);
+ DRW_debug_modelmat(scd->ob->object_to_world);
BKE_pbvh_draw_debug_cb(
pbvh,
(void (*)(PBVHNode * n, void *d, const float min[3], const float max[3], PBVHNodeFlags f))
diff --git a/source/blender/draw/intern/draw_manager_shader.c b/source/blender/draw/intern/draw_manager_shader.c
index 40b05dff51f..85701a10f4b 100644
--- a/source/blender/draw/intern/draw_manager_shader.c
+++ b/source/blender/draw/intern/draw_manager_shader.c
@@ -64,8 +64,8 @@ static void drw_deferred_shader_compilation_exec(
void *custom_data,
/* Cannot be const, this function implements wm_jobs_start_callback.
* NOLINTNEXTLINE: readability-non-const-parameter. */
- short *stop,
- short *UNUSED(do_update),
+ bool *stop,
+ bool *UNUSED(do_update),
float *UNUSED(progress))
{
GPU_render_begin();
diff --git a/source/blender/draw/intern/draw_manager_text.cc b/source/blender/draw/intern/draw_manager_text.cc
index 100ef528bc8..1244c46e166 100644
--- a/source/blender/draw/intern/draw_manager_text.cc
+++ b/source/blender/draw/intern/draw_manager_text.cc
@@ -305,11 +305,11 @@ void DRW_text_edit_mesh_measure_stats(ARegion *region,
if (clip_segment_v3_plane_n(v1, v2, clip_planes, 4, v1_clip, v2_clip)) {
mid_v3_v3v3(vmid, v1_clip, v2_clip);
- mul_m4_v3(ob->obmat, vmid);
+ mul_m4_v3(ob->object_to_world, vmid);
if (do_global) {
- mul_mat3_m4_v3(ob->obmat, v1);
- mul_mat3_m4_v3(ob->obmat, v2);
+ mul_mat3_m4_v3(ob->object_to_world, v1);
+ mul_mat3_m4_v3(ob->object_to_world, v2);
}
if (unit->system) {
@@ -373,7 +373,7 @@ void DRW_text_edit_mesh_measure_stats(ARegion *region,
float angle;
mid_v3_v3v3(vmid, v1_clip, v2_clip);
- mul_m4_v3(ob->obmat, vmid);
+ mul_m4_v3(ob->object_to_world, vmid);
if (use_coords) {
copy_v3_v3(no_a, poly_normals[BM_elem_index_get(l_a->f)]);
@@ -385,8 +385,8 @@ void DRW_text_edit_mesh_measure_stats(ARegion *region,
}
if (do_global) {
- mul_mat3_m4_v3(ob->imat, no_a);
- mul_mat3_m4_v3(ob->imat, no_b);
+ mul_mat3_m4_v3(ob->world_to_object, no_a);
+ mul_mat3_m4_v3(ob->world_to_object, no_b);
normalize_v3(no_a);
normalize_v3(no_b);
}
@@ -443,16 +443,16 @@ void DRW_text_edit_mesh_measure_stats(ARegion *region,
n += 3;
if (do_global) {
- mul_mat3_m4_v3(ob->obmat, v1);
- mul_mat3_m4_v3(ob->obmat, v2);
- mul_mat3_m4_v3(ob->obmat, v3);
+ mul_mat3_m4_v3(ob->object_to_world, v1);
+ mul_mat3_m4_v3(ob->object_to_world, v2);
+ mul_mat3_m4_v3(ob->object_to_world, v3);
}
area += area_tri_v3(v1, v2, v3);
}
mul_v3_fl(vmid, 1.0f / float(n));
- mul_m4_v3(ob->obmat, vmid);
+ mul_m4_v3(ob->object_to_world, vmid);
if (unit->system) {
numstr_len = BKE_unit_value_as_string(
@@ -522,9 +522,9 @@ void DRW_text_edit_mesh_measure_stats(ARegion *region,
copy_v3_v3(v2_local, v2);
if (do_global) {
- mul_mat3_m4_v3(ob->obmat, v1);
- mul_mat3_m4_v3(ob->obmat, v2);
- mul_mat3_m4_v3(ob->obmat, v3);
+ mul_mat3_m4_v3(ob->object_to_world, v1);
+ mul_mat3_m4_v3(ob->object_to_world, v2);
+ mul_mat3_m4_v3(ob->object_to_world, v3);
}
float angle = angle_v3v3v3(v1, v2, v3);
@@ -535,7 +535,7 @@ void DRW_text_edit_mesh_measure_stats(ARegion *region,
(is_rad) ? angle : RAD2DEGF(angle),
(is_rad) ? "r" : "°");
interp_v3_v3v3(fvec, vmid, v2_local, 0.8f);
- mul_m4_v3(ob->obmat, fvec);
+ mul_m4_v3(ob->object_to_world, fvec);
DRW_text_cache_add(dt, fvec, numstr, numstr_len, 0, 0, txt_flag, col);
}
}
@@ -566,7 +566,7 @@ void DRW_text_edit_mesh_measure_stats(ARegion *region,
copy_v3_v3(v1, v->co);
}
- mul_m4_v3(ob->obmat, v1);
+ mul_m4_v3(ob->object_to_world, v1);
numstr_len = BLI_snprintf_rlen(numstr, sizeof(numstr), "%d", i);
DRW_text_cache_add(dt, v1, numstr, numstr_len, 0, 0, txt_flag, col);
@@ -595,7 +595,7 @@ void DRW_text_edit_mesh_measure_stats(ARegion *region,
if (clip_segment_v3_plane_n(v1, v2, clip_planes, 4, v1_clip, v2_clip)) {
mid_v3_v3v3(vmid, v1_clip, v2_clip);
- mul_m4_v3(ob->obmat, vmid);
+ mul_m4_v3(ob->object_to_world, vmid);
numstr_len = BLI_snprintf_rlen(numstr, sizeof(numstr), "%d", i);
DRW_text_cache_add(
@@ -629,7 +629,7 @@ void DRW_text_edit_mesh_measure_stats(ARegion *region,
BM_face_calc_center_median(f, v1);
}
- mul_m4_v3(ob->obmat, v1);
+ mul_m4_v3(ob->object_to_world, v1);
numstr_len = BLI_snprintf_rlen(numstr, sizeof(numstr), "%d", i);
DRW_text_cache_add(dt, v1, numstr, numstr_len, 0, 0, txt_flag, col);
diff --git a/source/blender/draw/intern/draw_pass.hh b/source/blender/draw/intern/draw_pass.hh
index 24dfdd1b97b..2c1fd16928e 100644
--- a/source/blender/draw/intern/draw_pass.hh
+++ b/source/blender/draw/intern/draw_pass.hh
@@ -14,8 +14,7 @@
* #Pass. Use many #PassSub along with a main #Pass to reduce the overhead and allow groupings of
* commands. \note The draw call order inside a batch of multiple draw with the exact same state is
* not guaranteed and is not even deterministic. Use a #PassSimple or #PassSortable if ordering is
- * needed. \note As of now, it is also quite limited in the type of draw command it can record
- * (no custom vertex count, no custom first vertex).
+ * needed. Custom vertex count and custom first vertex will effectively disable batching.
*
* `PassSimple`:
* Does not have the overhead of #PassMain but does not have the culling and batching optimization.
@@ -160,8 +159,10 @@ class PassBase {
*
* IMPORTANT: This does not set the stencil mask/reference values. Add a call to state_stencil()
* to ensure correct behavior of stencil aware draws.
+ *
+ * TODO(fclem): clip_plane_count should be part of shader state.
*/
- void state_set(DRWState state);
+ void state_set(DRWState state, int clip_plane_count = 0);
/**
* Clear the current frame-buffer.
@@ -192,6 +193,12 @@ class PassBase {
void shader_set(GPUShader *shader);
/**
+ * Bind a framebuffer. This is equivalent to a deferred GPU_framebuffer_bind() call.
+ * \note Changes the global GPU state (outside of DRW).
+ */
+ void framebuffer_set(GPUFrameBuffer *framebuffer);
+
+ /**
* Bind a material shader along with its associated resources. Any following bind() or
* push_constant() call will use its interface.
* IMPORTANT: Assumes material is compiled and can be used (no compilation error).
@@ -726,9 +733,13 @@ template<class T> inline void PassBase<T>::barrier(eGPUBarrier type)
/** \name State Implementation
* \{ */
-template<class T> inline void PassBase<T>::state_set(DRWState state)
+template<class T> inline void PassBase<T>::state_set(DRWState state, int clip_plane_count)
{
- create_command(Type::StateSet).state_set = {state};
+ /** \note This is for compatibility with the old clip plane API. */
+ if (clip_plane_count > 0) {
+ state |= DRW_STATE_CLIP_PLANES;
+ }
+ create_command(Type::StateSet).state_set = {state, clip_plane_count};
}
template<class T>
@@ -743,6 +754,11 @@ template<class T> inline void PassBase<T>::shader_set(GPUShader *shader)
create_command(Type::ShaderBind).shader_bind = {shader};
}
+template<class T> inline void PassBase<T>::framebuffer_set(GPUFrameBuffer *framebuffer)
+{
+ create_command(Type::FramebufferBind).framebuffer_bind = {framebuffer};
+}
+
template<class T> inline void PassBase<T>::material_set(Manager &manager, GPUMaterial *material)
{
GPUPass *gpupass = GPU_material_get_pass(material);
diff --git a/source/blender/draw/intern/draw_pbvh.cc b/source/blender/draw/intern/draw_pbvh.cc
index 38fb6d55245..b25bb42a8a5 100644
--- a/source/blender/draw/intern/draw_pbvh.cc
+++ b/source/blender/draw/intern/draw_pbvh.cc
@@ -951,6 +951,14 @@ struct PBVHBatches {
void create_index_faces(PBVH_GPU_Args *args)
{
+ int *mat_index = static_cast<int *>(
+ CustomData_get_layer_named(args->pdata, CD_PROP_INT32, "material_index"));
+
+ if (mat_index && args->totprim) {
+ int poly_index = args->mlooptri[args->prim_indices[0]].poly;
+ material_index = mat_index[poly_index];
+ }
+
/* Calculate number of edges*/
int edge_count = 0;
for (int i = 0; i < args->totprim; i++) {
@@ -959,6 +967,7 @@ struct PBVHBatches {
if (args->hide_poly && args->hide_poly[lt->poly]) {
continue;
}
+
int r_edges[3];
BKE_mesh_looptri_get_real_edges(args->me, lt, r_edges);
@@ -1030,6 +1039,14 @@ struct PBVHBatches {
void create_index_grids(PBVH_GPU_Args *args)
{
+ int *mat_index = static_cast<int *>(
+ CustomData_get_layer_named(args->pdata, CD_PROP_INT32, "material_index"));
+
+ if (mat_index && args->totprim) {
+ int poly_index = BKE_subdiv_ccg_grid_to_face_index(args->subdiv_ccg, args->grid_indices[0]);
+ material_index = mat_index[poly_index];
+ }
+
needs_tri_index = true;
int gridsize = args->ccg_key.grid_size;
int totgrid = args->totprim;
diff --git a/source/blender/draw/intern/draw_resource.hh b/source/blender/draw/intern/draw_resource.hh
index 2df38e32ed2..b8a0dbb8fa9 100644
--- a/source/blender/draw/intern/draw_resource.hh
+++ b/source/blender/draw/intern/draw_resource.hh
@@ -31,8 +31,8 @@
inline void ObjectMatrices::sync(const Object &object)
{
- model = object.obmat;
- model_inverse = object.imat;
+ model = object.object_to_world;
+ model_inverse = object.world_to_object;
}
inline void ObjectMatrices::sync(const float4x4 &model_matrix)
diff --git a/source/blender/draw/intern/draw_volume.cc b/source/blender/draw/intern/draw_volume.cc
index 2b4b0e3c089..5c1ce7c3111 100644
--- a/source/blender/draw/intern/draw_volume.cc
+++ b/source/blender/draw/intern/draw_volume.cc
@@ -127,7 +127,7 @@ static DRWShadingGroup *drw_volume_object_grids_init(Object *ob,
grp = DRW_shgroup_create_sub(grp);
- volume_infos.density_scale = BKE_volume_density_scale(volume, ob->obmat);
+ volume_infos.density_scale = BKE_volume_density_scale(volume, ob->object_to_world);
volume_infos.color_mul = float4(1.0f);
volume_infos.temperature_mul = 1.0f;
volume_infos.temperature_bias = 0.0f;
diff --git a/source/blender/draw/intern/mesh_extractors/extract_mesh_vbo_edge_fac.cc b/source/blender/draw/intern/mesh_extractors/extract_mesh_vbo_edge_fac.cc
index a968bd07c87..de1f5181ac5 100644
--- a/source/blender/draw/intern/mesh_extractors/extract_mesh_vbo_edge_fac.cc
+++ b/source/blender/draw/intern/mesh_extractors/extract_mesh_vbo_edge_fac.cc
@@ -66,7 +66,7 @@ static void extract_edge_fac_init(const MeshRenderData *mr,
* We could have a flag in the mesh instead or check the modifier stack. */
const MEdge *med = mr->medge;
for (int e_index = 0; e_index < mr->edge_len; e_index++, med++) {
- if ((med->flag & ME_EDGERENDER) == 0) {
+ if ((med->flag & ME_EDGEDRAW) == 0) {
data->use_edge_render = true;
break;
}
@@ -118,7 +118,7 @@ static void extract_edge_fac_iter_poly_mesh(const MeshRenderData *mr,
if (data->use_edge_render) {
const MEdge *med = &mr->medge[ml->e];
- data->vbo_data[ml_index] = (med->flag & ME_EDGERENDER) ? 255 : 0;
+ data->vbo_data[ml_index] = (med->flag & ME_EDGEDRAW) ? 255 : 0;
}
else {