git.blender.org/blender.git
Diffstat (limited to 'source/blender/draw/intern/draw_cache_impl_curves.cc')
-rw-r--r--  source/blender/draw/intern/draw_cache_impl_curves.cc  208
1 file changed, 131 insertions(+), 77 deletions(-)
diff --git a/source/blender/draw/intern/draw_cache_impl_curves.cc b/source/blender/draw/intern/draw_cache_impl_curves.cc
index 68ca1153c96..3bca17d9c56 100644
--- a/source/blender/draw/intern/draw_cache_impl_curves.cc
+++ b/source/blender/draw/intern/draw_cache_impl_curves.cc
@@ -75,13 +75,14 @@ static void curves_batch_cache_init(Curves &curves)
if (!cache) {
cache = MEM_cnew<CurvesBatchCache>(__func__);
- BLI_mutex_init(&cache->render_mutex);
curves.batch_cache = cache;
}
else {
memset(cache, 0, sizeof(*cache));
}
+ BLI_mutex_init(&cache->render_mutex);
+
cache->is_dirty = false;
}
@@ -258,7 +259,7 @@ static void curves_batch_cache_fill_segments_proc_pos(
}
}
-static void curves_batch_cache_ensure_procedural_pos(Curves &curves,
+static void curves_batch_cache_ensure_procedural_pos(const Curves &curves,
CurvesEvalCache &cache,
GPUMaterial *gpu_material)
{
@@ -268,7 +269,8 @@ static void curves_batch_cache_ensure_procedural_pos(Curves &curves,
GPU_vertformat_attr_add(&format, "posTime", GPU_COMP_F32, 4, GPU_FETCH_FLOAT);
GPU_vertformat_alias_add(&format, "pos");
- cache.proc_point_buf = GPU_vertbuf_create_with_format(&format);
+ cache.proc_point_buf = GPU_vertbuf_create_with_format_ex(
+ &format, GPU_USAGE_STATIC | GPU_USAGE_FLAG_BUFFER_TEXTURE_ONLY);
GPU_vertbuf_data_alloc(cache.proc_point_buf, cache.point_len);
MutableSpan posTime_data{
@@ -278,7 +280,8 @@ static void curves_batch_cache_ensure_procedural_pos(Curves &curves,
GPUVertFormat length_format = {0};
GPU_vertformat_attr_add(&length_format, "hairLength", GPU_COMP_F32, 1, GPU_FETCH_FLOAT);
- cache.proc_length_buf = GPU_vertbuf_create_with_format(&length_format);
+ cache.proc_length_buf = GPU_vertbuf_create_with_format_ex(
+ &length_format, GPU_USAGE_STATIC | GPU_USAGE_FLAG_BUFFER_TEXTURE_ONLY);
GPU_vertbuf_data_alloc(cache.proc_length_buf, cache.strands_len);
MutableSpan hairLength_data{
@@ -311,12 +314,15 @@ void drw_curves_get_attribute_sampler_name(const char *layer_name, char r_sample
BLI_snprintf(r_sampler_name, 32, "a%s", attr_safe_name);
}
-static void curves_batch_cache_ensure_procedural_final_attr(
- CurvesEvalCache &cache, GPUVertFormat *format, int subdiv, int index, const char *name)
+static void curves_batch_cache_ensure_procedural_final_attr(CurvesEvalCache &cache,
+ const GPUVertFormat *format,
+ const int subdiv,
+ const int index,
+ const char *name)
{
CurvesEvalFinalCache &final_cache = cache.final[subdiv];
- final_cache.attributes_buf[index] = GPU_vertbuf_create_with_format_ex(format,
- GPU_USAGE_DEVICE_ONLY);
+ final_cache.attributes_buf[index] = GPU_vertbuf_create_with_format_ex(
+ format, GPU_USAGE_DEVICE_ONLY | GPU_USAGE_FLAG_BUFFER_TEXTURE_ONLY);
/* Create a destination buffer for the transform feedback. Sized appropriately */
/* Those are points! not line segments. */
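
The hunks above all follow one pattern: every procedural VBO that is only ever read through a buffer texture is now created with GPU_vertbuf_create_with_format_ex, with GPU_USAGE_FLAG_BUFFER_TEXTURE_ONLY OR-ed into its usage hint. A minimal sketch of that pattern (the attribute name, component count, and helper name below are invented for illustration, not part of the patch):

static GPUVertBuf *create_buffer_texture_only_vbo(const uint len)
{
  /* Hypothetical format; the real callers declare "posTime", "hairLength", etc. */
  GPUVertFormat format = {0};
  GPU_vertformat_attr_add(&format, "data", GPU_COMP_F32, 4, GPU_FETCH_FLOAT);

  /* The flag is combined with the usage hint: GPU_USAGE_STATIC for CPU-filled buffers,
   * GPU_USAGE_DEVICE_ONLY for the transform-feedback destinations above. */
  GPUVertBuf *vbo = GPU_vertbuf_create_with_format_ex(
      &format, GPU_USAGE_STATIC | GPU_USAGE_FLAG_BUFFER_TEXTURE_ONLY);
  GPU_vertbuf_data_alloc(vbo, len);
  return vbo;
}
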
@@ -333,8 +339,8 @@ static void curves_batch_cache_ensure_procedural_final_attr(
static void curves_batch_ensure_attribute(const Curves &curves,
CurvesEvalCache &cache,
const DRW_AttributeRequest &request,
- int subdiv,
- int index)
+ const int subdiv,
+ const int index)
{
GPU_VERTBUF_DISCARD_SAFE(cache.proc_attributes_buf[index]);
DRW_TEXTURE_FREE_SAFE(cache.proc_attributes_tex[index]);
@@ -347,27 +353,28 @@ static void curves_batch_ensure_attribute(const Curves &curves,
/* All attributes use vec4, see comment below. */
GPU_vertformat_attr_add(&format, sampler_name, GPU_COMP_F32, 4, GPU_FETCH_FLOAT);
- cache.proc_attributes_buf[index] = GPU_vertbuf_create_with_format(&format);
+ cache.proc_attributes_buf[index] = GPU_vertbuf_create_with_format_ex(
+ &format, GPU_USAGE_STATIC | GPU_USAGE_FLAG_BUFFER_TEXTURE_ONLY);
GPUVertBuf *attr_vbo = cache.proc_attributes_buf[index];
GPU_vertbuf_data_alloc(attr_vbo,
request.domain == ATTR_DOMAIN_POINT ? curves.geometry.point_num :
curves.geometry.curve_num);
- CurveComponent component;
- component.replace(const_cast<Curves *>(&curves), GeometryOwnershipType::ReadOnly);
+ const blender::bke::AttributeAccessor attributes =
+ blender::bke::CurvesGeometry::wrap(curves.geometry).attributes();
/* TODO(@kevindietrich): float4 is used for scalar attributes as the implicit conversion done
* by OpenGL to vec4 for a scalar `s` will produce a `vec4(s, 0, 0, 1)`. However, following
* the Blender convention, it should be `vec4(s, s, s, 1)`. This could be resolved using a
* similar texture state swizzle to map the attribute correctly as for volume attributes, so we
* can control the conversion ourselves. */
- blender::VArray<ColorGeometry4f> attribute = component.attribute_get_for_read<ColorGeometry4f>(
+ blender::VArray<ColorGeometry4f> attribute = attributes.lookup_or_default<ColorGeometry4f>(
request.attribute_name, request.domain, {0.0f, 0.0f, 0.0f, 1.0f});
MutableSpan<ColorGeometry4f> vbo_span{
static_cast<ColorGeometry4f *>(GPU_vertbuf_get_data(attr_vbo)),
- component.attribute_domain_num(request.domain)};
+ attributes.domain_size(request.domain)};
attribute.materialize(vbo_span);
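
For the attribute upload itself, the hunk swaps the CurveComponent-based lookup for the blender::bke::AttributeAccessor API. A condensed sketch of the new read path, built only from the calls shown above (the function name is illustrative):

static void read_curves_attribute_as_color(const Curves &curves_id,
                                           const char *name,
                                           const eAttrDomain domain,
                                           blender::MutableSpan<ColorGeometry4f> r_data)
{
  const blender::bke::AttributeAccessor attributes =
      blender::bke::CurvesGeometry::wrap(curves_id.geometry).attributes();
  /* Missing attributes fall back to opaque black, matching the patch. */
  const blender::VArray<ColorGeometry4f> attribute =
      attributes.lookup_or_default<ColorGeometry4f>(name, domain, {0.0f, 0.0f, 0.0f, 1.0f});
  BLI_assert(r_data.size() == attributes.domain_size(domain));
  attribute.materialize(r_data);
}
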
@@ -393,10 +400,10 @@ static void curves_batch_cache_fill_strands_data(const Curves &curves_id,
curves_id.geometry);
for (const int i : IndexRange(curves.curves_num())) {
- const IndexRange curve_range = curves.points_for_curve(i);
+ const IndexRange points = curves.points_for_curve(i);
- *(uint *)GPU_vertbuf_raw_step(&data_step) = curve_range.start();
- *(ushort *)GPU_vertbuf_raw_step(&seg_step) = curve_range.size() - 1;
+ *(uint *)GPU_vertbuf_raw_step(&data_step) = points.start();
+ *(ushort *)GPU_vertbuf_raw_step(&seg_step) = points.size() - 1;
}
}
@@ -412,11 +419,13 @@ static void curves_batch_cache_ensure_procedural_strand_data(Curves &curves,
uint seg_id = GPU_vertformat_attr_add(&format_seg, "data", GPU_COMP_U16, 1, GPU_FETCH_INT);
/* Curve Data. */
- cache.proc_strand_buf = GPU_vertbuf_create_with_format(&format_data);
+ cache.proc_strand_buf = GPU_vertbuf_create_with_format_ex(
+ &format_data, GPU_USAGE_STATIC | GPU_USAGE_FLAG_BUFFER_TEXTURE_ONLY);
GPU_vertbuf_data_alloc(cache.proc_strand_buf, cache.strands_len);
GPU_vertbuf_attr_get_raw_data(cache.proc_strand_buf, data_id, &data_step);
- cache.proc_strand_seg_buf = GPU_vertbuf_create_with_format(&format_seg);
+ cache.proc_strand_seg_buf = GPU_vertbuf_create_with_format_ex(
+ &format_seg, GPU_USAGE_STATIC | GPU_USAGE_FLAG_BUFFER_TEXTURE_ONLY);
GPU_vertbuf_data_alloc(cache.proc_strand_seg_buf, cache.strands_len);
GPU_vertbuf_attr_get_raw_data(cache.proc_strand_seg_buf, seg_id, &seg_step);
@@ -437,7 +446,8 @@ static void curves_batch_cache_ensure_procedural_final_points(CurvesEvalCache &c
GPUVertFormat format = {0};
GPU_vertformat_attr_add(&format, "pos", GPU_COMP_F32, 4, GPU_FETCH_FLOAT);
- cache.final[subdiv].proc_buf = GPU_vertbuf_create_with_format_ex(&format, GPU_USAGE_DEVICE_ONLY);
+ cache.final[subdiv].proc_buf = GPU_vertbuf_create_with_format_ex(
+ &format, GPU_USAGE_DEVICE_ONLY | GPU_USAGE_FLAG_BUFFER_TEXTURE_ONLY);
/* Create a destination buffer for the transform feedback. Sized appropriately */
/* Those are points! not line segments. */
@@ -509,58 +519,41 @@ static bool curves_ensure_attributes(const Curves &curves,
ThreadMutex *render_mutex = &cache.render_mutex;
const CustomData *cd_curve = &curves.geometry.curve_data;
const CustomData *cd_point = &curves.geometry.point_data;
+ CurvesEvalFinalCache &final_cache = cache.curves_cache.final[subdiv];
- DRW_Attributes attrs_needed;
- drw_attributes_clear(&attrs_needed);
- ListBase gpu_attrs = GPU_material_attributes(gpu_material);
- LISTBASE_FOREACH (GPUMaterialAttribute *, gpu_attr, &gpu_attrs) {
- const char *name = gpu_attr->name;
-
- int layer_index;
- eCustomDataType type;
- eAttrDomain domain;
- if (drw_custom_data_match_attribute(cd_curve, name, &layer_index, &type)) {
- domain = ATTR_DOMAIN_CURVE;
- }
- else if (drw_custom_data_match_attribute(cd_point, name, &layer_index, &type)) {
- domain = ATTR_DOMAIN_POINT;
- }
- else {
- continue;
- }
-
- switch (type) {
- case CD_PROP_BOOL:
- case CD_PROP_INT8:
- case CD_PROP_INT32:
- case CD_PROP_FLOAT:
- case CD_PROP_FLOAT2:
- case CD_PROP_FLOAT3:
- case CD_PROP_COLOR: {
- DRW_AttributeRequest *request = drw_attributes_add_request(
- &attrs_needed, type, layer_index, domain);
- if (request) {
- BLI_strncpy(request->attribute_name, name, sizeof(request->attribute_name));
- }
-
- break;
+ if (gpu_material) {
+ DRW_Attributes attrs_needed;
+ drw_attributes_clear(&attrs_needed);
+ ListBase gpu_attrs = GPU_material_attributes(gpu_material);
+ LISTBASE_FOREACH (GPUMaterialAttribute *, gpu_attr, &gpu_attrs) {
+ const char *name = gpu_attr->name;
+
+ int layer_index;
+ eCustomDataType type;
+ eAttrDomain domain;
+ if (drw_custom_data_match_attribute(cd_curve, name, &layer_index, &type)) {
+ domain = ATTR_DOMAIN_CURVE;
+ }
+ else if (drw_custom_data_match_attribute(cd_point, name, &layer_index, &type)) {
+ domain = ATTR_DOMAIN_POINT;
+ }
+ else {
+ continue;
}
- default:
- break;
- }
- }
- CurvesEvalFinalCache &final_cache = cache.curves_cache.final[subdiv];
+ drw_attributes_add_request(&attrs_needed, name, type, layer_index, domain);
+ }
- if (!drw_attributes_overlap(&final_cache.attr_used, &attrs_needed)) {
- /* Some new attributes have been added, free all and start over. */
- for (const int i : IndexRange(GPU_MAX_ATTR)) {
- GPU_VERTBUF_DISCARD_SAFE(cache.curves_cache.proc_attributes_buf[i]);
- DRW_TEXTURE_FREE_SAFE(cache.curves_cache.proc_attributes_tex[i]);
+ if (!drw_attributes_overlap(&final_cache.attr_used, &attrs_needed)) {
+ /* Some new attributes have been added, free all and start over. */
+ for (const int i : IndexRange(GPU_MAX_ATTR)) {
+ GPU_VERTBUF_DISCARD_SAFE(cache.curves_cache.proc_attributes_buf[i]);
+ DRW_TEXTURE_FREE_SAFE(cache.curves_cache.proc_attributes_tex[i]);
+ }
+ drw_attributes_merge(&final_cache.attr_used, &attrs_needed, render_mutex);
}
- drw_attributes_merge(&final_cache.attr_used, &attrs_needed, render_mutex);
+ drw_attributes_merge(&final_cache.attr_used_over_time, &attrs_needed, render_mutex);
}
- drw_attributes_merge(&final_cache.attr_used_over_time, &attrs_needed, render_mutex);
bool need_tf_update = false;
@@ -581,16 +574,15 @@ static bool curves_ensure_attributes(const Curves &curves,
return need_tf_update;
}
-bool curves_ensure_procedural_data(Object *object,
+bool curves_ensure_procedural_data(Curves *curves,
CurvesEvalCache **r_hair_cache,
GPUMaterial *gpu_material,
const int subdiv,
const int thickness_res)
{
bool need_ft_update = false;
- Curves &curves = *static_cast<Curves *>(object->data);
- CurvesBatchCache &cache = curves_batch_cache_get(curves);
+ CurvesBatchCache &cache = curves_batch_cache_get(*curves);
*r_hair_cache = &cache.curves_cache;
const int steps = 3; /* TODO: don't hard-code? */
@@ -598,14 +590,14 @@ bool curves_ensure_procedural_data(Object *object,
/* Refreshed on combing and simulation. */
if ((*r_hair_cache)->proc_point_buf == nullptr) {
- ensure_seg_pt_count(curves, cache.curves_cache);
- curves_batch_cache_ensure_procedural_pos(curves, cache.curves_cache, gpu_material);
+ ensure_seg_pt_count(*curves, cache.curves_cache);
+ curves_batch_cache_ensure_procedural_pos(*curves, cache.curves_cache, gpu_material);
need_ft_update = true;
}
/* Refreshed if active layer or custom data changes. */
if ((*r_hair_cache)->strand_tex == nullptr) {
- curves_batch_cache_ensure_procedural_strand_data(curves, cache.curves_cache);
+ curves_batch_cache_ensure_procedural_strand_data(*curves, cache.curves_cache);
}
/* Refreshed only on subdiv count change. */
@@ -615,12 +607,10 @@ bool curves_ensure_procedural_data(Object *object,
}
if ((*r_hair_cache)->final[subdiv].proc_hairs[thickness_res - 1] == nullptr) {
curves_batch_cache_ensure_procedural_indices(
- curves, cache.curves_cache, thickness_res, subdiv);
+ *curves, cache.curves_cache, thickness_res, subdiv);
}
- if (gpu_material) {
- need_ft_update |= curves_ensure_attributes(curves, cache, gpu_material, subdiv);
- }
+ need_ft_update |= curves_ensure_attributes(*curves, cache, gpu_material, subdiv);
return need_ft_update;
}
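
Since curves_ensure_procedural_data now takes the Curves data-block directly instead of the evaluated Object, callers presumably perform the object->data cast themselves, as the removed line inside the function used to. A hypothetical call-site sketch (variable names assumed, not taken from the patch):

  Curves *curves = static_cast<Curves *>(object->data);
  CurvesEvalCache *curves_cache = nullptr;
  const bool need_update = curves_ensure_procedural_data(
      curves, &curves_cache, gpu_material, subdiv, thickness_res);
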
@@ -636,6 +626,70 @@ GPUBatch *DRW_curves_batch_cache_get_edit_points(Curves *curves)
return DRW_batch_request(&cache.edit_points);
}
+static void request_attribute(Curves &curves, const char *name)
+{
+ CurvesBatchCache &cache = curves_batch_cache_get(curves);
+ const DRWContextState *draw_ctx = DRW_context_state_get();
+ const Scene *scene = draw_ctx->scene;
+ const int subdiv = scene->r.hair_subdiv;
+ CurvesEvalFinalCache &final_cache = cache.curves_cache.final[subdiv];
+
+ DRW_Attributes attributes{};
+
+ blender::bke::CurvesGeometry &curves_geometry = blender::bke::CurvesGeometry::wrap(
+ curves.geometry);
+ std::optional<blender::bke::AttributeMetaData> meta_data =
+ curves_geometry.attributes().lookup_meta_data(name);
+ if (!meta_data) {
+ return;
+ }
+ const eAttrDomain domain = meta_data->domain;
+ const eCustomDataType type = meta_data->data_type;
+ const CustomData &custom_data = domain == ATTR_DOMAIN_POINT ? curves.geometry.point_data :
+ curves.geometry.curve_data;
+
+ drw_attributes_add_request(
+ &attributes, name, type, CustomData_get_named_layer(&custom_data, type, name), domain);
+
+ drw_attributes_merge(&final_cache.attr_used, &attributes, &cache.render_mutex);
+}
+
+GPUTexture **DRW_curves_texture_for_evaluated_attribute(Curves *curves,
+ const char *name,
+ bool *r_is_point_domain)
+{
+ CurvesBatchCache &cache = curves_batch_cache_get(*curves);
+ const DRWContextState *draw_ctx = DRW_context_state_get();
+ const Scene *scene = draw_ctx->scene;
+ const int subdiv = scene->r.hair_subdiv;
+ CurvesEvalFinalCache &final_cache = cache.curves_cache.final[subdiv];
+
+ request_attribute(*curves, name);
+
+ int request_i = -1;
+ for (const int i : IndexRange(final_cache.attr_used.num_requests)) {
+ if (STREQ(final_cache.attr_used.requests[i].attribute_name, name)) {
+ request_i = i;
+ break;
+ }
+ }
+ if (request_i == -1) {
+ *r_is_point_domain = false;
+ return nullptr;
+ }
+ switch (final_cache.attr_used.requests[request_i].domain) {
+ case ATTR_DOMAIN_POINT:
+ *r_is_point_domain = true;
+ return &final_cache.attributes_tex[request_i];
+ case ATTR_DOMAIN_CURVE:
+ *r_is_point_domain = false;
+ return &cache.curves_cache.proc_attributes_tex[request_i];
+ default:
+ BLI_assert_unreachable();
+ return nullptr;
+ }
+}
+
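
DRW_curves_texture_for_evaluated_attribute is the new public entry point: it registers the attribute request, then returns a pointer to either the evaluated point-domain texture or the per-curve attribute texture. A hypothetical caller sketch, assuming a draw-manager shading group and invented uniform names (not part of the patch):

static void bind_curves_attribute(DRWShadingGroup *grp, Curves *curves, const char *name)
{
  bool is_point_domain = false;
  GPUTexture **tex = DRW_curves_texture_for_evaluated_attribute(curves, name, &is_point_domain);
  if (tex == nullptr) {
    return; /* Attribute does not exist on the curves geometry. */
  }
  /* The texture may only be created later in the frame, so bind it by reference. */
  DRW_shgroup_uniform_texture_ref(grp, is_point_domain ? "attr_pt" : "attr_crv", tex);
}
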
void DRW_curves_batch_cache_create_requested(Object *ob)
{
Curves *curves = static_cast<Curves *>(ob->data);