git.blender.org/blender.git
author    Kévin Dietrich <kevin.dietrich@mailoo.org>    2022-05-24 06:02:57 +0300
committer Kévin Dietrich <kevin.dietrich@mailoo.org>    2022-05-24 06:02:57 +0300
commit    cd968a32739587a5baacdd278a670d44570cbd8a (patch)
tree      33ef22c30e0352fdc2ea073847bcc1342ebc3dfc /source/blender/draw/intern/draw_cache_impl_mesh.c
parent    64a5a7ade1e98234247a4140c182423ebc616d26 (diff)
EEVEE: support Curves attributes rendering
This adds support to render Curves attributes in EEVEE.

Each attribute is stored in a texture derived from a VBO. As the shading group needs the textures to be valid upon creation, the attributes are created and set up when the shading group itself is created, instead of lazily via `create_requested`, which we cannot rely on anyway since, unlike the mesh batch, we cannot really tell via some `DRW_batch_requested` whether attributes need to be updated.

Since point attributes need refinement, and since attributes are all cast to vec4/float4 to account for differences in type conversions between Blender and OpenGL, the refinement shader for points is used as is. The point attributes are stored for each subdivision level in `CurvesEvalFinalCache`. Each subdivision level also keeps track of the attributes already in use so they are properly updated when needed.

Some basic garbage collection was added, similar to what is done for meshes: if the attributes used over time have differed from the currently used attributes for too long, the buffers are freed, ensuring that stale attributes are removed.

This adds `CurvesInfos` to the shader creation info, which stores the scope in which the attributes are defined. Scopes are stored as booleans, in an array indexed by attribute loading order, which is also the order in which the attributes were added to the material. A mapping is necessary between the indices used for the scoping and the ones used in the Curves cache, as the cache may still contain stale attributes which have not been garbage collected yet.

Common utilities shared with the mesh code for handling requested attributes were moved to a separate file.

Differential Revision: https://developer.blender.org/D14916
Diffstat (limited to 'source/blender/draw/intern/draw_cache_impl_mesh.c')
-rw-r--r--  source/blender/draw/intern/draw_cache_impl_mesh.c | 151
1 file changed, 18 insertions(+), 133 deletions(-)
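
The deleted block below is the mesh-side version of the attribute-request utilities that this commit moves to a shared file so the Curves code can reuse them. As rough orientation, here is a minimal, self-contained C sketch of that request/merge pattern; `AttributeRequest`, `Attributes`, `MAX_REQUESTS` and the function names are simplified stand-ins, not the real Blender definitions (which use `GPU_MAX_ATTR` for the capacity and guard the merge with a render mutex), so treat it as an illustration rather than the implementation itself.

#include <stdbool.h>
#include <stdio.h>

/* Simplified stand-ins for the draw-module types shown in the diff below;
 * the capacity is illustrative and stands in for GPU_MAX_ATTR. */
#define MAX_REQUESTS 15

typedef struct AttributeRequest {
  int cd_type;     /* CustomData layer type */
  int layer_index; /* layer index within that type */
  int domain;      /* point / edge / face / corner */
} AttributeRequest;

typedef struct Attributes {
  AttributeRequest requests[MAX_REQUESTS];
  int num_requests;
} Attributes;

/* True if an equivalent request (same domain, layer and type) is already listed. */
static bool has_request(const Attributes *attrs, const AttributeRequest req)
{
  for (int i = 0; i < attrs->num_requests; i++) {
    const AttributeRequest src = attrs->requests[i];
    if (src.domain == req.domain && src.layer_index == req.layer_index &&
        src.cd_type == req.cd_type) {
      return true;
    }
  }
  return false;
}

/* Copy the requests of `src` into `dst`, skipping duplicates and respecting the
 * fixed capacity; the real merge additionally holds the mesh render mutex. */
static void merge_requests(const Attributes *src, Attributes *dst)
{
  for (int i = 0; i < src->num_requests; i++) {
    if (dst->num_requests == MAX_REQUESTS) {
      return;
    }
    if (has_request(dst, src->requests[i])) {
      continue;
    }
    dst->requests[dst->num_requests++] = src->requests[i];
  }
}

int main(void)
{
  Attributes needed = {{{0, 0, 0}}, 0};
  Attributes used = {{{0, 0, 0}}, 0};

  /* One request gathered from a material (arbitrary example values),
   * merged twice: the duplicate is dropped. */
  const AttributeRequest req = {/*cd_type*/ 1, /*layer_index*/ 0, /*domain*/ 0};
  needed.requests[needed.num_requests++] = req;
  merge_requests(&needed, &used);
  merge_requests(&needed, &used);
  printf("%d request(s) in use\n", used.num_requests); /* prints 1 */
  return 0;
}

Because `has_request` filters duplicates, merging the same per-material requests frame after frame leaves the destination unchanged, which is what lets the draw cache keep folding `attr_needed` into `attr_used` and `attr_used_over_time` without growing them unboundedly.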
diff --git a/source/blender/draw/intern/draw_cache_impl_mesh.c b/source/blender/draw/intern/draw_cache_impl_mesh.c
index 7fdeaf34965..a6ab2176d16 100644
--- a/source/blender/draw/intern/draw_cache_impl_mesh.c
+++ b/source/blender/draw/intern/draw_cache_impl_mesh.c
@@ -281,91 +281,6 @@ static void mesh_cd_calc_edit_uv_layer(const Mesh *UNUSED(me), DRW_MeshCDMask *c
cd_used->edit_uv = 1;
}
-/** \name DRW_MeshAttributes
- *
- * Utilities for handling requested attributes.
- * \{ */
-
-/* Return true if the given DRW_AttributeRequest is already in the requests. */
-static bool has_request(const DRW_MeshAttributes *requests, DRW_AttributeRequest req)
-{
-  for (int i = 0; i < requests->num_requests; i++) {
-    const DRW_AttributeRequest src_req = requests->requests[i];
-    if (src_req.domain != req.domain) {
-      continue;
-    }
-    if (src_req.layer_index != req.layer_index) {
-      continue;
-    }
-    if (src_req.cd_type != req.cd_type) {
-      continue;
-    }
-    return true;
-  }
-  return false;
-}
-
-static void mesh_attrs_merge_requests(const DRW_MeshAttributes *src_requests,
-                                      DRW_MeshAttributes *dst_requests)
-{
-  for (int i = 0; i < src_requests->num_requests; i++) {
-    if (dst_requests->num_requests == GPU_MAX_ATTR) {
-      return;
-    }
-
-    if (has_request(dst_requests, src_requests->requests[i])) {
-      continue;
-    }
-
-    dst_requests->requests[dst_requests->num_requests] = src_requests->requests[i];
-    dst_requests->num_requests += 1;
-  }
-}
-
-static void drw_mesh_attributes_clear(DRW_MeshAttributes *attributes)
-{
-  memset(attributes, 0, sizeof(DRW_MeshAttributes));
-}
-
-static void drw_mesh_attributes_merge(DRW_MeshAttributes *dst,
-                                      const DRW_MeshAttributes *src,
-                                      ThreadMutex *mesh_render_mutex)
-{
-  BLI_mutex_lock(mesh_render_mutex);
-  mesh_attrs_merge_requests(src, dst);
-  BLI_mutex_unlock(mesh_render_mutex);
-}
-
-/* Return true if all requests in b are in a. */
-static bool drw_mesh_attributes_overlap(DRW_MeshAttributes *a, DRW_MeshAttributes *b)
-{
-  for (int i = 0; i < b->num_requests; i++) {
-    if (!has_request(a, b->requests[i])) {
-      return false;
-    }
-  }
-
-  return true;
-}
-
-static void drw_mesh_attributes_add_request(DRW_MeshAttributes *attrs,
-                                            CustomDataType type,
-                                            int layer,
-                                            AttributeDomain domain)
-{
-  if (attrs->num_requests >= GPU_MAX_ATTR) {
-    return;
-  }
-
-  DRW_AttributeRequest *req = &attrs->requests[attrs->num_requests];
-  req->cd_type = type;
-  req->layer_index = layer;
-  req->domain = domain;
-  attrs->num_requests += 1;
-}
-
-/** \} */
-
BLI_INLINE const CustomData *mesh_cd_ldata_get_from_mesh(const Mesh *me)
{
switch ((eMeshWrapperType)me->runtime.wrapper_type) {
@@ -475,36 +390,6 @@ static void mesh_cd_calc_active_mloopcol_layer(const Object *object,
}
}
-static bool custom_data_match_attribute(const CustomData *custom_data,
-                                        const char *name,
-                                        int *r_layer_index,
-                                        int *r_type)
-{
-  const int possible_attribute_types[7] = {
-      CD_PROP_BOOL,
-      CD_PROP_INT8,
-      CD_PROP_INT32,
-      CD_PROP_FLOAT,
-      CD_PROP_FLOAT2,
-      CD_PROP_FLOAT3,
-      CD_PROP_COLOR,
-  };
-
-  for (int i = 0; i < ARRAY_SIZE(possible_attribute_types); i++) {
-    const int attr_type = possible_attribute_types[i];
-    int layer_index = CustomData_get_named_layer(custom_data, attr_type, name);
-    if (layer_index == -1) {
-      continue;
-    }
-
-    *r_layer_index = layer_index;
-    *r_type = attr_type;
-    return true;
-  }
-
-  return false;
-}
-
static uint mesh_cd_calc_gpu_layers_vcol_used(const Mesh *me_query,
const CustomData *cd_vdata,
const CustomData *cd_ldata,
@@ -556,7 +441,7 @@ static DRW_MeshCDMask mesh_cd_calc_used_gpu_layers(const Object *object,
const Mesh *me,
struct GPUMaterial **gpumat_array,
int gpumat_array_len,
- DRW_MeshAttributes *attributes)
+ DRW_Attributes *attributes)
{
const Mesh *me_final = editmesh_final_or_this(object, me);
const CustomData *cd_ldata = mesh_cd_ldata_get_from_mesh(me_final);
@@ -636,16 +521,16 @@ static DRW_MeshCDMask mesh_cd_calc_used_gpu_layers(const Object *object,
if (layer == -1) {
/* Try to match a generic attribute, we use the first attribute domain with a
* matching name. */
- if (custom_data_match_attribute(cd_vdata, name, &layer, &type)) {
+ if (drw_custom_data_match_attribute(cd_vdata, name, &layer, &type)) {
domain = ATTR_DOMAIN_POINT;
}
- else if (custom_data_match_attribute(cd_ldata, name, &layer, &type)) {
+ else if (drw_custom_data_match_attribute(cd_ldata, name, &layer, &type)) {
domain = ATTR_DOMAIN_CORNER;
}
- else if (custom_data_match_attribute(cd_pdata, name, &layer, &type)) {
+ else if (drw_custom_data_match_attribute(cd_pdata, name, &layer, &type)) {
domain = ATTR_DOMAIN_FACE;
}
- else if (custom_data_match_attribute(cd_edata, name, &layer, &type)) {
+ else if (drw_custom_data_match_attribute(cd_edata, name, &layer, &type)) {
domain = ATTR_DOMAIN_EDGE;
}
else {
@@ -718,7 +603,7 @@ static DRW_MeshCDMask mesh_cd_calc_used_gpu_layers(const Object *object,
}
if (layer != -1 && domain != ATTR_DOMAIN_NUM) {
- drw_mesh_attributes_add_request(attributes, type, layer, domain);
+ drw_attributes_add_request(attributes, type, layer, domain);
}
break;
}
@@ -729,7 +614,7 @@ static DRW_MeshCDMask mesh_cd_calc_used_gpu_layers(const Object *object,
case CD_PROP_FLOAT:
case CD_PROP_FLOAT2: {
if (layer != -1 && domain != ATTR_DOMAIN_NUM) {
- drw_mesh_attributes_add_request(attributes, type, layer, domain);
+ drw_attributes_add_request(attributes, type, layer, domain);
}
break;
}
@@ -1317,8 +1202,8 @@ GPUBatch **DRW_mesh_batch_cache_get_surface_shaded(Object *object,
uint gpumat_array_len)
{
MeshBatchCache *cache = mesh_batch_cache_get(me);
- DRW_MeshAttributes attrs_needed;
- drw_mesh_attributes_clear(&attrs_needed);
+ DRW_Attributes attrs_needed;
+ drw_attributes_clear(&attrs_needed);
DRW_MeshCDMask cd_needed = mesh_cd_calc_used_gpu_layers(
object, me, gpumat_array, gpumat_array_len, &attrs_needed);
@@ -1326,7 +1211,7 @@ GPUBatch **DRW_mesh_batch_cache_get_surface_shaded(Object *object,
mesh_cd_layers_type_merge(&cache->cd_needed, cd_needed);
ThreadMutex *mesh_render_mutex = (ThreadMutex *)me->runtime.render_mutex;
- drw_mesh_attributes_merge(&cache->attr_needed, &attrs_needed, mesh_render_mutex);
+ drw_attributes_merge(&cache->attr_needed, &attrs_needed, mesh_render_mutex);
mesh_batch_cache_request_surface_batches(cache);
return cache->surface_per_mat;
}
@@ -1596,7 +1481,7 @@ void DRW_mesh_batch_cache_free_old(Mesh *me, int ctime)
cache->lastmatch = ctime;
}
- if (drw_mesh_attributes_overlap(&cache->attr_used_over_time, &cache->attr_used)) {
+ if (drw_attributes_overlap(&cache->attr_used_over_time, &cache->attr_used)) {
cache->lastmatch = ctime;
}
@@ -1605,12 +1490,12 @@ void DRW_mesh_batch_cache_free_old(Mesh *me, int ctime)
}
mesh_cd_layers_type_clear(&cache->cd_used_over_time);
- drw_mesh_attributes_clear(&cache->attr_used_over_time);
+ drw_attributes_clear(&cache->attr_used_over_time);
}
static void drw_add_attributes_vbo(GPUBatch *batch,
MeshBufferList *mbuflist,
- DRW_MeshAttributes *attr_used)
+ DRW_Attributes *attr_used)
{
for (int i = 0; i < attr_used->num_requests; i++) {
DRW_vbo_request(batch, &mbuflist->vbo.attr[i]);
@@ -1721,7 +1606,7 @@ void DRW_mesh_batch_cache_create_requested(struct TaskGraph *task_graph,
/* TODO(fclem): We could be a bit smarter here and only do it per
* material. */
bool cd_overlap = mesh_cd_layers_type_overlap(cache->cd_used, cache->cd_needed);
- bool attr_overlap = drw_mesh_attributes_overlap(&cache->attr_used, &cache->attr_needed);
+ bool attr_overlap = drw_attributes_overlap(&cache->attr_used, &cache->attr_needed);
if (cd_overlap == false || attr_overlap == false) {
FOREACH_MESH_BUFFER_CACHE (cache, mbc) {
if ((cache->cd_used.uv & cache->cd_needed.uv) != cache->cd_needed.uv) {
@@ -1741,7 +1626,7 @@ void DRW_mesh_batch_cache_create_requested(struct TaskGraph *task_graph,
if ((cache->cd_used.vcol & cache->cd_needed.vcol) != cache->cd_needed.vcol) {
GPU_VERTBUF_DISCARD_SAFE(mbc->buff.vbo.vcol);
}
- if (!drw_mesh_attributes_overlap(&cache->attr_used, &cache->attr_needed)) {
+ if (!drw_attributes_overlap(&cache->attr_used, &cache->attr_needed)) {
for (int i = 0; i < GPU_MAX_ATTR; i++) {
GPU_VERTBUF_DISCARD_SAFE(mbc->buff.vbo.attr[i]);
}
@@ -1756,13 +1641,13 @@ void DRW_mesh_batch_cache_create_requested(struct TaskGraph *task_graph,
cache->batch_ready &= ~(MBC_SURFACE);
mesh_cd_layers_type_merge(&cache->cd_used, cache->cd_needed);
- drw_mesh_attributes_merge(&cache->attr_used, &cache->attr_needed, mesh_render_mutex);
+ drw_attributes_merge(&cache->attr_used, &cache->attr_needed, mesh_render_mutex);
}
mesh_cd_layers_type_merge(&cache->cd_used_over_time, cache->cd_needed);
mesh_cd_layers_type_clear(&cache->cd_needed);
- drw_mesh_attributes_merge(&cache->attr_used_over_time, &cache->attr_needed, mesh_render_mutex);
- drw_mesh_attributes_clear(&cache->attr_needed);
+ drw_attributes_merge(&cache->attr_used_over_time, &cache->attr_needed, mesh_render_mutex);
+ drw_attributes_clear(&cache->attr_needed);
}
if (batch_requested & MBC_EDITUV) {