git.blender.org/blender.git
Diffstat (limited to 'source/blender/draw')
-rw-r--r--  source/blender/draw/DRW_engine.h                 |   2
-rw-r--r--  source/blender/draw/intern/draw_instance_data.c  | 367
-rw-r--r--  source/blender/draw/intern/draw_instance_data.h  |  22
-rw-r--r--  source/blender/draw/intern/draw_manager.c        |   5
-rw-r--r--  source/blender/draw/intern/draw_manager.h        |  16
-rw-r--r--  source/blender/draw/intern/draw_manager_data.c   |  22
-rw-r--r--  source/blender/draw/intern/draw_manager_exec.c   |  18
7 files changed, 452 insertions, 0 deletions
diff --git a/source/blender/draw/DRW_engine.h b/source/blender/draw/DRW_engine.h
index ca5c2c94b40..2d5b93f4272 100644
--- a/source/blender/draw/DRW_engine.h
+++ b/source/blender/draw/DRW_engine.h
@@ -36,6 +36,7 @@ struct ARegion;
struct DRWInstanceDataList;
struct Depsgraph;
struct DrawEngineType;
+struct GHash;
struct GPUMaterial;
struct GPUOffScreen;
struct GPUViewport;
@@ -140,6 +141,7 @@ void DRW_render_gpencil(struct RenderEngine *engine, struct Depsgraph *depsgraph
/* This is here because GPUViewport needs it */
struct DRWInstanceDataList *DRW_instance_data_list_create(void);
void DRW_instance_data_list_free(struct DRWInstanceDataList *idatalist);
+void DRW_uniform_attrs_pool_free(struct GHash *table);
void DRW_render_context_enable(struct Render *render);
void DRW_render_context_disable(struct Render *render);
diff --git a/source/blender/draw/intern/draw_instance_data.c b/source/blender/draw/intern/draw_instance_data.c
index 4050a5f8b69..47710c32916 100644
--- a/source/blender/draw/intern/draw_instance_data.c
+++ b/source/blender/draw/intern/draw_instance_data.c
@@ -30,9 +30,20 @@
*/
#include "draw_instance_data.h"
+#include "draw_manager.h"
+
#include "DRW_engine.h"
#include "DRW_render.h" /* For DRW_shgroup_get_instance_count() */
+#include "GPU_material.h"
+
+#include "DNA_particle_types.h"
+
+#include "BKE_duplilist.h"
+
+#include "RNA_access.h"
+
+#include "BLI_bitmap.h"
#include "BLI_memblock.h"
#include "BLI_mempool.h"
#include "BLI_utildefines.h"
@@ -408,3 +419,359 @@ void DRW_instance_data_list_resize(DRWInstanceDataList *idatalist)
}
/** \} */
+/* -------------------------------------------------------------------- */
+/** \name Sparse Uniform Buffer
+ * \{ */
+
+#define CHUNK_LIST_STEP (1 << 4)
+
+/** A chunked UBO manager that doesn't actually allocate unneeded chunks. */
+typedef struct DRWSparseUniformBuf {
+ /* Memory buffers used to stage chunk data before transfer to UBOs. */
+ char **chunk_buffers;
+ /* Uniform buffer objects with flushed data. */
+ struct GPUUniformBuf **chunk_ubos;
+ /* True if the relevant chunk contains data (distinct from simply being allocated). */
+ BLI_bitmap *chunk_used;
+
+ int num_chunks;
+ unsigned int item_size, chunk_size, chunk_bytes;
+} DRWSparseUniformBuf;
+
+static void drw_sparse_uniform_buffer_init(DRWSparseUniformBuf *buffer,
+ unsigned int item_size,
+ unsigned int chunk_size)
+{
+ buffer->chunk_buffers = NULL;
+ buffer->chunk_used = NULL;
+ buffer->chunk_ubos = NULL;
+ buffer->num_chunks = 0;
+ buffer->item_size = item_size;
+ buffer->chunk_size = chunk_size;
+ buffer->chunk_bytes = item_size * chunk_size;
+}
+
+/** Allocate a chunked UBO with the specified item and chunk size. */
+DRWSparseUniformBuf *DRW_sparse_uniform_buffer_new(unsigned int item_size, unsigned int chunk_size)
+{
+ DRWSparseUniformBuf *buf = MEM_mallocN(sizeof(DRWSparseUniformBuf), __func__);
+ drw_sparse_uniform_buffer_init(buf, item_size, chunk_size);
+ return buf;
+}
+
+/** Flush data from ordinary memory to UBOs. */
+void DRW_sparse_uniform_buffer_flush(DRWSparseUniformBuf *buffer)
+{
+ for (int i = 0; i < buffer->num_chunks; i++) {
+ if (BLI_BITMAP_TEST(buffer->chunk_used, i)) {
+ if (buffer->chunk_ubos[i] == NULL) {
+ buffer->chunk_ubos[i] = GPU_uniformbuf_create(buffer->chunk_bytes);
+ }
+ GPU_uniformbuf_update(buffer->chunk_ubos[i], buffer->chunk_buffers[i]);
+ }
+ }
+}
+
+/** Clear usage flags and free chunks not used since the last clear (all chunks if free_all is true). */
+void DRW_sparse_uniform_buffer_clear(DRWSparseUniformBuf *buffer, bool free_all)
+{
+ int max_used_chunk = 0;
+
+ for (int i = 0; i < buffer->num_chunks; i++) {
+ /* Delete buffers that were not used since the last clear call. */
+ if (free_all || !BLI_BITMAP_TEST(buffer->chunk_used, i)) {
+ MEM_SAFE_FREE(buffer->chunk_buffers[i]);
+
+ if (buffer->chunk_ubos[i]) {
+ GPU_uniformbuf_free(buffer->chunk_ubos[i]);
+ buffer->chunk_ubos[i] = NULL;
+ }
+ }
+ else {
+ max_used_chunk = i + 1;
+ }
+ }
+
+ /* Shrink the chunk array if appropriate. */
+ const int old_num_chunks = buffer->num_chunks;
+
+ buffer->num_chunks = (max_used_chunk + CHUNK_LIST_STEP - 1) & ~(CHUNK_LIST_STEP - 1);
+
+ if (buffer->num_chunks == 0) {
+ /* Ensure that an empty pool holds no memory allocations. */
+ MEM_SAFE_FREE(buffer->chunk_buffers);
+ MEM_SAFE_FREE(buffer->chunk_used);
+ MEM_SAFE_FREE(buffer->chunk_ubos);
+ return;
+ }
+
+ if (buffer->num_chunks != old_num_chunks) {
+ buffer->chunk_buffers = MEM_recallocN(buffer->chunk_buffers,
+ buffer->num_chunks * sizeof(void *));
+ buffer->chunk_ubos = MEM_recallocN(buffer->chunk_ubos, buffer->num_chunks * sizeof(void *));
+ BLI_BITMAP_RESIZE(buffer->chunk_used, buffer->num_chunks);
+ }
+
+ BLI_bitmap_set_all(buffer->chunk_used, false, buffer->num_chunks);
+}
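For reference, a worked example of the CHUNK_LIST_STEP rounding used above and in DRW_sparse_uniform_buffer_ensure_item() below (editorial note, not part of the patch):

/* With CHUNK_LIST_STEP == 16, the clear path rounds the highest used chunk
 * count up to a multiple of 16:
 *   max_used_chunk = 0   ->  num_chunks = 0    (all arrays are freed)
 *   max_used_chunk = 5   ->  num_chunks = 16
 *   max_used_chunk = 17  ->  num_chunks = 32
 * The ensure_item path uses (chunk + 16) & ~15 instead, i.e. the smallest
 * multiple of 16 strictly greater than the requested chunk index. */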
+
+/** Frees the buffer. */
+void DRW_sparse_uniform_buffer_free(DRWSparseUniformBuf *buffer)
+{
+ DRW_sparse_uniform_buffer_clear(buffer, true);
+ MEM_freeN(buffer);
+}
+
+/** Returns true if the buffer contains no allocated chunks. */
+bool DRW_sparse_uniform_buffer_is_empty(DRWSparseUniformBuf *buffer)
+{
+ return buffer->num_chunks == 0;
+}
+
+static GPUUniformBuf *drw_sparse_uniform_buffer_get_ubo(DRWSparseUniformBuf *buffer, int chunk)
+{
+ if (buffer && chunk < buffer->num_chunks && BLI_BITMAP_TEST(buffer->chunk_used, chunk)) {
+ return buffer->chunk_ubos[chunk];
+ }
+ else {
+ return NULL;
+ }
+}
+
+/** Bind the UBO for the given chunk, if present. A NULL buffer pointer is handled as empty. */
+void DRW_sparse_uniform_buffer_bind(DRWSparseUniformBuf *buffer, int chunk, int location)
+{
+ GPUUniformBuf *ubo = drw_sparse_uniform_buffer_get_ubo(buffer, chunk);
+ if (ubo) {
+ GPU_uniformbuf_bind(ubo, location);
+ }
+}
+
+/** Unbind the UBO for the given chunk, if present. A NULL buffer pointer is handled as empty. */
+void DRW_sparse_uniform_buffer_unbind(DRWSparseUniformBuf *buffer, int chunk)
+{
+ GPUUniformBuf *ubo = drw_sparse_uniform_buffer_get_ubo(buffer, chunk);
+ if (ubo) {
+ GPU_uniformbuf_unbind(ubo);
+ }
+}
+
+/** Returns a pointer to the given item of the given chunk, allocating memory if necessary. */
+void *DRW_sparse_uniform_buffer_ensure_item(DRWSparseUniformBuf *pool, int chunk, int item)
+{
+ if (chunk >= pool->num_chunks) {
+ pool->num_chunks = (chunk + CHUNK_LIST_STEP) & ~(CHUNK_LIST_STEP - 1);
+ pool->chunk_buffers = MEM_recallocN(pool->chunk_buffers, pool->num_chunks * sizeof(void *));
+ pool->chunk_ubos = MEM_recallocN(pool->chunk_ubos, pool->num_chunks * sizeof(void *));
+ BLI_BITMAP_RESIZE(pool->chunk_used, pool->num_chunks);
+ }
+
+ char *buffer = pool->chunk_buffers[chunk];
+
+ if (buffer == NULL) {
+ pool->chunk_buffers[chunk] = buffer = MEM_callocN(pool->chunk_bytes, __func__);
+ }
+ else if (!BLI_BITMAP_TEST(pool->chunk_used, chunk)) {
+ memset(buffer, 0, pool->chunk_bytes);
+ }
+
+ BLI_BITMAP_ENABLE(pool->chunk_used, chunk);
+
+ return buffer + pool->item_size * item;
+}
+
+/** \} */
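To illustrate how this API fits together, here is a minimal usage sketch; it is not part of the patch, and the concrete sizes and indices are made up for illustration:

/* Minimal usage sketch (hypothetical values, not code from the patch). */
DRWSparseUniformBuf *buf = DRW_sparse_uniform_buffer_new(sizeof(float[4]), 512);

/* Stage data on the CPU; only chunks that are actually written get allocated. */
float *item = DRW_sparse_uniform_buffer_ensure_item(buf, /*chunk=*/3, /*item=*/17);
copy_v4_fl4(item, 1.0f, 0.5f, 0.25f, 1.0f);

/* Upload every used chunk to its UBO. */
DRW_sparse_uniform_buffer_flush(buf);

/* Bind the chunk needed by the current draw call, then unbind it. */
DRW_sparse_uniform_buffer_bind(buf, 3, /*location=*/0);
DRW_sparse_uniform_buffer_unbind(buf, 3);

/* Reset between frames (chunks not used since the previous clear are freed),
 * then free everything. */
DRW_sparse_uniform_buffer_clear(buf, false);
DRW_sparse_uniform_buffer_free(buf);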
+
+/* -------------------------------------------------------------------- */
+/** \name Uniform Attribute Buffers
+ * \{ */
+
+/** Sparse UBO buffer for a specific uniform attribute list. */
+typedef struct DRWUniformAttrBuf {
+ /* Attribute list (also used as hash table key) handled by this buffer. */
+ GPUUniformAttrList key;
+ /* Sparse UBO buffer containing the attribute values. */
+ DRWSparseUniformBuf ubos;
+ /* Last handle used to update the buffer; checked to avoid redundant updates. */
+ DRWResourceHandle last_handle;
+ /* Linked-list pointer used to free buffers that are empty and no longer needed. */
+ struct DRWUniformAttrBuf *next_empty;
+} DRWUniformAttrBuf;
+
+static DRWUniformAttrBuf *drw_uniform_attrs_pool_ensure(GHash *table, GPUUniformAttrList *key)
+{
+ void **pkey, **pval;
+
+ if (!BLI_ghash_ensure_p_ex(table, key, &pkey, &pval)) {
+ DRWUniformAttrBuf *buffer = MEM_callocN(sizeof(*buffer), __func__);
+
+ *pkey = &buffer->key;
+ *pval = buffer;
+
+ GPU_uniform_attr_list_copy(&buffer->key, key);
+ drw_sparse_uniform_buffer_init(
+ &buffer->ubos, key->count * sizeof(float[4]), DRW_RESOURCE_CHUNK_LEN);
+
+ buffer->last_handle = (DRWResourceHandle)-1;
+ }
+
+ return (DRWUniformAttrBuf *)*pval;
+}
+
+/* This function mirrors lookup_property in cycles/blender/blender_object.cpp */
+static bool drw_uniform_property_lookup(ID *id, const char *name, float r_data[4])
+{
+ PointerRNA ptr, id_ptr;
+ PropertyRNA *prop;
+
+ if (!id) {
+ return false;
+ }
+
+ RNA_id_pointer_create(id, &id_ptr);
+
+ if (!RNA_path_resolve(&id_ptr, name, &ptr, &prop)) {
+ return false;
+ }
+
+ PropertyType type = RNA_property_type(prop);
+ int arraylen = RNA_property_array_length(&ptr, prop);
+
+ if (arraylen == 0) {
+ float value;
+
+ if (type == PROP_FLOAT)
+ value = RNA_property_float_get(&ptr, prop);
+ else if (type == PROP_INT)
+ value = RNA_property_int_get(&ptr, prop);
+ else
+ return false;
+
+ copy_v4_fl4(r_data, value, value, value, 1);
+ return true;
+ }
+ else if (type == PROP_FLOAT && arraylen <= 4) {
+ copy_v4_fl4(r_data, 0, 0, 0, 1);
+ RNA_property_float_get_array(&ptr, prop, r_data);
+ return true;
+ }
+
+ return false;
+}
+
+/* This function mirrors lookup_instance_property in cycles/blender/blender_object.cpp */
+static void drw_uniform_attribute_lookup(GPUUniformAttr *attr,
+ Object *ob,
+ Object *dupli_parent,
+ DupliObject *dupli_source,
+ float r_data[4])
+{
+ char idprop_name[sizeof(attr->name) + 4];
+
+ copy_v4_fl(r_data, 0);
+ sprintf(idprop_name, "[\"%s\"]", attr->name);
+
+ /* If requesting instance data, check the parent particle system and object. */
+ if (attr->use_dupli) {
+ if (dupli_source && dupli_source->particle_system) {
+ ParticleSettings *settings = dupli_source->particle_system->part;
+ if (drw_uniform_property_lookup((ID *)settings, idprop_name, r_data) ||
+ drw_uniform_property_lookup((ID *)settings, attr->name, r_data)) {
+ return;
+ }
+ }
+ if (drw_uniform_property_lookup((ID *)dupli_parent, idprop_name, r_data) ||
+ drw_uniform_property_lookup((ID *)dupli_parent, attr->name, r_data)) {
+ return;
+ }
+ }
+
+ /* Check the object and mesh. */
+ if (ob) {
+ if (drw_uniform_property_lookup((ID *)ob, idprop_name, r_data) ||
+ drw_uniform_property_lookup((ID *)ob, attr->name, r_data) ||
+ drw_uniform_property_lookup((ID *)ob->data, idprop_name, r_data) ||
+ drw_uniform_property_lookup((ID *)ob->data, attr->name, r_data)) {
+ return;
+ }
+ }
+}
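As a concrete illustration of the precedence implemented above (the attribute name "density" is hypothetical, not from the patch):

/* For a uniform attribute named "density" with use_dupli set, the lookup
 * tries, in order, and stops at the first lookup that succeeds:
 *   1. dupli particle settings:  settings["density"], then settings.density
 *   2. dupli parent object:      parent["density"],   then parent.density
 *   3. the object itself:        ob["density"],       then ob.density
 *   4. the object data:          ob->data["density"], then ob->data.density
 * If nothing resolves, r_data keeps the zero vector written by copy_v4_fl(). */
float value[4];
drw_uniform_attribute_lookup(attr, ob, dupli_parent, dupli_source, value);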
+
+void drw_uniform_attrs_pool_update(GHash *table,
+ GPUUniformAttrList *key,
+ DRWResourceHandle *handle,
+ Object *ob,
+ Object *dupli_parent,
+ DupliObject *dupli_source)
+{
+ DRWUniformAttrBuf *buffer = drw_uniform_attrs_pool_ensure(table, key);
+
+ if (buffer->last_handle != *handle) {
+ buffer->last_handle = *handle;
+
+ int chunk = DRW_handle_chunk_get(handle);
+ int item = DRW_handle_id_get(handle);
+ float(*values)[4] = DRW_sparse_uniform_buffer_ensure_item(&buffer->ubos, chunk, item);
+
+ LISTBASE_FOREACH (GPUUniformAttr *, attr, &buffer->key.list) {
+ drw_uniform_attribute_lookup(attr, ob, dupli_parent, dupli_source, *values++);
+ }
+ }
+}
+
+DRWSparseUniformBuf *DRW_uniform_attrs_pool_find_ubo(GHash *table, struct GPUUniformAttrList *key)
+{
+ DRWUniformAttrBuf *buffer = BLI_ghash_lookup(table, key);
+ return buffer ? &buffer->ubos : NULL;
+}
+
+GHash *DRW_uniform_attrs_pool_new(void)
+{
+ return GPU_uniform_attr_list_hash_new("obattr_hash");
+}
+
+void DRW_uniform_attrs_pool_flush_all(GHash *table)
+{
+ GHASH_FOREACH_BEGIN (DRWUniformAttrBuf *, buffer, table) {
+ DRW_sparse_uniform_buffer_flush(&buffer->ubos);
+ }
+ GHASH_FOREACH_END();
+}
+
+static void drw_uniform_attrs_pool_free_cb(void *ptr)
+{
+ DRWUniformAttrBuf *buffer = ptr;
+
+ GPU_uniform_attr_list_free(&buffer->key);
+ DRW_sparse_uniform_buffer_clear(&buffer->ubos, true);
+ MEM_freeN(buffer);
+}
+
+void DRW_uniform_attrs_pool_clear_all(GHash *table)
+{
+ DRWUniformAttrBuf *remove_list = NULL;
+
+ GHASH_FOREACH_BEGIN (DRWUniformAttrBuf *, buffer, table) {
+ buffer->last_handle = (DRWResourceHandle)-1;
+ DRW_sparse_uniform_buffer_clear(&buffer->ubos, false);
+
+ if (DRW_sparse_uniform_buffer_is_empty(&buffer->ubos)) {
+ buffer->next_empty = remove_list;
+ remove_list = buffer;
+ }
+ }
+ GHASH_FOREACH_END();
+
+ while (remove_list) {
+ DRWUniformAttrBuf *buffer = remove_list;
+ remove_list = buffer->next_empty;
+ BLI_ghash_remove(table, &buffer->key, NULL, drw_uniform_attrs_pool_free_cb);
+ }
+}
+
+void DRW_uniform_attrs_pool_free(GHash *table)
+{
+ BLI_ghash_free(table, NULL, drw_uniform_attrs_pool_free_cb);
+}
+
+/** \} */
diff --git a/source/blender/draw/intern/draw_instance_data.h b/source/blender/draw/intern/draw_instance_data.h
index e562d99097e..c959a9e19d6 100644
--- a/source/blender/draw/intern/draw_instance_data.h
+++ b/source/blender/draw/intern/draw_instance_data.h
@@ -31,8 +31,12 @@
#define DRW_BUFFER_VERTS_CHUNK 128
+struct GHash;
+struct GPUUniformAttrList;
+
typedef struct DRWInstanceData DRWInstanceData;
typedef struct DRWInstanceDataList DRWInstanceDataList;
+typedef struct DRWSparseUniformBuf DRWSparseUniformBuf;
void *DRW_instance_data_next(DRWInstanceData *idata);
DRWInstanceData *DRW_instance_data_request(DRWInstanceDataList *idatalist, uint attr_size);
@@ -54,3 +58,21 @@ void DRW_instance_buffer_finish(DRWInstanceDataList *idatalist);
void DRW_instance_data_list_reset(DRWInstanceDataList *idatalist);
void DRW_instance_data_list_free_unused(DRWInstanceDataList *idatalist);
void DRW_instance_data_list_resize(DRWInstanceDataList *idatalist);
+
+/* Sparse chunked UBO manager. */
+DRWSparseUniformBuf *DRW_sparse_uniform_buffer_new(unsigned int item_size,
+ unsigned int chunk_size);
+void DRW_sparse_uniform_buffer_flush(DRWSparseUniformBuf *buffer);
+void DRW_sparse_uniform_buffer_clear(DRWSparseUniformBuf *buffer, bool free_all);
+void DRW_sparse_uniform_buffer_free(DRWSparseUniformBuf *buffer);
+bool DRW_sparse_uniform_buffer_is_empty(DRWSparseUniformBuf *buffer);
+void DRW_sparse_uniform_buffer_bind(DRWSparseUniformBuf *buffer, int chunk, int location);
+void DRW_sparse_uniform_buffer_unbind(DRWSparseUniformBuf *buffer, int chunk);
+void *DRW_sparse_uniform_buffer_ensure_item(DRWSparseUniformBuf *buffer, int chunk, int item);
+
+/* Uniform attribute UBO management. */
+struct GHash *DRW_uniform_attrs_pool_new(void);
+void DRW_uniform_attrs_pool_flush_all(struct GHash *table);
+void DRW_uniform_attrs_pool_clear_all(struct GHash *table);
+struct DRWSparseUniformBuf *DRW_uniform_attrs_pool_find_ubo(struct GHash *table,
+ struct GPUUniformAttrList *key);
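Taken together, the declarations above imply the following per-viewport lifecycle. This is a sketch, not code from the patch; 'pool' is local, and 'attrs_key', 'chunk' and 'location' are hypothetical placeholders (in practice the key comes from GPU_material_uniform_attributes(), as in DRW_shgroup_add_material_resources()).

/* Sketch of the pool lifecycle (hypothetical placeholders). */
GHash *pool = DRW_uniform_attrs_pool_new();

/* Cache population: drw_uniform_attrs_pool_update() stages one float[4] per
 * attribute per object handle into the matching sparse buffer. */

/* End of cache population: upload all staged chunks. */
DRW_uniform_attrs_pool_flush_all(pool);

/* Draw time: look up the sparse UBO for a shader's attribute list and bind
 * the chunk that matches the current resource handle. */
DRWSparseUniformBuf *ubo = DRW_uniform_attrs_pool_find_ubo(pool, attrs_key);
DRW_sparse_uniform_buffer_bind(ubo, chunk, location);

/* Between redraws: reset handles and drop buffers that ended up unused. */
DRW_uniform_attrs_pool_clear_all(pool);

/* Viewport teardown (declared in DRW_engine.h). */
DRW_uniform_attrs_pool_free(pool);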
diff --git a/source/blender/draw/intern/draw_manager.c b/source/blender/draw/intern/draw_manager.c
index b60583deda3..f51328e9bc9 100644
--- a/source/blender/draw/intern/draw_manager.c
+++ b/source/blender/draw/intern/draw_manager.c
@@ -467,6 +467,8 @@ static void drw_viewport_cache_resize(void)
BLI_memblock_clear(DST.vmempool->passes, NULL);
BLI_memblock_clear(DST.vmempool->views, NULL);
BLI_memblock_clear(DST.vmempool->images, NULL);
+
+ DRW_uniform_attrs_pool_clear_all(DST.vmempool->obattrs_ubo_pool);
}
DRW_instance_data_list_free_unused(DST.idatalist);
@@ -593,6 +595,9 @@ static void drw_viewport_var_init(void)
if (DST.vmempool->images == NULL) {
DST.vmempool->images = BLI_memblock_create(sizeof(GPUTexture *));
}
+ if (DST.vmempool->obattrs_ubo_pool == NULL) {
+ DST.vmempool->obattrs_ubo_pool = DRW_uniform_attrs_pool_new();
+ }
DST.resource_handle = 0;
DST.pass_handle = 0;
diff --git a/source/blender/draw/intern/draw_manager.h b/source/blender/draw/intern/draw_manager.h
index 8e505d5df71..dae204ad9f7 100644
--- a/source/blender/draw/intern/draw_manager.h
+++ b/source/blender/draw/intern/draw_manager.h
@@ -43,6 +43,9 @@
#include "draw_instance_data.h"
+struct Object;
+struct DupliObject;
+
/* Use draw manager to call GPU_select, see: DRW_draw_select_loop */
#define USE_GPU_SELECT
@@ -286,6 +289,7 @@ typedef enum {
/** Per drawcall uniforms/UBO */
DRW_UNIFORM_BLOCK_OBMATS,
DRW_UNIFORM_BLOCK_OBINFOS,
+ DRW_UNIFORM_BLOCK_OBATTRS,
DRW_UNIFORM_RESOURCE_CHUNK,
DRW_UNIFORM_RESOURCE_ID,
/** Legacy / Fallback */
@@ -317,6 +321,8 @@ struct DRWUniform {
float fvalue[4];
/* DRW_UNIFORM_INT_COPY */
int ivalue[4];
+ /* DRW_UNIFORM_BLOCK_OBATTRS */
+ struct GPUUniformAttrList *uniform_attrs;
};
int location; /* Uniform location or binding point for textures and ubos. */
uint8_t type; /* DRWUniformType */
@@ -340,6 +346,9 @@ struct DRWShadingGroup {
struct {
int objectinfo; /* Equal to 1 if the shader needs obinfos. */
DRWResourceHandle pass_handle; /* Memblock key to parent pass. */
+
+ /* Set of uniform attributes used by this shader. */
+ struct GPUUniformAttrList *uniform_attrs;
};
/* This struct is used after cache populate if using the Z sorting.
* It will not conflict with the above struct. */
@@ -598,3 +607,10 @@ void drw_resource_buffer_finish(ViewportMemoryPool *vmempool);
GPUBatch *drw_cache_procedural_points_get(void);
GPUBatch *drw_cache_procedural_lines_get(void);
GPUBatch *drw_cache_procedural_triangles_get(void);
+
+void drw_uniform_attrs_pool_update(struct GHash *table,
+ struct GPUUniformAttrList *key,
+ DRWResourceHandle *handle,
+ struct Object *ob,
+ struct Object *dupli_parent,
+ struct DupliObject *dupli_source);
diff --git a/source/blender/draw/intern/draw_manager_data.c b/source/blender/draw/intern/draw_manager_data.c
index 5f394804bcf..f1d3f8c8d5a 100644
--- a/source/blender/draw/intern/draw_manager_data.c
+++ b/source/blender/draw/intern/draw_manager_data.c
@@ -128,6 +128,8 @@ void drw_resource_buffer_finish(ViewportMemoryPool *vmempool)
GPU_uniformbuf_update(vmempool->obinfos_ubo[i], data_infos);
}
+ DRW_uniform_attrs_pool_flush_all(vmempool->obattrs_ubo_pool);
+
/* Aligned alloc to avoid unaligned memcpy. */
DRWCommandChunk *chunk_tmp = MEM_mallocN_aligned(sizeof(DRWCommandChunk), 16, "tmp call chunk");
DRWCommandChunk *chunk;
@@ -209,6 +211,9 @@ static void drw_shgroup_uniform_create_ex(DRWShadingGroup *shgroup,
uni->texture_ref = (GPUTexture **)value;
uni->sampler_state = sampler_state;
break;
+ case DRW_UNIFORM_BLOCK_OBATTRS:
+ uni->uniform_attrs = (GPUUniformAttrList *)value;
+ break;
default:
uni->pvalue = (const float *)value;
break;
@@ -611,6 +616,15 @@ static DRWResourceHandle drw_resource_handle(DRWShadingGroup *shgroup,
}
}
+ if (shgroup->uniform_attrs) {
+ drw_uniform_attrs_pool_update(DST.vmempool->obattrs_ubo_pool,
+ shgroup->uniform_attrs,
+ &DST.ob_handle,
+ ob,
+ DST.dupli_parent,
+ DST.dupli_source);
+ }
+
return DST.ob_handle;
}
@@ -1184,6 +1198,7 @@ void DRW_buffer_add_entry_array(DRWCallBuffer *callbuf, const void *attr[], uint
static void drw_shgroup_init(DRWShadingGroup *shgroup, GPUShader *shader)
{
shgroup->uniforms = NULL;
+ shgroup->uniform_attrs = NULL;
int view_ubo_location = GPU_shader_get_builtin_block(shader, GPU_UNIFORM_BLOCK_VIEW);
int model_ubo_location = GPU_shader_get_builtin_block(shader, GPU_UNIFORM_BLOCK_MODEL);
@@ -1329,6 +1344,13 @@ void DRW_shgroup_add_material_resources(DRWShadingGroup *grp, struct GPUMaterial
if (ubo != NULL) {
DRW_shgroup_uniform_block(grp, GPU_UBO_BLOCK_NAME, ubo);
}
+
+ GPUUniformAttrList *uattrs = GPU_material_uniform_attributes(material);
+ if (uattrs != NULL) {
+ int loc = GPU_shader_get_uniform_block_binding(grp->shader, GPU_ATTRIBUTE_UBO_BLOCK_NAME);
+ drw_shgroup_uniform_create_ex(grp, loc, DRW_UNIFORM_BLOCK_OBATTRS, uattrs, 0, 0, 1);
+ grp->uniform_attrs = uattrs;
+ }
}
GPUVertFormat *DRW_shgroup_instance_format_array(const DRWInstanceAttrFormat attrs[],
diff --git a/source/blender/draw/intern/draw_manager_exec.c b/source/blender/draw/intern/draw_manager_exec.c
index 808b5cc675b..21bcaa2d2b2 100644
--- a/source/blender/draw/intern/draw_manager_exec.c
+++ b/source/blender/draw/intern/draw_manager_exec.c
@@ -57,12 +57,15 @@ typedef struct DRWCommandsState {
/* Resource location. */
int obmats_loc;
int obinfos_loc;
+ int obattrs_loc;
int baseinst_loc;
int chunkid_loc;
int resourceid_loc;
/* Legacy matrix support. */
int obmat_loc;
int obinv_loc;
+ /* Uniform Attributes. */
+ DRWSparseUniformBuf *obattrs_ubo;
/* Selection ID state. */
GPUVertBuf *select_buf;
uint select_id;
@@ -648,6 +651,12 @@ static void draw_update_uniforms(DRWShadingGroup *shgroup,
state->obinfos_loc = uni->location;
GPU_uniformbuf_bind(DST.vmempool->obinfos_ubo[0], uni->location);
break;
+ case DRW_UNIFORM_BLOCK_OBATTRS:
+ state->obattrs_loc = uni->location;
+ state->obattrs_ubo = DRW_uniform_attrs_pool_find_ubo(DST.vmempool->obattrs_ubo_pool,
+ uni->uniform_attrs);
+ DRW_sparse_uniform_buffer_bind(state->obattrs_ubo, 0, uni->location);
+ break;
case DRW_UNIFORM_RESOURCE_CHUNK:
state->chunkid_loc = uni->location;
GPU_shader_uniform_int(shgroup->shader, uni->location, 0);
@@ -762,6 +771,10 @@ static void draw_call_resource_bind(DRWCommandsState *state, const DRWResourceHa
GPU_uniformbuf_unbind(DST.vmempool->obinfos_ubo[state->resource_chunk]);
GPU_uniformbuf_bind(DST.vmempool->obinfos_ubo[chunk], state->obinfos_loc);
}
+ if (state->obattrs_loc != -1) {
+ DRW_sparse_uniform_buffer_unbind(state->obattrs_ubo, state->resource_chunk);
+ DRW_sparse_uniform_buffer_bind(state->obattrs_ubo, chunk, state->obattrs_loc);
+ }
state->resource_chunk = chunk;
}
@@ -884,6 +897,9 @@ static void draw_call_batching_finish(DRWShadingGroup *shgroup, DRWCommandsState
if (state->obinfos_loc != -1) {
GPU_uniformbuf_unbind(DST.vmempool->obinfos_ubo[state->resource_chunk]);
}
+ if (state->obattrs_loc != -1) {
+ DRW_sparse_uniform_buffer_unbind(state->obattrs_ubo, state->resource_chunk);
+ }
}
static void draw_shgroup(DRWShadingGroup *shgroup, DRWState pass_state)
@@ -893,11 +909,13 @@ static void draw_shgroup(DRWShadingGroup *shgroup, DRWState pass_state)
DRWCommandsState state = {
.obmats_loc = -1,
.obinfos_loc = -1,
+ .obattrs_loc = -1,
.baseinst_loc = -1,
.chunkid_loc = -1,
.resourceid_loc = -1,
.obmat_loc = -1,
.obinv_loc = -1,
+ .obattrs_ubo = NULL,
.drw_state_enabled = 0,
.drw_state_disabled = 0,
};
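Read together, the hunks above wire the new buffers through the draw manager roughly as follows (an editorial summary, not code from the patch):

/*
 * drw_viewport_var_init()       creates DST.vmempool->obattrs_ubo_pool once.
 * drw_resource_handle()         calls drw_uniform_attrs_pool_update() for each
 *                               object drawn with a shgroup that has uniform_attrs.
 * drw_resource_buffer_finish()  calls DRW_uniform_attrs_pool_flush_all().
 * draw_update_uniforms()        resolves DRW_UNIFORM_BLOCK_OBATTRS to a sparse
 *                               UBO via DRW_uniform_attrs_pool_find_ubo() and
 *                               binds chunk 0.
 * draw_call_resource_bind()     rebinds the chunk matching the resource handle.
 * draw_call_batching_finish()   unbinds the last bound chunk.
 * drw_viewport_cache_resize()   calls DRW_uniform_attrs_pool_clear_all().
 */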