Welcome to mirror list, hosted at ThFree Co, Russian Federation.

git.blender.org/blender.git - Unnamed repository; edit this file 'description' to name the repository.
summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
Diffstat (limited to 'source/blender/draw/intern/draw_instance_data.h')
-rw-r--r--source/blender/draw/intern/draw_instance_data.h48
1 file changed, 47 insertions, 1 deletion
diff --git a/source/blender/draw/intern/draw_instance_data.h b/source/blender/draw/intern/draw_instance_data.h
index c959a9e19d6..fb88b3dc6ec 100644
--- a/source/blender/draw/intern/draw_instance_data.h
+++ b/source/blender/draw/intern/draw_instance_data.h
@@ -38,21 +38,41 @@ typedef struct DRWInstanceData DRWInstanceData;
typedef struct DRWInstanceDataList DRWInstanceDataList;
typedef struct DRWSparseUniformBuf DRWSparseUniformBuf;
+/**
+ * Return a pointer to the next instance data space.
+ */
void *DRW_instance_data_next(DRWInstanceData *idata);
DRWInstanceData *DRW_instance_data_request(DRWInstanceDataList *idatalist, uint attr_size);
+/**
+ * This manager allows distributing existing batches for instancing
+ * attributes. This reduces the number of batch creations.
+ * Querying a batch is done with a vertex format. This format should
+ * be static so that its pointer never changes (because we are using
+ * this pointer as an identifier [we don't want to check the full
+ * format, which would be too slow]).
+ */
GPUVertBuf *DRW_temp_buffer_request(DRWInstanceDataList *idatalist,
GPUVertFormat *format,
int *vert_len);
+/**
+ * \note Does not return a valid drawable batch until DRW_instance_buffer_finish has run.
+ * Initialization is delayed because the instancer or geom may not be initialized yet.
+ */
GPUBatch *DRW_temp_batch_instance_request(DRWInstanceDataList *idatalist,
GPUVertBuf *buf,
GPUBatch *instancer,
GPUBatch *geom);
+/**
+ * \note Use only with buf allocated via DRW_temp_buffer_request.
+ */
GPUBatch *DRW_temp_batch_request(DRWInstanceDataList *idatalist,
GPUVertBuf *buf,
GPUPrimType type);
-/* Upload all instance data to the GPU as soon as possible. */
+/**
+ * Upload all instance data to the GPU as soon as possible.
+ */
void DRW_instance_buffer_finish(DRWInstanceDataList *idatalist);
void DRW_instance_data_list_reset(DRWInstanceDataList *idatalist);
@@ -60,17 +80,43 @@ void DRW_instance_data_list_free_unused(DRWInstanceDataList *idatalist);
void DRW_instance_data_list_resize(DRWInstanceDataList *idatalist);
/* Sparse chunked UBO manager. */
+
+/**
+ * Allocate a chunked UBO with the specified item and chunk size.
+ */
DRWSparseUniformBuf *DRW_sparse_uniform_buffer_new(unsigned int item_size,
unsigned int chunk_size);
+/**
+ * Flush data from ordinary memory to UBOs.
+ */
void DRW_sparse_uniform_buffer_flush(DRWSparseUniformBuf *buffer);
+/**
+ * Clean all buffers and free unused ones.
+ */
void DRW_sparse_uniform_buffer_clear(DRWSparseUniformBuf *buffer, bool free_all);
+/**
+ * Frees the buffer.
+ */
void DRW_sparse_uniform_buffer_free(DRWSparseUniformBuf *buffer);
+/**
+ * Checks if the buffer contains any allocated chunks.
+ */
bool DRW_sparse_uniform_buffer_is_empty(DRWSparseUniformBuf *buffer);
+/**
+ * Bind the UBO for the given chunk, if present. A NULL buffer pointer is handled as empty.
+ */
void DRW_sparse_uniform_buffer_bind(DRWSparseUniformBuf *buffer, int chunk, int location);
+/**
+ * Unbind the UBO for the given chunk, if present. A NULL buffer pointer is handled as empty.
+ */
void DRW_sparse_uniform_buffer_unbind(DRWSparseUniformBuf *buffer, int chunk);
+/**
+ * Returns a pointer to the given item of the given chunk, allocating memory if necessary.
+ */
void *DRW_sparse_uniform_buffer_ensure_item(DRWSparseUniformBuf *buffer, int chunk, int item);
/* Uniform attribute UBO management. */
+
struct GHash *DRW_uniform_attrs_pool_new(void);
void DRW_uniform_attrs_pool_flush_all(struct GHash *table);
void DRW_uniform_attrs_pool_clear_all(struct GHash *table);