git.blender.org/blender.git
author    Clément Foucault <foucault.clem@gmail.com>  2018-12-08 22:10:20 +0300
committer Clément Foucault <foucault.clem@gmail.com>  2018-12-10 21:02:17 +0300
commit    1d92888ee8e8abf7ba40c1875dedc84e0138be2c (patch)
tree      55e75bdd89e785906c048a794a2d976960dcf204 /source/blender/draw/intern/draw_cache.c
parent    33cc3344a26d674c1283c5fd8c007a63f0d8a5fc (diff)
DRW: Batch Cache: Add batch request system
This makes it possible for engines to ask for batches and only fill their data after all engine populate functions have run. This means that when creating the batch data we already know every batch needed for this redraw and the data it requires, which allows less redundant data preparation and better attribute masking.

Ideally, we would run all viewports' populate functions before executing the batch construction, but that is outside the scope of this patch. Conversion from the old request method will be progressive, and both can coexist (see uses of mesh_create_pos_and_nor()).
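To show how engines are expected to use the new API, here is a minimal sketch of the request-then-fill flow. The SketchMeshCache struct and the sketch_* functions are hypothetical stand-ins for engine/cache code; the real conversion lives in the mesh batch cache (see DRW_mesh_batch_cache_create_requested in the diff below).

/* Minimal sketch (hypothetical names); assumes the DRW_*_request
 * declarations and the GPU batch/buffer headers are in scope. */
typedef struct SketchMeshCache {
	GPUBatch *surface;
	GPUVertBuf *pos_nor;
	GPUIndexBuf *surf_tris;
} SketchMeshCache;

/* Populate phase: an engine only asks for the batch, nothing is built yet. */
GPUBatch *sketch_cache_get_surface(SketchMeshCache *cache)
{
	return DRW_batch_request(&cache->surface);
}

/* Construction phase: after all engines have run their populate functions,
 * fill only the buffers that were actually requested. */
void sketch_cache_create_requested(SketchMeshCache *cache)
{
	if (DRW_batch_requested(cache->surface, GPU_PRIM_TRIS)) {
		DRW_ibo_request(cache->surface, &cache->surf_tris);
		DRW_vbo_request(cache->surface, &cache->pos_nor);
	}
	if (DRW_vbo_requested(cache->pos_nor)) {
		/* Fill positions/normals, e.g. with mesh_create_pos_and_nor(). */
	}
	if (DRW_ibo_requested(cache->surf_tris)) {
		/* Fill the triangle index buffer. */
	}
}

Because the request functions only allocate empty GPUBatch/GPUVertBuf/GPUIndexBuf structs, several engines can request the same batch during populate and the data is prepared once during construction.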
Diffstat (limited to 'source/blender/draw/intern/draw_cache.c')
-rw-r--r--  source/blender/draw/intern/draw_cache.c  |  81
1 file changed, 79 insertions, 2 deletions
diff --git a/source/blender/draw/intern/draw_cache.c b/source/blender/draw/intern/draw_cache.c
index 70d9f960265..2d047226b07 100644
--- a/source/blender/draw/intern/draw_cache.c
+++ b/source/blender/draw/intern/draw_cache.c
@@ -48,10 +48,9 @@
#include "MEM_guardedalloc.h"
-#include "DRW_render.h"
-
#include "draw_cache.h"
#include "draw_cache_impl.h"
+#include "draw_manager.h"
/* Batch's only (free'd as an array) */
static struct DRWShapeCache {
@@ -3703,3 +3702,81 @@ GPUBatch *DRW_cache_cursor_get(bool crosshair_lines)
}
return *drw_cursor;
}
+
+/** \} */
+
+/* -------------------------------------------------------------------- */
+
+/** \name Batch Cache Impl. common
+ * \{ */
+
+GPUBatch *DRW_batch_request(GPUBatch **batch)
+{
+	if (*batch == NULL) {
+		*batch = MEM_callocN(sizeof(GPUBatch), "GPUBatch");
+	}
+	return *batch;
+}
+
+bool DRW_batch_requested(GPUBatch *batch, int prim_type)
+{
+	/* Batch has been requested if it has been created but not initialized. */
+	if (batch != NULL && batch->verts[0] == NULL) {
+		/* HACK. We init without a valid VBO and let the first vbo binding
+		 * fill verts[0]. */
+		GPU_batch_init_ex(batch, prim_type, (GPUVertBuf *)1, NULL, 0);
+		batch->verts[0] = NULL;
+		return true;
+	}
+	return false;
+}
+
+void DRW_ibo_request(GPUBatch *batch, GPUIndexBuf **ibo)
+{
+	if (*ibo == NULL) {
+		*ibo = MEM_callocN(sizeof(GPUIndexBuf), "GPUIndexBuf");
+	}
+	GPU_batch_vao_cache_clear(batch);
+	batch->elem = *ibo;
+}
+
+bool DRW_ibo_requested(GPUIndexBuf *ibo)
+{
+	/* TODO do not rely on data uploaded. This prevents multithreading.
+	 * (need access to a gl context) */
+	return (ibo != NULL && ibo->ibo_id == 0);
+}
+
+void DRW_vbo_request(GPUBatch *batch, GPUVertBuf **vbo)
+{
+	if (*vbo == NULL) {
+		*vbo = MEM_callocN(sizeof(GPUVertBuf), "GPUVertBuf");
+	}
+	/* HACK set first vbo if not init. */
+	if (batch->verts[0] == NULL) {
+		GPU_batch_vao_cache_clear(batch);
+		batch->verts[0] = *vbo;
+	}
+	else {
+		GPU_batch_vertbuf_add(batch, *vbo);
+	}
+}
+
+bool DRW_vbo_requested(GPUVertBuf *vbo)
+{
+	return (vbo != NULL && vbo->format.attr_len == 0);
+}
+
+void drw_batch_cache_generate_requested(Object *ob)
+{
+	switch (ob->type) {
+		case OB_MESH:
+			DRW_mesh_batch_cache_create_requested(ob);
+			break;
+		/* TODO all cases */
+		default:
+			break;
+	}
+}
+
+/** \} */
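For context, drw_batch_cache_generate_requested() is meant to run once per visible object after every engine's populate step. A rough sketch of such a call site follows; the function name and the object array are assumptions for illustration, not the actual draw_manager.c code:

/* Hypothetical manager-side loop: build the requested batches once all
 * populate callbacks for this redraw have run. */
static void sketch_generate_all_requested(Object **objects, int tot_objects)
{
	for (int i = 0; i < tot_objects; i++) {
		drw_batch_cache_generate_requested(objects[i]);
	}
}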