git.blender.org/blender.git

Diffstat (limited to 'source/blender/draw')
 source/blender/draw/intern/draw_cache.c               |  12
 source/blender/draw/intern/draw_cache.h               |   1
 source/blender/draw/intern/draw_cache_impl_mesh.c     |   2
 source/blender/draw/intern/draw_cache_impl_metaball.c |  10
 source/blender/draw/intern/draw_cache_impl_volume.c   |   2
 source/blender/draw/intern/draw_cache_inline.h        |  12
 source/blender/draw/intern/draw_instance_data.c       | 151
 source/blender/draw/intern/draw_manager.c             |   3
 source/blender/draw/intern/draw_manager.h             |   1
 source/blender/draw/intern/draw_manager_exec.c        |  41
10 files changed, 99 insertions(+), 136 deletions(-)
diff --git a/source/blender/draw/intern/draw_cache.c b/source/blender/draw/intern/draw_cache.c
index 46b7a88b2a6..4d7440a3276 100644
--- a/source/blender/draw/intern/draw_cache.c
+++ b/source/blender/draw/intern/draw_cache.c
@@ -152,18 +152,6 @@ void DRW_shape_cache_free(void)
}
}
-void DRW_shape_cache_reset(void)
-{
- uint i = sizeof(SHC) / sizeof(GPUBatch *);
- GPUBatch **batch = (GPUBatch **)&SHC;
- while (i--) {
- if (*batch) {
- GPU_batch_vao_cache_clear(*batch);
- }
- batch++;
- }
-}
-
/* -------------------------------------------------------------------- */
/** \name Procedural Batches
* \{ */
diff --git a/source/blender/draw/intern/draw_cache.h b/source/blender/draw/intern/draw_cache.h
index 5f1744a7aec..8597f86f8e6 100644
--- a/source/blender/draw/intern/draw_cache.h
+++ b/source/blender/draw/intern/draw_cache.h
@@ -33,7 +33,6 @@ struct VolumeGrid;
struct bGPDstroke;
void DRW_shape_cache_free(void);
-void DRW_shape_cache_reset(void);
/* 3D cursor */
struct GPUBatch *DRW_cache_cursor_get(bool crosshair_lines);
diff --git a/source/blender/draw/intern/draw_cache_impl_mesh.c b/source/blender/draw/intern/draw_cache_impl_mesh.c
index d6faeb16583..0e2be993787 100644
--- a/source/blender/draw/intern/draw_cache_impl_mesh.c
+++ b/source/blender/draw/intern/draw_cache_impl_mesh.c
@@ -1248,7 +1248,7 @@ void DRW_mesh_batch_cache_create_requested(struct TaskGraph *task_graph,
saved_elem_ranges[i] = cache->surface_per_mat[i]->elem;
/* Avoid deletion as the batch is owner. */
cache->surface_per_mat[i]->elem = NULL;
- cache->surface_per_mat[i]->owns_flag &= ~GPU_BATCH_OWNS_INDEX;
+ cache->surface_per_mat[i]->flag &= ~GPU_BATCH_OWNS_INDEX;
}
}
/* We can't discard batches at this point as they have been
diff --git a/source/blender/draw/intern/draw_cache_impl_metaball.c b/source/blender/draw/intern/draw_cache_impl_metaball.c
index 076d32ffe1f..5f0af06931e 100644
--- a/source/blender/draw/intern/draw_cache_impl_metaball.c
+++ b/source/blender/draw/intern/draw_cache_impl_metaball.c
@@ -155,7 +155,7 @@ static GPUVertBuf *mball_batch_cache_get_pos_and_normals(Object *ob, MetaBallBat
{
if (cache->pos_nor_in_order == NULL) {
ListBase *lb = &ob->runtime.curve_cache->disp;
- cache->pos_nor_in_order = MEM_callocN(sizeof(GPUVertBuf), __func__);
+ cache->pos_nor_in_order = GPU_vertbuf_create(GPU_USAGE_STATIC);
DRW_displist_vertbuf_create_pos_and_nor(lb, cache->pos_nor_in_order);
}
return cache->pos_nor_in_order;
@@ -165,7 +165,7 @@ static GPUIndexBuf *mball_batch_cache_get_edges_adj_lines(Object *ob, MetaBallBa
{
if (cache->edges_adj_lines == NULL) {
ListBase *lb = &ob->runtime.curve_cache->disp;
- cache->edges_adj_lines = MEM_callocN(sizeof(GPUVertBuf), __func__);
+ cache->edges_adj_lines = GPU_indexbuf_calloc();
DRW_displist_indexbuf_create_edges_adjacency_lines(
lb, cache->edges_adj_lines, &cache->is_manifold);
}
@@ -187,7 +187,7 @@ GPUBatch *DRW_metaball_batch_cache_get_triangles_with_normals(Object *ob)
if (cache->batch == NULL) {
ListBase *lb = &ob->runtime.curve_cache->disp;
- GPUIndexBuf *ibo = MEM_callocN(sizeof(GPUIndexBuf), __func__);
+ GPUIndexBuf *ibo = GPU_indexbuf_calloc();
DRW_displist_indexbuf_create_triangles_in_order(lb, ibo);
cache->batch = GPU_batch_create_ex(GPU_PRIM_TRIS,
mball_batch_cache_get_pos_and_normals(ob, cache),
@@ -234,10 +234,10 @@ GPUBatch *DRW_metaball_batch_cache_get_wireframes_face(Object *ob)
if (cache->face_wire.batch == NULL) {
ListBase *lb = &ob->runtime.curve_cache->disp;
- GPUVertBuf *vbo_wiredata = MEM_callocN(sizeof(GPUVertBuf), __func__);
+ GPUVertBuf *vbo_wiredata = GPU_vertbuf_create(GPU_USAGE_STATIC);
DRW_displist_vertbuf_create_wiredata(lb, vbo_wiredata);
- GPUIndexBuf *ibo = MEM_callocN(sizeof(GPUIndexBuf), __func__);
+ GPUIndexBuf *ibo = GPU_indexbuf_calloc();
DRW_displist_indexbuf_create_lines_in_order(lb, ibo);
cache->face_wire.batch = GPU_batch_create_ex(GPU_PRIM_LINES,
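
For illustration, a minimal sketch (not part of the commit) of the allocation
pattern these metaball hunks converge on: buffers are created through the GPU
API instead of raw MEM_callocN(), and ownership is handed to the batch. It
assumes the GPU_vertbuf_create()/GPU_indexbuf_calloc() calls visible above and
the GPU_BATCH_OWNS_VBO / GPU_BATCH_OWNS_INDEX owner flags;
example_build_mball_tris() is a hypothetical helper.

static GPUBatch *example_build_mball_tris(ListBase *lb)
{
  /* Allocate through the GPU module, never MEM_callocN(). */
  GPUVertBuf *vbo = GPU_vertbuf_create(GPU_USAGE_STATIC);
  DRW_displist_vertbuf_create_pos_and_nor(lb, vbo);

  GPUIndexBuf *ibo = GPU_indexbuf_calloc();
  DRW_displist_indexbuf_create_triangles_in_order(lb, ibo);

  /* The batch owns both buffers and discards them with itself. */
  return GPU_batch_create_ex(
      GPU_PRIM_TRIS, vbo, ibo, GPU_BATCH_OWNS_VBO | GPU_BATCH_OWNS_INDEX);
}
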
diff --git a/source/blender/draw/intern/draw_cache_impl_volume.c b/source/blender/draw/intern/draw_cache_impl_volume.c
index e07f5b33d58..825fec83cf1 100644
--- a/source/blender/draw/intern/draw_cache_impl_volume.c
+++ b/source/blender/draw/intern/draw_cache_impl_volume.c
@@ -163,7 +163,7 @@ static void drw_volume_wireframe_cb(
GPU_vertbuf_attr_fill_stride(cache->face_wire.pos_nor_in_order, nor_id, 0, &packed_normal);
/* Create wiredata. */
- GPUVertBuf *vbo_wiredata = MEM_callocN(sizeof(GPUVertBuf), __func__);
+ GPUVertBuf *vbo_wiredata = GPU_vertbuf_create(GPU_USAGE_STATIC);
DRW_vertbuf_create_wiredata(vbo_wiredata, totvert);
if (volume->display.wireframe_type == VOLUME_WIREFRAME_POINTS) {
diff --git a/source/blender/draw/intern/draw_cache_inline.h b/source/blender/draw/intern/draw_cache_inline.h
index 06d6f1afc31..0f0e1785a2a 100644
--- a/source/blender/draw/intern/draw_cache_inline.h
+++ b/source/blender/draw/intern/draw_cache_inline.h
@@ -48,7 +48,7 @@ BLI_INLINE GPUBatch *DRW_batch_request(GPUBatch **batch)
{
/* XXX TODO(fclem): We are writing to batch cache here. Need to make this thread safe. */
if (*batch == NULL) {
- *batch = GPU_batch_calloc(1);
+ *batch = GPU_batch_calloc();
}
return *batch;
}
@@ -69,11 +69,10 @@ BLI_INLINE bool DRW_batch_requested(GPUBatch *batch, int prim_type)
BLI_INLINE void DRW_ibo_request(GPUBatch *batch, GPUIndexBuf **ibo)
{
if (*ibo == NULL) {
- *ibo = MEM_callocN(sizeof(GPUIndexBuf), "GPUIndexBuf");
+ *ibo = GPU_indexbuf_calloc();
}
if (batch != NULL) {
- GPU_batch_vao_cache_clear(batch);
- batch->elem = *ibo;
+ GPU_batch_elembuf_set(batch, *ibo, false);
}
}
@@ -87,13 +86,12 @@ BLI_INLINE bool DRW_ibo_requested(GPUIndexBuf *ibo)
BLI_INLINE void DRW_vbo_request(GPUBatch *batch, GPUVertBuf **vbo)
{
if (*vbo == NULL) {
- *vbo = MEM_callocN(sizeof(GPUVertBuf), "GPUVertBuf");
+ *vbo = GPU_vertbuf_create(GPU_USAGE_STATIC);
}
if (batch != NULL) {
/* HACK set first vbo if not init. */
if (batch->verts[0] == NULL) {
- GPU_batch_vao_cache_clear(batch);
- batch->verts[0] = *vbo;
+ GPU_batch_vertbuf_add(batch, *vbo);
}
else {
/* HACK: bypass assert */
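
As a usage illustration (the ExampleCache struct is hypothetical, not from the
commit), the request pattern these inline helpers implement: allocation is
lazy, buffers are filled later during batch cache creation, and attachment now
goes through GPU_batch_elembuf_set()/GPU_batch_vertbuf_add() instead of writing
batch members and clearing the VAO cache by hand.

typedef struct ExampleCache {
  GPUBatch *surface;
  GPUVertBuf *pos_nor;
  GPUIndexBuf *tris;
} ExampleCache;

static void example_request_surface(ExampleCache *cache)
{
  /* Lazily allocates the batch with GPU_batch_calloc(). */
  GPUBatch *batch = DRW_batch_request(&cache->surface);
  /* Lazily allocates the buffers and attaches them to the batch. */
  DRW_ibo_request(batch, &cache->tris);
  DRW_vbo_request(batch, &cache->pos_nor);
}
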
diff --git a/source/blender/draw/intern/draw_instance_data.c b/source/blender/draw/intern/draw_instance_data.c
index 5005f38c558..4e08e6e5129 100644
--- a/source/blender/draw/intern/draw_instance_data.c
+++ b/source/blender/draw/intern/draw_instance_data.c
@@ -59,50 +59,50 @@ struct DRWInstanceDataList {
};
typedef struct DRWTempBufferHandle {
- /** Must be first for casting. */
- GPUVertBuf buf;
+ GPUVertBuf *buf;
/** Format pointer for reuse. */
GPUVertFormat *format;
/** Touched vertex length for resize. */
int *vert_len;
} DRWTempBufferHandle;
-static ListBase g_idatalists = {NULL, NULL};
+typedef struct DRWTempInstancingHandle {
+ /** Copy of geom but with the per-instance attributes. */
+ GPUBatch *batch;
+ /** Batch containing instancing attributes. */
+ GPUBatch *instancer;
+ /** Callbuffer to be used instead of instancer. */
+ GPUVertBuf *buf;
+ /** Original non-instanced batch pointer. */
+ GPUBatch *geom;
+} DRWTempInstancingHandle;
-/* -------------------------------------------------------------------- */
-/** \name Instance Buffer Management
- * \{ */
+static ListBase g_idatalists = {NULL, NULL};
-static void instance_batch_free(GPUBatch *geom, void *UNUSED(user_data))
+static void instancing_batch_references_add(GPUBatch *batch)
{
- if (geom->verts[0] == NULL) {
- /** XXX This is a false positive case.
- * The batch has been requested but not init yet
- * and there is a chance that it might become init.
- */
- return;
+ for (int i = 0; i < GPU_BATCH_VBO_MAX_LEN && batch->verts[i]; i++) {
+ GPU_vertbuf_handle_ref_add(batch->verts[i]);
+ }
+ for (int i = 0; i < GPU_BATCH_INST_VBO_MAX_LEN && batch->inst[i]; i++) {
+ GPU_vertbuf_handle_ref_add(batch->inst[i]);
}
+}
- /* Free all batches that use the same vbos before they are reused. */
- /* TODO: Make it thread safe! Batch freeing can happen from another thread. */
- /* FIXME: This is not really correct. The correct way would be to check based on
- * the vertex buffers. We assume the batch containing the VBO is being freed when it should. */
- /* PERF: This is doing a linear search. This can be very costly. */
- LISTBASE_FOREACH (DRWInstanceDataList *, data_list, &g_idatalists) {
- BLI_memblock *memblock = data_list->pool_instancing;
- BLI_memblock_iter iter;
- BLI_memblock_iternew(memblock, &iter);
- GPUBatch **batch_ptr;
- while ((batch_ptr = (GPUBatch **)BLI_memblock_iterstep(&iter))) {
- GPUBatch *batch = *batch_ptr;
- /* Only check verts[0] that's enough. */
- if (batch->verts[0] == geom->verts[0]) {
- GPU_batch_clear(batch);
- }
- }
+static void instancing_batch_references_remove(GPUBatch *batch)
+{
+ for (int i = 0; i < GPU_BATCH_VBO_MAX_LEN && batch->verts[i]; i++) {
+ GPU_vertbuf_handle_ref_remove(batch->verts[i]);
+ }
+ for (int i = 0; i < GPU_BATCH_INST_VBO_MAX_LEN && batch->inst[i]; i++) {
+ GPU_vertbuf_handle_ref_remove(batch->inst[i]);
}
}
+/* -------------------------------------------------------------------- */
+/** \name Instance Buffer Management
+ * \{ */
+
/**
* This manager allows distributing existing batches for instancing
* attributes. This reduces the number of batch creations.
@@ -119,20 +119,23 @@ GPUVertBuf *DRW_temp_buffer_request(DRWInstanceDataList *idatalist,
BLI_assert(vert_len != NULL);
DRWTempBufferHandle *handle = BLI_memblock_alloc(idatalist->pool_buffers);
- GPUVertBuf *vert = &handle->buf;
- handle->vert_len = vert_len;
if (handle->format != format) {
handle->format = format;
- /* TODO/PERF: Save the allocated data from freeing to avoid reallocation. */
- GPU_vertbuf_clear(vert);
+ GPU_VERTBUF_DISCARD_SAFE(handle->buf);
+
+ GPUVertBuf *vert = GPU_vertbuf_create(GPU_USAGE_DYNAMIC);
GPU_vertbuf_init_with_format_ex(vert, format, GPU_USAGE_DYNAMIC);
GPU_vertbuf_data_alloc(vert, DRW_BUFFER_VERTS_CHUNK);
+
+ handle->buf = vert;
}
- return vert;
+ handle->vert_len = vert_len;
+ return handle->buf;
}
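
A hedged usage sketch of the buffer pool above (example_* names are
hypothetical): the caller shares a vertex counter with the pool, which reads
it back in DRW_instance_buffer_finish() to resize and upload the buffer.

static int example_vert_len = 0;

static GPUVertBuf *example_positions(DRWInstanceDataList *idatalist,
                                     GPUVertFormat *format)
{
  /* Reuses the pooled GPUVertBuf when `format` matches the previous use. */
  GPUVertBuf *vbo = DRW_temp_buffer_request(idatalist, format, &example_vert_len);
  /* ... fill `vbo`, incrementing example_vert_len per vertex ... */
  return vbo;
}
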
-/* NOTE: Does not return a valid drawable batch until DRW_instance_buffer_finish has run. */
+/* NOTE: Does not return a valid drawable batch until DRW_instance_buffer_finish has run.
+ * Initialization is delayed because instancer or geom may not be initialized yet. */
GPUBatch *DRW_temp_batch_instance_request(DRWInstanceDataList *idatalist,
GPUVertBuf *buf,
GPUBatch *instancer,
@@ -143,17 +146,17 @@ GPUBatch *DRW_temp_batch_instance_request(DRWInstanceDataList *idatalist,
/* Only call with one of them. */
BLI_assert((instancer != NULL) != (buf != NULL));
- GPUBatch **batch_ptr = BLI_memblock_alloc(idatalist->pool_instancing);
- if (*batch_ptr == NULL) {
- *batch_ptr = GPU_batch_calloc(1);
+ DRWTempInstancingHandle *handle = BLI_memblock_alloc(idatalist->pool_instancing);
+ if (handle->batch == NULL) {
+ handle->batch = GPU_batch_calloc();
}
- GPUBatch *batch = *batch_ptr;
+ GPUBatch *batch = handle->batch;
bool instancer_compat = buf ? ((batch->inst[0] == buf) && (buf->vbo_id != 0)) :
- ((batch->inst[0] == instancer->inst[0]) &&
- (batch->inst[1] == instancer->inst[1]));
- bool is_compatible = (batch->gl_prim_type == geom->gl_prim_type) && instancer_compat &&
- (batch->phase == GPU_BATCH_READY_TO_DRAW) && (batch->elem == geom->elem);
+ ((batch->inst[0] == instancer->verts[0]) &&
+ (batch->inst[1] == instancer->verts[1]));
+ bool is_compatible = (batch->prim_type == geom->prim_type) && instancer_compat &&
+ (batch->flag & GPU_BATCH_BUILDING) == 0 && (batch->elem == geom->elem);
for (int i = 0; i < GPU_BATCH_VBO_MAX_LEN && is_compatible; i++) {
if (batch->verts[i] != geom->verts[i]) {
is_compatible = false;
@@ -161,15 +164,13 @@ GPUBatch *DRW_temp_batch_instance_request(DRWInstanceDataList *idatalist,
}
if (!is_compatible) {
+ instancing_batch_references_remove(batch);
GPU_batch_clear(batch);
- /* Save args and init later */
- batch->inst[0] = buf;
- batch->inst[1] = (void *)instancer; /* HACK to save the pointer without other alloc. */
- batch->phase = GPU_BATCH_READY_TO_BUILD;
- batch->verts[0] = (void *)geom; /* HACK to save the pointer without other alloc. */
-
- /* Make sure to free this batch if the instance geom gets free. */
- GPU_batch_callback_free_set(geom, &instance_batch_free, NULL);
+ /* Save args and init later. */
+ batch->flag = GPU_BATCH_BUILDING;
+ handle->buf = buf;
+ handle->instancer = instancer;
+ handle->geom = geom;
}
return batch;
}
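
To make the deferred initialization explicit, a small sketch (assuming the
request/finish pairing shown in this file; example_instances() is
hypothetical):

static GPUBatch *example_instances(DRWInstanceDataList *idatalist,
                                   GPUBatch *instancer,
                                   GPUBatch *geom)
{
  /* Pass either `buf` or `instancer`, never both (asserted above). */
  GPUBatch *batch = DRW_temp_batch_instance_request(idatalist, NULL, instancer, geom);
  /* `batch` is flagged GPU_BATCH_BUILDING here: it may be recorded in draw
   * calls, but only becomes drawable after DRW_instance_buffer_finish(). */
  return batch;
}
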
@@ -179,14 +180,14 @@ GPUBatch *DRW_temp_batch_request(DRWInstanceDataList *idatalist,
GPUVertBuf *buf,
GPUPrimType prim_type)
{
- GPUBatch **batch_ptr = BLI_memblock_alloc(idatalist->pool_instancing);
+ GPUBatch **batch_ptr = BLI_memblock_alloc(idatalist->pool_batching);
if (*batch_ptr == NULL) {
- *batch_ptr = GPU_batch_calloc(1);
+ *batch_ptr = GPU_batch_calloc();
}
GPUBatch *batch = *batch_ptr;
bool is_compatible = (batch->verts[0] == buf) && (buf->vbo_id != 0) &&
- (batch->gl_prim_type == convert_prim_type_to_gl(prim_type));
+ (batch->prim_type == prim_type);
if (!is_compatible) {
GPU_batch_clear(batch);
GPU_batch_init(batch, prim_type, buf, NULL);
@@ -197,7 +198,13 @@ GPUBatch *DRW_temp_batch_request(DRWInstanceDataList *idatalist,
static void temp_buffer_handle_free(DRWTempBufferHandle *handle)
{
handle->format = NULL;
- GPU_vertbuf_clear(&handle->buf);
+ GPU_VERTBUF_DISCARD_SAFE(handle->buf);
+}
+
+static void temp_instancing_handle_free(DRWTempInstancingHandle *handle)
+{
+ instancing_batch_references_remove(handle->batch);
+ GPU_BATCH_DISCARD_SAFE(handle->batch);
}
static void temp_batch_free(GPUBatch **batch)
@@ -215,23 +222,22 @@ void DRW_instance_buffer_finish(DRWInstanceDataList *idatalist)
if (handle->vert_len != NULL) {
uint vert_len = *(handle->vert_len);
uint target_buf_size = ((vert_len / DRW_BUFFER_VERTS_CHUNK) + 1) * DRW_BUFFER_VERTS_CHUNK;
- if (target_buf_size < handle->buf.vertex_alloc) {
- GPU_vertbuf_data_resize(&handle->buf, target_buf_size);
+ if (target_buf_size < handle->buf->vertex_alloc) {
+ GPU_vertbuf_data_resize(handle->buf, target_buf_size);
}
- GPU_vertbuf_data_len_set(&handle->buf, vert_len);
- GPU_vertbuf_use(&handle->buf); /* Send data. */
+ GPU_vertbuf_data_len_set(handle->buf, vert_len);
+ GPU_vertbuf_use(handle->buf); /* Send data. */
}
}
/* Finish pending instancing batches. */
- GPUBatch **batch_ptr;
+ DRWTempInstancingHandle *handle_inst;
BLI_memblock_iternew(idatalist->pool_instancing, &iter);
- while ((batch_ptr = BLI_memblock_iterstep(&iter))) {
- GPUBatch *batch = *batch_ptr;
- if (batch && batch->phase == GPU_BATCH_READY_TO_BUILD) {
- GPUVertBuf *inst_buf = batch->inst[0];
- /* HACK see DRW_temp_batch_instance_request. */
- GPUBatch *inst_batch = (void *)batch->inst[1];
- GPUBatch *geom = (void *)batch->verts[0];
+ while ((handle_inst = BLI_memblock_iterstep(&iter))) {
+ GPUBatch *batch = handle_inst->batch;
+ if (batch && batch->flag == GPU_BATCH_BUILDING) {
+ GPUVertBuf *inst_buf = handle_inst->buf;
+ GPUBatch *inst_batch = handle_inst->instancer;
+ GPUBatch *geom = handle_inst->geom;
GPU_batch_copy(batch, geom);
if (inst_batch != NULL) {
for (int i = 0; i < GPU_BATCH_INST_VBO_MAX_LEN && inst_batch->verts[i]; i++) {
@@ -241,11 +247,14 @@ void DRW_instance_buffer_finish(DRWInstanceDataList *idatalist)
else {
GPU_batch_instbuf_add_ex(batch, inst_buf, false);
}
+ /* Add reference to avoid comparing pointers (in DRW_temp_batch_request) that could
+ * potentially be the same. This will delay the freeing of the GPUVertBuf itself. */
+ instancing_batch_references_add(batch);
}
}
/* Resize pools and free unused. */
BLI_memblock_clear(idatalist->pool_buffers, (MemblockValFreeFP)temp_buffer_handle_free);
- BLI_memblock_clear(idatalist->pool_instancing, (MemblockValFreeFP)temp_batch_free);
+ BLI_memblock_clear(idatalist->pool_instancing, (MemblockValFreeFP)temp_instancing_handle_free);
BLI_memblock_clear(idatalist->pool_batching, (MemblockValFreeFP)temp_batch_free);
}
@@ -318,7 +327,7 @@ DRWInstanceDataList *DRW_instance_data_list_create(void)
DRWInstanceDataList *idatalist = MEM_callocN(sizeof(DRWInstanceDataList), "DRWInstanceDataList");
idatalist->pool_batching = BLI_memblock_create(sizeof(GPUBatch *));
- idatalist->pool_instancing = BLI_memblock_create(sizeof(GPUBatch *));
+ idatalist->pool_instancing = BLI_memblock_create(sizeof(DRWTempInstancingHandle));
idatalist->pool_buffers = BLI_memblock_create(sizeof(DRWTempBufferHandle));
BLI_addtail(&g_idatalists, idatalist);
@@ -341,7 +350,7 @@ void DRW_instance_data_list_free(DRWInstanceDataList *idatalist)
}
BLI_memblock_destroy(idatalist->pool_buffers, (MemblockValFreeFP)temp_buffer_handle_free);
- BLI_memblock_destroy(idatalist->pool_instancing, (MemblockValFreeFP)temp_batch_free);
+ BLI_memblock_destroy(idatalist->pool_instancing, (MemblockValFreeFP)temp_instancing_handle_free);
BLI_memblock_destroy(idatalist->pool_batching, (MemblockValFreeFP)temp_batch_free);
BLI_remlink(&g_idatalists, idatalist);
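
Finally, a sketch of the reference ordering DRWTempInstancingHandle relies on
(illustrative only; example_handle_lifecycle() is hypothetical): pinning keeps
pooled vertex buffers alive so the pointer comparisons in later requests
cannot match a recycled allocation.

static void example_handle_lifecycle(DRWTempInstancingHandle *handle)
{
  /* After GPU_batch_copy() + instancing buffer setup in
   * DRW_instance_buffer_finish(): pin every attached GPUVertBuf. */
  instancing_batch_references_add(handle->batch);

  /* On pool clear/destroy (temp_instancing_handle_free): unpin first, then
   * discard the batch; each buffer is freed with its last reference. */
  instancing_batch_references_remove(handle->batch);
  GPU_BATCH_DISCARD_SAFE(handle->batch);
}
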
diff --git a/source/blender/draw/intern/draw_manager.c b/source/blender/draw/intern/draw_manager.c
index 4a5e07476a9..e436424b460 100644
--- a/source/blender/draw/intern/draw_manager.c
+++ b/source/blender/draw/intern/draw_manager.c
@@ -2824,7 +2824,6 @@ void DRW_opengl_context_enable_ex(bool restore)
if (!G.background) {
immActivate();
}
- BLF_batch_reset();
}
}
}
@@ -2888,13 +2887,11 @@ void DRW_gpu_render_context_enable(void *re_gpu_context)
BLI_assert(!BLI_thread_is_main());
GPU_context_active_set(re_gpu_context);
- DRW_shape_cache_reset(); /* XXX fix that too. */
}
/* Needs to be called BEFORE DRW_opengl_render_context_disable() */
void DRW_gpu_render_context_disable(void *UNUSED(re_gpu_context))
{
- DRW_shape_cache_reset(); /* XXX fix that too. */
GPU_context_active_set(NULL);
}
diff --git a/source/blender/draw/intern/draw_manager.h b/source/blender/draw/intern/draw_manager.h
index 92a01cbbe04..d15a55e7bef 100644
--- a/source/blender/draw/intern/draw_manager.h
+++ b/source/blender/draw/intern/draw_manager.h
@@ -35,6 +35,7 @@
#include "GPU_batch.h"
#include "GPU_context.h"
+#include "GPU_drawlist.h"
#include "GPU_framebuffer.h"
#include "GPU_shader.h"
#include "GPU_uniformbuffer.h"
diff --git a/source/blender/draw/intern/draw_manager_exec.c b/source/blender/draw/intern/draw_manager_exec.c
index e3860b1bfb2..b931bdd0cbe 100644
--- a/source/blender/draw/intern/draw_manager_exec.c
+++ b/source/blender/draw/intern/draw_manager_exec.c
@@ -54,8 +54,6 @@ typedef struct DRWCommandsState {
int resource_id;
int base_inst;
int inst_count;
- int v_first;
- int v_count;
bool neg_scale;
/* Resource location. */
int obmats_loc;
@@ -663,18 +661,9 @@ BLI_INLINE void draw_legacy_matrix_update(DRWShadingGroup *shgroup,
BLI_INLINE void draw_geometry_bind(DRWShadingGroup *shgroup, GPUBatch *geom)
{
- /* XXX hacking #GPUBatch. we don't want to call glUseProgram! (huge performance loss) */
- if (DST.batch) {
- DST.batch->program_in_use = false;
- }
-
DST.batch = geom;
- GPU_batch_set_shader_no_bind(geom, shgroup->shader);
-
- geom->program_in_use = true; /* XXX hacking #GPUBatch */
-
- GPU_batch_bind(geom);
+ GPU_batch_set_shader(geom, shgroup->shader);
}
BLI_INLINE void draw_geometry_execute(DRWShadingGroup *shgroup,
@@ -714,18 +703,12 @@ BLI_INLINE void draw_indirect_call(DRWShadingGroup *shgroup, DRWCommandsState *s
GPU_draw_list_submit(DST.draw_list);
draw_geometry_bind(shgroup, state->batch);
}
- GPU_draw_list_command_add(
- DST.draw_list, state->v_first, state->v_count, state->base_inst, state->inst_count);
+ GPU_draw_list_append(DST.draw_list, state->batch, state->base_inst, state->inst_count);
}
/* Fallback when unsupported */
else {
- draw_geometry_execute(shgroup,
- state->batch,
- state->v_first,
- state->v_count,
- state->base_inst,
- state->inst_count,
- state->baseinst_loc);
+ draw_geometry_execute(
+ shgroup, state->batch, 0, 0, state->base_inst, state->inst_count, state->baseinst_loc);
}
}
@@ -873,10 +856,10 @@ BLI_INLINE void draw_select_buffer(DRWShadingGroup *shgroup,
/* Batching */
if (!is_instancing) {
/* FIXME: Meh a bit nasty. */
- if (batch->gl_prim_type == convert_prim_type_to_gl(GPU_PRIM_TRIS)) {
+ if (batch->prim_type == GPU_PRIM_TRIS) {
count = 3;
}
- else if (batch->gl_prim_type == convert_prim_type_to_gl(GPU_PRIM_LINES)) {
+ else if (batch->prim_type == GPU_PRIM_LINES) {
count = 2;
}
}
@@ -1015,8 +998,6 @@ static void draw_call_batching_start(DRWCommandsState *state)
state->resource_id = -1;
state->base_inst = 0;
state->inst_count = 0;
- state->v_first = 0;
- state->v_count = 0;
state->batch = NULL;
state->select_id = -1;
@@ -1039,15 +1020,10 @@ static void draw_call_batching_do(DRWShadingGroup *shgroup,
draw_call_batching_flush(shgroup, state);
state->batch = call->batch;
- state->v_first = (call->batch->elem) ? call->batch->elem->index_start : 0;
- state->v_count = (call->batch->elem) ? call->batch->elem->index_len :
- call->batch->verts[0]->vertex_len;
state->inst_count = 1;
state->base_inst = id;
draw_call_resource_bind(state, &call->handle);
-
- GPU_draw_list_init(DST.draw_list, state->batch);
}
/* Is the id consecutive? */
else if (id != state->base_inst + state->inst_count) {
@@ -1111,10 +1087,6 @@ static void draw_shgroup(DRWShadingGroup *shgroup, DRWState pass_state)
}
GPU_shader_bind(shgroup->shader);
DST.shader = shgroup->shader;
- /* XXX hacking gawain */
- if (DST.batch) {
- DST.batch->program_in_use = false;
- }
DST.batch = NULL;
}
@@ -1305,7 +1277,6 @@ static void drw_draw_pass_ex(DRWPass *pass,
}
if (DST.batch) {
- DST.batch->program_in_use = false;
DST.batch = NULL;
}