git.blender.org/blender.git
author     Antonio Vazquez <blendergit@gmail.com>  2020-08-13 17:57:42 +0300
committer  Antonio Vazquez <blendergit@gmail.com>  2020-08-13 17:57:42 +0300
commit     735c717a63c8870d2ef4a910d82a2648cbaaa5e1 (patch)
tree       4cff1cfe01053b8cc188cc69e1c2c60946fe37cc /source
parent     cba7391d4a42a44efeddae3ff717e542a3c73738 (diff)
parent     53683dec7d9ac9f324ff91904c9f80b8018b9b9c (diff)
Merge branch 'master' into greasepencil-edit-curve
Diffstat (limited to 'source')
-rw-r--r--  source/blender/blenfont/BLF_api.h  |    3
-rw-r--r--  source/blender/blenfont/intern/blf.c  |    5
-rw-r--r--  source/blender/blenfont/intern/blf_font.c  |    7
-rw-r--r--  source/blender/blenfont/intern/blf_internal.h  |    1
-rw-r--r--  source/blender/blenkernel/BKE_screen.h  |   10
-rw-r--r--  source/blender/blenkernel/intern/DerivedMesh.c  |    6
-rw-r--r--  source/blender/blenkernel/intern/object_dupli.c  |    4
-rw-r--r--  source/blender/blenkernel/intern/softbody.c  |    3
-rw-r--r--  source/blender/blenloader/intern/versioning_defaults.c  |    2
-rw-r--r--  source/blender/bmesh/CMakeLists.txt  |    1
-rw-r--r--  source/blender/bmesh/intern/bmesh_mesh_convert.c  |   15
-rw-r--r--  source/blender/draw/intern/draw_cache.c  |   12
-rw-r--r--  source/blender/draw/intern/draw_cache.h  |    1
-rw-r--r--  source/blender/draw/intern/draw_cache_impl_mesh.c  |    2
-rw-r--r--  source/blender/draw/intern/draw_cache_impl_metaball.c  |   10
-rw-r--r--  source/blender/draw/intern/draw_cache_impl_volume.c  |    2
-rw-r--r--  source/blender/draw/intern/draw_cache_inline.h  |   12
-rw-r--r--  source/blender/draw/intern/draw_instance_data.c  |  151
-rw-r--r--  source/blender/draw/intern/draw_manager.c  |    3
-rw-r--r--  source/blender/draw/intern/draw_manager.h  |    1
-rw-r--r--  source/blender/draw/intern/draw_manager_exec.c  |   41
-rw-r--r--  source/blender/editors/datafiles/CMakeLists.txt  |    1
-rw-r--r--  source/blender/editors/include/UI_view2d.h  |    6
-rw-r--r--  source/blender/editors/interface/interface_widgets.c  |    7
-rw-r--r--  source/blender/editors/interface/view2d.c  |   11
-rw-r--r--  source/blender/editors/interface/view2d_ops.c  |   24
-rw-r--r--  source/blender/editors/mesh/editmesh_knife.c  |    8
-rw-r--r--  source/blender/editors/object/object_bake_api.c  |    5
-rw-r--r--  source/blender/editors/screen/screen_draw.c  |    1
-rw-r--r--  source/blender/editors/space_image/space_image.c  |    1
-rw-r--r--  source/blender/editors/space_sequencer/sequencer_view.c  |    8
-rw-r--r--  source/blender/editors/space_sequencer/space_sequencer.c  |   21
-rw-r--r--  source/blender/editors/space_view3d/view3d_draw.c  |    4
-rw-r--r--  source/blender/editors/space_view3d/view3d_placement.c  |   12
-rw-r--r--  source/blender/editors/uvedit/uvedit_draw.c  |   12
-rw-r--r--  source/blender/gpu/CMakeLists.txt  |   11
-rw-r--r--  source/blender/gpu/GPU_batch.h  |  150
-rw-r--r--  source/blender/gpu/GPU_batch_presets.h  |    3
-rw-r--r--  source/blender/gpu/GPU_drawlist.h  |   46
-rw-r--r--  source/blender/gpu/GPU_element.h  |    2
-rw-r--r--  source/blender/gpu/GPU_shader.h  |   13
-rw-r--r--  source/blender/gpu/GPU_shader_interface.h  |    6
-rw-r--r--  source/blender/gpu/GPU_vertex_buffer.h  |    6
-rw-r--r--  source/blender/gpu/intern/gpu_backend.hh  |   18
-rw-r--r--  source/blender/gpu/intern/gpu_batch.cc  |  859
-rw-r--r--  source/blender/gpu/intern/gpu_batch_presets.c  |   25
-rw-r--r--  source/blender/gpu/intern/gpu_batch_private.hh (renamed from source/blender/gpu/intern/gpu_batch_private.h)  |   18
-rw-r--r--  source/blender/gpu/intern/gpu_context.cc  |   24
-rw-r--r--  source/blender/gpu/intern/gpu_context_private.hh  |    4
-rw-r--r--  source/blender/gpu/intern/gpu_drawlist.cc  |   59
-rw-r--r--  source/blender/gpu/intern/gpu_drawlist_private.hh  |   40
-rw-r--r--  source/blender/gpu/intern/gpu_element.cc  |    5
-rw-r--r--  source/blender/gpu/intern/gpu_immediate.cc  |   12
-rw-r--r--  source/blender/gpu/intern/gpu_shader.cc  |  176
-rw-r--r--  source/blender/gpu/intern/gpu_shader_interface.cc  |   24
-rw-r--r--  source/blender/gpu/intern/gpu_vertex_buffer.cc  |   19
-rw-r--r--  source/blender/gpu/opengl/gl_backend.hh  |   12
-rw-r--r--  source/blender/gpu/opengl/gl_batch.cc  |  367
-rw-r--r--  source/blender/gpu/opengl/gl_batch.hh  |  105
-rw-r--r--  source/blender/gpu/opengl/gl_context.cc  |   15
-rw-r--r--  source/blender/gpu/opengl/gl_context.hh  |   15
-rw-r--r--  source/blender/gpu/opengl/gl_drawlist.cc  |  240
-rw-r--r--  source/blender/gpu/opengl/gl_drawlist.hh  |   80
-rw-r--r--  source/blender/gpu/opengl/gl_vertex_array.cc  |  158
-rw-r--r--  source/blender/gpu/opengl/gl_vertex_array.hh  |   44
-rw-r--r--  source/blender/makesdna/DNA_space_types.h  |    1
-rw-r--r--  source/blender/makesrna/intern/rna_object_force.c  |    2
-rw-r--r--  source/blender/makesrna/intern/rna_space.c  |    6
-rw-r--r--  source/blender/python/gpu/gpu_py_batch.c  |    8
-rw-r--r--  source/blender/windowmanager/intern/wm_surface.c  |    2
-rw-r--r--  source/blender/windowmanager/intern/wm_window.c  |    2
71 files changed, 1773 insertions, 1217 deletions
diff --git a/source/blender/blenfont/BLF_api.h b/source/blender/blenfont/BLF_api.h
index 9013836fd1e..bf84f5c57b3 100644
--- a/source/blender/blenfont/BLF_api.h
+++ b/source/blender/blenfont/BLF_api.h
@@ -42,8 +42,7 @@ int BLF_init(void);
void BLF_exit(void);
void BLF_default_dpi(int dpi);
void BLF_default_set(int fontid);
-int BLF_default(void); /* get default font ID so we can pass it to other functions */
-void BLF_batch_reset(void); /* call when changing opengl context. */
+int BLF_default(void); /* get default font ID so we can pass it to other functions */
void BLF_cache_clear(void);
diff --git a/source/blender/blenfont/intern/blf.c b/source/blender/blenfont/intern/blf.c
index 95b074fa2df..c5c2bc3f3ba 100644
--- a/source/blender/blenfont/intern/blf.c
+++ b/source/blender/blenfont/intern/blf.c
@@ -125,11 +125,6 @@ void BLF_exit(void)
blf_font_exit();
}
-void BLF_batch_reset(void)
-{
- blf_batch_draw_vao_clear();
-}
-
void BLF_cache_clear(void)
{
FontBLF *font;
diff --git a/source/blender/blenfont/intern/blf_font.c b/source/blender/blenfont/intern/blf_font.c
index ff31878a929..76829db755c 100644
--- a/source/blender/blenfont/intern/blf_font.c
+++ b/source/blender/blenfont/intern/blf_font.c
@@ -113,13 +113,6 @@ static void blf_batch_draw_exit(void)
GPU_BATCH_DISCARD_SAFE(g_batch.batch);
}
-void blf_batch_draw_vao_clear(void)
-{
- if (g_batch.batch) {
- GPU_batch_vao_cache_clear(g_batch.batch);
- }
-}
-
void blf_batch_draw_begin(FontBLF *font)
{
if (g_batch.batch == NULL) {
diff --git a/source/blender/blenfont/intern/blf_internal.h b/source/blender/blenfont/intern/blf_internal.h
index ba0873f4fd4..b616f47a897 100644
--- a/source/blender/blenfont/intern/blf_internal.h
+++ b/source/blender/blenfont/intern/blf_internal.h
@@ -30,7 +30,6 @@ struct ResultBLF;
struct rctf;
struct rcti;
-void blf_batch_draw_vao_clear(void);
void blf_batch_draw_begin(struct FontBLF *font);
void blf_batch_draw(void);
diff --git a/source/blender/blenkernel/BKE_screen.h b/source/blender/blenkernel/BKE_screen.h
index edab543fc37..1090deae93f 100644
--- a/source/blender/blenkernel/BKE_screen.h
+++ b/source/blender/blenkernel/BKE_screen.h
@@ -183,6 +183,16 @@ typedef struct ARegionType {
/* return context data */
int (*context)(const struct bContext *C, const char *member, struct bContextDataResult *result);
+ /* Is called whenever the current visible View2D's region changes.
+ *
+ * Used from user code such as view navigation/zoom operators to inform the region about
+ * changes. The goal is to support zoom-to-fit features, which get disabled when manual
+ * navigation is performed.
+ *
+ * This callback is not called on indirect changes of the current viewport (which could happen
+ * when `v2d->tot` is changed and `cur` is adapted accordingly). */
+ void (*on_view2d_changed)(const struct bContext *C, struct ARegion *region);
+
/* custom drawing callbacks */
ListBase drawcalls;
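
Registering the new callback follows the same pattern as the other ARegionType hooks; the sequencer changes later in this diff show the real usage. A minimal sketch with a hypothetical editor (only CTX_wm_space_data, MEM_callocN and the ARegionType fields are existing API):

  /* Sketch: hypothetical space type wiring up on_view2d_changed. */
  static void my_region_view2d_changed(const bContext *C, ARegion *UNUSED(region))
  {
    SpaceMy *smy = CTX_wm_space_data(C); /* hypothetical space-data struct */
    smy->flag &= ~MY_ZOOM_TO_FIT;        /* manual navigation disables auto-fit */
  }

  void ED_spacetype_my_editor(void)
  {
    ARegionType *art = MEM_callocN(sizeof(ARegionType), "my editor region");
    /* Called via UI_view2d_curRect_changed() when an operator modifies v2d->cur. */
    art->on_view2d_changed = my_region_view2d_changed;
  }
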
diff --git a/source/blender/blenkernel/intern/DerivedMesh.c b/source/blender/blenkernel/intern/DerivedMesh.c
index 63e7933dd56..0dc85dfaa18 100644
--- a/source/blender/blenkernel/intern/DerivedMesh.c
+++ b/source/blender/blenkernel/intern/DerivedMesh.c
@@ -1810,6 +1810,12 @@ static void mesh_build_data(struct Depsgraph *depsgraph,
BKE_object_boundbox_calc_from_mesh(ob, mesh_eval);
+ /* Make sure that drivers can target shapekey properties.
+ * Note that this causes a potential inconsistency, as the shapekey may have a
+ * different topology than the evaluated mesh. */
+ BLI_assert(mesh->key == NULL || DEG_is_evaluated_id(&mesh->key->id));
+ mesh_eval->key = mesh->key;
+
if ((ob->mode & OB_MODE_ALL_SCULPT) && ob->sculpt) {
if (DEG_is_active(depsgraph)) {
BKE_sculpt_update_object_after_eval(depsgraph, ob);
diff --git a/source/blender/blenkernel/intern/object_dupli.c b/source/blender/blenkernel/intern/object_dupli.c
index 108a952dc69..d69f4a39263 100644
--- a/source/blender/blenkernel/intern/object_dupli.c
+++ b/source/blender/blenkernel/intern/object_dupli.c
@@ -897,8 +897,8 @@ static DupliObject *face_dupli(const DupliContext *ctx,
/* Make offset relative to inst_ob using relative child transform. */
mul_mat3_m4_v3(child_imat, obmat[3]);
- /* XXX ugly hack to ensure same behavior as in master this should not be needed,
- * #Object.parentinv is not consistent outside of parenting. */
+ /* XXX ugly hack to ensure same behavior as in master.
+ * This should not be needed, #Object.parentinv is not consistent outside of parenting. */
{
float imat[3][3];
copy_m3_m4(imat, inst_ob->parentinv);
diff --git a/source/blender/blenkernel/intern/softbody.c b/source/blender/blenkernel/intern/softbody.c
index 1ab9766a7ec..6a6f74d9fb4 100644
--- a/source/blender/blenkernel/intern/softbody.c
+++ b/source/blender/blenkernel/intern/softbody.c
@@ -2965,6 +2965,9 @@ static void lattice_to_softbody(Scene *scene, Object *ob)
if (ob->softflag & OB_SB_EDGES) {
makelatticesprings(lt, ob->soft->bspring, ob->softflag & OB_SB_QUADS, ob);
build_bps_springlist(ob); /* link bps to springs */
+ if (ob->softflag & OB_SB_SELF) {
+ calculate_collision_balls(ob);
+ }
}
}
diff --git a/source/blender/blenloader/intern/versioning_defaults.c b/source/blender/blenloader/intern/versioning_defaults.c
index df0b2b380fa..b4bee9a3c7e 100644
--- a/source/blender/blenloader/intern/versioning_defaults.c
+++ b/source/blender/blenloader/intern/versioning_defaults.c
@@ -174,7 +174,7 @@ static void blo_update_defaults_screen(bScreen *screen,
}
else if (area->spacetype == SPACE_SEQ) {
SpaceSeq *seq = area->spacedata.first;
- seq->flag |= SEQ_SHOW_MARKERS | SEQ_SHOW_FCURVES;
+ seq->flag |= SEQ_SHOW_MARKERS | SEQ_SHOW_FCURVES | SEQ_ZOOM_TO_FIT;
}
else if (area->spacetype == SPACE_TEXT) {
/* Show syntax and line numbers in Script workspace text editor. */
diff --git a/source/blender/bmesh/CMakeLists.txt b/source/blender/bmesh/CMakeLists.txt
index b97b5cc95f2..0eeb0d21b5b 100644
--- a/source/blender/bmesh/CMakeLists.txt
+++ b/source/blender/bmesh/CMakeLists.txt
@@ -23,6 +23,7 @@ set(INC
../blenkernel
../blenlib
../blentranslation
+ ../depsgraph
../makesdna
../../../intern/atomic
../../../intern/eigen
diff --git a/source/blender/bmesh/intern/bmesh_mesh_convert.c b/source/blender/bmesh/intern/bmesh_mesh_convert.c
index 8db125970fd..4671df90d53 100644
--- a/source/blender/bmesh/intern/bmesh_mesh_convert.c
+++ b/source/blender/bmesh/intern/bmesh_mesh_convert.c
@@ -90,6 +90,8 @@
#include "BKE_key.h"
#include "BKE_main.h"
+#include "DEG_depsgraph_query.h"
+
#include "bmesh.h"
#include "intern/bmesh_private.h" /* For element checking. */
@@ -231,7 +233,13 @@ void BM_mesh_bm_from_me(BMesh *bm, const Mesh *me, const struct BMeshFromMeshPar
/* -------------------------------------------------------------------- */
/* Shape Key */
- int tot_shape_keys = me->key ? BLI_listbase_count(&me->key->block) : 0;
+ int tot_shape_keys = 0;
+ if (me->key != NULL && DEG_is_original_id(&me->id)) {
+ /* Evaluated meshes can be topologically inconsistent with their shape keys.
+ * Shape keys are also already integrated into the state of the evaluated
+ * mesh, so considering them here would kind of apply them twice. */
+ tot_shape_keys = BLI_listbase_count(&me->key->block);
+ }
if (is_new == false) {
tot_shape_keys = min_ii(tot_shape_keys, CustomData_number_of_layers(&bm->vdata, CD_SHAPEKEY));
}
@@ -239,7 +247,7 @@ void BM_mesh_bm_from_me(BMesh *bm, const Mesh *me, const struct BMeshFromMeshPar
BLI_array_alloca(shape_key_table, tot_shape_keys) :
NULL;
- if ((params->active_shapekey != 0) && (me->key != NULL)) {
+ if ((params->active_shapekey != 0) && tot_shape_keys > 0) {
actkey = BLI_findlink(&me->key->block, params->active_shapekey - 1);
}
else {
@@ -298,7 +306,8 @@ void BM_mesh_bm_from_me(BMesh *bm, const Mesh *me, const struct BMeshFromMeshPar
const int cd_vert_bweight_offset = CustomData_get_offset(&bm->vdata, CD_BWEIGHT);
const int cd_edge_bweight_offset = CustomData_get_offset(&bm->edata, CD_BWEIGHT);
const int cd_edge_crease_offset = CustomData_get_offset(&bm->edata, CD_CREASE);
- const int cd_shape_key_offset = me->key ? CustomData_get_offset(&bm->vdata, CD_SHAPEKEY) : -1;
+ const int cd_shape_key_offset = tot_shape_keys ? CustomData_get_offset(&bm->vdata, CD_SHAPEKEY) :
+ -1;
const int cd_shape_keyindex_offset = is_new && (tot_shape_keys || params->add_key_index) ?
CustomData_get_offset(&bm->vdata, CD_SHAPE_KEYINDEX) :
-1;
diff --git a/source/blender/draw/intern/draw_cache.c b/source/blender/draw/intern/draw_cache.c
index 46b7a88b2a6..4d7440a3276 100644
--- a/source/blender/draw/intern/draw_cache.c
+++ b/source/blender/draw/intern/draw_cache.c
@@ -152,18 +152,6 @@ void DRW_shape_cache_free(void)
}
}
-void DRW_shape_cache_reset(void)
-{
- uint i = sizeof(SHC) / sizeof(GPUBatch *);
- GPUBatch **batch = (GPUBatch **)&SHC;
- while (i--) {
- if (*batch) {
- GPU_batch_vao_cache_clear(*batch);
- }
- batch++;
- }
-}
-
/* -------------------------------------------------------------------- */
/** \name Procedural Batches
* \{ */
diff --git a/source/blender/draw/intern/draw_cache.h b/source/blender/draw/intern/draw_cache.h
index 5f1744a7aec..8597f86f8e6 100644
--- a/source/blender/draw/intern/draw_cache.h
+++ b/source/blender/draw/intern/draw_cache.h
@@ -33,7 +33,6 @@ struct VolumeGrid;
struct bGPDstroke;
void DRW_shape_cache_free(void);
-void DRW_shape_cache_reset(void);
/* 3D cursor */
struct GPUBatch *DRW_cache_cursor_get(bool crosshair_lines);
diff --git a/source/blender/draw/intern/draw_cache_impl_mesh.c b/source/blender/draw/intern/draw_cache_impl_mesh.c
index d6faeb16583..0e2be993787 100644
--- a/source/blender/draw/intern/draw_cache_impl_mesh.c
+++ b/source/blender/draw/intern/draw_cache_impl_mesh.c
@@ -1248,7 +1248,7 @@ void DRW_mesh_batch_cache_create_requested(struct TaskGraph *task_graph,
saved_elem_ranges[i] = cache->surface_per_mat[i]->elem;
/* Avoid deletion as the batch is owner. */
cache->surface_per_mat[i]->elem = NULL;
- cache->surface_per_mat[i]->owns_flag &= ~GPU_BATCH_OWNS_INDEX;
+ cache->surface_per_mat[i]->flag &= ~GPU_BATCH_OWNS_INDEX;
}
}
/* We can't discard batches at this point as they have been
diff --git a/source/blender/draw/intern/draw_cache_impl_metaball.c b/source/blender/draw/intern/draw_cache_impl_metaball.c
index 076d32ffe1f..5f0af06931e 100644
--- a/source/blender/draw/intern/draw_cache_impl_metaball.c
+++ b/source/blender/draw/intern/draw_cache_impl_metaball.c
@@ -155,7 +155,7 @@ static GPUVertBuf *mball_batch_cache_get_pos_and_normals(Object *ob, MetaBallBat
{
if (cache->pos_nor_in_order == NULL) {
ListBase *lb = &ob->runtime.curve_cache->disp;
- cache->pos_nor_in_order = MEM_callocN(sizeof(GPUVertBuf), __func__);
+ cache->pos_nor_in_order = GPU_vertbuf_create(GPU_USAGE_STATIC);
DRW_displist_vertbuf_create_pos_and_nor(lb, cache->pos_nor_in_order);
}
return cache->pos_nor_in_order;
@@ -165,7 +165,7 @@ static GPUIndexBuf *mball_batch_cache_get_edges_adj_lines(Object *ob, MetaBallBa
{
if (cache->edges_adj_lines == NULL) {
ListBase *lb = &ob->runtime.curve_cache->disp;
- cache->edges_adj_lines = MEM_callocN(sizeof(GPUVertBuf), __func__);
+ cache->edges_adj_lines = GPU_indexbuf_calloc();
DRW_displist_indexbuf_create_edges_adjacency_lines(
lb, cache->edges_adj_lines, &cache->is_manifold);
}
@@ -187,7 +187,7 @@ GPUBatch *DRW_metaball_batch_cache_get_triangles_with_normals(Object *ob)
if (cache->batch == NULL) {
ListBase *lb = &ob->runtime.curve_cache->disp;
- GPUIndexBuf *ibo = MEM_callocN(sizeof(GPUIndexBuf), __func__);
+ GPUIndexBuf *ibo = GPU_indexbuf_calloc();
DRW_displist_indexbuf_create_triangles_in_order(lb, ibo);
cache->batch = GPU_batch_create_ex(GPU_PRIM_TRIS,
mball_batch_cache_get_pos_and_normals(ob, cache),
@@ -234,10 +234,10 @@ GPUBatch *DRW_metaball_batch_cache_get_wireframes_face(Object *ob)
if (cache->face_wire.batch == NULL) {
ListBase *lb = &ob->runtime.curve_cache->disp;
- GPUVertBuf *vbo_wiredata = MEM_callocN(sizeof(GPUVertBuf), __func__);
+ GPUVertBuf *vbo_wiredata = GPU_vertbuf_create(GPU_USAGE_STATIC);
DRW_displist_vertbuf_create_wiredata(lb, vbo_wiredata);
- GPUIndexBuf *ibo = MEM_callocN(sizeof(GPUIndexBuf), __func__);
+ GPUIndexBuf *ibo = GPU_indexbuf_calloc();
DRW_displist_indexbuf_create_lines_in_order(lb, ibo);
cache->face_wire.batch = GPU_batch_create_ex(GPU_PRIM_LINES,
diff --git a/source/blender/draw/intern/draw_cache_impl_volume.c b/source/blender/draw/intern/draw_cache_impl_volume.c
index e07f5b33d58..825fec83cf1 100644
--- a/source/blender/draw/intern/draw_cache_impl_volume.c
+++ b/source/blender/draw/intern/draw_cache_impl_volume.c
@@ -163,7 +163,7 @@ static void drw_volume_wireframe_cb(
GPU_vertbuf_attr_fill_stride(cache->face_wire.pos_nor_in_order, nor_id, 0, &packed_normal);
/* Create wiredata. */
- GPUVertBuf *vbo_wiredata = MEM_callocN(sizeof(GPUVertBuf), __func__);
+ GPUVertBuf *vbo_wiredata = GPU_vertbuf_create(GPU_USAGE_STATIC);
DRW_vertbuf_create_wiredata(vbo_wiredata, totvert);
if (volume->display.wireframe_type == VOLUME_WIREFRAME_POINTS) {
diff --git a/source/blender/draw/intern/draw_cache_inline.h b/source/blender/draw/intern/draw_cache_inline.h
index 06d6f1afc31..0f0e1785a2a 100644
--- a/source/blender/draw/intern/draw_cache_inline.h
+++ b/source/blender/draw/intern/draw_cache_inline.h
@@ -48,7 +48,7 @@ BLI_INLINE GPUBatch *DRW_batch_request(GPUBatch **batch)
{
/* XXX TODO(fclem): We are writing to batch cache here. Need to make this thread safe. */
if (*batch == NULL) {
- *batch = GPU_batch_calloc(1);
+ *batch = GPU_batch_calloc();
}
return *batch;
}
@@ -69,11 +69,10 @@ BLI_INLINE bool DRW_batch_requested(GPUBatch *batch, int prim_type)
BLI_INLINE void DRW_ibo_request(GPUBatch *batch, GPUIndexBuf **ibo)
{
if (*ibo == NULL) {
- *ibo = MEM_callocN(sizeof(GPUIndexBuf), "GPUIndexBuf");
+ *ibo = GPU_indexbuf_calloc();
}
if (batch != NULL) {
- GPU_batch_vao_cache_clear(batch);
- batch->elem = *ibo;
+ GPU_batch_elembuf_set(batch, *ibo, false);
}
}
@@ -87,13 +86,12 @@ BLI_INLINE bool DRW_ibo_requested(GPUIndexBuf *ibo)
BLI_INLINE void DRW_vbo_request(GPUBatch *batch, GPUVertBuf **vbo)
{
if (*vbo == NULL) {
- *vbo = MEM_callocN(sizeof(GPUVertBuf), "GPUVertBuf");
+ *vbo = GPU_vertbuf_create(GPU_USAGE_STATIC);
}
if (batch != NULL) {
/* HACK set first vbo if not init. */
if (batch->verts[0] == NULL) {
- GPU_batch_vao_cache_clear(batch);
- batch->verts[0] = *vbo;
+ GPU_batch_vertbuf_add(batch, *vbo);
}
else {
/* HACK: bypass assert */
diff --git a/source/blender/draw/intern/draw_instance_data.c b/source/blender/draw/intern/draw_instance_data.c
index 5005f38c558..4e08e6e5129 100644
--- a/source/blender/draw/intern/draw_instance_data.c
+++ b/source/blender/draw/intern/draw_instance_data.c
@@ -59,50 +59,50 @@ struct DRWInstanceDataList {
};
typedef struct DRWTempBufferHandle {
- /** Must be first for casting. */
- GPUVertBuf buf;
+ GPUVertBuf *buf;
/** Format pointer for reuse. */
GPUVertFormat *format;
/** Touched vertex length for resize. */
int *vert_len;
} DRWTempBufferHandle;
-static ListBase g_idatalists = {NULL, NULL};
+typedef struct DRWTempInstancingHandle {
+ /** Copy of geom but with the per-instance attributes. */
+ GPUBatch *batch;
+ /** Batch containing instancing attributes. */
+ GPUBatch *instancer;
+ /** Call buffer to be used instead of instancer. */
+ GPUVertBuf *buf;
+ /** Original non-instanced batch pointer. */
+ GPUBatch *geom;
+} DRWTempInstancingHandle;
-/* -------------------------------------------------------------------- */
-/** \name Instance Buffer Management
- * \{ */
+static ListBase g_idatalists = {NULL, NULL};
-static void instance_batch_free(GPUBatch *geom, void *UNUSED(user_data))
+static void instancing_batch_references_add(GPUBatch *batch)
{
- if (geom->verts[0] == NULL) {
- /** XXX This is a false positive case.
- * The batch has been requested but not init yet
- * and there is a chance that it might become init.
- */
- return;
+ for (int i = 0; i < GPU_BATCH_VBO_MAX_LEN && batch->verts[i]; i++) {
+ GPU_vertbuf_handle_ref_add(batch->verts[i]);
+ }
+ for (int i = 0; i < GPU_BATCH_INST_VBO_MAX_LEN && batch->inst[i]; i++) {
+ GPU_vertbuf_handle_ref_add(batch->inst[i]);
}
+}
- /* Free all batches that use the same vbos before they are reused. */
- /* TODO: Make it thread safe! Batch freeing can happen from another thread. */
- /* FIXME: This is not really correct. The correct way would be to check based on
- * the vertex buffers. We assume the batch containing the VBO is being when it should. */
- /* PERF: This is doing a linear search. This can be very costly. */
- LISTBASE_FOREACH (DRWInstanceDataList *, data_list, &g_idatalists) {
- BLI_memblock *memblock = data_list->pool_instancing;
- BLI_memblock_iter iter;
- BLI_memblock_iternew(memblock, &iter);
- GPUBatch **batch_ptr;
- while ((batch_ptr = (GPUBatch **)BLI_memblock_iterstep(&iter))) {
- GPUBatch *batch = *batch_ptr;
- /* Only check verts[0] that's enough. */
- if (batch->verts[0] == geom->verts[0]) {
- GPU_batch_clear(batch);
- }
- }
+static void instancing_batch_references_remove(GPUBatch *batch)
+{
+ for (int i = 0; i < GPU_BATCH_VBO_MAX_LEN && batch->verts[i]; i++) {
+ GPU_vertbuf_handle_ref_remove(batch->verts[i]);
+ }
+ for (int i = 0; i < GPU_BATCH_INST_VBO_MAX_LEN && batch->inst[i]; i++) {
+ GPU_vertbuf_handle_ref_remove(batch->inst[i]);
}
}
+/* -------------------------------------------------------------------- */
+/** \name Instance Buffer Management
+ * \{ */
+
/**
* This manager allows to distribute existing batches for instancing
* attributes. This reduce the number of batches creation.
@@ -119,20 +119,23 @@ GPUVertBuf *DRW_temp_buffer_request(DRWInstanceDataList *idatalist,
BLI_assert(vert_len != NULL);
DRWTempBufferHandle *handle = BLI_memblock_alloc(idatalist->pool_buffers);
- GPUVertBuf *vert = &handle->buf;
- handle->vert_len = vert_len;
if (handle->format != format) {
handle->format = format;
- /* TODO/PERF: Save the allocated data from freeing to avoid reallocation. */
- GPU_vertbuf_clear(vert);
+ GPU_VERTBUF_DISCARD_SAFE(handle->buf);
+
+ GPUVertBuf *vert = GPU_vertbuf_create(GPU_USAGE_DYNAMIC);
GPU_vertbuf_init_with_format_ex(vert, format, GPU_USAGE_DYNAMIC);
GPU_vertbuf_data_alloc(vert, DRW_BUFFER_VERTS_CHUNK);
+
+ handle->buf = vert;
}
- return vert;
+ handle->vert_len = vert_len;
+ return handle->buf;
}
-/* NOTE: Does not return a valid drawable batch until DRW_instance_buffer_finish has run. */
+/* NOTE: Does not return a valid drawable batch until DRW_instance_buffer_finish has run.
+ * Initialization is delayed because the instancer or geom might not be initialized yet. */
GPUBatch *DRW_temp_batch_instance_request(DRWInstanceDataList *idatalist,
GPUVertBuf *buf,
GPUBatch *instancer,
@@ -143,17 +146,17 @@ GPUBatch *DRW_temp_batch_instance_request(DRWInstanceDataList *idatalist,
/* Only call with one of them. */
BLI_assert((instancer != NULL) != (buf != NULL));
- GPUBatch **batch_ptr = BLI_memblock_alloc(idatalist->pool_instancing);
- if (*batch_ptr == NULL) {
- *batch_ptr = GPU_batch_calloc(1);
+ DRWTempInstancingHandle *handle = BLI_memblock_alloc(idatalist->pool_instancing);
+ if (handle->batch == NULL) {
+ handle->batch = GPU_batch_calloc();
}
- GPUBatch *batch = *batch_ptr;
+ GPUBatch *batch = handle->batch;
bool instancer_compat = buf ? ((batch->inst[0] == buf) && (buf->vbo_id != 0)) :
- ((batch->inst[0] == instancer->inst[0]) &&
- (batch->inst[1] == instancer->inst[1]));
- bool is_compatible = (batch->gl_prim_type == geom->gl_prim_type) && instancer_compat &&
- (batch->phase == GPU_BATCH_READY_TO_DRAW) && (batch->elem == geom->elem);
+ ((batch->inst[0] == instancer->verts[0]) &&
+ (batch->inst[1] == instancer->verts[1]));
+ bool is_compatible = (batch->prim_type == geom->prim_type) && instancer_compat &&
+ (batch->flag & GPU_BATCH_BUILDING) == 0 && (batch->elem == geom->elem);
for (int i = 0; i < GPU_BATCH_VBO_MAX_LEN && is_compatible; i++) {
if (batch->verts[i] != geom->verts[i]) {
is_compatible = false;
@@ -161,15 +164,13 @@ GPUBatch *DRW_temp_batch_instance_request(DRWInstanceDataList *idatalist,
}
if (!is_compatible) {
+ instancing_batch_references_remove(batch);
GPU_batch_clear(batch);
- /* Save args and init later */
- batch->inst[0] = buf;
- batch->inst[1] = (void *)instancer; /* HACK to save the pointer without other alloc. */
- batch->phase = GPU_BATCH_READY_TO_BUILD;
- batch->verts[0] = (void *)geom; /* HACK to save the pointer without other alloc. */
-
- /* Make sure to free this batch if the instance geom gets free. */
- GPU_batch_callback_free_set(geom, &instance_batch_free, NULL);
+ /* Save args and init later. */
+ batch->flag = GPU_BATCH_BUILDING;
+ handle->buf = buf;
+ handle->instancer = instancer;
+ handle->geom = geom;
}
return batch;
}
@@ -179,14 +180,14 @@ GPUBatch *DRW_temp_batch_request(DRWInstanceDataList *idatalist,
GPUVertBuf *buf,
GPUPrimType prim_type)
{
- GPUBatch **batch_ptr = BLI_memblock_alloc(idatalist->pool_instancing);
+ GPUBatch **batch_ptr = BLI_memblock_alloc(idatalist->pool_batching);
if (*batch_ptr == NULL) {
- *batch_ptr = GPU_batch_calloc(1);
+ *batch_ptr = GPU_batch_calloc();
}
GPUBatch *batch = *batch_ptr;
bool is_compatible = (batch->verts[0] == buf) && (buf->vbo_id != 0) &&
- (batch->gl_prim_type == convert_prim_type_to_gl(prim_type));
+ (batch->prim_type == prim_type);
if (!is_compatible) {
GPU_batch_clear(batch);
GPU_batch_init(batch, prim_type, buf, NULL);
@@ -197,7 +198,13 @@ GPUBatch *DRW_temp_batch_request(DRWInstanceDataList *idatalist,
static void temp_buffer_handle_free(DRWTempBufferHandle *handle)
{
handle->format = NULL;
- GPU_vertbuf_clear(&handle->buf);
+ GPU_VERTBUF_DISCARD_SAFE(handle->buf);
+}
+
+static void temp_instancing_handle_free(DRWTempInstancingHandle *handle)
+{
+ instancing_batch_references_remove(handle->batch);
+ GPU_BATCH_DISCARD_SAFE(handle->batch);
}
static void temp_batch_free(GPUBatch **batch)
@@ -215,23 +222,22 @@ void DRW_instance_buffer_finish(DRWInstanceDataList *idatalist)
if (handle->vert_len != NULL) {
uint vert_len = *(handle->vert_len);
uint target_buf_size = ((vert_len / DRW_BUFFER_VERTS_CHUNK) + 1) * DRW_BUFFER_VERTS_CHUNK;
- if (target_buf_size < handle->buf.vertex_alloc) {
- GPU_vertbuf_data_resize(&handle->buf, target_buf_size);
+ if (target_buf_size < handle->buf->vertex_alloc) {
+ GPU_vertbuf_data_resize(handle->buf, target_buf_size);
}
- GPU_vertbuf_data_len_set(&handle->buf, vert_len);
- GPU_vertbuf_use(&handle->buf); /* Send data. */
+ GPU_vertbuf_data_len_set(handle->buf, vert_len);
+ GPU_vertbuf_use(handle->buf); /* Send data. */
}
}
/* Finish pending instancing batches. */
- GPUBatch **batch_ptr;
+ DRWTempInstancingHandle *handle_inst;
BLI_memblock_iternew(idatalist->pool_instancing, &iter);
- while ((batch_ptr = BLI_memblock_iterstep(&iter))) {
- GPUBatch *batch = *batch_ptr;
- if (batch && batch->phase == GPU_BATCH_READY_TO_BUILD) {
- GPUVertBuf *inst_buf = batch->inst[0];
- /* HACK see DRW_temp_batch_instance_request. */
- GPUBatch *inst_batch = (void *)batch->inst[1];
- GPUBatch *geom = (void *)batch->verts[0];
+ while ((handle_inst = BLI_memblock_iterstep(&iter))) {
+ GPUBatch *batch = handle_inst->batch;
+ if (batch && batch->flag == GPU_BATCH_BUILDING) {
+ GPUVertBuf *inst_buf = handle_inst->buf;
+ GPUBatch *inst_batch = handle_inst->instancer;
+ GPUBatch *geom = handle_inst->geom;
GPU_batch_copy(batch, geom);
if (inst_batch != NULL) {
for (int i = 0; i < GPU_BATCH_INST_VBO_MAX_LEN && inst_batch->verts[i]; i++) {
@@ -241,11 +247,14 @@ void DRW_instance_buffer_finish(DRWInstanceDataList *idatalist)
else {
GPU_batch_instbuf_add_ex(batch, inst_buf, false);
}
+ /* Add reference to avoid comparing pointers (in DRW_temp_batch_request) that could
+ * potentially be the same. This will delay the freeing of the GPUVertBuf itself. */
+ instancing_batch_references_add(batch);
}
}
/* Resize pools and free unused. */
BLI_memblock_clear(idatalist->pool_buffers, (MemblockValFreeFP)temp_buffer_handle_free);
- BLI_memblock_clear(idatalist->pool_instancing, (MemblockValFreeFP)temp_batch_free);
+ BLI_memblock_clear(idatalist->pool_instancing, (MemblockValFreeFP)temp_instancing_handle_free);
BLI_memblock_clear(idatalist->pool_batching, (MemblockValFreeFP)temp_batch_free);
}
@@ -318,7 +327,7 @@ DRWInstanceDataList *DRW_instance_data_list_create(void)
DRWInstanceDataList *idatalist = MEM_callocN(sizeof(DRWInstanceDataList), "DRWInstanceDataList");
idatalist->pool_batching = BLI_memblock_create(sizeof(GPUBatch *));
- idatalist->pool_instancing = BLI_memblock_create(sizeof(GPUBatch *));
+ idatalist->pool_instancing = BLI_memblock_create(sizeof(DRWTempInstancingHandle));
idatalist->pool_buffers = BLI_memblock_create(sizeof(DRWTempBufferHandle));
BLI_addtail(&g_idatalists, idatalist);
@@ -341,7 +350,7 @@ void DRW_instance_data_list_free(DRWInstanceDataList *idatalist)
}
BLI_memblock_destroy(idatalist->pool_buffers, (MemblockValFreeFP)temp_buffer_handle_free);
- BLI_memblock_destroy(idatalist->pool_instancing, (MemblockValFreeFP)temp_batch_free);
+ BLI_memblock_destroy(idatalist->pool_instancing, (MemblockValFreeFP)temp_instancing_handle_free);
BLI_memblock_destroy(idatalist->pool_batching, (MemblockValFreeFP)temp_batch_free);
BLI_remlink(&g_idatalists, idatalist);
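
On the caller side, the request/finish cycle these handles serve looks roughly like this; a sketch against the signatures above, where everything except the DRW_* calls is illustrative:

  /* Sketch: request a temp buffer and an instancing batch, then finish. */
  static void build_my_instances(DRWInstanceDataList *idatalist,
                                 GPUVertFormat *format, /* reuse the same pointer; the handle caches it */
                                 GPUBatch *geom)
  {
    static int vert_len = 0; /* must stay valid until DRW_instance_buffer_finish() reads it */
    GPUVertBuf *buf = DRW_temp_buffer_request(idatalist, format, &vert_len);
    /* ... fill `buf`, accumulating the vertex count into `vert_len` ... */
    GPUBatch *batch = DRW_temp_batch_instance_request(idatalist, buf, NULL, geom);
    (void)batch; /* only drawable after DRW_instance_buffer_finish(idatalist) has run */
  }
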
diff --git a/source/blender/draw/intern/draw_manager.c b/source/blender/draw/intern/draw_manager.c
index 4a5e07476a9..e436424b460 100644
--- a/source/blender/draw/intern/draw_manager.c
+++ b/source/blender/draw/intern/draw_manager.c
@@ -2824,7 +2824,6 @@ void DRW_opengl_context_enable_ex(bool restore)
if (!G.background) {
immActivate();
}
- BLF_batch_reset();
}
}
}
@@ -2888,13 +2887,11 @@ void DRW_gpu_render_context_enable(void *re_gpu_context)
BLI_assert(!BLI_thread_is_main());
GPU_context_active_set(re_gpu_context);
- DRW_shape_cache_reset(); /* XXX fix that too. */
}
/* Needs to be called BEFORE DRW_opengl_render_context_disable() */
void DRW_gpu_render_context_disable(void *UNUSED(re_gpu_context))
{
- DRW_shape_cache_reset(); /* XXX fix that too. */
GPU_context_active_set(NULL);
}
diff --git a/source/blender/draw/intern/draw_manager.h b/source/blender/draw/intern/draw_manager.h
index 92a01cbbe04..d15a55e7bef 100644
--- a/source/blender/draw/intern/draw_manager.h
+++ b/source/blender/draw/intern/draw_manager.h
@@ -35,6 +35,7 @@
#include "GPU_batch.h"
#include "GPU_context.h"
+#include "GPU_drawlist.h"
#include "GPU_framebuffer.h"
#include "GPU_shader.h"
#include "GPU_uniformbuffer.h"
diff --git a/source/blender/draw/intern/draw_manager_exec.c b/source/blender/draw/intern/draw_manager_exec.c
index e3860b1bfb2..b931bdd0cbe 100644
--- a/source/blender/draw/intern/draw_manager_exec.c
+++ b/source/blender/draw/intern/draw_manager_exec.c
@@ -54,8 +54,6 @@ typedef struct DRWCommandsState {
int resource_id;
int base_inst;
int inst_count;
- int v_first;
- int v_count;
bool neg_scale;
/* Resource location. */
int obmats_loc;
@@ -663,18 +661,9 @@ BLI_INLINE void draw_legacy_matrix_update(DRWShadingGroup *shgroup,
BLI_INLINE void draw_geometry_bind(DRWShadingGroup *shgroup, GPUBatch *geom)
{
- /* XXX hacking #GPUBatch. we don't want to call glUseProgram! (huge performance loss) */
- if (DST.batch) {
- DST.batch->program_in_use = false;
- }
-
DST.batch = geom;
- GPU_batch_set_shader_no_bind(geom, shgroup->shader);
-
- geom->program_in_use = true; /* XXX hacking #GPUBatch */
-
- GPU_batch_bind(geom);
+ GPU_batch_set_shader(geom, shgroup->shader);
}
BLI_INLINE void draw_geometry_execute(DRWShadingGroup *shgroup,
@@ -714,18 +703,12 @@ BLI_INLINE void draw_indirect_call(DRWShadingGroup *shgroup, DRWCommandsState *s
GPU_draw_list_submit(DST.draw_list);
draw_geometry_bind(shgroup, state->batch);
}
- GPU_draw_list_command_add(
- DST.draw_list, state->v_first, state->v_count, state->base_inst, state->inst_count);
+ GPU_draw_list_append(DST.draw_list, state->batch, state->base_inst, state->inst_count);
}
/* Fallback when unsupported */
else {
- draw_geometry_execute(shgroup,
- state->batch,
- state->v_first,
- state->v_count,
- state->base_inst,
- state->inst_count,
- state->baseinst_loc);
+ draw_geometry_execute(
+ shgroup, state->batch, 0, 0, state->base_inst, state->inst_count, state->baseinst_loc);
}
}
@@ -873,10 +856,10 @@ BLI_INLINE void draw_select_buffer(DRWShadingGroup *shgroup,
/* Batching */
if (!is_instancing) {
/* FIXME: Meh a bit nasty. */
- if (batch->gl_prim_type == convert_prim_type_to_gl(GPU_PRIM_TRIS)) {
+ if (batch->prim_type == GPU_PRIM_TRIS) {
count = 3;
}
- else if (batch->gl_prim_type == convert_prim_type_to_gl(GPU_PRIM_LINES)) {
+ else if (batch->prim_type == GPU_PRIM_LINES) {
count = 2;
}
}
@@ -1015,8 +998,6 @@ static void draw_call_batching_start(DRWCommandsState *state)
state->resource_id = -1;
state->base_inst = 0;
state->inst_count = 0;
- state->v_first = 0;
- state->v_count = 0;
state->batch = NULL;
state->select_id = -1;
@@ -1039,15 +1020,10 @@ static void draw_call_batching_do(DRWShadingGroup *shgroup,
draw_call_batching_flush(shgroup, state);
state->batch = call->batch;
- state->v_first = (call->batch->elem) ? call->batch->elem->index_start : 0;
- state->v_count = (call->batch->elem) ? call->batch->elem->index_len :
- call->batch->verts[0]->vertex_len;
state->inst_count = 1;
state->base_inst = id;
draw_call_resource_bind(state, &call->handle);
-
- GPU_draw_list_init(DST.draw_list, state->batch);
}
/* Is the id consecutive? */
else if (id != state->base_inst + state->inst_count) {
@@ -1111,10 +1087,6 @@ static void draw_shgroup(DRWShadingGroup *shgroup, DRWState pass_state)
}
GPU_shader_bind(shgroup->shader);
DST.shader = shgroup->shader;
- /* XXX hacking gawain */
- if (DST.batch) {
- DST.batch->program_in_use = false;
- }
DST.batch = NULL;
}
@@ -1305,7 +1277,6 @@ static void drw_draw_pass_ex(DRWPass *pass,
}
if (DST.batch) {
- DST.batch->program_in_use = false;
DST.batch = NULL;
}
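
With the vertex-range bookkeeping gone, the draw list is fed one command per batch. A usage sketch of the GPU_draw_list_* API as declared in this diff (capacity and loop bounds are illustrative):

  /* Sketch: record one command per (batch, instance range), then flush. */
  GPUDrawList *list = GPU_draw_list_create(64);
  for (int i = 0; i < group_count; i++) {
    /* Replaces the removed GPU_draw_list_init() + GPU_draw_list_command_add()
     * pair: the vertex range is now taken from the batch itself. */
    GPU_draw_list_append(list, batches[i], inst_first[i], inst_count[i]);
  }
  GPU_draw_list_submit(list);  /* one submission; falls back when multi-draw-indirect is unsupported */
  GPU_draw_list_discard(list);
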
diff --git a/source/blender/editors/datafiles/CMakeLists.txt b/source/blender/editors/datafiles/CMakeLists.txt
index 0dcb8de37f1..b8cc704eb23 100644
--- a/source/blender/editors/datafiles/CMakeLists.txt
+++ b/source/blender/editors/datafiles/CMakeLists.txt
@@ -744,6 +744,7 @@ set_property(GLOBAL PROPERTY ICON_GEOM_NAMES
ops.pose.relax
ops.sculpt.border_hide
ops.sculpt.border_mask
+ ops.sculpt.cloth_filter
ops.sculpt.lasso_mask
ops.sculpt.mesh_filter
ops.sequencer.blade
diff --git a/source/blender/editors/include/UI_view2d.h b/source/blender/editors/include/UI_view2d.h
index 6e0f4434b7b..1bd9b3063bd 100644
--- a/source/blender/editors/include/UI_view2d.h
+++ b/source/blender/editors/include/UI_view2d.h
@@ -120,6 +120,12 @@ void UI_view2d_curRect_validate(struct View2D *v2d);
void UI_view2d_curRect_reset(struct View2D *v2d);
void UI_view2d_sync(struct bScreen *screen, struct ScrArea *area, struct View2D *v2dcur, int flag);
+/* Perform all required updates after `v2d->cur` has been modified.
+ * This includes the view validation (UI_view2d_curRect_validate).
+ *
+ * Current intent is to use it from user code, such as view navigation and zoom operations. */
+void UI_view2d_curRect_changed(const struct bContext *C, struct View2D *v2d);
+
void UI_view2d_totRect_set(struct View2D *v2d, int width, int height);
void UI_view2d_totRect_set_resize(struct View2D *v2d, int width, int height, bool resize);
diff --git a/source/blender/editors/interface/interface_widgets.c b/source/blender/editors/interface/interface_widgets.c
index c4de2730600..1be62e535de 100644
--- a/source/blender/editors/interface/interface_widgets.c
+++ b/source/blender/editors/interface/interface_widgets.c
@@ -1181,12 +1181,7 @@ void UI_widgetbase_draw_cache_flush(void)
MAX_WIDGET_PARAMETERS * MAX_WIDGET_BASE_BATCH,
(float *)g_widget_base_batch.params);
GPU_batch_uniform_3fv(batch, "checkerColorAndSize", checker_params);
- GPU_matrix_bind(batch->interface);
- GPU_shader_set_srgb_uniform(batch->interface);
- GPU_batch_bind(batch);
- GPU_batch_draw_advanced(batch, 0, 0, 0, g_widget_base_batch.count);
-
- GPU_batch_program_use_end(batch);
+ GPU_batch_draw_instanced(batch, g_widget_base_batch.count);
}
g_widget_base_batch.count = 0;
}
diff --git a/source/blender/editors/interface/view2d.c b/source/blender/editors/interface/view2d.c
index 3efed43e08c..f15a95880f8 100644
--- a/source/blender/editors/interface/view2d.c
+++ b/source/blender/editors/interface/view2d.c
@@ -853,6 +853,17 @@ void UI_view2d_curRect_validate(View2D *v2d)
ui_view2d_curRect_validate_resize(v2d, false);
}
+void UI_view2d_curRect_changed(const bContext *C, View2D *v2d)
+{
+ UI_view2d_curRect_validate(v2d);
+
+ ARegion *region = CTX_wm_region(C);
+
+ if (region->type->on_view2d_changed != NULL) {
+ region->type->on_view2d_changed(C, region);
+ }
+}
+
/* ------------------ */
/* Called by menus to activate it, or by view2d operators
diff --git a/source/blender/editors/interface/view2d_ops.c b/source/blender/editors/interface/view2d_ops.c
index d62058699d9..7caa61ec91d 100644
--- a/source/blender/editors/interface/view2d_ops.c
+++ b/source/blender/editors/interface/view2d_ops.c
@@ -185,8 +185,8 @@ static void view_pan_apply_ex(bContext *C, v2dViewPanData *vpd, float dx, float
v2d->cur.ymax += dy;
}
- /* validate that view is in valid configuration after this operation */
- UI_view2d_curRect_validate(v2d);
+ /* Inform v2d about changes after this operation. */
+ UI_view2d_curRect_changed(C, v2d);
/* don't rebuild full tree in outliner, since we're just changing our view */
ED_region_tag_redraw_no_rebuild(vpd->region);
@@ -957,8 +957,8 @@ static void view_zoomstep_apply_ex(
}
}
- /* validate that view is in valid configuration after this operation */
- UI_view2d_curRect_validate(v2d);
+ /* Inform v2d about changes after this operation. */
+ UI_view2d_curRect_changed(C, v2d);
if (ED_region_snap_size_apply(region, snap_test)) {
ScrArea *area = CTX_wm_area(C);
@@ -1216,8 +1216,8 @@ static void view_zoomdrag_apply(bContext *C, wmOperator *op)
}
}
- /* validate that view is in valid configuration after this operation */
- UI_view2d_curRect_validate(v2d);
+ /* Inform v2d about changes after this operation. */
+ UI_view2d_curRect_changed(C, v2d);
if (ED_region_snap_size_apply(vzd->region, snap_test)) {
ScrArea *area = CTX_wm_area(C);
@@ -1806,7 +1806,7 @@ void UI_view2d_smooth_view(bContext *C, ARegion *region, const rctf *cur, const
if (ok == false) {
v2d->cur = sms.new_cur;
- UI_view2d_curRect_validate(v2d);
+ UI_view2d_curRect_changed(C, v2d);
ED_region_tag_redraw_no_rebuild(region);
UI_view2d_sync(CTX_wm_screen(C), CTX_wm_area(C), v2d, V2D_LOCK_COPY);
}
@@ -1853,7 +1853,7 @@ static int view2d_smoothview_invoke(bContext *C, wmOperator *UNUSED(op), const w
BLI_rctf_interp(&v2d->cur, &sms->orig_cur, &sms->new_cur, step);
}
- UI_view2d_curRect_validate(v2d);
+ UI_view2d_curRect_changed(C, v2d);
UI_view2d_sync(CTX_wm_screen(C), CTX_wm_area(C), v2d, V2D_LOCK_COPY);
ED_region_tag_redraw_no_rebuild(region);
@@ -2176,8 +2176,8 @@ static void scroller_activate_apply(bContext *C, wmOperator *op)
break;
}
- /* validate that view is in valid configuration after this operation */
- UI_view2d_curRect_validate(v2d);
+ /* Inform v2d about changes after this operation. */
+ UI_view2d_curRect_changed(C, v2d);
/* request updates to be done... */
ED_region_tag_redraw_no_rebuild(vsm->region);
@@ -2410,8 +2410,8 @@ static int reset_exec(bContext *C, wmOperator *UNUSED(op))
}
}
- /* validate that view is in valid configuration after this operation */
- UI_view2d_curRect_validate(v2d);
+ /* Inform v2d about changes after this operation. */
+ UI_view2d_curRect_changed(C, v2d);
if (ED_region_snap_size_apply(region, snap_test)) {
ScrArea *area = CTX_wm_area(C);
diff --git a/source/blender/editors/mesh/editmesh_knife.c b/source/blender/editors/mesh/editmesh_knife.c
index 6f4f75e802a..b02e48a652e 100644
--- a/source/blender/editors/mesh/editmesh_knife.c
+++ b/source/blender/editors/mesh/editmesh_knife.c
@@ -1147,16 +1147,13 @@ static void knifetool_draw(const bContext *UNUSED(C), ARegion *UNUSED(region), v
GPUBatch *batch = GPU_batch_create_ex(GPU_PRIM_POINTS, vert, NULL, GPU_BATCH_OWNS_VBO);
GPU_batch_program_set_builtin(batch, GPU_SHADER_3D_UNIFORM_COLOR);
- GPU_batch_bind(batch);
/* draw any snapped verts first */
rgba_uchar_to_float(fcol, kcd->colors.point_a);
GPU_batch_uniform_4fv(batch, "color", fcol);
- GPU_matrix_bind(batch->interface);
- GPU_shader_set_srgb_uniform(batch->interface);
GPU_point_size(11);
if (snapped_verts_count > 0) {
- GPU_batch_draw_advanced(batch, 0, snapped_verts_count, 0, 0);
+ GPU_batch_draw_range(batch, 0, snapped_verts_count);
}
/* now draw the rest */
@@ -1164,10 +1161,9 @@ static void knifetool_draw(const bContext *UNUSED(C), ARegion *UNUSED(region), v
GPU_batch_uniform_4fv(batch, "color", fcol);
GPU_point_size(7);
if (other_verts_count > 0) {
- GPU_batch_draw_advanced(batch, snapped_verts_count, other_verts_count, 0, 0);
+ GPU_batch_draw_range(batch, snapped_verts_count, other_verts_count);
}
- GPU_batch_program_use_end(batch);
GPU_batch_discard(batch);
GPU_blend(false);
diff --git a/source/blender/editors/object/object_bake_api.c b/source/blender/editors/object/object_bake_api.c
index cb92fab3cb0..eb8b976320f 100644
--- a/source/blender/editors/object/object_bake_api.c
+++ b/source/blender/editors/object/object_bake_api.c
@@ -1596,9 +1596,8 @@ static void bake_set_props(wmOperator *op, Scene *scene)
prop = RNA_struct_find_property(op->ptr, "cage_object");
if (!RNA_property_is_set(op->ptr, prop)) {
- if (bake->cage_object) {
- RNA_property_string_set(op->ptr, prop, bake->cage_object->id.name + 2);
- }
+ RNA_property_string_set(
+ op->ptr, prop, (bake->cage_object) ? bake->cage_object->id.name + 2 : "");
}
prop = RNA_struct_find_property(op->ptr, "normal_space");
diff --git a/source/blender/editors/screen/screen_draw.c b/source/blender/editors/screen/screen_draw.c
index 40a452a5363..d8d47fb01aa 100644
--- a/source/blender/editors/screen/screen_draw.c
+++ b/source/blender/editors/screen/screen_draw.c
@@ -343,6 +343,7 @@ static void drawscredge_area_draw(
}
GPUBatch *batch = batch_screen_edges_get(NULL);
+ GPU_batch_program_set_builtin(batch, GPU_SHADER_2D_AREA_EDGES);
GPU_batch_uniform_4fv(batch, "rect", (float *)&rect);
GPU_batch_draw(batch);
}
diff --git a/source/blender/editors/space_image/space_image.c b/source/blender/editors/space_image/space_image.c
index c01bc01588e..a64d5505ebe 100644
--- a/source/blender/editors/space_image/space_image.c
+++ b/source/blender/editors/space_image/space_image.c
@@ -642,7 +642,6 @@ static void image_main_region_draw(const bContext *C, ARegion *region)
// View2DScrollers *scrollers;
float col[3];
- GPU_batch_presets_reset();
GPUViewport *viewport = WM_draw_region_get_viewport(region);
GPUFrameBuffer *framebuffer_default, *framebuffer_overlay;
diff --git a/source/blender/editors/space_sequencer/sequencer_view.c b/source/blender/editors/space_sequencer/sequencer_view.c
index c1dac30bcb6..491c475b596 100644
--- a/source/blender/editors/space_sequencer/sequencer_view.c
+++ b/source/blender/editors/space_sequencer/sequencer_view.c
@@ -144,17 +144,17 @@ void SEQUENCER_OT_view_frame(wmOperatorType *ot)
static int sequencer_view_all_preview_exec(bContext *C, wmOperator *UNUSED(op))
{
+ SpaceSeq *sseq = CTX_wm_space_seq(C);
bScreen *screen = CTX_wm_screen(C);
ScrArea *area = CTX_wm_area(C);
#if 0
ARegion *region = CTX_wm_region(C);
- SpaceSeq *sseq = area->spacedata.first;
Scene *scene = CTX_data_scene(C);
#endif
View2D *v2d = UI_view2d_fromcontext(C);
v2d->cur = v2d->tot;
- UI_view2d_curRect_validate(v2d);
+ UI_view2d_curRect_changed(C, v2d);
UI_view2d_sync(screen, area, v2d, V2D_LOCK_COPY);
#if 0
@@ -186,6 +186,8 @@ static int sequencer_view_all_preview_exec(bContext *C, wmOperator *UNUSED(op))
}
#endif
+ sseq->flag |= SEQ_ZOOM_TO_FIT;
+
ED_area_tag_redraw(CTX_wm_area(C));
return OPERATOR_FINISHED;
}
@@ -228,6 +230,8 @@ static int sequencer_view_zoom_ratio_exec(bContext *C, wmOperator *op)
ED_region_tag_redraw(CTX_wm_region(C));
+ UI_view2d_curRect_changed(C, v2d);
+
return OPERATOR_FINISHED;
}
diff --git a/source/blender/editors/space_sequencer/space_sequencer.c b/source/blender/editors/space_sequencer/space_sequencer.c
index b8bb3e4d43b..4a6bd0de60c 100644
--- a/source/blender/editors/space_sequencer/space_sequencer.c
+++ b/source/blender/editors/space_sequencer/space_sequencer.c
@@ -96,7 +96,8 @@ static SpaceLink *sequencer_create(const ScrArea *UNUSED(area), const Scene *sce
sseq->chanshown = 0;
sseq->view = SEQ_VIEW_SEQUENCE;
sseq->mainb = SEQ_DRAW_IMG_IMBUF;
- sseq->flag = SEQ_SHOW_GPENCIL | SEQ_USE_ALPHA | SEQ_SHOW_MARKERS | SEQ_SHOW_FCURVES;
+ sseq->flag = SEQ_SHOW_GPENCIL | SEQ_USE_ALPHA | SEQ_SHOW_MARKERS | SEQ_SHOW_FCURVES |
+ SEQ_ZOOM_TO_FIT;
/* Tool header. */
region = MEM_callocN(sizeof(ARegion), "tool header for sequencer");
@@ -679,6 +680,22 @@ static void sequencer_preview_region_init(wmWindowManager *wm, ARegion *region)
WM_event_add_keymap_handler_v2d_mask(&region->handlers, keymap);
}
+static void sequencer_preview_region_layout(const bContext *C, ARegion *region)
+{
+ SpaceSeq *sseq = CTX_wm_space_seq(C);
+
+ if (sseq->flag & SEQ_ZOOM_TO_FIT) {
+ View2D *v2d = &region->v2d;
+ v2d->cur = v2d->tot;
+ }
+}
+
+static void sequencer_preview_region_view2d_changed(const bContext *C, ARegion *UNUSED(region))
+{
+ SpaceSeq *sseq = CTX_wm_space_seq(C);
+ sseq->flag &= ~SEQ_ZOOM_TO_FIT;
+}
+
static void sequencer_preview_region_draw(const bContext *C, ARegion *region)
{
ScrArea *area = CTX_wm_area(C);
@@ -881,6 +898,8 @@ void ED_spacetype_sequencer(void)
art = MEM_callocN(sizeof(ARegionType), "spacetype sequencer region");
art->regionid = RGN_TYPE_PREVIEW;
art->init = sequencer_preview_region_init;
+ art->layout = sequencer_preview_region_layout;
+ art->on_view2d_changed = sequencer_preview_region_view2d_changed;
art->draw = sequencer_preview_region_draw;
art->listener = sequencer_preview_region_listener;
art->keymapflag = ED_KEYMAP_TOOL | ED_KEYMAP_GIZMO | ED_KEYMAP_VIEW2D | ED_KEYMAP_FRAMES |
diff --git a/source/blender/editors/space_view3d/view3d_draw.c b/source/blender/editors/space_view3d/view3d_draw.c
index 0442a0f35c9..ac70547c293 100644
--- a/source/blender/editors/space_view3d/view3d_draw.c
+++ b/source/blender/editors/space_view3d/view3d_draw.c
@@ -1623,10 +1623,6 @@ void view3d_main_region_draw(const bContext *C, ARegion *region)
BKE_image_free_old_gputextures(bmain);
GPU_pass_cache_garbage_collect();
- /* XXX This is in order to draw UI batches with the DRW
- * old context since we now use it for drawing the entire area. */
- gpu_batch_presets_reset();
-
/* No depth test for drawing action zones afterwards. */
GPU_depth_test(false);
diff --git a/source/blender/editors/space_view3d/view3d_placement.c b/source/blender/editors/space_view3d/view3d_placement.c
index a828dbc2ee0..a21c1458286 100644
--- a/source/blender/editors/space_view3d/view3d_placement.c
+++ b/source/blender/editors/space_view3d/view3d_placement.c
@@ -262,8 +262,6 @@ static void draw_line_loop(const float coords[][3], int coords_len, const float
GPUBatch *batch = GPU_batch_create_ex(GPU_PRIM_LINE_LOOP, vert, NULL, GPU_BATCH_OWNS_VBO);
GPU_batch_program_set_builtin(batch, GPU_SHADER_3D_POLYLINE_UNIFORM_COLOR);
- GPU_batch_bind(batch);
-
GPU_batch_uniform_4fv(batch, "color", color);
float viewport[4];
@@ -273,8 +271,6 @@ static void draw_line_loop(const float coords[][3], int coords_len, const float
GPU_batch_draw(batch);
- GPU_batch_program_use_end(batch);
-
GPU_batch_discard(batch);
GPU_blend(false);
}
@@ -299,8 +295,6 @@ static void draw_line_pairs(const float coords_a[][3],
GPUBatch *batch = GPU_batch_create_ex(GPU_PRIM_LINES, vert, NULL, GPU_BATCH_OWNS_VBO);
GPU_batch_program_set_builtin(batch, GPU_SHADER_3D_POLYLINE_UNIFORM_COLOR);
- GPU_batch_bind(batch);
-
GPU_batch_uniform_4fv(batch, "color", color);
float viewport[4];
@@ -310,8 +304,6 @@ static void draw_line_pairs(const float coords_a[][3],
GPU_batch_draw(batch);
- GPU_batch_program_use_end(batch);
-
GPU_batch_discard(batch);
GPU_blend(false);
}
@@ -351,8 +343,6 @@ static void draw_line_bounds(const BoundBox *bounds, const float color[4])
GPUBatch *batch = GPU_batch_create_ex(GPU_PRIM_LINES, vert, NULL, GPU_BATCH_OWNS_VBO);
GPU_batch_program_set_builtin(batch, GPU_SHADER_3D_POLYLINE_UNIFORM_COLOR);
- GPU_batch_bind(batch);
-
GPU_batch_uniform_4fv(batch, "color", color);
float viewport[4];
@@ -362,8 +352,6 @@ static void draw_line_bounds(const BoundBox *bounds, const float color[4])
GPU_batch_draw(batch);
- GPU_batch_program_use_end(batch);
-
GPU_batch_discard(batch);
GPU_blend(false);
}
diff --git a/source/blender/editors/uvedit/uvedit_draw.c b/source/blender/editors/uvedit/uvedit_draw.c
index df8d3cfb8db..f26e95d8861 100644
--- a/source/blender/editors/uvedit/uvedit_draw.c
+++ b/source/blender/editors/uvedit/uvedit_draw.c
@@ -288,10 +288,6 @@ static void draw_uvs_texpaint(const Scene *scene, Object *ob, Depsgraph *depsgra
uint idx = 0;
bool prev_ma_match = (mpoly->mat_nr == (ob_eval->actcol - 1));
- GPU_matrix_bind(geom->interface);
- GPU_shader_set_srgb_uniform(geom->interface);
- GPU_batch_bind(geom);
-
/* TODO(fclem): If drawcall count becomes a problem in the future
* we can use multi draw indirect drawcalls for this.
* (not implemented in GPU module at the time of writing). */
@@ -299,7 +295,7 @@ static void draw_uvs_texpaint(const Scene *scene, Object *ob, Depsgraph *depsgra
bool ma_match = (mpoly->mat_nr == (ob_eval->actcol - 1));
if (ma_match != prev_ma_match) {
if (ma_match == false) {
- GPU_batch_draw_advanced(geom, draw_start, idx - draw_start, 0, 0);
+ GPU_batch_draw_range(geom, draw_start, idx - draw_start);
}
else {
draw_start = idx;
@@ -309,10 +305,8 @@ static void draw_uvs_texpaint(const Scene *scene, Object *ob, Depsgraph *depsgra
prev_ma_match = ma_match;
}
if (prev_ma_match == true) {
- GPU_batch_draw_advanced(geom, draw_start, idx - draw_start, 0, 0);
+ GPU_batch_draw_range(geom, draw_start, idx - draw_start);
}
-
- GPU_batch_program_use_end(geom);
}
else {
GPU_batch_draw(geom);
@@ -455,6 +449,8 @@ static void draw_uvs(SpaceImage *sima,
}
col1[3] = overlay_alpha;
+ GPU_batch_program_set_builtin(batch->edges, shader);
+
/* Inner Line. Use depth test to insure selection is drawn on top. */
GPU_depth_test(true);
GPU_line_width(1.0f);
diff --git a/source/blender/gpu/CMakeLists.txt b/source/blender/gpu/CMakeLists.txt
index 80ea28aca3c..906ae31fbc7 100644
--- a/source/blender/gpu/CMakeLists.txt
+++ b/source/blender/gpu/CMakeLists.txt
@@ -63,6 +63,7 @@ set(SRC
intern/gpu_codegen.c
intern/gpu_context.cc
intern/gpu_debug.cc
+ intern/gpu_drawlist.cc
intern/gpu_element.cc
intern/gpu_extensions.cc
intern/gpu_framebuffer.cc
@@ -88,7 +89,10 @@ set(SRC
intern/gpu_vertex_format.cc
intern/gpu_viewport.c
+ opengl/gl_batch.cc
opengl/gl_context.cc
+ opengl/gl_drawlist.cc
+ opengl/gl_vertex_array.cc
GPU_attr_binding.h
GPU_batch.h
@@ -98,6 +102,7 @@ set(SRC
GPU_common.h
GPU_context.h
GPU_debug.h
+ GPU_drawlist.h
GPU_element.h
GPU_extensions.h
GPU_framebuffer.h
@@ -122,9 +127,10 @@ set(SRC
intern/gpu_attr_binding_private.h
intern/gpu_backend.hh
- intern/gpu_batch_private.h
+ intern/gpu_batch_private.hh
intern/gpu_codegen.h
intern/gpu_context_private.hh
+ intern/gpu_drawlist_private.hh
intern/gpu_material_library.h
intern/gpu_matrix_private.h
intern/gpu_node_graph.h
@@ -135,7 +141,10 @@ set(SRC
intern/gpu_vertex_format_private.h
opengl/gl_backend.hh
+ opengl/gl_batch.hh
opengl/gl_context.hh
+ opengl/gl_drawlist.hh
+ opengl/gl_vertex_array.hh
)
set(LIB
diff --git a/source/blender/gpu/GPU_batch.h b/source/blender/gpu/GPU_batch.h
index 855214c279c..d57739156f8 100644
--- a/source/blender/gpu/GPU_batch.h
+++ b/source/blender/gpu/GPU_batch.h
@@ -26,85 +26,82 @@
#pragma once
+#include "BLI_utildefines.h"
+
#include "GPU_element.h"
#include "GPU_shader.h"
-#include "GPU_shader_interface.h"
#include "GPU_vertex_buffer.h"
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-typedef enum {
- GPU_BATCH_UNUSED,
- GPU_BATCH_READY_TO_FORMAT,
- GPU_BATCH_READY_TO_BUILD,
- GPU_BATCH_BUILDING,
- GPU_BATCH_READY_TO_DRAW,
-} GPUBatchPhase;
-
#define GPU_BATCH_VBO_MAX_LEN 6
#define GPU_BATCH_INST_VBO_MAX_LEN 2
#define GPU_BATCH_VAO_STATIC_LEN 3
#define GPU_BATCH_VAO_DYN_ALLOC_COUNT 16
-typedef struct GPUBatch {
- /* geometry */
+typedef enum eGPUBatchFlag {
+ /** Invalid default state. */
+ GPU_BATCH_INVALID = 0,
+
+ /** GPUVertBuf ownership. (One bit per vbo) */
+ GPU_BATCH_OWNS_VBO = (1 << 0),
+ GPU_BATCH_OWNS_VBO_MAX = (GPU_BATCH_OWNS_VBO << (GPU_BATCH_VBO_MAX_LEN - 1)),
+ GPU_BATCH_OWNS_VBO_ANY = ((GPU_BATCH_OWNS_VBO << GPU_BATCH_VBO_MAX_LEN) - 1),
+ /** Instance GPUVertBuf ownership. (One bit per vbo) */
+ GPU_BATCH_OWNS_INST_VBO = (GPU_BATCH_OWNS_VBO_MAX << 1),
+ GPU_BATCH_OWNS_INST_VBO_MAX = (GPU_BATCH_OWNS_INST_VBO << (GPU_BATCH_INST_VBO_MAX_LEN - 1)),
+ GPU_BATCH_OWNS_INST_VBO_ANY = ((GPU_BATCH_OWNS_INST_VBO << GPU_BATCH_INST_VBO_MAX_LEN) - 1) &
+ ~GPU_BATCH_OWNS_VBO_ANY,
+ /** GPUIndexBuf ownership. */
+ GPU_BATCH_OWNS_INDEX = (GPU_BATCH_OWNS_INST_VBO_MAX << 1),
+
+ /** Has been initialized. At least one VBO is set. */
+ GPU_BATCH_INIT = (1 << 16),
+ /** Batch is initialized but its VBOs are still being populated. (optional) */
+ GPU_BATCH_BUILDING = (1 << 17),
+ /** Cached data need to be rebuilt. (VAO, PSO, ...) */
+ GPU_BATCH_DIRTY = (1 << 18),
+} eGPUBatchFlag;
+
+#define GPU_BATCH_OWNS_NONE GPU_BATCH_INVALID
+
+BLI_STATIC_ASSERT(GPU_BATCH_OWNS_INDEX < GPU_BATCH_INIT,
+ "eGPUBatchFlag: Error: status flags are shadowed by the ownership bits!")
+
+ENUM_OPERATORS(eGPUBatchFlag)
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+/**
+ * IMPORTANT: Do not allocate manually as the real struct is bigger (i.e: GLBatch). This is only
+ * the common and "public" part of the struct. Use the provided allocator.
+ * TODO(fclem) Make the content of this struct hidden and expose getters/setters.
+ **/
+typedef struct GPUBatch {
/** verts[0] is required, others can be NULL */
GPUVertBuf *verts[GPU_BATCH_VBO_MAX_LEN];
/** Instance attributes. */
GPUVertBuf *inst[GPU_BATCH_INST_VBO_MAX_LEN];
/** NULL if element list not needed */
GPUIndexBuf *elem;
- uint32_t gl_prim_type;
-
- /* cached values (avoid dereferencing later) */
- uint32_t vao_id;
- uint32_t program;
- const struct GPUShaderInterface *interface;
-
- /* book-keeping */
- uint owns_flag;
- /** used to free all vaos. this implies all vaos were created under the same context. */
- struct GPUContext *context;
- GPUBatchPhase phase;
- bool program_in_use;
-
- /* Vao management: remembers all geometry state (vertex attribute bindings & element buffer)
- * for each shader interface. Start with a static number of vaos and fallback to dynamic count
- * if necessary. Once a batch goes dynamic it does not go back. */
- bool is_dynamic_vao_count;
- union {
- /** Static handle count */
- struct {
- const struct GPUShaderInterface *interfaces[GPU_BATCH_VAO_STATIC_LEN];
- uint32_t vao_ids[GPU_BATCH_VAO_STATIC_LEN];
- } static_vaos;
- /** Dynamic handle count */
- struct {
- uint count;
- const struct GPUShaderInterface **interfaces;
- uint32_t *vao_ids;
- } dynamic_vaos;
- };
-
- /* XXX This is the only solution if we want to have some data structure using
- * batches as key to identify nodes. We must destroy these nodes with this callback. */
- void (*free_callback)(struct GPUBatch *, void *);
- void *callback_data;
+ /** Bookkeeping. */
+ eGPUBatchFlag flag;
+ /** Type of geometry to draw. */
+ GPUPrimType prim_type;
+ /** Current assigned shader. DEPRECATED. Here only for uniform binding. */
+ struct GPUShader *shader;
} GPUBatch;
-enum {
- GPU_BATCH_OWNS_VBO = (1 << 0),
- /* each vbo index gets bit-shifted */
- GPU_BATCH_OWNS_INSTANCES = (1 << 30),
- GPU_BATCH_OWNS_INDEX = (1u << 31u),
-};
-
-GPUBatch *GPU_batch_calloc(uint count);
-GPUBatch *GPU_batch_create_ex(GPUPrimType, GPUVertBuf *, GPUIndexBuf *, uint owns_flag);
-void GPU_batch_init_ex(GPUBatch *, GPUPrimType, GPUVertBuf *, GPUIndexBuf *, uint owns_flag);
+GPUBatch *GPU_batch_calloc(void);
+GPUBatch *GPU_batch_create_ex(GPUPrimType prim,
+ GPUVertBuf *vert,
+ GPUIndexBuf *elem,
+ eGPUBatchFlag own_flag);
+void GPU_batch_init_ex(GPUBatch *batch,
+ GPUPrimType prim,
+ GPUVertBuf *vert,
+ GPUIndexBuf *elem,
+ eGPUBatchFlag own_flag);
void GPU_batch_copy(GPUBatch *batch_dst, GPUBatch *batch_src);
#define GPU_batch_create(prim, verts, elem) GPU_batch_create_ex(prim, verts, elem, 0)
@@ -115,10 +112,6 @@ void GPU_batch_clear(GPUBatch *);
void GPU_batch_discard(GPUBatch *); /* verts & elem are not discarded */
-void GPU_batch_vao_cache_clear(GPUBatch *);
-
-void GPU_batch_callback_free_set(GPUBatch *, void (*callback)(GPUBatch *, void *), void *);
-
void GPU_batch_instbuf_set(GPUBatch *, GPUVertBuf *, bool own_vbo); /* Instancing */
void GPU_batch_elembuf_set(GPUBatch *batch, GPUIndexBuf *elem, bool own_ibo);
@@ -128,19 +121,13 @@ int GPU_batch_vertbuf_add_ex(GPUBatch *, GPUVertBuf *, bool own_vbo);
#define GPU_batch_vertbuf_add(batch, verts) GPU_batch_vertbuf_add_ex(batch, verts, false)
void GPU_batch_set_shader(GPUBatch *batch, GPUShader *shader);
-void GPU_batch_set_shader_no_bind(GPUBatch *batch, GPUShader *shader);
void GPU_batch_program_set_imm_shader(GPUBatch *batch);
void GPU_batch_program_set_builtin(GPUBatch *batch, eGPUBuiltinShader shader_id);
void GPU_batch_program_set_builtin_with_config(GPUBatch *batch,
eGPUBuiltinShader shader_id,
eGPUShaderConfig sh_cfg);
-/* Entire batch draws with one shader program, but can be redrawn later with another program. */
-/* Vertex shader's inputs must be compatible with the batch's vertex format. */
-
-void GPU_batch_program_use_begin(GPUBatch *); /* call before Batch_Uniform (temp hack?) */
-void GPU_batch_program_use_end(GPUBatch *);
-void GPU_batch_uniform_1ui(GPUBatch *, const char *name, uint value);
+/* These only work after the batch program has been set. */
void GPU_batch_uniform_1i(GPUBatch *, const char *name, int value);
void GPU_batch_uniform_1b(GPUBatch *, const char *name, bool value);
void GPU_batch_uniform_1f(GPUBatch *, const char *name, float value);
@@ -154,10 +141,10 @@ void GPU_batch_uniform_2fv_array(GPUBatch *, const char *name, const int len, co
void GPU_batch_uniform_4fv_array(GPUBatch *, const char *name, const int len, const float *data);
void GPU_batch_uniform_mat4(GPUBatch *, const char *name, const float data[4][4]);
-void GPU_batch_draw(GPUBatch *);
+void GPU_batch_draw(GPUBatch *batch);
+void GPU_batch_draw_range(GPUBatch *batch, int v_first, int v_count);
+void GPU_batch_draw_instanced(GPUBatch *batch, int i_count);
-/* Needs to be called before GPU_batch_draw_advanced. */
-void GPU_batch_bind(GPUBatch *);
/* This does not bind/unbind shader and does not call GPU_matrix_bind() */
void GPU_batch_draw_advanced(GPUBatch *, int v_first, int v_count, int i_first, int i_count);
@@ -199,19 +186,6 @@ GPUBatch *create_BatchInGeneral(GPUPrimType, VertexBufferStuff, ElementListStuff
#endif /* future plans */
-/**
- * #GPUDrawList is an API to do lots of similar draw-calls very fast using multi-draw-indirect.
- * There is a fallback if the feature is not supported.
- */
-typedef struct GPUDrawList GPUDrawList;
-
-GPUDrawList *GPU_draw_list_create(int length);
-void GPU_draw_list_discard(GPUDrawList *list);
-void GPU_draw_list_init(GPUDrawList *list, GPUBatch *batch);
-void GPU_draw_list_command_add(
- GPUDrawList *list, int v_first, int v_count, int i_first, int i_count);
-void GPU_draw_list_submit(GPUDrawList *list);
-
void gpu_batch_init(void);
void gpu_batch_exit(void);
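
The new draw helpers wrap bind/draw/unbind in one call; a sketch, where the builtin shader and uniform name are assumptions about the caller's setup:

    GPU_batch_program_set_builtin(batch, GPU_SHADER_3D_UNIFORM_COLOR);
    GPU_batch_uniform_4f(batch, "color", 1.0f, 0.5f, 0.0f, 1.0f);
    GPU_batch_draw(batch);               /* Full range, one instance. */
    GPU_batch_draw_range(batch, 0, 6);   /* Only the first 6 verts/indices. */
    GPU_batch_draw_instanced(batch, 10); /* 10 instances, no instance attributes. */
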
diff --git a/source/blender/gpu/GPU_batch_presets.h b/source/blender/gpu/GPU_batch_presets.h
index 1674cf776db..7a235dd0e12 100644
--- a/source/blender/gpu/GPU_batch_presets.h
+++ b/source/blender/gpu/GPU_batch_presets.h
@@ -46,11 +46,8 @@ struct GPUBatch *GPU_batch_preset_panel_drag_widget(const float pixelsize,
void gpu_batch_presets_init(void);
void gpu_batch_presets_register(struct GPUBatch *preset_batch);
bool gpu_batch_presets_unregister(struct GPUBatch *preset_batch);
-void gpu_batch_presets_reset(void);
void gpu_batch_presets_exit(void);
-void GPU_batch_presets_reset(void);
-
#ifdef __cplusplus
}
#endif
diff --git a/source/blender/gpu/GPU_drawlist.h b/source/blender/gpu/GPU_drawlist.h
new file mode 100644
index 00000000000..27f70da8cf8
--- /dev/null
+++ b/source/blender/gpu/GPU_drawlist.h
@@ -0,0 +1,46 @@
+/*
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * The Original Code is Copyright (C) 2020 Blender Foundation.
+ * All rights reserved.
+ */
+
+/** \file
+ * \ingroup gpu
+ *
+ * GPUDrawList is an API for issuing many similar draw-calls very fast using
+ * multi-draw-indirect. A fallback is used if the feature is not supported.
+ */
+
+#pragma once
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+struct GPUBatch;
+
+typedef void *GPUDrawList; /* Opaque pointer. */
+
+/* Create a list with room for at least 'length' draw-calls. The length can affect performance. */
+GPUDrawList GPU_draw_list_create(int length);
+void GPU_draw_list_discard(GPUDrawList list);
+
+void GPU_draw_list_append(GPUDrawList list, GPUBatch *batch, int i_first, int i_count);
+void GPU_draw_list_submit(GPUDrawList list);
+
+#ifdef __cplusplus
+}
+#endif
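
A usage sketch for this header; `batch` and the per-object instance ranges are assumptions standing in for real scene data:

    GPUDrawList list = GPU_draw_list_create(64);
    for (int i = 0; i < 64; i++) {
      GPU_draw_list_append(list, batch, i * 4, 4); /* i_first, i_count per object. */
    }
    GPU_draw_list_submit(list);
    GPU_draw_list_discard(list);
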
diff --git a/source/blender/gpu/GPU_element.h b/source/blender/gpu/GPU_element.h
index 3d5195b12fc..5cf85b4ea0e 100644
--- a/source/blender/gpu/GPU_element.h
+++ b/source/blender/gpu/GPU_element.h
@@ -54,6 +54,8 @@ typedef struct GPUIndexBuf {
};
} GPUIndexBuf;
+GPUIndexBuf *GPU_indexbuf_calloc(void);
+
void GPU_indexbuf_use(GPUIndexBuf *);
uint GPU_indexbuf_size_get(const GPUIndexBuf *);
diff --git a/source/blender/gpu/GPU_shader.h b/source/blender/gpu/GPU_shader.h
index f782742ae53..0b9109fbd4b 100644
--- a/source/blender/gpu/GPU_shader.h
+++ b/source/blender/gpu/GPU_shader.h
@@ -104,6 +104,19 @@ void GPU_shader_uniform_vector_int(
void GPU_shader_uniform_float(GPUShader *shader, int location, float value);
void GPU_shader_uniform_int(GPUShader *shader, int location, int value);
+void GPU_shader_uniform_1i(GPUShader *sh, const char *name, int value);
+void GPU_shader_uniform_1b(GPUShader *sh, const char *name, bool value);
+void GPU_shader_uniform_1f(GPUShader *sh, const char *name, float value);
+void GPU_shader_uniform_2f(GPUShader *sh, const char *name, float x, float y);
+void GPU_shader_uniform_3f(GPUShader *sh, const char *name, float x, float y, float z);
+void GPU_shader_uniform_4f(GPUShader *sh, const char *name, float x, float y, float z, float w);
+void GPU_shader_uniform_2fv(GPUShader *sh, const char *name, const float data[2]);
+void GPU_shader_uniform_3fv(GPUShader *sh, const char *name, const float data[3]);
+void GPU_shader_uniform_4fv(GPUShader *sh, const char *name, const float data[4]);
+void GPU_shader_uniform_mat4(GPUShader *sh, const char *name, const float data[4][4]);
+void GPU_shader_uniform_2fv_array(GPUShader *sh, const char *name, int len, const float (*val)[2]);
+void GPU_shader_uniform_4fv_array(GPUShader *sh, const char *name, int len, const float (*val)[4]);
+
int GPU_shader_get_attribute(GPUShader *shader, const char *name);
char *GPU_shader_get_binary(GPUShader *shader, uint *r_binary_format, int *r_binary_len);
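
The added name-based setters look the uniform up through the shader interface instead of requiring a location; a short sketch, with the uniform names assumed to exist in the bound shader:

    GPU_shader_bind(sh);
    GPU_shader_uniform_1f(sh, "alpha", 0.5f);
    GPU_shader_uniform_3f(sh, "light_dir", 0.0f, 0.0f, 1.0f);
    const float color[4] = {1.0f, 1.0f, 1.0f, 1.0f};
    GPU_shader_uniform_4fv(sh, "color", color);
    GPU_shader_unbind();
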
diff --git a/source/blender/gpu/GPU_shader_interface.h b/source/blender/gpu/GPU_shader_interface.h
index 8aba1236b65..47e4e432d66 100644
--- a/source/blender/gpu/GPU_shader_interface.h
+++ b/source/blender/gpu/GPU_shader_interface.h
@@ -80,7 +80,7 @@ typedef struct GPUShaderInterface {
/** Buffer containing all inputs names separated by '\0'. */
char *name_buffer;
/** Reference to GPUBatches using this interface */
- struct GPUBatch **batches;
+ void **batches;
uint batches_len;
/** Input counts. */
uint attribute_len;
@@ -109,8 +109,8 @@ const GPUShaderInput *GPU_shaderinterface_ubo(const GPUShaderInterface *, const
const GPUShaderInput *GPU_shaderinterface_attr(const GPUShaderInterface *, const char *name);
/* keep track of batches using this interface */
-void GPU_shaderinterface_add_batch_ref(GPUShaderInterface *, struct GPUBatch *);
-void GPU_shaderinterface_remove_batch_ref(GPUShaderInterface *, struct GPUBatch *);
+void GPU_shaderinterface_add_batch_ref(GPUShaderInterface *interface, void *cache);
+void GPU_shaderinterface_remove_batch_ref(GPUShaderInterface *interface, void *cache);
#ifdef __cplusplus
}
diff --git a/source/blender/gpu/GPU_vertex_buffer.h b/source/blender/gpu/GPU_vertex_buffer.h
index 757255496e0..bd1019bb1f5 100644
--- a/source/blender/gpu/GPU_vertex_buffer.h
+++ b/source/blender/gpu/GPU_vertex_buffer.h
@@ -59,6 +59,8 @@ typedef struct GPUVertBuf {
uint32_t vbo_id;
/** Usage hint for GL optimisation. */
GPUUsageType usage;
+ /** This counter only prevents the GPUVertBuf struct from being freed, not its data. */
+ char handle_refcount;
/** Data has been touched and need to be reuploaded to GPU. */
bool dirty;
uchar *data; /* NULL indicates data in VRAM (unmapped) */
@@ -73,6 +75,10 @@ GPUVertBuf *GPU_vertbuf_create_with_format_ex(const GPUVertFormat *, GPUUsageTyp
void GPU_vertbuf_clear(GPUVertBuf *verts);
void GPU_vertbuf_discard(GPUVertBuf *);
+/* Prevent the GPUVertBuf struct from being freed while still referenced (its data may be). */
+void GPU_vertbuf_handle_ref_add(GPUVertBuf *verts);
+void GPU_vertbuf_handle_ref_remove(GPUVertBuf *verts);
+
void GPU_vertbuf_init(GPUVertBuf *, GPUUsageType);
void GPU_vertbuf_init_with_format_ex(GPUVertBuf *, const GPUVertFormat *, GPUUsageType);
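
A sketch of the intended hand-over pattern between two owners of the same `GPUVertBuf` (the surrounding setup is an assumption):

    GPU_vertbuf_handle_ref_add(vbo);    /* Second owner keeps a handle. */
    GPU_vertbuf_discard(vbo);           /* First owner: data is freed, struct survives. */
    GPU_vertbuf_handle_ref_remove(vbo); /* Last handle released: struct is freed. */
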
diff --git a/source/blender/gpu/intern/gpu_backend.hh b/source/blender/gpu/intern/gpu_backend.hh
index 24f592f214f..ba382e3c3fc 100644
--- a/source/blender/gpu/intern/gpu_backend.hh
+++ b/source/blender/gpu/intern/gpu_backend.hh
@@ -25,13 +25,27 @@
#pragma once
-struct GPUContext;
+#include "gpu_context_private.hh"
+#include "gpu_drawlist_private.hh"
+#include "gpu_batch_private.hh"
+
+namespace blender {
+namespace gpu {
class GPUBackend {
public:
virtual ~GPUBackend(){};
+ static GPUBackend *get(void);
+
virtual GPUContext *context_alloc(void *ghost_window) = 0;
+
+ virtual Batch *batch_alloc(void) = 0;
+ virtual DrawList *drawlist_alloc(int list_length) = 0;
+ // virtual FrameBuffer *framebuffer_alloc(void) = 0;
+ // virtual Shader *shader_alloc(void) = 0;
+ // virtual Texture *texture_alloc(void) = 0;
};
-GPUBackend *gpu_backend_get(void);
+} // namespace gpu
+} // namespace blender
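
With the factory methods in place, a backend subclass only has to return its own concrete types. A hypothetical sketch (the constructor signatures are assumptions, not the actual GL backend code):

    class GLBackend : public GPUBackend {
     public:
      GPUContext *context_alloc(void *ghost_window) override
      {
        return new GLContext(ghost_window);
      }
      Batch *batch_alloc(void) override
      {
        return new GLBatch();
      }
      DrawList *drawlist_alloc(int list_length) override
      {
        return new GLDrawList(list_length);
      }
    };
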
diff --git a/source/blender/gpu/intern/gpu_batch.cc b/source/blender/gpu/intern/gpu_batch.cc
index a6ba4d3d89a..7b006bdc6c2 100644
--- a/source/blender/gpu/intern/gpu_batch.cc
+++ b/source/blender/gpu/intern/gpu_batch.cc
@@ -26,6 +26,8 @@
#include "MEM_guardedalloc.h"
+#include "BLI_math_base.h"
+
#include "GPU_batch.h"
#include "GPU_batch_presets.h"
#include "GPU_extensions.h"
@@ -33,7 +35,8 @@
#include "GPU_platform.h"
#include "GPU_shader.h"
-#include "gpu_batch_private.h"
+#include "gpu_backend.hh"
+#include "gpu_batch_private.hh"
#include "gpu_context_private.hh"
#include "gpu_primitive_private.h"
#include "gpu_shader_private.h"
@@ -43,69 +46,38 @@
#include <stdlib.h>
#include <string.h>
-static GLuint g_default_attr_vbo = 0;
-
-static void batch_update_program_bindings(GPUBatch *batch, uint i_first);
+using namespace blender::gpu;
-void GPU_batch_vao_cache_clear(GPUBatch *batch)
-{
- if (batch->context == NULL) {
- return;
- }
- if (batch->is_dynamic_vao_count) {
- for (int i = 0; i < batch->dynamic_vaos.count; i++) {
- if (batch->dynamic_vaos.vao_ids[i]) {
- GPU_vao_free(batch->dynamic_vaos.vao_ids[i], batch->context);
- }
- if (batch->dynamic_vaos.interfaces[i]) {
- GPU_shaderinterface_remove_batch_ref(
- (GPUShaderInterface *)batch->dynamic_vaos.interfaces[i], batch);
- }
- }
- MEM_freeN((void *)batch->dynamic_vaos.interfaces);
- MEM_freeN(batch->dynamic_vaos.vao_ids);
- }
- else {
- for (int i = 0; i < GPU_BATCH_VAO_STATIC_LEN; i++) {
- if (batch->static_vaos.vao_ids[i]) {
- GPU_vao_free(batch->static_vaos.vao_ids[i], batch->context);
- }
- if (batch->static_vaos.interfaces[i]) {
- GPU_shaderinterface_remove_batch_ref(
- (GPUShaderInterface *)batch->static_vaos.interfaces[i], batch);
- }
- }
- }
- batch->is_dynamic_vao_count = false;
- for (int i = 0; i < GPU_BATCH_VAO_STATIC_LEN; i++) {
- batch->static_vaos.vao_ids[i] = 0;
- batch->static_vaos.interfaces[i] = NULL;
- }
- gpu_context_remove_batch(batch->context, batch);
- batch->context = NULL;
-}
+/* -------------------------------------------------------------------- */
+/** \name Creation & Deletion
+ * \{ */
-GPUBatch *GPU_batch_calloc(uint count)
+GPUBatch *GPU_batch_calloc(void)
{
- return (GPUBatch *)MEM_callocN(sizeof(GPUBatch) * count, "GPUBatch");
+ GPUBatch *batch = GPUBackend::get()->batch_alloc();
+ memset(batch, 0, sizeof(*batch));
+ return batch;
}
GPUBatch *GPU_batch_create_ex(GPUPrimType prim_type,
GPUVertBuf *verts,
GPUIndexBuf *elem,
- uint owns_flag)
+ eGPUBatchFlag owns_flag)
{
- GPUBatch *batch = GPU_batch_calloc(1);
+ GPUBatch *batch = GPU_batch_calloc();
GPU_batch_init_ex(batch, prim_type, verts, elem, owns_flag);
return batch;
}
-void GPU_batch_init_ex(
- GPUBatch *batch, GPUPrimType prim_type, GPUVertBuf *verts, GPUIndexBuf *elem, uint owns_flag)
+void GPU_batch_init_ex(GPUBatch *batch,
+ GPUPrimType prim_type,
+ GPUVertBuf *verts,
+ GPUIndexBuf *elem,
+ eGPUBatchFlag owns_flag)
{
-#if TRUST_NO_ONE
- assert(verts != NULL);
-#endif
+ BLI_assert(verts != NULL);
+ /* Do not pass any other flags. */
+ BLI_assert((owns_flag & ~(GPU_BATCH_OWNS_VBO | GPU_BATCH_OWNS_INDEX)) == 0);
batch->verts[0] = verts;
for (int v = 1; v < GPU_BATCH_VBO_MAX_LEN; v++) {
@@ -115,19 +87,18 @@ void GPU_batch_init_ex(
batch->inst[v] = NULL;
}
batch->elem = elem;
- batch->gl_prim_type = convert_prim_type_to_gl(prim_type);
- batch->phase = GPU_BATCH_READY_TO_DRAW;
- batch->is_dynamic_vao_count = false;
- batch->owns_flag = owns_flag;
- batch->free_callback = NULL;
+ batch->prim_type = prim_type;
+ batch->flag = owns_flag | GPU_BATCH_INIT | GPU_BATCH_DIRTY;
+ batch->shader = NULL;
}
/* This will share the VBOs with the new batch. */
void GPU_batch_copy(GPUBatch *batch_dst, GPUBatch *batch_src)
{
- GPU_batch_init_ex(batch_dst, GPU_PRIM_POINTS, batch_src->verts[0], batch_src->elem, 0);
+ GPU_batch_init_ex(
+ batch_dst, GPU_PRIM_POINTS, batch_src->verts[0], batch_src->elem, GPU_BATCH_INVALID);
- batch_dst->gl_prim_type = batch_src->gl_prim_type;
+ batch_dst->prim_type = batch_src->prim_type;
for (int v = 1; v < GPU_BATCH_VBO_MAX_LEN; v++) {
batch_dst->verts[v] = batch_src->verts[v];
}
@@ -135,484 +106,182 @@ void GPU_batch_copy(GPUBatch *batch_dst, GPUBatch *batch_src)
void GPU_batch_clear(GPUBatch *batch)
{
- if (batch->owns_flag & GPU_BATCH_OWNS_INDEX) {
+ if (batch->flag & GPU_BATCH_OWNS_INDEX) {
GPU_indexbuf_discard(batch->elem);
}
- if (batch->owns_flag & GPU_BATCH_OWNS_INSTANCES) {
- GPU_vertbuf_discard(batch->inst[0]);
- GPU_VERTBUF_DISCARD_SAFE(batch->inst[1]);
- }
- if ((batch->owns_flag & ~GPU_BATCH_OWNS_INDEX) != 0) {
- for (int v = 0; v < GPU_BATCH_VBO_MAX_LEN; v++) {
- if (batch->verts[v] == NULL) {
- break;
+ if (batch->flag & GPU_BATCH_OWNS_VBO_ANY) {
+ for (int v = 0; (v < GPU_BATCH_VBO_MAX_LEN) && batch->verts[v]; v++) {
+ if (batch->flag & (GPU_BATCH_OWNS_VBO << v)) {
+ GPU_VERTBUF_DISCARD_SAFE(batch->verts[v]);
}
- if (batch->owns_flag & (1 << v)) {
- GPU_vertbuf_discard(batch->verts[v]);
+ }
+ }
+ if (batch->flag & GPU_BATCH_OWNS_INST_VBO_ANY) {
+ for (int v = 0; (v < GPU_BATCH_INST_VBO_MAX_LEN) && batch->inst[v]; v++) {
+ if (batch->flag & (GPU_BATCH_OWNS_INST_VBO << v)) {
+ GPU_VERTBUF_DISCARD_SAFE(batch->inst[v]);
}
}
}
- GPU_batch_vao_cache_clear(batch);
- batch->phase = GPU_BATCH_UNUSED;
+ batch->flag = GPU_BATCH_INVALID;
}
void GPU_batch_discard(GPUBatch *batch)
{
- if (batch->free_callback) {
- batch->free_callback(batch, batch->callback_data);
- }
-
GPU_batch_clear(batch);
- MEM_freeN(batch);
-}
-void GPU_batch_callback_free_set(GPUBatch *batch,
- void (*callback)(GPUBatch *, void *),
- void *user_data)
-{
- batch->free_callback = callback;
- batch->callback_data = user_data;
+ delete static_cast<Batch *>(batch);
}
+/** \} */
+
+/* -------------------------------------------------------------------- */
+/** \name Buffers Management
+ * \{ */
+
+/* NOTE: Overrides ONLY the first instance VBO (and frees it if owned). */
void GPU_batch_instbuf_set(GPUBatch *batch, GPUVertBuf *inst, bool own_vbo)
{
-#if TRUST_NO_ONE
- assert(inst != NULL);
-#endif
- /* redo the bindings */
- GPU_batch_vao_cache_clear(batch);
+ BLI_assert(inst);
+ batch->flag |= GPU_BATCH_DIRTY;
- if (batch->inst[0] != NULL && (batch->owns_flag & GPU_BATCH_OWNS_INSTANCES)) {
+ if (batch->inst[0] && (batch->flag & GPU_BATCH_OWNS_INST_VBO)) {
GPU_vertbuf_discard(batch->inst[0]);
- GPU_VERTBUF_DISCARD_SAFE(batch->inst[1]);
}
batch->inst[0] = inst;
- if (own_vbo) {
- batch->owns_flag |= GPU_BATCH_OWNS_INSTANCES;
- }
- else {
- batch->owns_flag &= ~GPU_BATCH_OWNS_INSTANCES;
- }
+ SET_FLAG_FROM_TEST(batch->flag, own_vbo, GPU_BATCH_OWNS_INST_VBO);
}
+/* NOTE: Override any previously assigned elem (and free it if owned). */
void GPU_batch_elembuf_set(GPUBatch *batch, GPUIndexBuf *elem, bool own_ibo)
{
- BLI_assert(elem != NULL);
- /* redo the bindings */
- GPU_batch_vao_cache_clear(batch);
+ BLI_assert(elem);
+ batch->flag |= GPU_BATCH_DIRTY;
- if (batch->elem != NULL && (batch->owns_flag & GPU_BATCH_OWNS_INDEX)) {
+ if (batch->elem && (batch->flag & GPU_BATCH_OWNS_INDEX)) {
GPU_indexbuf_discard(batch->elem);
}
batch->elem = elem;
- if (own_ibo) {
- batch->owns_flag |= GPU_BATCH_OWNS_INDEX;
- }
- else {
- batch->owns_flag &= ~GPU_BATCH_OWNS_INDEX;
- }
+ SET_FLAG_FROM_TEST(batch->flag, own_ibo, GPU_BATCH_OWNS_INDEX);
}
-/* A bit of a quick hack. Should be streamlined as the vbos handling */
int GPU_batch_instbuf_add_ex(GPUBatch *batch, GPUVertBuf *insts, bool own_vbo)
{
- /* redo the bindings */
- GPU_batch_vao_cache_clear(batch);
+ BLI_assert(insts);
+ batch->flag |= GPU_BATCH_DIRTY;
for (uint v = 0; v < GPU_BATCH_INST_VBO_MAX_LEN; v++) {
if (batch->inst[v] == NULL) {
-#if TRUST_NO_ONE
/* for now all VertexBuffers must have same vertex_len */
- if (batch->inst[0] != NULL) {
- /* Allow for different size of vertex buf (will choose the smallest number of verts). */
- // assert(insts->vertex_len == batch->inst[0]->vertex_len);
- assert(own_vbo == ((batch->owns_flag & GPU_BATCH_OWNS_INSTANCES) != 0));
+ if (batch->inst[0]) {
+ /* Allow different sizes of vertex buffers (the smallest
+ * number of verts will be used). */
+ // BLI_assert(insts->vertex_len == batch->inst[0]->vertex_len);
}
-#endif
+
batch->inst[v] = insts;
- if (own_vbo) {
- batch->owns_flag |= GPU_BATCH_OWNS_INSTANCES;
- }
+ SET_FLAG_FROM_TEST(batch->flag, own_vbo, (eGPUBatchFlag)(GPU_BATCH_OWNS_INST_VBO << v));
return v;
}
}
-
/* we only make it this far if there is no room for another GPUVertBuf */
-#if TRUST_NO_ONE
- assert(false);
-#endif
+ BLI_assert(0 && "Not enough Instance VBO slot in batch");
return -1;
}
/* Returns the index of verts in the batch. */
int GPU_batch_vertbuf_add_ex(GPUBatch *batch, GPUVertBuf *verts, bool own_vbo)
{
- /* redo the bindings */
- GPU_batch_vao_cache_clear(batch);
+ BLI_assert(verts);
+ batch->flag |= GPU_BATCH_DIRTY;
for (uint v = 0; v < GPU_BATCH_VBO_MAX_LEN; v++) {
if (batch->verts[v] == NULL) {
-#if TRUST_NO_ONE
/* for now all VertexBuffers must have same vertex_len */
if (batch->verts[0] != NULL) {
- assert(verts->vertex_len == batch->verts[0]->vertex_len);
+ BLI_assert(verts->vertex_len == batch->verts[0]->vertex_len);
}
-#endif
batch->verts[v] = verts;
- /* TODO: mark dirty so we can keep attribute bindings up-to-date */
- if (own_vbo) {
- batch->owns_flag |= (1 << v);
- }
+ SET_FLAG_FROM_TEST(batch->flag, own_vbo, (eGPUBatchFlag)(GPU_BATCH_OWNS_VBO << v));
return v;
}
}
-
/* we only make it this far if there is no room for another GPUVertBuf */
-#if TRUST_NO_ONE
- assert(false);
-#endif
+ BLI_assert(0 && "Not enough VBO slot in batch");
return -1;
}
-static GLuint batch_vao_get(GPUBatch *batch)
-{
- /* Search through cache */
- if (batch->is_dynamic_vao_count) {
- for (int i = 0; i < batch->dynamic_vaos.count; i++) {
- if (batch->dynamic_vaos.interfaces[i] == batch->interface) {
- return batch->dynamic_vaos.vao_ids[i];
- }
- }
- }
- else {
- for (int i = 0; i < GPU_BATCH_VAO_STATIC_LEN; i++) {
- if (batch->static_vaos.interfaces[i] == batch->interface) {
- return batch->static_vaos.vao_ids[i];
- }
- }
- }
-
- /* Set context of this batch.
- * It will be bound to it until GPU_batch_vao_cache_clear is called.
- * Until then it can only be drawn with this context. */
- if (batch->context == NULL) {
- batch->context = GPU_context_active_get();
- gpu_context_add_batch(batch->context, batch);
- }
-#if TRUST_NO_ONE
- else {
- /* Make sure you are not trying to draw this batch in another context. */
- assert(batch->context == GPU_context_active_get());
- }
-#endif
-
- /* Cache miss, time to add a new entry! */
- GLuint new_vao = 0;
- if (!batch->is_dynamic_vao_count) {
- int i; /* find first unused slot */
- for (i = 0; i < GPU_BATCH_VAO_STATIC_LEN; i++) {
- if (batch->static_vaos.vao_ids[i] == 0) {
- break;
- }
- }
-
- if (i < GPU_BATCH_VAO_STATIC_LEN) {
- batch->static_vaos.interfaces[i] = batch->interface;
- batch->static_vaos.vao_ids[i] = new_vao = GPU_vao_alloc();
- }
- else {
- /* Not enough place switch to dynamic. */
- batch->is_dynamic_vao_count = true;
- /* Erase previous entries, they will be added back if drawn again. */
- for (int j = 0; j < GPU_BATCH_VAO_STATIC_LEN; j++) {
- GPU_shaderinterface_remove_batch_ref(
- (GPUShaderInterface *)batch->static_vaos.interfaces[j], batch);
- GPU_vao_free(batch->static_vaos.vao_ids[j], batch->context);
- }
- /* Init dynamic arrays and let the branch below set the values. */
- batch->dynamic_vaos.count = GPU_BATCH_VAO_DYN_ALLOC_COUNT;
- batch->dynamic_vaos.interfaces = (const GPUShaderInterface **)MEM_callocN(
- batch->dynamic_vaos.count * sizeof(GPUShaderInterface *), "dyn vaos interfaces");
- batch->dynamic_vaos.vao_ids = (GLuint *)MEM_callocN(
- batch->dynamic_vaos.count * sizeof(GLuint), "dyn vaos ids");
- }
- }
-
- if (batch->is_dynamic_vao_count) {
- int i; /* find first unused slot */
- for (i = 0; i < batch->dynamic_vaos.count; i++) {
- if (batch->dynamic_vaos.vao_ids[i] == 0) {
- break;
- }
- }
-
- if (i == batch->dynamic_vaos.count) {
- /* Not enough place, realloc the array. */
- i = batch->dynamic_vaos.count;
- batch->dynamic_vaos.count += GPU_BATCH_VAO_DYN_ALLOC_COUNT;
- batch->dynamic_vaos.interfaces = (const GPUShaderInterface **)MEM_recallocN(
- (void *)batch->dynamic_vaos.interfaces,
- sizeof(GPUShaderInterface *) * batch->dynamic_vaos.count);
- batch->dynamic_vaos.vao_ids = (GLuint *)MEM_recallocN(
- batch->dynamic_vaos.vao_ids, sizeof(GLuint) * batch->dynamic_vaos.count);
- }
- batch->dynamic_vaos.interfaces[i] = batch->interface;
- batch->dynamic_vaos.vao_ids[i] = new_vao = GPU_vao_alloc();
- }
-
- GPU_shaderinterface_add_batch_ref((GPUShaderInterface *)batch->interface, batch);
-
-#if TRUST_NO_ONE
- assert(new_vao != 0);
-#endif
-
- /* We just got a fresh VAO we need to initialize it. */
- glBindVertexArray(new_vao);
- batch_update_program_bindings(batch, 0);
- glBindVertexArray(0);
-
- return new_vao;
-}
+/** \} */
-void GPU_batch_set_shader_no_bind(GPUBatch *batch, GPUShader *shader)
-{
-#if TRUST_NO_ONE
- assert(glIsProgram(shader->program));
- assert(batch->program_in_use == 0);
-#endif
- batch->interface = shader->interface;
- batch->program = shader->program;
- batch->vao_id = batch_vao_get(batch);
-}
+/* -------------------------------------------------------------------- */
+/** \name Uniform setters
+ *
+ * TODO(fclem) port this to GPUShader.
+ * \{ */
void GPU_batch_set_shader(GPUBatch *batch, GPUShader *shader)
{
- GPU_batch_set_shader_no_bind(batch, shader);
- GPU_batch_program_use_begin(batch); /* hack! to make Batch_Uniform* simpler */
-}
-
-void gpu_batch_remove_interface_ref(GPUBatch *batch, const GPUShaderInterface *interface)
-{
- if (batch->is_dynamic_vao_count) {
- for (int i = 0; i < batch->dynamic_vaos.count; i++) {
- if (batch->dynamic_vaos.interfaces[i] == interface) {
- GPU_vao_free(batch->dynamic_vaos.vao_ids[i], batch->context);
- batch->dynamic_vaos.vao_ids[i] = 0;
- batch->dynamic_vaos.interfaces[i] = NULL;
- break; /* cannot have duplicates */
- }
- }
- }
- else {
- int i;
- for (i = 0; i < GPU_BATCH_VAO_STATIC_LEN; i++) {
- if (batch->static_vaos.interfaces[i] == interface) {
- GPU_vao_free(batch->static_vaos.vao_ids[i], batch->context);
- batch->static_vaos.vao_ids[i] = 0;
- batch->static_vaos.interfaces[i] = NULL;
- break; /* cannot have duplicates */
- }
- }
- }
-}
-
-static void create_bindings(GPUVertBuf *verts,
- const GPUShaderInterface *interface,
- uint16_t *attr_mask,
- uint v_first,
- const bool use_instancing)
-{
- const GPUVertFormat *format = &verts->format;
-
- const uint attr_len = format->attr_len;
- uint stride = format->stride;
- uint offset = 0;
-
- GPU_vertbuf_use(verts);
-
- for (uint a_idx = 0; a_idx < attr_len; a_idx++) {
- const GPUVertAttr *a = &format->attrs[a_idx];
-
- if (format->deinterleaved) {
- offset += ((a_idx == 0) ? 0 : format->attrs[a_idx - 1].sz) * verts->vertex_len;
- stride = a->sz;
- }
- else {
- offset = a->offset;
- }
-
- const GLvoid *pointer = (const GLubyte *)0 + offset + v_first * stride;
- const GLenum type = convert_comp_type_to_gl(static_cast<GPUVertCompType>(a->comp_type));
-
- for (uint n_idx = 0; n_idx < a->name_len; n_idx++) {
- const char *name = GPU_vertformat_attr_name_get(format, a, n_idx);
- const GPUShaderInput *input = GPU_shaderinterface_attr(interface, name);
-
- if (input == NULL) {
- continue;
- }
-
- *attr_mask &= ~(1 << input->location);
-
- if (a->comp_len == 16 || a->comp_len == 12 || a->comp_len == 8) {
- BLI_assert(a->fetch_mode == GPU_FETCH_FLOAT);
- BLI_assert(a->comp_type == GPU_COMP_F32);
- for (int i = 0; i < a->comp_len / 4; i++) {
- glEnableVertexAttribArray(input->location + i);
- glVertexAttribDivisor(input->location + i, (use_instancing) ? 1 : 0);
- glVertexAttribPointer(
- input->location + i, 4, type, GL_FALSE, stride, (const GLubyte *)pointer + i * 16);
- }
- }
- else {
- glEnableVertexAttribArray(input->location);
- glVertexAttribDivisor(input->location, (use_instancing) ? 1 : 0);
-
- switch (a->fetch_mode) {
- case GPU_FETCH_FLOAT:
- case GPU_FETCH_INT_TO_FLOAT:
- glVertexAttribPointer(input->location, a->comp_len, type, GL_FALSE, stride, pointer);
- break;
- case GPU_FETCH_INT_TO_FLOAT_UNIT:
- glVertexAttribPointer(input->location, a->comp_len, type, GL_TRUE, stride, pointer);
- break;
- case GPU_FETCH_INT:
- glVertexAttribIPointer(input->location, a->comp_len, type, stride, pointer);
- break;
- }
- }
- }
- }
-}
-
-static void batch_update_program_bindings(GPUBatch *batch, uint i_first)
-{
- uint16_t attr_mask = batch->interface->enabled_attr_mask;
-
- /* Reverse order so first VBO'S have more prevalence (in term of attribute override). */
- for (int v = GPU_BATCH_VBO_MAX_LEN - 1; v > -1; v--) {
- if (batch->verts[v] != NULL) {
- create_bindings(batch->verts[v], batch->interface, &attr_mask, 0, false);
- }
- }
-
- for (int v = GPU_BATCH_INST_VBO_MAX_LEN - 1; v > -1; v--) {
- if (batch->inst[v]) {
- create_bindings(batch->inst[v], batch->interface, &attr_mask, i_first, true);
- }
- }
-
- if (attr_mask != 0 && GLEW_ARB_vertex_attrib_binding) {
- for (uint16_t mask = 1, a = 0; a < 16; a++, mask <<= 1) {
- if (attr_mask & mask) {
- /* This replaces glVertexAttrib4f(a, 0.0f, 0.0f, 0.0f, 1.0f); with a more modern style.
- * Fix issues for some drivers (see T75069). */
- glBindVertexBuffer(a, g_default_attr_vbo, (intptr_t)0, (intptr_t)0);
-
- glEnableVertexAttribArray(a);
- glVertexAttribFormat(a, 4, GL_FLOAT, GL_FALSE, 0);
- glVertexAttribBinding(a, a);
- }
- }
- }
-
- if (batch->elem) {
- GPU_indexbuf_use(batch->elem);
- }
-}
-
-void GPU_batch_program_use_begin(GPUBatch *batch)
-{
- /* NOTE: use_program & done_using_program are fragile, depend on staying in sync with
- * the GL context's active program.
- * use_program doesn't mark other programs as "not used". */
- /* TODO: make not fragile (somehow) */
-
- if (!batch->program_in_use) {
- glUseProgram(batch->program);
- batch->program_in_use = true;
- }
+ batch->shader = shader;
+ GPU_shader_bind(batch->shader);
}
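
Since GPU_batch_set_shader() now binds immediately, uniform setters can follow it directly; a sketch (the uniform name is an assumption):

    GPU_batch_set_shader(batch, shader);        /* Binds `shader` as a side effect. */
    GPU_batch_uniform_1f(batch, "alpha", 0.5f); /* Targets batch->shader. */
    GPU_batch_draw_advanced(batch, 0, 0, 0, 0); /* No extra bind needed. */
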
-void GPU_batch_program_use_end(GPUBatch *batch)
-{
- if (batch->program_in_use) {
-#if PROGRAM_NO_OPTI
- glUseProgram(0);
-#endif
- batch->program_in_use = false;
- }
-}
-
-#if TRUST_NO_ONE
-# define GET_UNIFORM \
- const GPUShaderInput *uniform = GPU_shaderinterface_uniform(batch->interface, name); \
- assert(uniform);
-#else
-# define GET_UNIFORM \
- const GPUShaderInput *uniform = GPU_shaderinterface_uniform(batch->interface, name);
-#endif
-
-void GPU_batch_uniform_1ui(GPUBatch *batch, const char *name, uint value)
-{
- GET_UNIFORM
- glUniform1ui(uniform->location, value);
-}
+#define GET_UNIFORM \
+ const GPUShaderInput *uniform = GPU_shaderinterface_uniform(batch->shader->interface, name); \
+ BLI_assert(uniform);
void GPU_batch_uniform_1i(GPUBatch *batch, const char *name, int value)
{
GET_UNIFORM
- glUniform1i(uniform->location, value);
+ GPU_shader_uniform_int(batch->shader, uniform->location, value);
}
void GPU_batch_uniform_1b(GPUBatch *batch, const char *name, bool value)
{
- GET_UNIFORM
- glUniform1i(uniform->location, value ? GL_TRUE : GL_FALSE);
+ GPU_batch_uniform_1i(batch, name, value ? GL_TRUE : GL_FALSE);
}
void GPU_batch_uniform_2f(GPUBatch *batch, const char *name, float x, float y)
{
- GET_UNIFORM
- glUniform2f(uniform->location, x, y);
+ const float data[2] = {x, y};
+ GPU_batch_uniform_2fv(batch, name, data);
}
void GPU_batch_uniform_3f(GPUBatch *batch, const char *name, float x, float y, float z)
{
- GET_UNIFORM
- glUniform3f(uniform->location, x, y, z);
+ const float data[3] = {x, y, z};
+ GPU_batch_uniform_3fv(batch, name, data);
}
void GPU_batch_uniform_4f(GPUBatch *batch, const char *name, float x, float y, float z, float w)
{
- GET_UNIFORM
- glUniform4f(uniform->location, x, y, z, w);
+ const float data[4] = {x, y, z, w};
+ GPU_batch_uniform_4fv(batch, name, data);
}
void GPU_batch_uniform_1f(GPUBatch *batch, const char *name, float x)
{
GET_UNIFORM
- glUniform1f(uniform->location, x);
+ GPU_shader_uniform_float(batch->shader, uniform->location, x);
}
void GPU_batch_uniform_2fv(GPUBatch *batch, const char *name, const float data[2])
{
GET_UNIFORM
- glUniform2fv(uniform->location, 1, data);
+ GPU_shader_uniform_vector(batch->shader, uniform->location, 2, 1, data);
}
void GPU_batch_uniform_3fv(GPUBatch *batch, const char *name, const float data[3])
{
GET_UNIFORM
- glUniform3fv(uniform->location, 1, data);
+ GPU_shader_uniform_vector(batch->shader, uniform->location, 3, 1, data);
}
void GPU_batch_uniform_4fv(GPUBatch *batch, const char *name, const float data[4])
{
GET_UNIFORM
- glUniform4fv(uniform->location, 1, data);
+ GPU_shader_uniform_vector(batch->shader, uniform->location, 4, 1, data);
}
void GPU_batch_uniform_2fv_array(GPUBatch *batch,
@@ -621,7 +290,7 @@ void GPU_batch_uniform_2fv_array(GPUBatch *batch,
const float *data)
{
GET_UNIFORM
- glUniform2fv(uniform->location, len, data);
+ GPU_shader_uniform_vector(batch->shader, uniform->location, 2, len, data);
}
void GPU_batch_uniform_4fv_array(GPUBatch *batch,
@@ -630,68 +299,48 @@ void GPU_batch_uniform_4fv_array(GPUBatch *batch,
const float *data)
{
GET_UNIFORM
- glUniform4fv(uniform->location, len, data);
+ GPU_shader_uniform_vector(batch->shader, uniform->location, 4, len, data);
}
void GPU_batch_uniform_mat4(GPUBatch *batch, const char *name, const float data[4][4])
{
GET_UNIFORM
- glUniformMatrix4fv(uniform->location, 1, GL_FALSE, (const float *)data);
+ GPU_shader_uniform_vector(batch->shader, uniform->location, 16, 1, (const float *)data);
}
-static void *elem_offset(const GPUIndexBuf *el, int v_first)
+/** \} */
+
+/* -------------------------------------------------------------------- */
+/** \name Drawing / Drawcall functions
+ * \{ */
+
+void GPU_batch_draw(GPUBatch *batch)
{
-#if GPU_TRACK_INDEX_RANGE
- if (el->index_type == GPU_INDEX_U16) {
- return (GLushort *)0 + v_first + el->index_start;
- }
-#endif
- return (GLuint *)0 + v_first + el->index_start;
+ GPU_shader_bind(batch->shader);
+ GPU_batch_draw_advanced(batch, 0, 0, 0, 0);
+ GPU_shader_unbind();
}
-/* Use when drawing with GPU_batch_draw_advanced */
-void GPU_batch_bind(GPUBatch *batch)
+void GPU_batch_draw_range(GPUBatch *batch, int v_first, int v_count)
{
- glBindVertexArray(batch->vao_id);
-
-#if GPU_TRACK_INDEX_RANGE
- /* Can be removed if GL 4.3 is required. */
- if (!GLEW_ARB_ES3_compatibility && batch->elem != NULL) {
- GLuint restart_index = (batch->elem->index_type == GPU_INDEX_U16) ? (GLuint)0xFFFF :
- (GLuint)0xFFFFFFFF;
- glPrimitiveRestartIndex(restart_index);
- }
-#endif
+ GPU_shader_bind(batch->shader);
+ GPU_batch_draw_advanced(batch, v_first, v_count, 0, 0);
+ GPU_shader_unbind();
}
-void GPU_batch_draw(GPUBatch *batch)
+/* Draw multiple instances of a batch without any instance attributes. */
+void GPU_batch_draw_instanced(GPUBatch *batch, int i_count)
{
-#if TRUST_NO_ONE
- assert(batch->phase == GPU_BATCH_READY_TO_DRAW);
- assert(batch->verts[0]->vbo_id != 0);
-#endif
- GPU_batch_program_use_begin(batch);
- GPU_matrix_bind(batch->interface); // external call.
- GPU_shader_set_srgb_uniform(batch->interface);
-
- GPU_batch_bind(batch);
- GPU_batch_draw_advanced(batch, 0, 0, 0, 0);
+ BLI_assert(batch->inst[0] == NULL);
- GPU_batch_program_use_end(batch);
+ GPU_shader_bind(batch->shader);
+ GPU_batch_draw_advanced(batch, 0, 0, 0, i_count);
+ GPU_shader_unbind();
}
-#if GPU_TRACK_INDEX_RANGE
-# define BASE_INDEX(el) ((el)->base_index)
-# define INDEX_TYPE(el) ((el)->gl_index_type)
-#else
-# define BASE_INDEX(el) 0
-# define INDEX_TYPE(el) GL_UNSIGNED_INT
-#endif
-
void GPU_batch_draw_advanced(GPUBatch *batch, int v_first, int v_count, int i_first, int i_count)
{
- BLI_assert(batch->program_in_use);
- /* TODO could assert that VAO is bound. */
+ BLI_assert(GPU_context_active_get()->shader != NULL);
if (v_count == 0) {
v_count = (batch->elem) ? batch->elem->index_len : batch->verts[0]->vertex_len;
@@ -699,8 +348,8 @@ void GPU_batch_draw_advanced(GPUBatch *batch, int v_first, int v_count, int i_fi
if (i_count == 0) {
i_count = (batch->inst[0]) ? batch->inst[0]->vertex_len : 1;
/* Meh. This is to be able to use different numbers of verts in instance vbos. */
- if (batch->inst[1] && i_count > batch->inst[1]->vertex_len) {
- i_count = batch->inst[1]->vertex_len;
+ if (batch->inst[1] != NULL) {
+ i_count = min_ii(i_count, batch->inst[1]->vertex_len);
}
}
@@ -709,76 +358,7 @@ void GPU_batch_draw_advanced(GPUBatch *batch, int v_first, int v_count, int i_fi
return;
}
- /* Verify there is enough data do draw. */
- /* TODO(fclem) Nice to have but this is invalid when using procedural draw-calls.
- * The right assert would be to check if there is an enabled attribute from each VBO
- * and check their length. */
- // BLI_assert(i_first + i_count <= (batch->inst ? batch->inst->vertex_len : INT_MAX));
- // BLI_assert(v_first + v_count <=
- // (batch->elem ? batch->elem->index_len : batch->verts[0]->vertex_len));
-
-#ifdef __APPLE__
- GLuint vao = 0;
-#endif
-
- if (!GPU_arb_base_instance_is_supported()) {
- if (i_first > 0) {
-#ifdef __APPLE__
- /**
- * There seems to be a nasty bug when drawing using the same VAO reconfiguring. (see T71147)
- * We just use a throwaway VAO for that. Note that this is likely to degrade performance.
- **/
- glGenVertexArrays(1, &vao);
- glBindVertexArray(vao);
-#else
- /* If using offset drawing with instancing, we must
- * use the default VAO and redo bindings. */
- glBindVertexArray(GPU_vao_default());
-#endif
- batch_update_program_bindings(batch, i_first);
- }
- else {
- /* Previous call could have bind the default vao
- * see above. */
- glBindVertexArray(batch->vao_id);
- }
- }
-
- if (batch->elem) {
- const GPUIndexBuf *el = batch->elem;
- GLenum index_type = INDEX_TYPE(el);
- GLint base_index = BASE_INDEX(el);
- void *v_first_ofs = elem_offset(el, v_first);
-
- if (GPU_arb_base_instance_is_supported()) {
- glDrawElementsInstancedBaseVertexBaseInstance(
- batch->gl_prim_type, v_count, index_type, v_first_ofs, i_count, base_index, i_first);
- }
- else {
- glDrawElementsInstancedBaseVertex(
- batch->gl_prim_type, v_count, index_type, v_first_ofs, i_count, base_index);
- }
- }
- else {
-#ifdef __APPLE__
- glDisable(GL_PRIMITIVE_RESTART);
-#endif
- if (GPU_arb_base_instance_is_supported()) {
- glDrawArraysInstancedBaseInstance(batch->gl_prim_type, v_first, v_count, i_count, i_first);
- }
- else {
- glDrawArraysInstanced(batch->gl_prim_type, v_first, v_count, i_count);
- }
-#ifdef __APPLE__
- glEnable(GL_PRIMITIVE_RESTART);
-#endif
- }
-
-#ifdef __APPLE__
- if (vao != 0) {
- glDeleteVertexArrays(1, &vao);
- }
-#endif
+ static_cast<Batch *>(batch)->draw(v_first, v_count, i_first, i_count);
}
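
Zero counts mean "use the full range", which the function derives from the element buffer or the first VBO; a sketch:

    GPU_shader_bind(batch->shader);
    GPU_batch_draw_advanced(batch, 0, 0, 0, 0); /* All verts, all instances. */
    GPU_batch_draw_advanced(batch, 6, 3, 0, 0); /* One triangle starting at vert 6. */
    GPU_shader_unbind();
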
/* just draw some vertices and let shader place them where we want. */
@@ -795,191 +375,6 @@ void GPU_draw_primitive(GPUPrimType prim_type, int v_count)
// glBindVertexArray(0);
}
-/* -------------------------------------------------------------------- */
-/** \name Indirect Draw Calls
- * \{ */
-
-#if 0
-# define USE_MULTI_DRAW_INDIRECT 0
-#else
-# define USE_MULTI_DRAW_INDIRECT \
- (GL_ARB_multi_draw_indirect && GPU_arb_base_instance_is_supported())
-#endif
-
-typedef struct GPUDrawCommand {
- uint v_count;
- uint i_count;
- uint v_first;
- uint i_first;
-} GPUDrawCommand;
-
-typedef struct GPUDrawCommandIndexed {
- uint v_count;
- uint i_count;
- uint v_first;
- uint base_index;
- uint i_first;
-} GPUDrawCommandIndexed;
-
-struct GPUDrawList {
- GPUBatch *batch;
- uint base_index; /* Avoid dereferencing batch. */
- uint cmd_offset; /* in bytes, offset inside indirect command buffer. */
- uint cmd_len; /* Number of used command for the next call. */
- uint buffer_size; /* in bytes, size of indirect command buffer. */
- GLuint buffer_id; /* Draw Indirect Buffer id */
- union {
- GPUDrawCommand *commands;
- GPUDrawCommandIndexed *commands_indexed;
- };
-};
-
-GPUDrawList *GPU_draw_list_create(int length)
-{
- GPUDrawList *list = (GPUDrawList *)MEM_callocN(sizeof(GPUDrawList), "GPUDrawList");
- /* Alloc the biggest possible command list which is indexed. */
- list->buffer_size = sizeof(GPUDrawCommandIndexed) * length;
- if (USE_MULTI_DRAW_INDIRECT) {
- list->buffer_id = GPU_buf_alloc();
- glBindBuffer(GL_DRAW_INDIRECT_BUFFER, list->buffer_id);
- glBufferData(GL_DRAW_INDIRECT_BUFFER, list->buffer_size, NULL, GL_DYNAMIC_DRAW);
- }
- else {
- list->commands = (GPUDrawCommand *)MEM_mallocN(list->buffer_size, "GPUDrawList data");
- }
- return list;
-}
-
-void GPU_draw_list_discard(GPUDrawList *list)
-{
- if (list->buffer_id) {
- GPU_buf_free(list->buffer_id);
- }
- else {
- MEM_SAFE_FREE(list->commands);
- }
- MEM_freeN(list);
-}
-
-void GPU_draw_list_init(GPUDrawList *list, GPUBatch *batch)
-{
- BLI_assert(batch->phase == GPU_BATCH_READY_TO_DRAW);
- list->batch = batch;
- list->base_index = batch->elem ? BASE_INDEX(batch->elem) : UINT_MAX;
- list->cmd_len = 0;
-
- if (USE_MULTI_DRAW_INDIRECT) {
- if (list->commands == NULL) {
- glBindBuffer(GL_DRAW_INDIRECT_BUFFER, list->buffer_id);
- if (list->cmd_offset >= list->buffer_size) {
- /* Orphan buffer data and start fresh. */
- glBufferData(GL_DRAW_INDIRECT_BUFFER, list->buffer_size, NULL, GL_DYNAMIC_DRAW);
- list->cmd_offset = 0;
- }
- GLenum flags = GL_MAP_WRITE_BIT | GL_MAP_UNSYNCHRONIZED_BIT | GL_MAP_FLUSH_EXPLICIT_BIT;
- list->commands = (GPUDrawCommand *)glMapBufferRange(
- GL_DRAW_INDIRECT_BUFFER, list->cmd_offset, list->buffer_size - list->cmd_offset, flags);
- }
- }
- else {
- list->cmd_offset = 0;
- }
-}
-
-void GPU_draw_list_command_add(
- GPUDrawList *list, int v_first, int v_count, int i_first, int i_count)
-{
- BLI_assert(list->commands);
-
- if (v_count == 0 || i_count == 0) {
- return;
- }
-
- if (list->base_index != UINT_MAX) {
- GPUDrawCommandIndexed *cmd = list->commands_indexed + list->cmd_len;
- cmd->v_first = v_first;
- cmd->v_count = v_count;
- cmd->i_count = i_count;
- cmd->base_index = list->base_index;
- cmd->i_first = i_first;
- }
- else {
- GPUDrawCommand *cmd = list->commands + list->cmd_len;
- cmd->v_first = v_first;
- cmd->v_count = v_count;
- cmd->i_count = i_count;
- cmd->i_first = i_first;
- }
-
- list->cmd_len++;
- uint offset = list->cmd_offset + list->cmd_len * sizeof(GPUDrawCommandIndexed);
-
- if (offset == list->buffer_size) {
- GPU_draw_list_submit(list);
- GPU_draw_list_init(list, list->batch);
- }
-}
-
-void GPU_draw_list_submit(GPUDrawList *list)
-{
- GPUBatch *batch = list->batch;
-
- if (list->cmd_len == 0) {
- return;
- }
-
- BLI_assert(list->commands);
- BLI_assert(batch->program_in_use);
- /* TODO could assert that VAO is bound. */
-
- /* TODO We loose a bit of memory here if we only draw arrays. Fix that. */
- uintptr_t offset = list->cmd_offset;
- uint cmd_len = list->cmd_len;
- size_t bytes_used = cmd_len * sizeof(GPUDrawCommandIndexed);
- list->cmd_len = 0; /* Avoid reuse. */
-
- /* Only do multi-draw indirect if doing more than 2 drawcall.
- * This avoids the overhead of buffer mapping if scene is
- * not very instance friendly.
- * BUT we also need to take into account the case where only
- * a few instances are needed to finish filling a call buffer. */
- const bool do_mdi = (cmd_len > 2) || (list->cmd_offset + bytes_used == list->buffer_size);
-
- if (USE_MULTI_DRAW_INDIRECT && do_mdi) {
- GLenum prim = batch->gl_prim_type;
-
- glBindBuffer(GL_DRAW_INDIRECT_BUFFER, list->buffer_id);
- glFlushMappedBufferRange(GL_DRAW_INDIRECT_BUFFER, 0, bytes_used);
- glUnmapBuffer(GL_DRAW_INDIRECT_BUFFER);
- list->commands = NULL; /* Unmapped */
- list->cmd_offset += bytes_used;
-
- if (batch->elem) {
- glMultiDrawElementsIndirect(prim, INDEX_TYPE(batch->elem), (void *)offset, cmd_len, 0);
- }
- else {
- glMultiDrawArraysIndirect(prim, (void *)offset, cmd_len, 0);
- }
- }
- else {
- /* Fallback */
- if (batch->elem) {
- GPUDrawCommandIndexed *cmd = list->commands_indexed;
- for (int i = 0; i < cmd_len; i++, cmd++) {
- /* Index start was added by Draw manager. Avoid counting it twice. */
- cmd->v_first -= batch->elem->index_start;
- GPU_batch_draw_advanced(batch, cmd->v_first, cmd->v_count, cmd->i_first, cmd->i_count);
- }
- }
- else {
- GPUDrawCommand *cmd = list->commands;
- for (int i = 0; i < cmd_len; i++, cmd++) {
- GPU_batch_draw_advanced(batch, cmd->v_first, cmd->v_count, cmd->i_first, cmd->i_count);
- }
- }
- }
-}
-
/** \} */
/* -------------------------------------------------------------------- */
@@ -1015,23 +410,11 @@ void GPU_batch_program_set_imm_shader(GPUBatch *batch)
void gpu_batch_init(void)
{
- if (g_default_attr_vbo == 0) {
- g_default_attr_vbo = GPU_buf_alloc();
-
- float default_attrib_data[4] = {0.0f, 0.0f, 0.0f, 1.0f};
- glBindBuffer(GL_ARRAY_BUFFER, g_default_attr_vbo);
- glBufferData(GL_ARRAY_BUFFER, sizeof(float[4]), default_attrib_data, GL_STATIC_DRAW);
- glBindBuffer(GL_ARRAY_BUFFER, 0);
- }
-
gpu_batch_presets_init();
}
void gpu_batch_exit(void)
{
- GPU_buf_free(g_default_attr_vbo);
- g_default_attr_vbo = 0;
-
gpu_batch_presets_exit();
}
diff --git a/source/blender/gpu/intern/gpu_batch_presets.c b/source/blender/gpu/intern/gpu_batch_presets.c
index 3d9b4326c7e..71c971d8656 100644
--- a/source/blender/gpu/intern/gpu_batch_presets.c
+++ b/source/blender/gpu/intern/gpu_batch_presets.c
@@ -380,18 +380,6 @@ bool gpu_batch_presets_unregister(GPUBatch *preset_batch)
return false;
}
-void gpu_batch_presets_reset(void)
-{
- BLI_mutex_lock(&g_presets_3d.mutex);
- /* Reset vao caches for these every time we switch opengl context.
- * This way they will draw correctly for each window. */
- LISTBASE_FOREACH (LinkData *, link, &presets_list) {
- GPUBatch *preset = link->data;
- GPU_batch_vao_cache_clear(preset);
- }
- BLI_mutex_unlock(&g_presets_3d.mutex);
-}
-
void gpu_batch_presets_exit(void)
{
LinkData *link;
@@ -404,17 +392,4 @@ void gpu_batch_presets_exit(void)
BLI_mutex_end(&g_presets_3d.mutex);
}
-/**
- * This function only needs to be accessed externally because
- * we are drawing UI batches with the DRW old context.
- *
- * And now we use it for drawing the entire area.
- *
- * XXX (Clément) - to cleanup in the upcoming 2.91 refactor.
- **/
-void GPU_batch_presets_reset()
-{
- gpu_batch_presets_reset();
-}
-
/** \} */
diff --git a/source/blender/gpu/intern/gpu_batch_private.h b/source/blender/gpu/intern/gpu_batch_private.hh
index 93745b9ca9b..3a8044efc1d 100644
--- a/source/blender/gpu/intern/gpu_batch_private.h
+++ b/source/blender/gpu/intern/gpu_batch_private.hh
@@ -30,12 +30,16 @@
#include "GPU_context.h"
#include "GPU_shader_interface.h"
-#ifdef __cplusplus
-extern "C" {
-#endif
+namespace blender {
+namespace gpu {
-void gpu_batch_remove_interface_ref(GPUBatch *batch, const GPUShaderInterface *interface);
+class Batch : public GPUBatch {
+ public:
+ Batch(){};
+ virtual ~Batch(){};
-#ifdef __cplusplus
-}
-#endif
+ virtual void draw(int v_first, int v_count, int i_first, int i_count) = 0;
+};
+
+} // namespace gpu
+} // namespace blender
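
A backend subclass of the new abstract class then only declares the draw entry point (the actual GLBatch lives in gl_batch.hh; the extra member noted here is an assumption):

    class GLBatch : public Batch {
     public:
      void draw(int v_first, int v_count, int i_first, int i_count) override;
      /* Backend-specific VAO caching state would live here. */
    };
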
diff --git a/source/blender/gpu/intern/gpu_context.cc b/source/blender/gpu/intern/gpu_context.cc
index 283784aec20..e04631910c1 100644
--- a/source/blender/gpu/intern/gpu_context.cc
+++ b/source/blender/gpu/intern/gpu_context.cc
@@ -40,7 +40,7 @@
#include "GHOST_C-api.h"
#include "gpu_backend.hh"
-#include "gpu_batch_private.h"
+#include "gpu_batch_private.hh"
#include "gpu_context_private.hh"
#include "gpu_matrix_private.h"
@@ -83,12 +83,12 @@ bool GPUContext::is_active_on_thread(void)
GPUContext *GPU_context_create(void *ghost_window)
{
- if (gpu_backend_get() == NULL) {
+ if (GPUBackend::get() == NULL) {
/* TODO move where it make sense. */
GPU_backend_init(GPU_BACKEND_OPENGL);
}
- GPUContext *ctx = gpu_backend_get()->context_alloc(ghost_window);
+ GPUContext *ctx = GPUBackend::get()->context_alloc(ghost_window);
GPU_context_active_set(ctx);
return ctx;
@@ -173,14 +173,14 @@ void GPU_fbo_free(GLuint fbo_id, GPUContext *ctx)
void GPU_buf_free(GLuint buf_id)
{
/* TODO avoid using backend */
- GPUBackend *backend = gpu_backend_get();
+ GPUBackend *backend = GPUBackend::get();
static_cast<GLBackend *>(backend)->buf_free(buf_id);
}
void GPU_tex_free(GLuint tex_id)
{
/* TODO avoid using backend */
- GPUBackend *backend = gpu_backend_get();
+ GPUBackend *backend = GPUBackend::get();
static_cast<GLBackend *>(backend)->tex_free(tex_id);
}
@@ -188,18 +188,6 @@ void GPU_tex_free(GLuint tex_id)
* which are not shared across contexts. So we need to keep track of
* ownership. */
-void gpu_context_add_batch(GPUContext *ctx, GPUBatch *batch)
-{
- BLI_assert(ctx);
- static_cast<GLContext *>(ctx)->batch_register(batch);
-}
-
-void gpu_context_remove_batch(GPUContext *ctx, GPUBatch *batch)
-{
- BLI_assert(ctx);
- static_cast<GLContext *>(ctx)->batch_unregister(batch);
-}
-
void gpu_context_add_framebuffer(GPUContext *ctx, GPUFrameBuffer *fb)
{
#ifdef DEBUG
@@ -285,7 +273,7 @@ void GPU_backend_exit(void)
delete g_backend;
}
-GPUBackend *gpu_backend_get(void)
+GPUBackend *GPUBackend::get(void)
{
return g_backend;
}
diff --git a/source/blender/gpu/intern/gpu_context_private.hh b/source/blender/gpu/intern/gpu_context_private.hh
index d369dbe7402..3f9fca16ff7 100644
--- a/source/blender/gpu/intern/gpu_context_private.hh
+++ b/source/blender/gpu/intern/gpu_context_private.hh
@@ -41,6 +41,7 @@ struct GPUMatrixState;
struct GPUContext {
public:
/** State managment */
+ GPUShader *shader = NULL;
GPUFrameBuffer *current_fbo = NULL;
GPUMatrixState *matrix_state = NULL;
@@ -77,9 +78,6 @@ void GPU_tex_free(GLuint tex_id);
void GPU_vao_free(GLuint vao_id, GPUContext *ctx);
void GPU_fbo_free(GLuint fbo_id, GPUContext *ctx);
-void gpu_context_add_batch(GPUContext *ctx, GPUBatch *batch);
-void gpu_context_remove_batch(GPUContext *ctx, GPUBatch *batch);
-
void gpu_context_add_framebuffer(GPUContext *ctx, struct GPUFrameBuffer *fb);
void gpu_context_remove_framebuffer(GPUContext *ctx, struct GPUFrameBuffer *fb);
diff --git a/source/blender/gpu/intern/gpu_drawlist.cc b/source/blender/gpu/intern/gpu_drawlist.cc
new file mode 100644
index 00000000000..7b807a2fa80
--- /dev/null
+++ b/source/blender/gpu/intern/gpu_drawlist.cc
@@ -0,0 +1,59 @@
+/*
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * The Original Code is Copyright (C) 2016 by Mike Erwin.
+ * All rights reserved.
+ */
+
+/** \file
+ * \ingroup gpu
+ *
+ * Implementation of Multi Draw Indirect.
+ */
+
+#include "MEM_guardedalloc.h"
+
+#include "GPU_batch.h"
+#include "GPU_drawlist.h"
+
+#include "gpu_backend.hh"
+
+#include "gpu_drawlist_private.hh"
+
+using namespace blender::gpu;
+
+GPUDrawList GPU_draw_list_create(int list_length)
+{
+ DrawList *list_ptr = GPUBackend::get()->drawlist_alloc(list_length);
+ return reinterpret_cast<GPUDrawList>(list_ptr);
+}
+
+void GPU_draw_list_discard(GPUDrawList list)
+{
+ DrawList *list_ptr = reinterpret_cast<DrawList *>(list);
+ delete list_ptr;
+}
+
+void GPU_draw_list_append(GPUDrawList list, GPUBatch *batch, int i_first, int i_count)
+{
+ DrawList *list_ptr = reinterpret_cast<DrawList *>(list);
+ list_ptr->append(batch, i_first, i_count);
+}
+
+void GPU_draw_list_submit(GPUDrawList list)
+{
+ DrawList *list_ptr = reinterpret_cast<DrawList *>(list);
+ list_ptr->submit();
+}
diff --git a/source/blender/gpu/intern/gpu_drawlist_private.hh b/source/blender/gpu/intern/gpu_drawlist_private.hh
new file mode 100644
index 00000000000..04cc18a5ffd
--- /dev/null
+++ b/source/blender/gpu/intern/gpu_drawlist_private.hh
@@ -0,0 +1,40 @@
+/*
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * The Original Code is Copyright (C) 2020 Blender Foundation.
+ * All rights reserved.
+ */
+
+/** \file
+ * \ingroup gpu
+ */
+
+#pragma once
+
+#include "MEM_guardedalloc.h"
+
+namespace blender {
+namespace gpu {
+
+class DrawList {
+ public:
+ virtual ~DrawList(){};
+
+ virtual void append(GPUBatch *batch, int i_first, int i_count) = 0;
+ virtual void submit() = 0;
+};
+
+} // namespace gpu
+} // namespace blender
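
A minimal fallback-flavored sketch of an implementation (purely illustrative; the real GL version buffers commands for multi-draw-indirect):

    class ImmediateDrawList : public DrawList {
     public:
      void append(GPUBatch *batch, int i_first, int i_count) override
      {
        /* Fallback behavior: issue the draw-call right away. */
        GPU_batch_draw_advanced(batch, 0, 0, i_first, i_count);
      }
      void submit() override
      {
        /* Nothing buffered, nothing to do. */
      }
    };
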
diff --git a/source/blender/gpu/intern/gpu_element.cc b/source/blender/gpu/intern/gpu_element.cc
index cf7cc1d214c..29c95c725fd 100644
--- a/source/blender/gpu/intern/gpu_element.cc
+++ b/source/blender/gpu/intern/gpu_element.cc
@@ -326,6 +326,11 @@ static void squeeze_indices_short(GPUIndexBufBuilder *builder,
#endif /* GPU_TRACK_INDEX_RANGE */
+GPUIndexBuf *GPU_indexbuf_calloc(void)
+{
+ return (GPUIndexBuf *)MEM_callocN(sizeof(GPUIndexBuf), __func__);
+}
+
GPUIndexBuf *GPU_indexbuf_build(GPUIndexBufBuilder *builder)
{
GPUIndexBuf *elem = (GPUIndexBuf *)MEM_callocN(sizeof(GPUIndexBuf), "GPUIndexBuf");
diff --git a/source/blender/gpu/intern/gpu_immediate.cc b/source/blender/gpu/intern/gpu_immediate.cc
index 9cededa54f7..2d137c2f21c 100644
--- a/source/blender/gpu/intern/gpu_immediate.cc
+++ b/source/blender/gpu/intern/gpu_immediate.cc
@@ -171,12 +171,8 @@ void immBindBuiltinProgram(eGPUBuiltinShader shader_id)
void immUnbindProgram(void)
{
-#if TRUST_NO_ONE
- assert(imm.bound_program != NULL);
-#endif
-#if PROGRAM_NO_OPTI
- glUseProgram(0);
-#endif
+ BLI_assert(imm.bound_program != NULL);
+ GPU_shader_unbind();
imm.bound_program = NULL;
}
@@ -321,7 +317,7 @@ GPUBatch *immBeginBatch(GPUPrimType prim_type, uint vertex_len)
imm.vertex_data = verts->data;
imm.batch = GPU_batch_create_ex(prim_type, verts, NULL, GPU_BATCH_OWNS_VBO);
- imm.batch->phase = GPU_BATCH_BUILDING;
+ imm.batch->flag |= GPU_BATCH_BUILDING;
return imm.batch;
}
@@ -423,7 +419,7 @@ void immEnd(void)
/* TODO: resize only if vertex count is much smaller */
}
GPU_batch_set_shader(imm.batch, imm.bound_program);
- imm.batch->phase = GPU_BATCH_READY_TO_DRAW;
+ imm.batch->flag &= ~GPU_BATCH_BUILDING;
imm.batch = NULL; /* don't free, batch belongs to caller */
}
else {
diff --git a/source/blender/gpu/intern/gpu_shader.cc b/source/blender/gpu/intern/gpu_shader.cc
index 03b7d5402f5..7a44efce7fb 100644
--- a/source/blender/gpu/intern/gpu_shader.cc
+++ b/source/blender/gpu/intern/gpu_shader.cc
@@ -42,6 +42,7 @@
#include "GPU_texture.h"
#include "GPU_uniformbuffer.h"
+#include "gpu_context_private.hh"
#include "gpu_shader_private.h"
extern "C" char datatoc_gpu_shader_colorspace_lib_glsl[];
@@ -258,38 +259,6 @@ GPUShader *GPU_shader_create_from_python(const char *vertexcode,
return sh;
}
-GPUShader *GPU_shader_load_from_binary(const char *binary,
- const int binary_format,
- const int binary_len,
- const char *shname)
-{
- BLI_assert(GL_ARB_get_program_binary);
- int success;
- int program = glCreateProgram();
-
- glProgramBinary(program, binary_format, binary, binary_len);
- glGetProgramiv(program, GL_LINK_STATUS, &success);
-
- if (success) {
- glUseProgram(program);
-
- GPUShader *shader = (GPUShader *)MEM_callocN(sizeof(*shader), __func__);
- shader->interface = GPU_shaderinterface_create(program);
- shader->program = program;
-
-#ifndef NDEBUG
- BLI_snprintf(shader->name, sizeof(shader->name), "%s_%u", shname, g_shaderid++);
-#else
- UNUSED_VARS(shname);
-#endif
-
- return shader;
- }
-
- glDeleteProgram(program);
- return NULL;
-}
-
GPUShader *GPU_shader_create_ex(const char *vertexcode,
const char *fragcode,
const char *geocode,
@@ -598,14 +567,27 @@ void GPU_shader_bind(GPUShader *shader)
{
BLI_assert(shader && shader->program);
- glUseProgram(shader->program);
- GPU_matrix_bind(shader->interface);
- GPU_shader_set_srgb_uniform(shader->interface);
+ GPUContext *ctx = GPU_context_active_get();
+
+ if (ctx->shader != shader) {
+ ctx->shader = shader;
+ glUseProgram(shader->program);
+ GPU_matrix_bind(shader->interface);
+ GPU_shader_set_srgb_uniform(shader->interface);
+ }
+
+ if (GPU_matrix_dirty_get()) {
+ GPU_matrix_bind(shader->interface);
+ }
}
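
The context-level cache makes redundant binds cheap; a sketch of the effect:

    GPU_shader_bind(sh); /* glUseProgram + matrix + sRGB uniform. */
    GPU_shader_bind(sh); /* Early-out: ctx->shader == sh; matrices rebound only if dirty. */
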
void GPU_shader_unbind(void)
{
+#ifndef NDEBUG
+ GPUContext *ctx = GPU_context_active_get();
+ ctx->shader = NULL;
glUseProgram(0);
+#endif
}
/** \} */
@@ -709,38 +691,12 @@ int GPU_shader_get_program(GPUShader *shader)
return (int)shader->program;
}
-char *GPU_shader_get_binary(GPUShader *shader, uint *r_binary_format, int *r_binary_len)
-{
- BLI_assert(GLEW_ARB_get_program_binary);
- char *r_binary;
- int binary_len = 0;
-
- glGetProgramiv(shader->program, GL_PROGRAM_BINARY_LENGTH, &binary_len);
- r_binary = (char *)MEM_mallocN(binary_len, __func__);
- glGetProgramBinary(shader->program, binary_len, NULL, r_binary_format, r_binary);
-
- if (r_binary_len) {
- *r_binary_len = binary_len;
- }
-
- return r_binary;
-}
-
/** \} */
/* -------------------------------------------------------------------- */
/** \name Uniforms setters
* \{ */
-void GPU_shader_uniform_float(GPUShader *UNUSED(shader), int location, float value)
-{
- if (location == -1) {
- return;
- }
-
- glUniform1f(location, value);
-}
-
void GPU_shader_uniform_vector(
GPUShader *UNUSED(shader), int location, int length, int arraysize, const float *value)
{
@@ -773,22 +729,9 @@ void GPU_shader_uniform_vector(
}
}
-void GPU_shader_uniform_int(GPUShader *UNUSED(shader), int location, int value)
-{
- if (location == -1) {
- return;
- }
-
- glUniform1i(location, value);
-}
-
void GPU_shader_uniform_vector_int(
GPUShader *UNUSED(shader), int location, int length, int arraysize, const int *value)
{
- if (location == -1) {
- return;
- }
-
switch (length) {
case 1:
glUniform1iv(location, arraysize, value);
@@ -808,6 +751,91 @@ void GPU_shader_uniform_vector_int(
}
}
+void GPU_shader_uniform_int(GPUShader *shader, int location, int value)
+{
+ GPU_shader_uniform_vector_int(shader, location, 1, 1, &value);
+}
+
+void GPU_shader_uniform_float(GPUShader *shader, int location, float value)
+{
+ GPU_shader_uniform_vector(shader, location, 1, 1, &value);
+}
+
+#define GET_UNIFORM \
+ const GPUShaderInput *uniform = GPU_shaderinterface_uniform(sh->interface, name); \
+ BLI_assert(uniform);
+
+void GPU_shader_uniform_1i(GPUShader *sh, const char *name, int value)
+{
+ GET_UNIFORM
+ GPU_shader_uniform_int(sh, uniform->location, value);
+}
+
+void GPU_shader_uniform_1b(GPUShader *sh, const char *name, bool value)
+{
+ GPU_shader_uniform_1i(sh, name, value ? 1 : 0);
+}
+
+void GPU_shader_uniform_2f(GPUShader *sh, const char *name, float x, float y)
+{
+ const float data[2] = {x, y};
+ GPU_shader_uniform_2fv(sh, name, data);
+}
+
+void GPU_shader_uniform_3f(GPUShader *sh, const char *name, float x, float y, float z)
+{
+ const float data[3] = {x, y, z};
+ GPU_shader_uniform_3fv(sh, name, data);
+}
+
+void GPU_shader_uniform_4f(GPUShader *sh, const char *name, float x, float y, float z, float w)
+{
+ const float data[4] = {x, y, z, w};
+ GPU_shader_uniform_4fv(sh, name, data);
+}
+
+void GPU_shader_uniform_1f(GPUShader *sh, const char *name, float x)
+{
+ GET_UNIFORM
+ GPU_shader_uniform_float(sh, uniform->location, x);
+}
+
+void GPU_shader_uniform_2fv(GPUShader *sh, const char *name, const float data[2])
+{
+ GET_UNIFORM
+ GPU_shader_uniform_vector(sh, uniform->location, 2, 1, data);
+}
+
+void GPU_shader_uniform_3fv(GPUShader *sh, const char *name, const float data[3])
+{
+ GET_UNIFORM
+ GPU_shader_uniform_vector(sh, uniform->location, 3, 1, data);
+}
+
+void GPU_shader_uniform_4fv(GPUShader *sh, const char *name, const float data[4])
+{
+ GET_UNIFORM
+ GPU_shader_uniform_vector(sh, uniform->location, 4, 1, data);
+}
+
+void GPU_shader_uniform_mat4(GPUShader *sh, const char *name, const float data[4][4])
+{
+ GET_UNIFORM
+ GPU_shader_uniform_vector(sh, uniform->location, 16, 1, (const float *)data);
+}
+
+void GPU_shader_uniform_2fv_array(GPUShader *sh, const char *name, int len, const float (*val)[2])
+{
+ GET_UNIFORM
+ GPU_shader_uniform_vector(sh, uniform->location, 2, len, (const float *)val);
+}
+
+void GPU_shader_uniform_4fv_array(GPUShader *sh, const char *name, int len, const float (*val)[4])
+{
+ GET_UNIFORM
+ GPU_shader_uniform_vector(sh, uniform->location, 4, len, (const float *)val);
+}
+
/** \} */
/* -------------------------------------------------------------------- */
diff --git a/source/blender/gpu/intern/gpu_shader_interface.cc b/source/blender/gpu/intern/gpu_shader_interface.cc
index 4511d4a199d..ef90dde1877 100644
--- a/source/blender/gpu/intern/gpu_shader_interface.cc
+++ b/source/blender/gpu/intern/gpu_shader_interface.cc
@@ -32,9 +32,11 @@
#include "GPU_shader_interface.h"
-#include "gpu_batch_private.h"
+#include "gpu_batch_private.hh"
#include "gpu_context_private.hh"
+#include "gl_batch.hh"
+
#include <stddef.h>
#include <stdlib.h>
#include <string.h>
@@ -45,6 +47,8 @@
# include <stdio.h>
#endif
+using namespace blender::gpu;
+
static const char *BuiltinUniform_name(GPUUniformBuiltin u)
{
switch (u) {
@@ -400,8 +404,8 @@ GPUShaderInterface *GPU_shaderinterface_create(int32_t program)
/* Batches ref buffer */
shaderface->batches_len = GPU_SHADERINTERFACE_REF_ALLOC_COUNT;
- shaderface->batches = (GPUBatch **)MEM_callocN(shaderface->batches_len * sizeof(GPUBatch *),
- "GPUShaderInterface batches");
+ shaderface->batches = (void **)MEM_callocN(shaderface->batches_len * sizeof(GPUBatch *),
+ "GPUShaderInterface batches");
MEM_freeN(uniforms_from_blocks);
MEM_freeN(inputs_tmp);
@@ -468,7 +472,8 @@ void GPU_shaderinterface_discard(GPUShaderInterface *shaderface)
/* Remove this interface from all linked Batches vao cache. */
for (int i = 0; i < shaderface->batches_len; i++) {
if (shaderface->batches[i] != NULL) {
- gpu_batch_remove_interface_ref(shaderface->batches[i], shaderface);
+      /* XXX GL specific. To be removed during the refactor. */
+ reinterpret_cast<GLVaoCache *>(shaderface->batches[i])->remove(shaderface);
}
}
MEM_freeN(shaderface->batches);
@@ -511,7 +516,7 @@ int32_t GPU_shaderinterface_block_builtin(const GPUShaderInterface *shaderface,
return shaderface->builtin_blocks[builtin];
}
-void GPU_shaderinterface_add_batch_ref(GPUShaderInterface *shaderface, GPUBatch *batch)
+void GPU_shaderinterface_add_batch_ref(GPUShaderInterface *shaderface, void *batch)
{
int i; /* find first unused slot */
for (i = 0; i < shaderface->batches_len; i++) {
@@ -523,13 +528,14 @@ void GPU_shaderinterface_add_batch_ref(GPUShaderInterface *shaderface, GPUBatch
/* Not enough place, realloc the array. */
i = shaderface->batches_len;
shaderface->batches_len += GPU_SHADERINTERFACE_REF_ALLOC_COUNT;
- shaderface->batches = (GPUBatch **)MEM_recallocN(shaderface->batches,
- sizeof(GPUBatch *) * shaderface->batches_len);
+ shaderface->batches = (void **)MEM_recallocN(shaderface->batches,
+ sizeof(void *) * shaderface->batches_len);
}
- shaderface->batches[i] = batch;
+  /* XXX TODO: cleanup. */
+ shaderface->batches[i] = reinterpret_cast<void *>(batch);
}
-void GPU_shaderinterface_remove_batch_ref(GPUShaderInterface *shaderface, GPUBatch *batch)
+void GPU_shaderinterface_remove_batch_ref(GPUShaderInterface *shaderface, void *batch)
{
for (int i = 0; i < shaderface->batches_len; i++) {
if (shaderface->batches[i] == batch) {
diff --git a/source/blender/gpu/intern/gpu_vertex_buffer.cc b/source/blender/gpu/intern/gpu_vertex_buffer.cc
index 67ad8835b6a..debf9835c90 100644
--- a/source/blender/gpu/intern/gpu_vertex_buffer.cc
+++ b/source/blender/gpu/intern/gpu_vertex_buffer.cc
@@ -77,6 +77,7 @@ void GPU_vertbuf_init(GPUVertBuf *verts, GPUUsageType usage)
memset(verts, 0, sizeof(GPUVertBuf));
verts->usage = usage;
verts->dirty = true;
+ verts->handle_refcount = 1;
}
void GPU_vertbuf_init_with_format_ex(GPUVertBuf *verts,
@@ -137,7 +138,23 @@ void GPU_vertbuf_clear(GPUVertBuf *verts)
void GPU_vertbuf_discard(GPUVertBuf *verts)
{
GPU_vertbuf_clear(verts);
- MEM_freeN(verts);
+ GPU_vertbuf_handle_ref_remove(verts);
+}
+
+void GPU_vertbuf_handle_ref_add(GPUVertBuf *verts)
+{
+ verts->handle_refcount++;
+}
+
+void GPU_vertbuf_handle_ref_remove(GPUVertBuf *verts)
+{
+ BLI_assert(verts->handle_refcount > 0);
+ verts->handle_refcount--;
+ if (verts->handle_refcount == 0) {
+ /* Should already have been cleared. */
+ BLI_assert(verts->vbo_id == 0 && verts->data == NULL);
+ MEM_freeN(verts);
+ }
}
uint GPU_vertbuf_size_get(const GPUVertBuf *verts)
diff --git a/source/blender/gpu/opengl/gl_backend.hh b/source/blender/gpu/opengl/gl_backend.hh
index f7c01b2f184..eba275f0245 100644
--- a/source/blender/gpu/opengl/gl_backend.hh
+++ b/source/blender/gpu/opengl/gl_backend.hh
@@ -27,7 +27,9 @@
#include "BLI_vector.hh"
+#include "gl_batch.hh"
#include "gl_context.hh"
+#include "gl_drawlist.hh"
namespace blender {
namespace gpu {
@@ -42,6 +44,16 @@ class GLBackend : public GPUBackend {
return new GLContext(ghost_window, shared_orphan_list_);
};
+ Batch *batch_alloc(void)
+ {
+ return new GLBatch();
+ };
+
+ DrawList *drawlist_alloc(int list_length)
+ {
+ return new GLDrawList(list_length);
+ };
+
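These factory methods let the front-end allocate backend objects without naming GL types directly; a sketch of the expected dispatch, assuming GPUBackend::get() returns the active backend as it does elsewhere in this commit:

    Batch *batch = GPUBackend::get()->batch_alloc();         /* Returns a new GLBatch. */
    DrawList *list = GPUBackend::get()->drawlist_alloc(128); /* Returns a new GLDrawList. */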
/* TODO remove */
void buf_free(GLuint buf_id);
void tex_free(GLuint tex_id);
diff --git a/source/blender/gpu/opengl/gl_batch.cc b/source/blender/gpu/opengl/gl_batch.cc
new file mode 100644
index 00000000000..00e1a61f7cf
--- /dev/null
+++ b/source/blender/gpu/opengl/gl_batch.cc
@@ -0,0 +1,367 @@
+/*
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * The Original Code is Copyright (C) 2016 by Mike Erwin.
+ * All rights reserved.
+ */
+
+/** \file
+ * \ingroup gpu
+ *
+ * GL implementation of GPUBatch.
+ * The only GL-specific detail here is that it caches a list of
+ * Vertex Array Objects based on the bound shader interface.
+ */
+
+#include "BLI_assert.h"
+
+#include "glew-mx.h"
+
+#include "GPU_extensions.h"
+
+#include "gpu_batch_private.hh"
+#include "gpu_primitive_private.h"
+#include "gpu_shader_private.h"
+
+#include "gl_batch.hh"
+#include "gl_context.hh"
+#include "gl_vertex_array.hh"
+
+using namespace blender::gpu;
+
+/* -------------------------------------------------------------------- */
+/** \name Vao cache
+ *
+ * Each GLBatch has a small cache of VAO objects that are used to avoid VAO reconfiguration.
+ * TODO(fclem) Could be revisited to avoid so many cross references.
+ * \{ */
+
+GLVaoCache::GLVaoCache(void)
+{
+ init();
+}
+
+GLVaoCache::~GLVaoCache()
+{
+ this->clear();
+}
+
+void GLVaoCache::init(void)
+{
+ context_ = NULL;
+ interface_ = NULL;
+ is_dynamic_vao_count = false;
+ for (int i = 0; i < GPU_VAO_STATIC_LEN; i++) {
+ static_vaos.interfaces[i] = NULL;
+ static_vaos.vao_ids[i] = 0;
+ }
+ vao_base_instance_ = 0;
+ base_instance_ = 0;
+}
+
+/* Create a new VAO object and store it in the cache. */
+void GLVaoCache::insert(const GPUShaderInterface *interface, GLuint vao)
+{
+  /* Now insert into the cache. */
+ if (!is_dynamic_vao_count) {
+ int i; /* find first unused slot */
+ for (i = 0; i < GPU_VAO_STATIC_LEN; i++) {
+ if (static_vaos.vao_ids[i] == 0) {
+ break;
+ }
+ }
+
+ if (i < GPU_VAO_STATIC_LEN) {
+ static_vaos.interfaces[i] = interface;
+ static_vaos.vao_ids[i] = vao;
+ }
+ else {
+ /* Erase previous entries, they will be added back if drawn again. */
+ for (int i = 0; i < GPU_VAO_STATIC_LEN; i++) {
+ if (static_vaos.interfaces[i] != NULL) {
+ GPU_shaderinterface_remove_batch_ref(
+ const_cast<GPUShaderInterface *>(static_vaos.interfaces[i]), this);
+ context_->vao_free(static_vaos.vao_ids[i]);
+ }
+ }
+      /* Not enough space, switch to dynamic. */
+ is_dynamic_vao_count = true;
+ /* Init dynamic arrays and let the branch below set the values. */
+ dynamic_vaos.count = GPU_BATCH_VAO_DYN_ALLOC_COUNT;
+ dynamic_vaos.interfaces = (const GPUShaderInterface **)MEM_callocN(
+ dynamic_vaos.count * sizeof(GPUShaderInterface *), "dyn vaos interfaces");
+ dynamic_vaos.vao_ids = (GLuint *)MEM_callocN(dynamic_vaos.count * sizeof(GLuint),
+ "dyn vaos ids");
+ }
+ }
+
+ if (is_dynamic_vao_count) {
+ int i; /* find first unused slot */
+ for (i = 0; i < dynamic_vaos.count; i++) {
+ if (dynamic_vaos.vao_ids[i] == 0) {
+ break;
+ }
+ }
+
+ if (i == dynamic_vaos.count) {
+      /* Not enough space, realloc the array. */
+ i = dynamic_vaos.count;
+ dynamic_vaos.count += GPU_BATCH_VAO_DYN_ALLOC_COUNT;
+ dynamic_vaos.interfaces = (const GPUShaderInterface **)MEM_recallocN(
+ (void *)dynamic_vaos.interfaces, sizeof(GPUShaderInterface *) * dynamic_vaos.count);
+ dynamic_vaos.vao_ids = (GLuint *)MEM_recallocN(dynamic_vaos.vao_ids,
+ sizeof(GLuint) * dynamic_vaos.count);
+ }
+ dynamic_vaos.interfaces[i] = interface;
+ dynamic_vaos.vao_ids[i] = vao;
+ }
+
+ GPU_shaderinterface_add_batch_ref(const_cast<GPUShaderInterface *>(interface), this);
+}
+
+void GLVaoCache::remove(const GPUShaderInterface *interface)
+{
+ const int count = (is_dynamic_vao_count) ? dynamic_vaos.count : GPU_VAO_STATIC_LEN;
+ GLuint *vaos = (is_dynamic_vao_count) ? dynamic_vaos.vao_ids : static_vaos.vao_ids;
+ const GPUShaderInterface **interfaces = (is_dynamic_vao_count) ? dynamic_vaos.interfaces :
+ static_vaos.interfaces;
+ for (int i = 0; i < count; i++) {
+ if (interfaces[i] == interface) {
+ context_->vao_free(vaos[i]);
+ vaos[i] = 0;
+ interfaces[i] = NULL;
+ break; /* cannot have duplicates */
+ }
+ }
+}
+
+void GLVaoCache::clear(void)
+{
+ GLContext *ctx = static_cast<GLContext *>(GPU_context_active_get());
+ const int count = (is_dynamic_vao_count) ? dynamic_vaos.count : GPU_VAO_STATIC_LEN;
+ GLuint *vaos = (is_dynamic_vao_count) ? dynamic_vaos.vao_ids : static_vaos.vao_ids;
+ const GPUShaderInterface **interfaces = (is_dynamic_vao_count) ? dynamic_vaos.interfaces :
+ static_vaos.interfaces;
+ /* Early out, nothing to free. */
+ if (context_ == NULL) {
+ return;
+ }
+
+ if (context_ == ctx) {
+ glDeleteVertexArrays(count, vaos);
+ glDeleteVertexArrays(1, &vao_base_instance_);
+ }
+ else {
+    /* TODO(fclem) Slow way. Could avoid multiple mutex locks here. */
+ for (int i = 0; i < count; i++) {
+ context_->vao_free(vaos[i]);
+ }
+ context_->vao_free(vao_base_instance_);
+ }
+
+ for (int i = 0; i < count; i++) {
+ if (interfaces[i] == NULL) {
+ continue;
+ }
+ GPU_shaderinterface_remove_batch_ref(const_cast<GPUShaderInterface *>(interfaces[i]), this);
+ }
+
+ if (is_dynamic_vao_count) {
+ MEM_freeN((void *)dynamic_vaos.interfaces);
+ MEM_freeN(dynamic_vaos.vao_ids);
+ }
+
+ if (context_) {
+ context_->vao_cache_unregister(this);
+ }
+ /* Reinit. */
+ this->init();
+}
+
+/* Return 0 on cache miss (invalid VAO) */
+GLuint GLVaoCache::lookup(const GPUShaderInterface *interface)
+{
+ const int count = (is_dynamic_vao_count) ? dynamic_vaos.count : GPU_VAO_STATIC_LEN;
+ const GPUShaderInterface **interfaces = (is_dynamic_vao_count) ? dynamic_vaos.interfaces :
+ static_vaos.interfaces;
+ for (int i = 0; i < count; i++) {
+ if (interfaces[i] == interface) {
+ return (is_dynamic_vao_count) ? dynamic_vaos.vao_ids[i] : static_vaos.vao_ids[i];
+ }
+ }
+ return 0;
+}
+
+/* The GLVaoCache object is only valid for one GLContext.
+ * Reset the cache if trying to draw in another context. */
+void GLVaoCache::context_check(void)
+{
+ GLContext *ctx = static_cast<GLContext *>(GPU_context_active_get());
+ BLI_assert(ctx);
+
+ if (context_ != ctx) {
+ if (context_ != NULL) {
+      /* IMPORTANT: Trying to draw a batch in multiple different contexts will trash the VAO
+       * cache. This has a major performance impact and should be avoided in most cases. */
+ context_->vao_cache_unregister(this);
+ }
+ this->clear();
+ context_ = ctx;
+ context_->vao_cache_register(this);
+ }
+}
+
+GLuint GLVaoCache::base_instance_vao_get(GPUBatch *batch, int i_first)
+{
+ this->context_check();
+ /* Make sure the interface is up to date. */
+ if (interface_ != GPU_context_active_get()->shader->interface) {
+ vao_get(batch);
+ /* Trigger update. */
+ base_instance_ = 0;
+ }
+ /**
+   * There seems to be a nasty bug when drawing with the same VAO after reconfiguring it (T71147).
+   * We just use a throwaway VAO for that case. Note that this is likely to degrade performance.
+   */
+#ifdef __APPLE__
+ glDeleteVertexArrays(1, &vao_base_instance_);
+ vao_base_instance_ = 0;
+#endif
+
+ if (vao_base_instance_ == 0) {
+ glGenVertexArrays(1, &vao_base_instance_);
+ }
+
+ if (base_instance_ != i_first) {
+ base_instance_ = i_first;
+ GLVertArray::update_bindings(vao_base_instance_, batch, interface_, i_first);
+ }
+  return vao_base_instance_;
+}
+
+GLuint GLVaoCache::vao_get(GPUBatch *batch)
+{
+ this->context_check();
+
+ GPUContext *ctx = GPU_context_active_get();
+ if (interface_ != ctx->shader->interface) {
+ interface_ = ctx->shader->interface;
+ vao_id_ = this->lookup(interface_);
+
+ if (vao_id_ == 0) {
+ /* Cache miss, create a new VAO. */
+ glGenVertexArrays(1, &vao_id_);
+ this->insert(interface_, vao_id_);
+ GLVertArray::update_bindings(vao_id_, batch, interface_, 0);
+ }
+ }
+
+ return vao_id_;
+}
+/** \} */
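For orientation, the lookup-or-insert pattern the cache implements, as driven by vao_get() above (illustrative only):

    GLuint vao = this->lookup(interface_); /* Returns 0 on a cache miss. */
    if (vao == 0) {
      glGenVertexArrays(1, &vao);
      this->insert(interface_, vao); /* Also registers a batch ref on the interface. */
    }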
+
+/* -------------------------------------------------------------------- */
+/** \name Creation & Deletion
+ * \{ */
+
+GLBatch::GLBatch(void)
+{
+}
+
+GLBatch::~GLBatch()
+{
+}
+
+/** \} */
+
+/* -------------------------------------------------------------------- */
+/** \name Drawing
+ * \{ */
+
+#if GPU_TRACK_INDEX_RANGE
+# define BASE_INDEX(el) ((el)->base_index)
+# define INDEX_TYPE(el) ((el)->gl_index_type)
+#else
+# define BASE_INDEX(el) 0
+# define INDEX_TYPE(el) GL_UNSIGNED_INT
+#endif
+
+void GLBatch::bind(int i_first)
+{
+ if (flag & GPU_BATCH_DIRTY) {
+ vao_cache_.clear();
+ }
+
+#if GPU_TRACK_INDEX_RANGE
+ /* Can be removed if GL 4.3 is required. */
+ if (!GLEW_ARB_ES3_compatibility && (elem != NULL)) {
+ glPrimitiveRestartIndex((elem->index_type == GPU_INDEX_U16) ? 0xFFFFu : 0xFFFFFFFFu);
+ }
+#endif
+
+ /* Can be removed if GL 4.2 is required. */
+ if (!GPU_arb_base_instance_is_supported() && (i_first > 0)) {
+ glBindVertexArray(vao_cache_.base_instance_vao_get(this, i_first));
+ }
+ else {
+ glBindVertexArray(vao_cache_.vao_get(this));
+ }
+}
+
+void GLBatch::draw(int v_first, int v_count, int i_first, int i_count)
+{
+ this->bind(i_first);
+
+ GLenum gl_type = convert_prim_type_to_gl(prim_type);
+
+ if (elem) {
+ const GPUIndexBuf *el = elem;
+ GLenum index_type = INDEX_TYPE(el);
+ GLint base_index = BASE_INDEX(el);
+ void *v_first_ofs = (GLuint *)0 + v_first + el->index_start;
+
+#if GPU_TRACK_INDEX_RANGE
+ if (el->index_type == GPU_INDEX_U16) {
+ v_first_ofs = (GLushort *)0 + v_first + el->index_start;
+ }
+#endif
+
+ if (GPU_arb_base_instance_is_supported()) {
+ glDrawElementsInstancedBaseVertexBaseInstance(
+ gl_type, v_count, index_type, v_first_ofs, i_count, base_index, i_first);
+ }
+ else {
+ glDrawElementsInstancedBaseVertex(
+ gl_type, v_count, index_type, v_first_ofs, i_count, base_index);
+ }
+ }
+ else {
+#ifdef __APPLE__
+ glDisable(GL_PRIMITIVE_RESTART);
+#endif
+ if (GPU_arb_base_instance_is_supported()) {
+ glDrawArraysInstancedBaseInstance(gl_type, v_first, v_count, i_count, i_first);
+ }
+ else {
+ glDrawArraysInstanced(gl_type, v_first, v_count, i_count);
+ }
+#ifdef __APPLE__
+ glEnable(GL_PRIMITIVE_RESTART);
+#endif
+ }
+}
+
+/** \} */
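A front-end usage sketch, assuming GPU_batch_draw_advanced() forwards to GLBatch::draw() (as the MDI fallback in gl_drawlist.cc below does); this draws 4 instances starting at instance index 2:

    GPU_batch_set_shader(batch, shader); /* Associates the shader with the batch. */
    GPU_batch_draw_advanced(batch, 0, 0, 2, 4);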
diff --git a/source/blender/gpu/opengl/gl_batch.hh b/source/blender/gpu/opengl/gl_batch.hh
new file mode 100644
index 00000000000..d70f43aed2a
--- /dev/null
+++ b/source/blender/gpu/opengl/gl_batch.hh
@@ -0,0 +1,105 @@
+/*
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * Copyright 2020, Blender Foundation.
+ * All rights reserved.
+ */
+
+/** \file
+ * \ingroup gpu
+ *
+ * GPU geometry batch
+ * Contains VAOs + VBOs + Shader representing a drawable entity.
+ */
+
+#pragma once
+
+#include "MEM_guardedalloc.h"
+
+#include "gpu_batch_private.hh"
+
+#include "glew-mx.h"
+
+#include "GPU_shader_interface.h"
+
+namespace blender {
+namespace gpu {
+
+#define GPU_VAO_STATIC_LEN 3
+
+/* VAO management: remembers all geometry state (vertex attribute bindings & element buffer)
+ * for each shader interface. Start with a static number of VAOs and fall back to a dynamic
+ * count if necessary. Once a batch goes dynamic it does not go back. */
+class GLVaoCache {
+ private:
+ /** Context for which the vao_cache_ was generated. */
+ struct GLContext *context_ = NULL;
+ /** Last interface this batch was drawn with. */
+ GPUShaderInterface *interface_ = NULL;
+  /** Cached VAO for the last interface. */
+  GLuint vao_id_ = 0;
+  /** Used when arb_base_instance is not supported. */
+ GLuint vao_base_instance_ = 0;
+ int base_instance_ = 0;
+
+ bool is_dynamic_vao_count = false;
+ union {
+ /** Static handle count */
+ struct {
+ const GPUShaderInterface *interfaces[GPU_VAO_STATIC_LEN];
+ GLuint vao_ids[GPU_VAO_STATIC_LEN];
+ } static_vaos;
+ /** Dynamic handle count */
+ struct {
+ uint count;
+ const GPUShaderInterface **interfaces;
+ GLuint *vao_ids;
+ } dynamic_vaos;
+ };
+
+ public:
+ GLVaoCache();
+ ~GLVaoCache();
+
+ GLuint vao_get(GPUBatch *batch);
+ GLuint base_instance_vao_get(GPUBatch *batch, int i_first);
+
+ GLuint lookup(const GPUShaderInterface *interface);
+ void insert(const GPUShaderInterface *interface, GLuint vao_id);
+ void remove(const GPUShaderInterface *interface);
+ void clear(void);
+
+ private:
+ void init(void);
+ void context_check(void);
+};
+
+class GLBatch : public Batch {
+ public:
+  /** All VAOs corresponding to all the GPUShaderInterfaces this batch was drawn with. */
+ GLVaoCache vao_cache_;
+
+ public:
+ GLBatch();
+ ~GLBatch();
+
+ void draw(int v_first, int v_count, int i_first, int i_count) override;
+ void bind(int i_first);
+
+ MEM_CXX_CLASS_ALLOC_FUNCS("GLBatch");
+};
+
+} // namespace gpu
+} // namespace blender
diff --git a/source/blender/gpu/opengl/gl_context.cc b/source/blender/gpu/opengl/gl_context.cc
index 00a10924ff6..dd413612879 100644
--- a/source/blender/gpu/opengl/gl_context.cc
+++ b/source/blender/gpu/opengl/gl_context.cc
@@ -63,8 +63,8 @@ GLContext::~GLContext()
/* For now don't allow GPUFrameBuffers to be reused in another context. */
BLI_assert(framebuffers_.is_empty());
/* Delete vaos so the batch can be reused in another context. */
- for (GPUBatch *batch : batches_) {
- GPU_batch_vao_cache_clear(batch);
+ for (GLVaoCache *cache : vao_caches_) {
+ cache->clear();
}
glDeleteVertexArrays(1, &default_vao_);
glDeleteBuffers(1, &default_attr_vbo_);
@@ -197,20 +197,17 @@ void GLBackend::tex_free(GLuint tex_id)
* is discarded.
* \{ */
-void GLContext::batch_register(struct GPUBatch *batch)
+void GLContext::vao_cache_register(GLVaoCache *cache)
{
lists_mutex_.lock();
- batches_.add(batch);
+ vao_caches_.add(cache);
lists_mutex_.unlock();
}
-void GLContext::batch_unregister(struct GPUBatch *batch)
+void GLContext::vao_cache_unregister(GLVaoCache *cache)
{
- /* vao_cache_clear() can acquire lists_mutex_ so avoid deadlock. */
- // reinterpret_cast<GLBatch *>(batch)->vao_cache_clear();
-
lists_mutex_.lock();
- batches_.remove(batch);
+ vao_caches_.remove(cache);
lists_mutex_.unlock();
}
diff --git a/source/blender/gpu/opengl/gl_context.hh b/source/blender/gpu/opengl/gl_context.hh
index 3b55965b9d1..0b762c939f1 100644
--- a/source/blender/gpu/opengl/gl_context.hh
+++ b/source/blender/gpu/opengl/gl_context.hh
@@ -25,15 +25,16 @@
#include "gpu_context_private.hh"
+#include "GPU_framebuffer.h"
+
#include "BLI_set.hh"
#include "BLI_vector.hh"
#include "glew-mx.h"
-#include <iostream>
+#include "gl_batch.hh"
+
#include <mutex>
-#include <unordered_set>
-#include <vector>
namespace blender {
namespace gpu {
@@ -50,7 +51,7 @@ class GLSharedOrphanLists {
void orphans_clear(void);
};
-class GLContext : public GPUContext {
+struct GLContext : public GPUContext {
/* TODO(fclem) these need to become private. */
public:
/** Default VAO for procedural draw calls. */
@@ -63,7 +64,7 @@ class GLContext : public GPUContext {
* GPUBatch & GPUFramebuffer have references to the context they are from, in the case the
* context is destroyed, we need to remove any reference to it.
*/
- Set<GPUBatch *> batches_;
+ Set<GLVaoCache *> vao_caches_;
Set<GPUFrameBuffer *> framebuffers_;
/** Mutex for the below structures. */
std::mutex lists_mutex_;
@@ -87,8 +88,8 @@ class GLContext : public GPUContext {
void vao_free(GLuint vao_id);
void fbo_free(GLuint fbo_id);
- void batch_register(struct GPUBatch *batch);
- void batch_unregister(struct GPUBatch *batch);
+ void vao_cache_register(GLVaoCache *cache);
+ void vao_cache_unregister(GLVaoCache *cache);
void framebuffer_register(struct GPUFrameBuffer *fb);
void framebuffer_unregister(struct GPUFrameBuffer *fb);
};
diff --git a/source/blender/gpu/opengl/gl_drawlist.cc b/source/blender/gpu/opengl/gl_drawlist.cc
new file mode 100644
index 00000000000..c121fb9ba2c
--- /dev/null
+++ b/source/blender/gpu/opengl/gl_drawlist.cc
@@ -0,0 +1,240 @@
+/*
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * The Original Code is Copyright (C) 2016 by Mike Erwin.
+ * All rights reserved.
+ */
+
+/** \file
+ * \ingroup gpu
+ *
+ * Implementation of Multi Draw Indirect using OpenGL.
+ * Falls back to plain draw calls if the needed extensions are not supported.
+ */
+
+#include "BLI_assert.h"
+
+#include "GPU_batch.h"
+#include "GPU_extensions.h"
+
+#include "glew-mx.h"
+
+#include "gpu_context_private.hh"
+#include "gpu_drawlist_private.hh"
+#include "gpu_primitive_private.h"
+
+#include "gl_backend.hh"
+#include "gl_drawlist.hh"
+
+#include <limits.h>
+
+#define USE_MULTI_DRAW_INDIRECT 1
+
+/* TODO remove. */
+#if GPU_TRACK_INDEX_RANGE
+# define BASE_INDEX(el) ((el)->base_index)
+# define INDEX_TYPE(el) ((el)->gl_index_type)
+#else
+# define BASE_INDEX(el) 0
+# define INDEX_TYPE(el) GL_UNSIGNED_INT
+#endif
+
+using namespace blender::gpu;
+
+typedef struct GLDrawCommand {
+ GLuint v_count;
+ GLuint i_count;
+ GLuint v_first;
+ GLuint i_first;
+} GLDrawCommand;
+
+typedef struct GLDrawCommandIndexed {
+ GLuint v_count;
+ GLuint i_count;
+ GLuint v_first;
+ GLuint base_index;
+ GLuint i_first;
+} GLDrawCommandIndexed;
+
+#define MDI_ENABLED (buffer_size_ != 0)
+#define MDI_DISABLED (buffer_size_ == 0)
+#define MDI_INDEXED (base_index_ != UINT_MAX)
+
+GLDrawList::GLDrawList(int length)
+{
+ BLI_assert(length > 0);
+ batch_ = NULL;
+ buffer_id_ = 0;
+ command_len_ = 0;
+ command_offset_ = 0;
+ data_offset_ = 0;
+ data_size_ = 0;
+ data_ = NULL;
+
+ if (USE_MULTI_DRAW_INDIRECT && GLEW_ARB_multi_draw_indirect &&
+ GPU_arb_base_instance_is_supported()) {
+ /* Alloc the biggest possible command list, which is indexed. */
+ buffer_size_ = sizeof(GLDrawCommandIndexed) * length;
+ }
+ else {
+ /* Indicates MDI is not supported. */
+ buffer_size_ = 0;
+ }
+}
+
+GLDrawList::~GLDrawList()
+{
+ /* TODO This ... */
+ static_cast<GLBackend *>(GPUBackend::get())->buf_free(buffer_id_);
+ /* ... should be this. */
+ // context_->buf_free(buffer_id_)
+}
+
+void GLDrawList::init(void)
+{
+ BLI_assert(GPU_context_active_get());
+ BLI_assert(MDI_ENABLED);
+ BLI_assert(data_ == NULL);
+ batch_ = NULL;
+ command_len_ = 0;
+
+ if (buffer_id_ == 0) {
+ /* Allocate on first use. */
+ glGenBuffers(1, &buffer_id_);
+ context_ = static_cast<GLContext *>(GPU_context_active_get());
+ }
+
+ glBindBuffer(GL_DRAW_INDIRECT_BUFFER, buffer_id_);
+ /* If buffer is full, orphan buffer data and start fresh. */
+ // if (command_offset_ >= data_size_) {
+ glBufferData(GL_DRAW_INDIRECT_BUFFER, buffer_size_, NULL, GL_DYNAMIC_DRAW);
+ data_offset_ = 0;
+ // }
+ /* Map the remaining range. */
+ GLbitfield flag = GL_MAP_WRITE_BIT | GL_MAP_UNSYNCHRONIZED_BIT | GL_MAP_FLUSH_EXPLICIT_BIT;
+ data_size_ = buffer_size_ - data_offset_;
+ data_ = (GLbyte *)glMapBufferRange(GL_DRAW_INDIRECT_BUFFER, data_offset_, data_size_, flag);
+ command_offset_ = 0;
+}
+
+void GLDrawList::append(GPUBatch *batch, int i_first, int i_count)
+{
+ /* Fallback when MultiDrawIndirect is not supported/enabled. */
+ if (MDI_DISABLED) {
+ GPU_batch_draw_advanced(batch, 0, 0, i_first, i_count);
+ return;
+ }
+
+ if (data_ == NULL) {
+ this->init();
+ }
+
+ if (batch != batch_) {
+ // BLI_assert(batch->flag | GPU_BATCH_INIT);
+ this->submit();
+ batch_ = batch;
+ /* Cached for faster access. */
+ base_index_ = batch->elem ? BASE_INDEX(batch->elem) : UINT_MAX;
+ v_first_ = batch->elem ? batch->elem->index_start : 0;
+ v_count_ = batch->elem ? batch->elem->index_len : batch->verts[0]->vertex_len;
+ }
+
+ if (MDI_INDEXED) {
+ GLDrawCommandIndexed *cmd = reinterpret_cast<GLDrawCommandIndexed *>(data_ + command_offset_);
+ cmd->v_first = v_first_;
+ cmd->v_count = v_count_;
+ cmd->i_count = i_count;
+ cmd->base_index = base_index_;
+ cmd->i_first = i_first;
+ command_offset_ += sizeof(GLDrawCommandIndexed);
+ }
+ else {
+ GLDrawCommand *cmd = reinterpret_cast<GLDrawCommand *>(data_ + command_offset_);
+ cmd->v_first = v_first_;
+ cmd->v_count = v_count_;
+ cmd->i_count = i_count;
+ cmd->i_first = i_first;
+ command_offset_ += sizeof(GLDrawCommand);
+ }
+
+ command_len_++;
+
+ if (command_offset_ >= data_size_) {
+ this->submit();
+ }
+}
+
+void GLDrawList::submit(void)
+{
+ if (command_len_ == 0) {
+ return;
+ }
+ /* Something's wrong if we get here without MDI support. */
+ BLI_assert(MDI_ENABLED);
+ BLI_assert(data_);
+ BLI_assert(GPU_context_active_get()->shader != NULL);
+
+ GLBatch *batch = static_cast<GLBatch *>(batch_);
+
+  /* Only do multi-draw indirect if doing more than 2 drawcalls. This avoids the overhead of
+   * buffer mapping if the scene is not very instance friendly. But we also need to take into
+   * account the case where only a few instances are needed to finish filling a call buffer. */
+ const bool is_finishing_a_buffer = (command_offset_ >= data_size_);
+ if (command_len_ > 2 || is_finishing_a_buffer) {
+ GLenum prim = convert_prim_type_to_gl(batch_->prim_type);
+ void *offset = (void *)data_offset_;
+
+ glBindBuffer(GL_DRAW_INDIRECT_BUFFER, buffer_id_);
+ glFlushMappedBufferRange(GL_DRAW_INDIRECT_BUFFER, 0, command_offset_);
+ glUnmapBuffer(GL_DRAW_INDIRECT_BUFFER);
+ data_ = NULL; /* Unmapped */
+ data_offset_ += command_offset_;
+
+ batch->bind(0);
+
+ if (MDI_INDEXED) {
+ glMultiDrawElementsIndirect(prim, INDEX_TYPE(batch_->elem), offset, command_len_, 0);
+ }
+ else {
+ glMultiDrawArraysIndirect(prim, offset, command_len_, 0);
+ }
+ }
+ else {
+    /* Fallback to simple drawcalls, and don't unmap the buffer. */
+ if (MDI_INDEXED) {
+ GLDrawCommandIndexed *cmd = (GLDrawCommandIndexed *)data_;
+ for (int i = 0; i < command_len_; i++, cmd++) {
+ /* Index start was already added. Avoid counting it twice. */
+ cmd->v_first -= batch->elem->index_start;
+ batch->draw(cmd->v_first, cmd->v_count, cmd->i_first, cmd->i_count);
+ }
+ /* Reuse the same data. */
+ command_offset_ -= command_len_ * sizeof(GLDrawCommandIndexed);
+ }
+ else {
+ GLDrawCommand *cmd = (GLDrawCommand *)data_;
+ for (int i = 0; i < command_len_; i++, cmd++) {
+ batch->draw(cmd->v_first, cmd->v_count, cmd->i_first, cmd->i_count);
+ }
+ /* Reuse the same data. */
+ command_offset_ -= command_len_ * sizeof(GLDrawCommand);
+ }
+ }
+ /* Do not submit this buffer again. */
+ command_len_ = 0;
+}
+
+/** \} */
\ No newline at end of file
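A front-end usage sketch, assuming the GPU_draw_list_* wrappers declared in the new GPU_drawlist.h forward to the methods above; the list length of 128 is arbitrary:

    GPUDrawList *list = GPU_draw_list_create(128);
    GPU_draw_list_append(list, batch, i_first, i_count); /* Batches consecutive instanced draws. */
    GPU_draw_list_submit(list);                          /* Flushes any pending commands. */
    GPU_draw_list_discard(list);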
diff --git a/source/blender/gpu/opengl/gl_drawlist.hh b/source/blender/gpu/opengl/gl_drawlist.hh
new file mode 100644
index 00000000000..4f085149388
--- /dev/null
+++ b/source/blender/gpu/opengl/gl_drawlist.hh
@@ -0,0 +1,80 @@
+/*
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * The Original Code is Copyright (C) 2020 Blender Foundation.
+ * All rights reserved.
+ */
+
+/** \file
+ * \ingroup gpu
+ */
+
+#pragma once
+
+#include "MEM_guardedalloc.h"
+
+#include "BLI_sys_types.h"
+
+#include "GPU_batch.h"
+#include "GPU_glew.h"
+
+#include "gpu_drawlist_private.hh"
+
+#include "gl_context.hh"
+
+namespace blender {
+namespace gpu {
+
+class GLDrawList : public DrawList {
+ public:
+ GLDrawList(int length);
+ ~GLDrawList();
+
+ void append(GPUBatch *batch, int i_first, int i_count) override;
+ void submit(void) override;
+
+ private:
+ void init(void);
+
+  /** Batch for which we are recording commands. */
+  GPUBatch *batch_;
+  /** Mapped memory bounds. */
+  GLbyte *data_;
+  /** Length of the mapped buffer (in bytes). */
+  GLsizeiptr data_size_;
+  /** Current offset inside the mapped buffer (in bytes). */
+  GLintptr command_offset_;
+  /** Current number of commands recorded inside the mapped buffer. */
+  uint command_len_;
+  /** Is UINT_MAX if not drawing indexed geometry. Also avoids dereferencing batch_. */
+  GLuint base_index_;
+  /** Cached to avoid dereferencing batch_. */
+  GLuint v_first_, v_count_;
+
+  /** GL Indirect Buffer id. 0 means MultiDrawIndirect is not supported/enabled. */
+  GLuint buffer_id_;
+  /** Length of the whole buffer (in bytes). */
+  GLsizeiptr buffer_size_;
+  /** Offset of data_ inside the whole buffer (in bytes). */
+  GLintptr data_offset_;
+
+ /** To free the buffer_id_. */
+ GLContext *context_;
+
+ MEM_CXX_CLASS_ALLOC_FUNCS("GLDrawList");
+};
+
+} // namespace gpu
+} // namespace blender
diff --git a/source/blender/gpu/opengl/gl_vertex_array.cc b/source/blender/gpu/opengl/gl_vertex_array.cc
new file mode 100644
index 00000000000..907dc37e46f
--- /dev/null
+++ b/source/blender/gpu/opengl/gl_vertex_array.cc
@@ -0,0 +1,158 @@
+/*
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * The Original Code is Copyright (C) 2016 by Mike Erwin.
+ * All rights reserved.
+ */
+
+/** \file
+ * \ingroup gpu
+ */
+
+#include "GPU_glew.h"
+
+#include "GPU_shader_interface.h"
+#include "GPU_vertex_buffer.h"
+
+#include "gpu_vertex_format_private.h"
+
+#include "gl_batch.hh"
+#include "gl_context.hh"
+
+#include "gl_vertex_array.hh"
+
+using namespace blender::gpu;
+
+/* -------------------------------------------------------------------- */
+/** \name Vertex Array Bindings
+ * \{ */
+
+/* Returns enabled vertex pointers as a bitflag (one bit per attrib). */
+static uint16_t vbo_bind(const GPUShaderInterface *interface,
+ const GPUVertFormat *format,
+ uint v_first,
+ uint v_len,
+ const bool use_instancing)
+{
+ uint16_t enabled_attrib = 0;
+ const uint attr_len = format->attr_len;
+ uint stride = format->stride;
+ uint offset = 0;
+ GLuint divisor = (use_instancing) ? 1 : 0;
+
+ for (uint a_idx = 0; a_idx < attr_len; a_idx++) {
+ const GPUVertAttr *a = &format->attrs[a_idx];
+
+ if (format->deinterleaved) {
+ offset += ((a_idx == 0) ? 0 : format->attrs[a_idx - 1].sz) * v_len;
+ stride = a->sz;
+ }
+ else {
+ offset = a->offset;
+ }
+
+ const GLvoid *pointer = (const GLubyte *)0 + offset + v_first * stride;
+ const GLenum type = convert_comp_type_to_gl(static_cast<GPUVertCompType>(a->comp_type));
+
+ for (uint n_idx = 0; n_idx < a->name_len; n_idx++) {
+ const char *name = GPU_vertformat_attr_name_get(format, a, n_idx);
+ const GPUShaderInput *input = GPU_shaderinterface_attr(interface, name);
+
+ if (input == NULL) {
+ continue;
+ }
+
+ enabled_attrib |= (1 << input->location);
+
+ if (a->comp_len == 16 || a->comp_len == 12 || a->comp_len == 8) {
+ BLI_assert(a->fetch_mode == GPU_FETCH_FLOAT);
+ BLI_assert(a->comp_type == GPU_COMP_F32);
+ for (int i = 0; i < a->comp_len / 4; i++) {
+ glEnableVertexAttribArray(input->location + i);
+ glVertexAttribDivisor(input->location + i, divisor);
+ glVertexAttribPointer(
+ input->location + i, 4, type, GL_FALSE, stride, (const GLubyte *)pointer + i * 16);
+ }
+ }
+ else {
+ glEnableVertexAttribArray(input->location);
+ glVertexAttribDivisor(input->location, divisor);
+
+ switch (a->fetch_mode) {
+ case GPU_FETCH_FLOAT:
+ case GPU_FETCH_INT_TO_FLOAT:
+ glVertexAttribPointer(input->location, a->comp_len, type, GL_FALSE, stride, pointer);
+ break;
+ case GPU_FETCH_INT_TO_FLOAT_UNIT:
+ glVertexAttribPointer(input->location, a->comp_len, type, GL_TRUE, stride, pointer);
+ break;
+ case GPU_FETCH_INT:
+ glVertexAttribIPointer(input->location, a->comp_len, type, stride, pointer);
+ break;
+ }
+ }
+ }
+ }
+ return enabled_attrib;
+}
+
+/* Update the Attrib Binding of the currently bound VAO. */
+void GLVertArray::update_bindings(const GLuint vao,
+ const GPUBatch *batch,
+ const GPUShaderInterface *interface,
+ const int base_instance)
+{
+ uint16_t attr_mask = interface->enabled_attr_mask;
+
+ glBindVertexArray(vao);
+
+  /* Reverse order so the first VBOs have more prevalence (in terms of attribute override). */
+ for (int v = GPU_BATCH_VBO_MAX_LEN - 1; v > -1; v--) {
+ GPUVertBuf *vbo = batch->verts[v];
+ if (vbo) {
+ GPU_vertbuf_use(vbo);
+ attr_mask &= ~vbo_bind(interface, &vbo->format, 0, vbo->vertex_len, false);
+ }
+ }
+
+ for (int v = GPU_BATCH_INST_VBO_MAX_LEN - 1; v > -1; v--) {
+ GPUVertBuf *vbo = batch->inst[v];
+ if (vbo) {
+ GPU_vertbuf_use(vbo);
+ attr_mask &= ~vbo_bind(interface, &vbo->format, base_instance, vbo->vertex_len, true);
+ }
+ }
+
+ if (attr_mask != 0 && GLEW_ARB_vertex_attrib_binding) {
+ for (uint16_t mask = 1, a = 0; a < 16; a++, mask <<= 1) {
+ if (attr_mask & mask) {
+ GLContext *ctx = static_cast<GLContext *>(GPU_context_active_get());
+ /* This replaces glVertexAttrib4f(a, 0.0f, 0.0f, 0.0f, 1.0f); with a more modern style.
+         * Fixes issues with some drivers (see T75069). */
+ glBindVertexBuffer(a, ctx->default_attr_vbo_, (intptr_t)0, (intptr_t)0);
+ glEnableVertexAttribArray(a);
+ glVertexAttribFormat(a, 4, GL_FLOAT, GL_FALSE, 0);
+ glVertexAttribBinding(a, a);
+ }
+ }
+ }
+
+ if (batch->elem) {
+ /* Binds the index buffer. This state is also saved in the VAO. */
+ GPU_indexbuf_use(batch->elem);
+ }
+}
+
+/** \} */
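The expected call site mirrors GLVaoCache::vao_get() in gl_batch.cc: bindings are (re)built every time a fresh VAO is generated for a shader interface:

    glGenVertexArrays(1, &vao_id_);
    this->insert(interface_, vao_id_);
    GLVertArray::update_bindings(vao_id_, batch, interface_, 0);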
diff --git a/source/blender/gpu/opengl/gl_vertex_array.hh b/source/blender/gpu/opengl/gl_vertex_array.hh
new file mode 100644
index 00000000000..6da414d7e62
--- /dev/null
+++ b/source/blender/gpu/opengl/gl_vertex_array.hh
@@ -0,0 +1,44 @@
+/*
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * The Original Code is Copyright (C) 2020 Blender Foundation.
+ * All rights reserved.
+ */
+
+/** \file
+ * \ingroup gpu
+ */
+
+#pragma once
+
+#include "glew-mx.h"
+
+#include "GPU_batch.h"
+#include "GPU_shader_interface.h"
+
+namespace blender {
+namespace gpu {
+
+namespace GLVertArray {
+
+void update_bindings(const GLuint vao,
+ const GPUBatch *batch,
+ const GPUShaderInterface *interface,
+ const int base_instance);
+
+} // namespace GLVertArray
+
+} // namespace gpu
+} // namespace blender
diff --git a/source/blender/makesdna/DNA_space_types.h b/source/blender/makesdna/DNA_space_types.h
index 7d77e8478ae..ad1635ba0c0 100644
--- a/source/blender/makesdna/DNA_space_types.h
+++ b/source/blender/makesdna/DNA_space_types.h
@@ -612,6 +612,7 @@ typedef enum eSpaceSeq_Flag {
SEQ_SHOW_SAFE_CENTER = (1 << 9),
SEQ_SHOW_METADATA = (1 << 10),
SEQ_SHOW_MARKERS = (1 << 11), /* show markers region */
+ SEQ_ZOOM_TO_FIT = (1 << 12),
} eSpaceSeq_Flag;
/* SpaceSeq.view */
diff --git a/source/blender/makesrna/intern/rna_object_force.c b/source/blender/makesrna/intern/rna_object_force.c
index fa837df682a..bb3756d9cfc 100644
--- a/source/blender/makesrna/intern/rna_object_force.c
+++ b/source/blender/makesrna/intern/rna_object_force.c
@@ -935,7 +935,7 @@ static void rna_def_pointcache_common(StructRNA *srna)
prop = RNA_def_property(srna, "frame_start", PROP_INT, PROP_TIME);
RNA_def_property_int_sdna(prop, NULL, "startframe");
RNA_def_property_range(prop, -MAXFRAME, MAXFRAME);
- RNA_def_property_ui_range(prop, 1, MAXFRAME, 1, 1);
+ RNA_def_property_ui_range(prop, 0, MAXFRAME, 1, 1);
RNA_def_property_ui_text(prop, "Start", "Frame on which the simulation starts");
prop = RNA_def_property(srna, "frame_end", PROP_INT, PROP_TIME);
diff --git a/source/blender/makesrna/intern/rna_space.c b/source/blender/makesrna/intern/rna_space.c
index 155f5ab3043..9f259aba0c7 100644
--- a/source/blender/makesrna/intern/rna_space.c
+++ b/source/blender/makesrna/intern/rna_space.c
@@ -4796,6 +4796,12 @@ static void rna_def_space_sequencer(BlenderRNA *brna)
RNA_def_property_ui_text(prop, "Waveform Displaying", "How Waveforms are drawn");
RNA_def_property_update(prop, NC_SPACE | ND_SPACE_SEQUENCER, NULL);
+ prop = RNA_def_property(srna, "zoom_to_fit", PROP_BOOLEAN, PROP_NONE);
+ RNA_def_property_boolean_sdna(prop, NULL, "flag", SEQ_ZOOM_TO_FIT);
+ RNA_def_property_ui_text(
+ prop, "Zoom to Fit", "Automatically zoom preview image to make it fully fit the region");
+ RNA_def_property_update(prop, NC_SPACE | ND_SPACE_SEQUENCER, NULL);
+
prop = RNA_def_property(srna, "show_overexposed", PROP_INT, PROP_NONE);
RNA_def_property_int_sdna(prop, NULL, "zebra");
RNA_def_property_ui_text(prop, "Show Overexposed", "Show overexposed areas with zebra stripes");
diff --git a/source/blender/python/gpu/gpu_py_batch.c b/source/blender/python/gpu/gpu_py_batch.c
index 01bccc57c7a..bb7028c11ab 100644
--- a/source/blender/python/gpu/gpu_py_batch.c
+++ b/source/blender/python/gpu/gpu_py_batch.c
@@ -50,7 +50,7 @@
static bool bpygpu_batch_is_program_or_error(BPyGPUBatch *self)
{
- if (!glIsProgram(self->batch->program)) {
+ if (!self->batch->shader) {
PyErr_SetString(PyExc_RuntimeError, "batch does not have any program assigned to it");
return false;
}
@@ -227,7 +227,7 @@ static PyObject *bpygpu_Batch_draw(BPyGPUBatch *self, PyObject *args)
return NULL;
}
}
- else if (self->batch->program != GPU_shader_get_program(py_program->shader)) {
+ else if (self->batch->shader != py_program->shader) {
GPU_batch_set_shader(self->batch, py_program->shader);
}
@@ -240,7 +240,7 @@ static PyObject *bpygpu_Batch_program_use_begin(BPyGPUBatch *self)
if (!bpygpu_batch_is_program_or_error(self)) {
return NULL;
}
- GPU_batch_program_use_begin(self->batch);
+ GPU_shader_bind(self->batch->shader);
Py_RETURN_NONE;
}
@@ -249,7 +249,7 @@ static PyObject *bpygpu_Batch_program_use_end(BPyGPUBatch *self)
if (!bpygpu_batch_is_program_or_error(self)) {
return NULL;
}
- GPU_batch_program_use_end(self->batch);
+ GPU_shader_unbind();
Py_RETURN_NONE;
}
diff --git a/source/blender/windowmanager/intern/wm_surface.c b/source/blender/windowmanager/intern/wm_surface.c
index 12e55790259..9948434d340 100644
--- a/source/blender/windowmanager/intern/wm_surface.c
+++ b/source/blender/windowmanager/intern/wm_surface.c
@@ -56,8 +56,6 @@ void wm_surface_clear_drawable(void)
WM_opengl_context_release(g_drawable->ghost_ctx);
GPU_context_active_set(NULL);
- BLF_batch_reset();
- gpu_batch_presets_reset();
immDeactivate();
if (g_drawable->deactivate) {
diff --git a/source/blender/windowmanager/intern/wm_window.c b/source/blender/windowmanager/intern/wm_window.c
index 47afa343394..a8a1817be5e 100644
--- a/source/blender/windowmanager/intern/wm_window.c
+++ b/source/blender/windowmanager/intern/wm_window.c
@@ -1112,8 +1112,6 @@ static void wm_window_set_drawable(wmWindowManager *wm, wmWindow *win, bool acti
void wm_window_clear_drawable(wmWindowManager *wm)
{
if (wm->windrawable) {
- BLF_batch_reset();
- gpu_batch_presets_reset();
immDeactivate();
wm->windrawable = NULL;
}