git.blender.org/blender.git

 source/blender/draw/intern/draw_cache.c               |   2
 source/blender/draw/intern/draw_cache_impl.h          |  14
 source/blender/draw/intern/draw_cache_impl_curve.c    | 154
 source/blender/draw/intern/draw_cache_impl_displist.c | 275
 source/blender/draw/intern/draw_cache_impl_metaball.c |  11
 5 files changed, 271 insertions(+), 185 deletions(-)
diff --git a/source/blender/draw/intern/draw_cache.c b/source/blender/draw/intern/draw_cache.c
index 3e49d890327..dbd05baa144 100644
--- a/source/blender/draw/intern/draw_cache.c
+++ b/source/blender/draw/intern/draw_cache.c
@@ -3703,7 +3703,7 @@ bool DRW_ibo_requested(GPUIndexBuf *ibo)
{
/* TODO do not rely on data uploaded. This prevents multithreading.
* (need access to a gl context) */
- return (ibo != NULL && ibo->ibo_id == 0);
+ return (ibo != NULL && ibo->ibo_id == 0 && ibo->data == NULL);
}
void DRW_vbo_request(GPUBatch *batch, GPUVertBuf **vbo)
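The extra ibo->data == NULL test distinguishes an index buffer that was merely requested from one whose CPU-side data is already built but not yet uploaded (ibo_id still 0). A minimal sketch of the three-state logic, using only the two fields visible in this diff (the full struct layout is an assumption):

	static const char *ibo_state(const GPUIndexBuf *ibo)
	{
		if (ibo == NULL || ibo->ibo_id != 0) {
			return "nothing to do";           /* not requested, or already on the GPU */
		}
		if (ibo->data != NULL) {
			return "built, awaiting upload";  /* no longer counts as "requested" */
		}
		return "requested";                   /* empty shell waiting to be filled */
	}
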
diff --git a/source/blender/draw/intern/draw_cache_impl.h b/source/blender/draw/intern/draw_cache_impl.h
index 6df7a896ffa..114d694041e 100644
--- a/source/blender/draw/intern/draw_cache_impl.h
+++ b/source/blender/draw/intern/draw_cache_impl.h
@@ -82,13 +82,13 @@ struct GPUBatch **DRW_metaball_batch_cache_get_surface_shaded(struct Object *ob,
struct GPUBatch *DRW_metaball_batch_cache_get_wireframes_face(struct Object *ob);
/* DispList */
-struct GPUVertBuf *DRW_displist_vertbuf_calc_pos_with_normals(struct ListBase *lb, struct GPUVertBuf *vbo);
-struct GPUIndexBuf *DRW_displist_indexbuf_calc_triangles_in_order(struct ListBase *lb, struct GPUIndexBuf *vbo);
-struct GPUIndexBuf **DRW_displist_indexbuf_calc_triangles_in_order_split_by_material(
- struct ListBase *lb, uint gpumat_array_len);
-struct GPUBatch **DRW_displist_batch_calc_tri_pos_normals_and_uv_split_by_material(
- struct ListBase *lb, uint gpumat_array_len);
-struct GPUBatch *DRW_displist_create_edges_overlay_batch(ListBase *lb, struct GPUVertBuf *vbo);
+void DRW_displist_vertbuf_create_pos_and_nor(struct ListBase *lb, struct GPUVertBuf *vbo);
+void DRW_displist_vertbuf_create_pos_and_nor_and_uv_tess(
+ struct ListBase *lb, struct GPUVertBuf *vbo_pos_nor, struct GPUVertBuf *vbo_uv);
+void DRW_displist_vertbuf_create_wireframe_data_tess(struct ListBase *lb, struct GPUVertBuf *vbo);
+void DRW_displist_indexbuf_create_triangles_in_order(struct ListBase *lb, struct GPUIndexBuf *ibo);
+void DRW_displist_indexbuf_create_triangles_tess_split_by_material(
+ struct ListBase *lb, struct GPUIndexBuf **ibo_mats, uint mat_len);
/* Lattice */
struct GPUBatch *DRW_lattice_batch_cache_get_all_edges(struct Lattice *lt, bool use_weight, const int actdef);
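The signature change is the core of this refactor: each create function now fills a caller-provided buffer instead of returning a freshly allocated one, so buffers can be pre-allocated as empty shells and filled on demand. A sketch of the new calling convention (allocation pattern mirroring the metaball hunks below):

	GPUVertBuf *vbo = MEM_callocN(sizeof(GPUVertBuf), __func__);
	DRW_displist_vertbuf_create_pos_and_nor(lb, vbo);
	/* The caller, not the create function, now owns the buffer and
	 * decides when it is freed or handed over to a batch. */
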
diff --git a/source/blender/draw/intern/draw_cache_impl_curve.c b/source/blender/draw/intern/draw_cache_impl_curve.c
index ac991336e05..a2bf950e129 100644
--- a/source/blender/draw/intern/draw_cache_impl_curve.c
+++ b/source/blender/draw/intern/draw_cache_impl_curve.c
@@ -40,6 +40,7 @@
#include "GPU_batch.h"
#include "GPU_texture.h"
+#include "GPU_material.h"
#include "UI_resources.h"
@@ -297,6 +298,47 @@ static int curve_render_data_normal_len_get(const CurveRenderData *rdata)
return rdata->normal.len;
}
+static void curve_cd_calc_used_gpu_layers(int *cd_layers, struct GPUMaterial **gpumat_array, int gpumat_array_len)
+{
+ GPUVertexAttribs gattribs = {{{0}}};
+ for (int i = 0; i < gpumat_array_len; i++) {
+ struct GPUMaterial *gpumat = gpumat_array[i];
+ if (gpumat == NULL) {
+ continue;
+ }
+ GPU_material_vertex_attributes(gpumat, &gattribs);
+ for (int j = 0; j < gattribs.totlayer; j++) {
+ const char *name = gattribs.layer[j].name;
+ int type = gattribs.layer[j].type;
+
+ /* Curves cannot have named layers.
+ * Note: We could relax this assumption later. */
+ if (name[0] != '\0') {
+ continue;
+ }
+
+ if (type == CD_AUTO_FROM_NAME) {
+ type = CD_MTFACE;
+ }
+
+ switch (type) {
+ case CD_MTFACE:
+ *cd_layers |= CD_MLOOPUV;
+ break;
+ case CD_TANGENT:
+ /* Currently unsupported */
+ // *cd_layers |= CD_TANGENT;
+ break;
+ case CD_MCOL:
+ /* Curve objects don't have color data. */
+ break;
+ case CD_ORCO:
+ *cd_layers |= CD_ORCO;
+ break;
+ }
+ }
+ }
+}
/* ---------------------------------------------------------------------- */
/* Curve GPUBatch Cache */
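curve_cd_calc_used_gpu_layers() folds the attribute needs of all materials into a single bitmask, so a caller can test for a layer with a bitwise AND. A short usage sketch, following the CD_* mask usage in this diff:

	int cd_needed = 0;
	curve_cd_calc_used_gpu_layers(&cd_needed, gpumat_array, gpumat_array_len);
	if (cd_needed & CD_MLOOPUV) {
		/* At least one material samples UVs, so the UV vertbuf must exist. */
	}
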
@@ -310,6 +352,7 @@ typedef struct CurveBatchCache {
struct {
GPUVertBuf *pos_nor;
+ GPUVertBuf *uv;
GPUVertBuf *wireframe_data;
} tess;
@@ -346,6 +389,7 @@ typedef struct CurveBatchCache {
GPUIndexBuf **surf_per_mat_tris;
GPUBatch **surf_per_mat;
int mat_len;
+ int cd_used, cd_needed;
/* settings to determine if cache is invalid */
bool is_dirty;
@@ -362,6 +406,10 @@ static bool curve_batch_cache_valid(Curve *cu)
return false;
}
+ if (cache->mat_len != max_ii(1, cu->totcol)) {
+ return false;
+ }
+
if (cache->is_dirty) {
return false;
}
@@ -401,6 +449,17 @@ static void curve_batch_cache_init(Curve *cu)
}
#endif
+ cache->cd_used = 0;
+ cache->mat_len = max_ii(1, cu->totcol);
+ cache->surf_per_mat_tris = MEM_mallocN(sizeof(*cache->surf_per_mat_tris) * cache->mat_len, __func__);
+ cache->surf_per_mat = MEM_mallocN(sizeof(*cache->surf_per_mat) * cache->mat_len, __func__);
+
+ /* TODO Might be wiser to alloc in one chunk. */
+ for (int i = 0; i < cache->mat_len; ++i) {
+ cache->surf_per_mat_tris[i] = MEM_callocN(sizeof(GPUIndexBuf), "GPUIndexBuf");
+ cache->surf_per_mat[i] = MEM_callocN(sizeof(GPUBatch), "GPUBatch");
+ }
+
cache->is_editmode = (cu->editnurb != NULL) || (cu->editfont != NULL);
cache->is_dirty = false;
@@ -472,6 +531,7 @@ static void curve_batch_cache_clear(Curve *cu)
MEM_SAFE_FREE(cache->surf_per_mat_tris);
MEM_SAFE_FREE(cache->surf_per_mat);
cache->mat_len = 0;
+ cache->cd_used = 0;
}
void DRW_curve_batch_cache_free(Curve *cu)
@@ -786,46 +846,16 @@ GPUBatch **DRW_curve_batch_cache_get_surface_shaded(
struct Curve *cu,
struct GPUMaterial **gpumat_array, uint gpumat_array_len)
{
-#if 0
CurveBatchCache *cache = curve_batch_cache_get(cu);
- if (cache->surface.mat_len != gpumat_array_len) {
- GPU_BATCH_DISCARD_ARRAY_SAFE(cache->surface.shaded_triangles, cache->surface.mat_len);
- }
-
- if (cache->surface.shaded_triangles == NULL) {
- CurveRenderData *rdata = curve_render_data_create(cu, ob_curve_cache, CU_DATATYPE_SURFACE);
- ListBase *lb = &rdata->ob_curve_cache->disp;
-
- cache->surface.mat_len = gpumat_array_len;
- if (cu->flag & CU_UV_ORCO) {
- cache->surface.shaded_triangles = DRW_displist_batch_calc_tri_pos_normals_and_uv_split_by_material(
- lb, gpumat_array_len);
- }
- else {
- cache->surface.shaded_triangles = MEM_mallocN(
- sizeof(*cache->surface.shaded_triangles) * gpumat_array_len, __func__);
- GPUIndexBuf **el = DRW_displist_indexbuf_calc_triangles_in_order_split_by_material(
- lb, gpumat_array_len);
-
- if (cache->surface.verts == NULL) {
- cache->surface.verts = DRW_displist_vertbuf_calc_pos_with_normals(lb);
- }
-
- for (int i = 0; i < gpumat_array_len; ++i) {
- cache->surface.shaded_triangles[i] = GPU_batch_create_ex(
- GPU_PRIM_TRIS, cache->surface.verts, el[i], GPU_BATCH_OWNS_INDEX);
- }
+ BLI_assert(gpumat_array_len == cache->mat_len);
- MEM_freeN(el); /* Save `el` in cache? */
- }
+ curve_cd_calc_used_gpu_layers(&cache->cd_needed, gpumat_array, gpumat_array_len);
- curve_render_data_free(rdata);
+ for (int i = 0; i < cache->mat_len; ++i) {
+ DRW_batch_request(&cache->surf_per_mat[i]);
}
-
- return cache->surface.shaded_triangles;
-#endif
- return NULL;
+ return cache->surf_per_mat;
}
GPUBatch *DRW_curve_batch_cache_get_wireframes_face(Curve *cu)
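The getter no longer builds anything: it records which attribute layers the materials need (cd_needed) and requests one batch per material. The actual VBO/IBO data is produced later, in DRW_curve_batch_cache_create_requested() below. A hypothetical draw-engine call site, to illustrate the two-phase flow:

	GPUBatch **surf = DRW_curve_batch_cache_get_surface_shaded(cu, gpumat_array, mat_len);
	/* surf[i] are empty shells here; they only become drawable after
	 * DRW_curve_batch_cache_create_requested(ob) has run at the end of
	 * cache population for the frame. */
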
@@ -847,6 +877,18 @@ void DRW_curve_batch_cache_create_requested(Object *ob)
Curve *me = (Curve *)ob->data;
CurveBatchCache *cache = curve_batch_cache_get(me);
+ /* Verify that all surface batches have needed attrib layers. */
+ /* TODO(fclem): We could be a bit smarter here and only do it per material. */
+ for (int i = 0; i < cache->mat_len; ++i) {
+ if ((cache->cd_used & cache->cd_needed) != cache->cd_needed) {
+ /* We can't discard batches at this point as they have been
+ * referenced for drawing. Just clear them in place. */
+ GPU_batch_clear(cache->surf_per_mat[i]);
+ memset(cache->surf_per_mat[i], 0, sizeof(*cache->surf_per_mat[i]));
+ }
+ }
+ cache->cd_used = cache->cd_needed;
+
/* Init batches and request VBOs & IBOs */
if (DRW_batch_requested(cache->batch.surfaces, GPU_PRIM_TRIS)) {
DRW_ibo_request(cache->batch.surfaces, &cache->ibo.surfaces_tris);
@@ -857,7 +899,6 @@ void DRW_curve_batch_cache_create_requested(Object *ob)
DRW_vbo_request(cache->batch.curves, &cache->ordered.curves_pos);
}
if (DRW_batch_requested(cache->batch.wire_triangles, GPU_PRIM_TRIS)) {
- DRW_vbo_request(cache->batch.wire_triangles, &cache->tess.pos_nor);
DRW_vbo_request(cache->batch.wire_triangles, &cache->tess.wireframe_data);
}
@@ -879,12 +920,24 @@ void DRW_curve_batch_cache_create_requested(Object *ob)
if (DRW_batch_requested(cache->batch.edit_normals, GPU_PRIM_LINES)) {
DRW_vbo_request(cache->batch.edit_normals, &cache->edit.curves_nor);
}
+ for (int i = 0; i < cache->mat_len; ++i) {
+ if (DRW_batch_requested(cache->surf_per_mat[i], GPU_PRIM_TRIS)) {
+ if (cache->mat_len > 1) {
+ DRW_ibo_request(cache->surf_per_mat[i], &cache->surf_per_mat_tris[i]);
+ }
+ if (cache->cd_used & CD_MLOOPUV) {
+ DRW_vbo_request(cache->surf_per_mat[i], &cache->tess.uv);
+ }
+ DRW_vbo_request(cache->surf_per_mat[i], &cache->tess.pos_nor);
+ }
+ }
/* Generate MeshRenderData flags */
int mr_flag = 0;
DRW_ADD_FLAG_FROM_VBO_REQUEST(mr_flag, cache->ordered.pos_nor, CU_DATATYPE_SURFACE);
DRW_ADD_FLAG_FROM_VBO_REQUEST(mr_flag, cache->ordered.curves_pos, CU_DATATYPE_WIRE);
DRW_ADD_FLAG_FROM_VBO_REQUEST(mr_flag, cache->tess.pos_nor, CU_DATATYPE_SURFACE);
+ DRW_ADD_FLAG_FROM_VBO_REQUEST(mr_flag, cache->tess.uv, CU_DATATYPE_SURFACE);
DRW_ADD_FLAG_FROM_VBO_REQUEST(mr_flag, cache->tess.wireframe_data, CU_DATATYPE_SURFACE);
DRW_ADD_FLAG_FROM_IBO_REQUEST(mr_flag, cache->ibo.surfaces_tris, CU_DATATYPE_SURFACE);
DRW_ADD_FLAG_FROM_IBO_REQUEST(mr_flag, cache->ibo.curves_lines, CU_DATATYPE_WIRE);
@@ -896,6 +949,10 @@ void DRW_curve_batch_cache_create_requested(Object *ob)
DRW_ADD_FLAG_FROM_IBO_REQUEST(mr_flag, cache->ibo.edit_verts_points, CU_DATATYPE_OVERLAY);
DRW_ADD_FLAG_FROM_IBO_REQUEST(mr_flag, cache->ibo.edit_lines, CU_DATATYPE_OVERLAY);
+ for (int i = 0; i < cache->mat_len; ++i) {
+ DRW_ADD_FLAG_FROM_IBO_REQUEST(mr_flag, cache->surf_per_mat_tris[i], CU_DATATYPE_SURFACE);
+ }
+
CurveRenderData *rdata = curve_render_data_create(me, ob->runtime.curve_cache, mr_flag);
/* DispLists */
@@ -903,21 +960,30 @@ void DRW_curve_batch_cache_create_requested(Object *ob)
/* Generate VBOs */
if (DRW_vbo_requested(cache->ordered.pos_nor)) {
- DRW_displist_vertbuf_calc_pos_with_normals(lb, cache->ordered.pos_nor);
+ DRW_displist_vertbuf_create_pos_and_nor(lb, cache->ordered.pos_nor);
}
if (DRW_vbo_requested(cache->ordered.curves_pos)) {
curve_create_curves_pos(rdata, cache->ordered.curves_pos);
}
+ if (DRW_vbo_requested(cache->tess.pos_nor) ||
+ DRW_vbo_requested(cache->tess.uv))
+ {
+ DRW_displist_vertbuf_create_pos_and_nor_and_uv_tess(lb, cache->tess.pos_nor, cache->tess.uv);
+ }
if (DRW_vbo_requested(cache->tess.wireframe_data)) {
- DRW_displist_create_edges_overlay_batch(lb, cache->tess.wireframe_data);
+ DRW_displist_vertbuf_create_wireframe_data_tess(lb, cache->tess.wireframe_data);
+ }
+
+ if (DRW_ibo_requested(cache->surf_per_mat_tris[0])) {
+ DRW_displist_indexbuf_create_triangles_tess_split_by_material(lb, cache->surf_per_mat_tris, cache->mat_len);
}
if (DRW_ibo_requested(cache->ibo.curves_lines)) {
curve_create_curves_lines(rdata, cache->ibo.curves_lines);
}
if (DRW_ibo_requested(cache->ibo.surfaces_tris)) {
- DRW_displist_indexbuf_calc_triangles_in_order(lb, cache->ibo.surfaces_tris);
+ DRW_displist_indexbuf_create_triangles_in_order(lb, cache->ibo.surfaces_tris);
}
if (DRW_vbo_requested(cache->edit.pos) ||
@@ -932,6 +998,16 @@ void DRW_curve_batch_cache_create_requested(Object *ob)
curve_create_edit_curves_nor(rdata, cache->edit.curves_nor);
}
+#ifdef DEBUG
+ /* Make sure all requested batches have been set up. */
+ for (int i = 0; i < sizeof(cache->batch) / sizeof(void *); ++i) {
+ GPUBatch **batch = (GPUBatch **)&cache->batch;
+ if (batch[i] != NULL) {
+ BLI_assert(batch[i]->verts[0] != NULL);
+ }
+ }
+#endif
+
curve_render_data_free(rdata);
}
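The DEBUG walk above treats cache->batch as a flat array of GPUBatch pointers, which only works while the struct contains nothing but pointers. A reduced sketch of the pattern (hypothetical struct, for illustration only):

	struct { GPUBatch *a; GPUBatch *b; GPUBatch *c; } batch_list;
	GPUBatch **as_array = (GPUBatch **)&batch_list;
	const int n = sizeof(batch_list) / sizeof(void *); /* == 3 */
	/* Adding any non-pointer member to the struct would silently break
	 * both the count and the cast. */
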
diff --git a/source/blender/draw/intern/draw_cache_impl_displist.c b/source/blender/draw/intern/draw_cache_impl_displist.c
index 4e25fc692be..95a5cc6edbf 100644
--- a/source/blender/draw/intern/draw_cache_impl_displist.c
+++ b/source/blender/draw/intern/draw_cache_impl_displist.c
@@ -125,7 +125,7 @@ static void displist_indexbufbuilder_set(
}
}
-GPUVertBuf *DRW_displist_vertbuf_calc_pos_with_normals(ListBase *lb, GPUVertBuf *vbo)
+void DRW_displist_vertbuf_create_pos_and_nor(ListBase *lb, GPUVertBuf *vbo)
{
static GPUVertFormat format = { 0 };
static struct { uint pos, nor; } attr_id;
@@ -135,12 +135,7 @@ GPUVertBuf *DRW_displist_vertbuf_calc_pos_with_normals(ListBase *lb, GPUVertBuf
attr_id.nor = GPU_vertformat_attr_add(&format, "nor", GPU_COMP_I16, 4, GPU_FETCH_INT_TO_FLOAT_UNIT);
}
- if (vbo == NULL) {
- vbo = GPU_vertbuf_create_with_format(&format);
- }
- else {
- GPU_vertbuf_init_with_format(vbo, &format);
- }
+ GPU_vertbuf_init_with_format(vbo, &format);
GPU_vertbuf_data_alloc(vbo, curve_render_surface_vert_len_get(lb));
BKE_displist_normals_add(lb);
@@ -167,11 +162,9 @@ GPUVertBuf *DRW_displist_vertbuf_calc_pos_with_normals(ListBase *lb, GPUVertBuf
}
}
}
-
- return vbo;
}
-GPUIndexBuf *DRW_displist_indexbuf_calc_triangles_in_order(ListBase *lb, GPUIndexBuf *ibo)
+void DRW_displist_indexbuf_create_triangles_in_order(ListBase *lb, GPUIndexBuf *ibo)
{
const int tri_len = curve_render_surface_tri_len_get(lb);
const int vert_len = curve_render_surface_vert_len_get(lb);
@@ -187,45 +180,67 @@ GPUIndexBuf *DRW_displist_indexbuf_calc_triangles_in_order(ListBase *lb, GPUInde
ofs += dl_vert_len(dl);
}
- if (ibo != NULL) {
- GPU_indexbuf_build_in_place(&elb, ibo);
- }
- else {
- ibo = GPU_indexbuf_build(&elb);
- }
- return ibo;
+ GPU_indexbuf_build_in_place(&elb, ibo);
}
-GPUIndexBuf **DRW_displist_indexbuf_calc_triangles_in_order_split_by_material(ListBase *lb, uint gpumat_array_len)
+void DRW_displist_indexbuf_create_triangles_tess_split_by_material(
+ ListBase *lb,
+ GPUIndexBuf **ibo_mats, uint mat_len)
{
- GPUIndexBuf **shaded_triangles_in_order = MEM_callocN(
- sizeof(*shaded_triangles_in_order) * gpumat_array_len, __func__);
- GPUIndexBufBuilder *elb = BLI_array_alloca(elb, gpumat_array_len);
+ GPUIndexBufBuilder *elb = BLI_array_alloca(elb, mat_len);
const int tri_len = curve_render_surface_tri_len_get(lb);
- const int vert_len = curve_render_surface_vert_len_get(lb);
- int i;
/* Init each index buffer builder */
- for (i = 0; i < gpumat_array_len; i++) {
- GPU_indexbuf_init(&elb[i], GPU_PRIM_TRIS, tri_len, vert_len);
+ for (int i = 0; i < mat_len; i++) {
+ GPU_indexbuf_init(&elb[i], GPU_PRIM_TRIS, tri_len, tri_len * 3);
}
/* calc each index buffer builder */
- int ofs = 0;
+ uint v_idx = 0;
for (const DispList *dl = lb->first; dl; dl = dl->next) {
- displist_indexbufbuilder_set((setTriIndicesFn *)GPU_indexbuf_add_tri_verts,
- (setTriIndicesFn *)GPU_indexbuf_add_tri_verts,
- &elb[dl->col], dl, ofs);
- ofs += dl_vert_len(dl);
+ if (ELEM(dl->type, DL_INDEX3, DL_INDEX4, DL_SURF)) {
+ GPUIndexBufBuilder *elem = &elb[dl->col];
+
+ if (dl->type == DL_INDEX3) {
+ for (int i = 0; i < dl->parts; i++) {
+ GPU_indexbuf_add_tri_verts(elem, v_idx + 0, v_idx + 1, v_idx + 2);
+ v_idx += 3;
+ }
+ }
+ else if (dl->type == DL_SURF) {
+ for (int a = 0; a < dl->parts; a++) {
+ if ((dl->flag & DL_CYCL_V) == 0 && a == dl->parts - 1) {
+ break;
+ }
+ int b = (dl->flag & DL_CYCL_U) ? 0 : 1;
+ for (; b < dl->nr; b++) {
+ /* TODO(fclem) reuse verts in a quad at least. */
+ GPU_indexbuf_add_tri_verts(elem, v_idx + 0, v_idx + 1, v_idx + 2);
+ GPU_indexbuf_add_tri_verts(elem, v_idx + 3, v_idx + 4, v_idx + 5);
+ v_idx += 6;
+ }
+ }
+ }
+ else {
+ BLI_assert(dl->type == DL_INDEX4);
+ const int *idx = dl->index;
+ for (int i = 0; i < dl->parts; i++, idx += 4) {
+ GPU_indexbuf_add_tri_verts(elem, v_idx + 0, v_idx + 1, v_idx + 2);
+ v_idx += 3;
+ if (idx[2] != idx[3]) {
+ GPU_indexbuf_add_tri_verts(elem, v_idx + 0, v_idx + 1, v_idx + 2);
+ v_idx += 3;
+ }
+ }
+ }
+ }
}
/* build each indexbuf */
- for (i = 0; i < gpumat_array_len; i++) {
- shaded_triangles_in_order[i] = GPU_indexbuf_build(&elb[i]);
+ for (int i = 0; i < mat_len; i++) {
+ GPU_indexbuf_build_in_place(&elb[i], ibo_mats[i]);
}
-
- return shaded_triangles_in_order;
}
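Because the tessellated vertex stream duplicates vertices per triangle, the per-material index buffers are simply sequential ranges. A worked trace of the v_idx bookkeeping above:

	/* A DL_INDEX3 with 2 parts, then a DL_INDEX4 with one true quad:
	 *   DL_INDEX3 part 0 -> tri (0, 1, 2)     v_idx: 0 -> 3
	 *   DL_INDEX3 part 1 -> tri (3, 4, 5)     v_idx: 3 -> 6
	 *   DL_INDEX4 quad   -> tri (6, 7, 8)     v_idx: 6 -> 9
	 *                    -> tri (9, 10, 11)   v_idx: 9 -> 12  (idx[2] != idx[3])
	 * Each tessellated vertex is referenced exactly once, in order, so the
	 * index buffers partition 0..vert_len-1 by material. */
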
typedef struct DRWDisplistWireThunk {
@@ -278,7 +293,8 @@ static void set_overlay_wires_quad_tri_indices(void *thunk, uint v1, uint v2, ui
}
}
-GPUBatch *DRW_displist_create_edges_overlay_batch(ListBase *lb, GPUVertBuf *vbo)
+/* TODO reuse the position and normals from the other tessellation vertbuf. */
+void DRW_displist_vertbuf_create_wireframe_data_tess(ListBase *lb, GPUVertBuf *vbo)
{
static DRWDisplistWireThunk thunk;
static GPUVertFormat format = {0};
@@ -289,13 +305,8 @@ GPUBatch *DRW_displist_create_edges_overlay_batch(ListBase *lb, GPUVertBuf *vbo)
GPU_vertformat_triple_load(&format);
}
- if (vbo == NULL) {
- thunk.vbo = GPU_vertbuf_create_with_format(&format);
- }
- else {
- GPU_vertbuf_init_with_format(vbo, &format);
- thunk.vbo = vbo;
- }
+ GPU_vertbuf_init_with_format(vbo, &format);
+ thunk.vbo = vbo;
int vert_len = curve_render_surface_tri_len_get(lb) * 3;
GPU_vertbuf_data_alloc(thunk.vbo, vert_len);
@@ -316,81 +327,96 @@ GPUBatch *DRW_displist_create_edges_overlay_batch(ListBase *lb, GPUVertBuf *vbo)
if (thunk.vidx < vert_len) {
GPU_vertbuf_data_resize(thunk.vbo, thunk.vidx);
}
+}
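The function sizes the buffer for the worst case and trims afterwards; a condensed sketch of that alloc-then-shrink idiom as used above:

	int vert_len = curve_render_surface_tri_len_get(lb) * 3; /* upper bound */
	GPU_vertbuf_data_alloc(thunk.vbo, vert_len);
	/* ... fill, advancing thunk.vidx for every vertex actually written ... */
	if (thunk.vidx < vert_len) {
		GPU_vertbuf_data_resize(thunk.vbo, thunk.vidx); /* trim unused tail */
	}
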
- if (vbo == NULL) {
- return GPU_batch_create_ex(GPU_PRIM_TRIS, thunk.vbo, NULL, GPU_BATCH_OWNS_VBO);
+static void surf_uv_quad(const DispList *dl, const uint quad[4], float r_uv[4][2])
+{
+ int orco_sizeu = dl->nr - 1;
+ int orco_sizev = dl->parts - 1;
+
+ /* exception as handled in convertblender.c too */
+ if (dl->flag & DL_CYCL_U) {
+ orco_sizeu++;
}
- else {
- return NULL;
+ if (dl->flag & DL_CYCL_V) {
+ orco_sizev++;
}
-}
+ for (int i = 0; i < 4; i++) {
+ /* find uv based on vertex index into grid array */
+ r_uv[i][0] = (quad[i] / dl->nr) / (float)orco_sizev;
+ r_uv[i][1] = (quad[i] % dl->nr) / (float)orco_sizeu;
-static void displist_vertbuf_attr_set_tri_pos_normals_and_uv(
+ /* cyclic correction */
+ if ((i == 1 || i == 2) && r_uv[i][0] == 0.0f) {
+ r_uv[i][0] = 1.0f;
+ }
+ if ((i == 0 || i == 1) && r_uv[i][1] == 0.0f) {
+ r_uv[i][1] = 1.0f;
+ }
+ }
+}
+
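surf_uv_quad() derives UVs straight from a vertex's position in the surface grid. A worked example, assuming a non-cyclic grid with dl->nr = 4 points in U and dl->parts = 3 in V (so orco_sizeu = 3, orco_sizev = 2):

	/* quad[i] = 6: row = 6 / 4 = 1, column = 6 % 4 = 2
	 *   r_uv[i][0] = 1 / (float)2 = 0.5f
	 *   r_uv[i][1] = 2 / (float)3 = 0.667f
	 * The cyclic correction then wraps the 0.0 coordinates that a closed
	 * (DL_CYCL_*) surface produces on its seam back to 1.0. */
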
+static void displist_vertbuf_attr_set_tri_pos_nor_uv(
GPUVertBufRaw *pos_step, GPUVertBufRaw *nor_step, GPUVertBufRaw *uv_step,
const float v1[3], const float v2[3], const float v3[3],
const float n1[3], const float n2[3], const float n3[3],
const float uv1[2], const float uv2[2], const float uv3[2])
{
- copy_v3_v3(GPU_vertbuf_raw_step(pos_step), v1);
- copy_v3_v3(GPU_vertbuf_raw_step(nor_step), n1);
- copy_v2_v2(GPU_vertbuf_raw_step(uv_step), uv1);
-
- copy_v3_v3(GPU_vertbuf_raw_step(pos_step), v2);
- copy_v3_v3(GPU_vertbuf_raw_step(nor_step), n2);
- copy_v2_v2(GPU_vertbuf_raw_step(uv_step), uv2);
+ if (pos_step->size != 0) {
+ copy_v3_v3(GPU_vertbuf_raw_step(pos_step), v1);
+ copy_v3_v3(GPU_vertbuf_raw_step(pos_step), v2);
+ copy_v3_v3(GPU_vertbuf_raw_step(pos_step), v3);
+
+ *(GPUPackedNormal *)GPU_vertbuf_raw_step(nor_step) = GPU_normal_convert_i10_v3(n1);
+ *(GPUPackedNormal *)GPU_vertbuf_raw_step(nor_step) = GPU_normal_convert_i10_v3(n2);
+ *(GPUPackedNormal *)GPU_vertbuf_raw_step(nor_step) = GPU_normal_convert_i10_v3(n3);
+ }
- copy_v3_v3(GPU_vertbuf_raw_step(pos_step), v3);
- copy_v3_v3(GPU_vertbuf_raw_step(nor_step), n3);
- copy_v2_v2(GPU_vertbuf_raw_step(uv_step), uv3);
+ if (uv_step->size != 0) {
+ normal_float_to_short_v2(GPU_vertbuf_raw_step(uv_step), uv1);
+ normal_float_to_short_v2(GPU_vertbuf_raw_step(uv_step), uv2);
+ normal_float_to_short_v2(GPU_vertbuf_raw_step(uv_step), uv3);
+ }
}
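Both attribute streams are now written compressed: normals as signed 10-bit integers (GPU_normal_convert_i10_v3) and UVs as normalized shorts. A sketch of the snorm quantization this relies on (the exact GPUPackedNormal layout is an assumption based on the common 10_10_10_2 vertex format):

	static short quantize_snorm10(float f) /* f in [-1, 1] */
	{
		/* round-to-nearest onto the 10-bit signed range [-511, 511];
		 * the GPU converts back via GPU_FETCH_INT_TO_FLOAT_UNIT */
		return (short)(f * 511.0f + (f >= 0.0f ? 0.5f : -0.5f));
	}
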
-GPUBatch **DRW_displist_batch_calc_tri_pos_normals_and_uv_split_by_material(ListBase *lb, uint gpumat_array_len)
+void DRW_displist_vertbuf_create_pos_and_nor_and_uv_tess(
+ ListBase *lb,
+ GPUVertBuf *vbo_pos_nor, GPUVertBuf *vbo_uv)
{
- static GPUVertFormat shaded_triangles_format = { 0 };
+ static GPUVertFormat format_pos_nor = { 0 };
+ static GPUVertFormat format_uv = { 0 };
static struct { uint pos, nor, uv; } attr_id;
-
- if (shaded_triangles_format.attr_len == 0) {
+ if (format_pos_nor.attr_len == 0) {
/* initialize vertex format */
- attr_id.pos = GPU_vertformat_attr_add(&shaded_triangles_format, "pos", GPU_COMP_F32, 3, GPU_FETCH_FLOAT);
- attr_id.nor = GPU_vertformat_attr_add(&shaded_triangles_format, "nor", GPU_COMP_F32, 3, GPU_FETCH_FLOAT);
- attr_id.uv = GPU_vertformat_attr_add(&shaded_triangles_format, "u", GPU_COMP_F32, 2, GPU_FETCH_FLOAT);
+ attr_id.pos = GPU_vertformat_attr_add(&format_pos_nor, "pos", GPU_COMP_F32, 3, GPU_FETCH_FLOAT);
+ attr_id.nor = GPU_vertformat_attr_add(&format_pos_nor, "nor", GPU_COMP_I10, 3, GPU_FETCH_INT_TO_FLOAT_UNIT);
+ /* UVs are in [0..1] range. We can compress them. */
+ attr_id.uv = GPU_vertformat_attr_add(&format_uv, "u", GPU_COMP_I16, 2, GPU_FETCH_INT_TO_FLOAT_UNIT);
}
- GPUBatch **shaded_triangles = MEM_mallocN(sizeof(*shaded_triangles) * gpumat_array_len, __func__);
-
- GPUVertBuf **vbo = BLI_array_alloca(vbo, gpumat_array_len);
- uint *vbo_len_capacity = BLI_array_alloca(vbo_len_capacity, gpumat_array_len);
+ int vbo_len_capacity = curve_render_surface_tri_len_get(lb) * 3;
- GPUVertBufRaw *pos_step, *nor_step, *uv_step;
- pos_step = BLI_array_alloca(pos_step, gpumat_array_len);
- nor_step = BLI_array_alloca(nor_step, gpumat_array_len);
- uv_step = BLI_array_alloca(uv_step, gpumat_array_len);
-
- /* Create each vertex buffer */
- for (int i = 0; i < gpumat_array_len; i++) {
- vbo[i] = GPU_vertbuf_create_with_format(&shaded_triangles_format);
- vbo_len_capacity[i] = 0;
- }
+ GPUVertBufRaw pos_step = {0};
+ GPUVertBufRaw nor_step = {0};
+ GPUVertBufRaw uv_step = {0};
- /* Calc `vbo_len_capacity` */
- for (const DispList *dl = lb->first; dl; dl = dl->next) {
- vbo_len_capacity[dl->col] += dl_tri_len(dl) * 3;
+ if (DRW_TEST_ASSIGN_VBO(vbo_pos_nor)) {
+ GPU_vertbuf_init_with_format(vbo_pos_nor, &format_pos_nor);
+ GPU_vertbuf_data_alloc(vbo_pos_nor, vbo_len_capacity);
+ GPU_vertbuf_attr_get_raw_data(vbo_pos_nor, attr_id.pos, &pos_step);
+ GPU_vertbuf_attr_get_raw_data(vbo_pos_nor, attr_id.nor, &nor_step);
}
-
- /* Alloc each vertex buffer and get each raw data */
- for (int i = 0; i < gpumat_array_len; i++) {
- GPU_vertbuf_data_alloc(vbo[i], vbo_len_capacity[i]);
- GPU_vertbuf_attr_get_raw_data(vbo[i], attr_id.pos, &pos_step[i]);
- GPU_vertbuf_attr_get_raw_data(vbo[i], attr_id.nor, &nor_step[i]);
- GPU_vertbuf_attr_get_raw_data(vbo[i], attr_id.uv, &uv_step[i]);
+ if (DRW_TEST_ASSIGN_VBO(vbo_uv)) {
+ GPU_vertbuf_init_with_format(vbo_uv, &format_uv);
+ GPU_vertbuf_data_alloc(vbo_uv, vbo_len_capacity);
+ GPU_vertbuf_attr_get_raw_data(vbo_uv, attr_id.uv, &uv_step);
}
BKE_displist_normals_add(lb);
for (const DispList *dl = lb->first; dl; dl = dl->next) {
if (ELEM(dl->type, DL_INDEX3, DL_INDEX4, DL_SURF)) {
- const int col = dl->col;
const float(*verts)[3] = (float(*)[3])dl->verts;
const float(*nors)[3] = (float(*)[3])dl->nors;
const int *idx = dl->index;
@@ -401,12 +427,14 @@ GPUBatch **DRW_displist_batch_calc_tri_pos_normals_and_uv_split_by_material(List
uv[0][1] = uv[1][1] = uv[2][1] = 0.0f;
const int i_end = dl->parts;
for (int i = 0; i < i_end; i++, idx += 3) {
- uv[0][0] = idx[0] / x_max;
- uv[1][0] = idx[2] / x_max;
- uv[2][0] = idx[1] / x_max;
+ if (vbo_uv) {
+ uv[0][0] = idx[0] / x_max;
+ uv[1][0] = idx[2] / x_max;
+ uv[2][0] = idx[1] / x_max;
+ }
- displist_vertbuf_attr_set_tri_pos_normals_and_uv(
- &pos_step[col], &nor_step[col], &uv_step[col],
+ displist_vertbuf_attr_set_tri_pos_nor_uv(
+ &pos_step, &nor_step, &uv_step,
verts[idx[0]], verts[idx[2]], verts[idx[1]],
dl->nors, dl->nors, dl->nors,
uv[0], uv[1], uv[2]);
@@ -440,39 +468,18 @@ GPUBatch **DRW_displist_batch_calc_tri_pos_normals_and_uv_split_by_material(List
}
for (; b < dl->nr; b++) {
- int orco_sizeu = dl->nr - 1;
- int orco_sizev = dl->parts - 1;
-
- /* exception as handled in convertblender.c too */
- if (dl->flag & DL_CYCL_U) {
- orco_sizeu++;
- }
- if (dl->flag & DL_CYCL_V) {
- orco_sizev++;
- }
-
- for (int i = 0; i < 4; i++) {
- /* find uv based on vertex index into grid array */
- uv[i][0] = (quad[i] / dl->nr) / (float)orco_sizev;
- uv[i][1] = (quad[i] % dl->nr) / (float)orco_sizeu;
-
- /* cyclic correction */
- if ((i == 1 || i == 2) && uv[i][0] == 0.0f) {
- uv[i][0] = 1.0f;
- }
- if ((i == 0 || i == 1) && uv[i][1] == 0.0f) {
- uv[i][1] = 1.0f;
- }
+ if (vbo_uv) {
+ surf_uv_quad(dl, quad, uv);
}
- displist_vertbuf_attr_set_tri_pos_normals_and_uv(
- &pos_step[col], &nor_step[col], &uv_step[col],
+ displist_vertbuf_attr_set_tri_pos_nor_uv(
+ &pos_step, &nor_step, &uv_step,
verts[quad[0]], verts[quad[1]], verts[quad[2]],
nors[quad[0]], nors[quad[1]], nors[quad[2]],
uv[0], uv[1], uv[2]);
- displist_vertbuf_attr_set_tri_pos_normals_and_uv(
- &pos_step[col], &nor_step[col], &uv_step[col],
+ displist_vertbuf_attr_set_tri_pos_nor_uv(
+ &pos_step, &nor_step, &uv_step,
verts[quad[0]], verts[quad[2]], verts[quad[3]],
nors[quad[0]], nors[quad[2]], nors[quad[3]],
uv[0], uv[2], uv[3]);
@@ -491,15 +498,15 @@ GPUBatch **DRW_displist_batch_calc_tri_pos_normals_and_uv_split_by_material(List
const int i_end = dl->parts;
for (int i = 0; i < i_end; i++, idx += 4) {
- displist_vertbuf_attr_set_tri_pos_normals_and_uv(
- &pos_step[col], &nor_step[col], &uv_step[col],
+ displist_vertbuf_attr_set_tri_pos_nor_uv(
+ &pos_step, &nor_step, &uv_step,
verts[idx[0]], verts[idx[1]], verts[idx[2]],
nors[idx[0]], nors[idx[1]], nors[idx[2]],
uv[0], uv[1], uv[2]);
if (idx[2] != idx[3]) {
- displist_vertbuf_attr_set_tri_pos_normals_and_uv(
- &pos_step[col], &nor_step[col], &uv_step[col],
+ displist_vertbuf_attr_set_tri_pos_nor_uv(
+ &pos_step, &nor_step, &uv_step,
verts[idx[0]], verts[idx[2]], verts[idx[3]],
nors[idx[0]], nors[idx[2]], nors[idx[3]],
uv[0], uv[2], uv[3]);
@@ -508,14 +515,12 @@ GPUBatch **DRW_displist_batch_calc_tri_pos_normals_and_uv_split_by_material(List
}
}
}
-
- for (int i = 0; i < gpumat_array_len; i++) {
- uint vbo_len_used = GPU_vertbuf_raw_used(&pos_step[i]);
- if (vbo_len_capacity[i] != vbo_len_used) {
- GPU_vertbuf_data_resize(vbo[i], vbo_len_used);
- }
- shaded_triangles[i] = GPU_batch_create_ex(GPU_PRIM_TRIS, vbo[i], NULL, GPU_BATCH_OWNS_VBO);
+#ifdef DEBUG
+ if (pos_step.size != 0) {
+ BLI_assert(vbo_len_capacity == GPU_vertbuf_raw_used(&pos_step));
}
-
- return shaded_triangles;
+ if (uv_step.size != 0) {
+ BLI_assert(vbo_len_capacity == GPU_vertbuf_raw_used(&uv_step));
+ }
+#endif
}
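The new DEBUG asserts encode an exact-capacity invariant: every DispList triangle contributes exactly three tessellated vertices, so the up-front allocation must be fully consumed. In sketch form, assuming curve_render_surface_tri_len_get() is itself the sum of dl_tri_len() over the list (dl_tri_len as used by the removed per-material sizing code):

	int expected = 0;
	for (const DispList *dl = lb->first; dl; dl = dl->next) {
		expected += dl_tri_len(dl) * 3;
	}
	/* expected == curve_render_surface_tri_len_get(lb) * 3 == vbo_len_capacity */
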
diff --git a/source/blender/draw/intern/draw_cache_impl_metaball.c b/source/blender/draw/intern/draw_cache_impl_metaball.c
index 304d93f465c..f406ad2380a 100644
--- a/source/blender/draw/intern/draw_cache_impl_metaball.c
+++ b/source/blender/draw/intern/draw_cache_impl_metaball.c
@@ -143,7 +143,8 @@ static GPUVertBuf *mball_batch_cache_get_pos_and_normals(Object *ob, MetaBallBat
{
if (cache->pos_nor_in_order == NULL) {
ListBase *lb = &ob->runtime.curve_cache->disp;
- cache->pos_nor_in_order = DRW_displist_vertbuf_calc_pos_with_normals(lb, NULL);
+ cache->pos_nor_in_order = MEM_callocN(sizeof(GPUVertBuf), __func__);
+ DRW_displist_vertbuf_create_pos_and_nor(lb, cache->pos_nor_in_order);
}
return cache->pos_nor_in_order;
}
@@ -164,10 +165,12 @@ GPUBatch *DRW_metaball_batch_cache_get_triangles_with_normals(Object *ob)
if (cache->batch == NULL) {
ListBase *lb = &ob->runtime.curve_cache->disp;
+ GPUIndexBuf *ibo = MEM_callocN(sizeof(GPUIndexBuf), __func__);
+ DRW_displist_indexbuf_create_triangles_in_order(lb, ibo);
cache->batch = GPU_batch_create_ex(
GPU_PRIM_TRIS,
mball_batch_cache_get_pos_and_normals(ob, cache),
- DRW_displist_indexbuf_calc_triangles_in_order(lb, NULL),
+ ibo,
GPU_BATCH_OWNS_INDEX);
}
@@ -204,7 +207,9 @@ GPUBatch *DRW_metaball_batch_cache_get_wireframes_face(Object *ob)
if (cache->face_wire.batch == NULL) {
ListBase *lb = &ob->runtime.curve_cache->disp;
- cache->face_wire.batch = DRW_displist_create_edges_overlay_batch(lb, NULL);
+ GPUVertBuf *vbo = MEM_callocN(sizeof(GPUVertBuf), __func__);
+ DRW_displist_vertbuf_create_wireframe_data_tess(lb, vbo);
+ cache->face_wire.batch = GPU_batch_create_ex(GPU_PRIM_TRIS, vbo, NULL, GPU_BATCH_OWNS_VBO);
}
return cache->face_wire.batch;
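
Note the ownership flags in these metaball hunks: GPU_BATCH_OWNS_VBO and GPU_BATCH_OWNS_INDEX hand the caller-allocated buffers over to the batch, so the MEM_callocN'd shells above are freed together with the batch rather than leaked.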