git.blender.org/blender.git
author     Clément Foucault <foucault.clem@gmail.com>  2018-12-14 02:42:16 +0300
committer  Clément Foucault <foucault.clem@gmail.com>  2018-12-14 18:17:29 +0300
commit     18d056601303b96fcc934c639421e1fd59b36b63 (patch)
tree       672bd9c367be9273d97e3ca124dc51afd76e59ed /source/blender/draw/intern/draw_cache_impl_displist.c
parent     c09913e9ddc2d93894549923a90ee471c3a368db (diff)
Curve Batch Cache: Add back support for shaded geometry
This changes the batch data structure a bit. Instead of using one VBO per material, we now use a single VBO for all materials and use index buffers to select the correct triangles. This is less optimized than before, but it has the potential to become more optimized by merging the wireframe data VBO into the shading one. Also, the index buffers are not strictly necessary and could just be ranges inside the buffer, but that requires adding more functionality to GPUIndexBuf.
Diffstat (limited to 'source/blender/draw/intern/draw_cache_impl_displist.c')
-rw-r--r--  source/blender/draw/intern/draw_cache_impl_displist.c | 275
1 file changed, 140 insertions(+), 135 deletions(-)
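To make the data-flow change concrete before the diff, here is a minimal sketch of the new layout, assuming Blender's GPU module API as used in this commit: one vertex buffer is shared by all materials, and each material gets its own index buffer selecting its triangles. The triangle count, the per-triangle material lookup, and the function name are illustrative assumptions, not code from this commit.

/* Hypothetical sketch: split shared-VBO triangles into one IBO per material. */
#include "BLI_alloca.h"        /* BLI_array_alloca */
#include "BLI_sys_types.h"     /* uint */
#include "GPU_index_buffer.h"  /* GPUIndexBuf, GPUIndexBufBuilder */

void example_split_tris_by_material(GPUIndexBuf **ibo_mats, uint mat_len)
{
	const uint tri_len = 128;  /* assumed total triangle count of the shared VBO */
	GPUIndexBufBuilder *elb = BLI_array_alloca(elb, mat_len);

	/* One builder per material, all indexing into the same shared vertex buffer. */
	for (uint i = 0; i < mat_len; i++) {
		GPU_indexbuf_init(&elb[i], GPU_PRIM_TRIS, tri_len, tri_len * 3);
	}

	/* Route each triangle's vertex indices into the index buffer of its material. */
	for (uint t = 0; t < tri_len; t++) {
		const uint mat = t % mat_len;  /* stand-in for the real per-triangle material */
		const uint v = t * 3;          /* triangles stored contiguously in the shared VBO */
		GPU_indexbuf_add_tri_verts(&elb[mat], v + 0, v + 1, v + 2);
	}

	/* Fill the caller-provided index buffers in place instead of returning new ones. */
	for (uint i = 0; i < mat_len; i++) {
		GPU_indexbuf_build_in_place(&elb[i], ibo_mats[i]);
	}
}

The diff below applies this pattern to the curve display-list cache and, at the same time, switches every builder function from allocating and returning buffers to filling buffers provided by the caller.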
diff --git a/source/blender/draw/intern/draw_cache_impl_displist.c b/source/blender/draw/intern/draw_cache_impl_displist.c
index 4e25fc692be..95a5cc6edbf 100644
--- a/source/blender/draw/intern/draw_cache_impl_displist.c
+++ b/source/blender/draw/intern/draw_cache_impl_displist.c
@@ -125,7 +125,7 @@ static void displist_indexbufbuilder_set(
}
}
-GPUVertBuf *DRW_displist_vertbuf_calc_pos_with_normals(ListBase *lb, GPUVertBuf *vbo)
+void DRW_displist_vertbuf_create_pos_and_nor(ListBase *lb, GPUVertBuf *vbo)
{
static GPUVertFormat format = { 0 };
static struct { uint pos, nor; } attr_id;
@@ -135,12 +135,7 @@ GPUVertBuf *DRW_displist_vertbuf_calc_pos_with_normals(ListBase *lb, GPUVertBuf
attr_id.nor = GPU_vertformat_attr_add(&format, "nor", GPU_COMP_I16, 4, GPU_FETCH_INT_TO_FLOAT_UNIT);
}
- if (vbo == NULL) {
- vbo = GPU_vertbuf_create_with_format(&format);
- }
- else {
- GPU_vertbuf_init_with_format(vbo, &format);
- }
+ GPU_vertbuf_init_with_format(vbo, &format);
GPU_vertbuf_data_alloc(vbo, curve_render_surface_vert_len_get(lb));
BKE_displist_normals_add(lb);
@@ -167,11 +162,9 @@ GPUVertBuf *DRW_displist_vertbuf_calc_pos_with_normals(ListBase *lb, GPUVertBuf
}
}
}
-
- return vbo;
}
-GPUIndexBuf *DRW_displist_indexbuf_calc_triangles_in_order(ListBase *lb, GPUIndexBuf *ibo)
+void DRW_displist_indexbuf_create_triangles_in_order(ListBase *lb, GPUIndexBuf *ibo)
{
const int tri_len = curve_render_surface_tri_len_get(lb);
const int vert_len = curve_render_surface_vert_len_get(lb);
@@ -187,45 +180,67 @@ GPUIndexBuf *DRW_displist_indexbuf_calc_triangles_in_order(ListBase *lb, GPUInde
ofs += dl_vert_len(dl);
}
- if (ibo != NULL) {
- GPU_indexbuf_build_in_place(&elb, ibo);
- }
- else {
- ibo = GPU_indexbuf_build(&elb);
- }
- return ibo;
+ GPU_indexbuf_build_in_place(&elb, ibo);
}
-GPUIndexBuf **DRW_displist_indexbuf_calc_triangles_in_order_split_by_material(ListBase *lb, uint gpumat_array_len)
+void DRW_displist_indexbuf_create_triangles_tess_split_by_material(
+ ListBase *lb,
+ GPUIndexBuf **ibo_mats, uint mat_len)
{
- GPUIndexBuf **shaded_triangles_in_order = MEM_callocN(
- sizeof(*shaded_triangles_in_order) * gpumat_array_len, __func__);
- GPUIndexBufBuilder *elb = BLI_array_alloca(elb, gpumat_array_len);
+ GPUIndexBufBuilder *elb = BLI_array_alloca(elb, mat_len);
const int tri_len = curve_render_surface_tri_len_get(lb);
- const int vert_len = curve_render_surface_vert_len_get(lb);
- int i;
/* Init each index buffer builder */
- for (i = 0; i < gpumat_array_len; i++) {
- GPU_indexbuf_init(&elb[i], GPU_PRIM_TRIS, tri_len, vert_len);
+ for (int i = 0; i < mat_len; i++) {
+ GPU_indexbuf_init(&elb[i], GPU_PRIM_TRIS, tri_len * 3, tri_len * 3);
}
/* calc each index buffer builder */
- int ofs = 0;
+ uint v_idx = 0;
for (const DispList *dl = lb->first; dl; dl = dl->next) {
- displist_indexbufbuilder_set((setTriIndicesFn *)GPU_indexbuf_add_tri_verts,
- (setTriIndicesFn *)GPU_indexbuf_add_tri_verts,
- &elb[dl->col], dl, ofs);
- ofs += dl_vert_len(dl);
+ if (ELEM(dl->type, DL_INDEX3, DL_INDEX4, DL_SURF)) {
+ GPUIndexBufBuilder *elem = &elb[dl->col];
+
+ if (dl->type == DL_INDEX3) {
+ for (int i = 0; i < dl->parts; i++) {
+ GPU_indexbuf_add_tri_verts(elem, v_idx + 0, v_idx + 1, v_idx + 2);
+ v_idx += 3;
+ }
+ }
+ else if (dl->type == DL_SURF) {
+ for (int a = 0; a < dl->parts; a++) {
+ if ((dl->flag & DL_CYCL_V) == 0 && a == dl->parts - 1) {
+ break;
+ }
+ int b = (dl->flag & DL_CYCL_U) ? 0 : 1;
+ for (; b < dl->nr; b++) {
+ /* TODO(fclem) reuse verts in a quad at least. */
+ GPU_indexbuf_add_tri_verts(elem, v_idx + 0, v_idx + 1, v_idx + 2);
+ GPU_indexbuf_add_tri_verts(elem, v_idx + 3, v_idx + 4, v_idx + 5);
+ v_idx += 6;
+ }
+ }
+ }
+ else {
+ BLI_assert(dl->type == DL_INDEX4);
+ const int *idx = dl->index;
+ for (int i = 0; i < dl->parts; i++, idx += 4) {
+ GPU_indexbuf_add_tri_verts(elem, v_idx + 0, v_idx + 1, v_idx + 2);
+ v_idx += 3;
+ if (idx[2] != idx[3]) {
+ GPU_indexbuf_add_tri_verts(elem, v_idx + 0, v_idx + 1, v_idx + 2);
+ v_idx += 3;
+ }
+ }
+ }
+ }
}
/* build each indexbuf */
- for (i = 0; i < gpumat_array_len; i++) {
- shaded_triangles_in_order[i] = GPU_indexbuf_build(&elb[i]);
+ for (int i = 0; i < mat_len; i++) {
+ GPU_indexbuf_build_in_place(&elb[i], ibo_mats[i]);
}
-
- return shaded_triangles_in_order;
}
typedef struct DRWDisplistWireThunk {
@@ -278,7 +293,8 @@ static void set_overlay_wires_quad_tri_indices(void *thunk, uint v1, uint v2, ui
}
}
-GPUBatch *DRW_displist_create_edges_overlay_batch(ListBase *lb, GPUVertBuf *vbo)
+/* TODO reuse the position and normals from other tessellation vertbuf. */
+void DRW_displist_vertbuf_create_wireframe_data_tess(ListBase *lb, GPUVertBuf *vbo)
{
static DRWDisplistWireThunk thunk;
static GPUVertFormat format = {0};
@@ -289,13 +305,8 @@ GPUBatch *DRW_displist_create_edges_overlay_batch(ListBase *lb, GPUVertBuf *vbo)
GPU_vertformat_triple_load(&format);
}
- if (vbo == NULL) {
- thunk.vbo = GPU_vertbuf_create_with_format(&format);
- }
- else {
- GPU_vertbuf_init_with_format(vbo, &format);
- thunk.vbo = vbo;
- }
+ GPU_vertbuf_init_with_format(vbo, &format);
+ thunk.vbo = vbo;
int vert_len = curve_render_surface_tri_len_get(lb) * 3;
GPU_vertbuf_data_alloc(thunk.vbo, vert_len);
@@ -316,81 +327,96 @@ GPUBatch *DRW_displist_create_edges_overlay_batch(ListBase *lb, GPUVertBuf *vbo)
if (thunk.vidx < vert_len) {
GPU_vertbuf_data_resize(thunk.vbo, thunk.vidx);
}
+}
- if (vbo == NULL) {
- return GPU_batch_create_ex(GPU_PRIM_TRIS, thunk.vbo, NULL, GPU_BATCH_OWNS_VBO);
+static void surf_uv_quad(const DispList *dl, const uint quad[4], float r_uv[4][2])
+{
+ int orco_sizeu = dl->nr - 1;
+ int orco_sizev = dl->parts - 1;
+
+ /* exception as handled in convertblender.c too */
+ if (dl->flag & DL_CYCL_U) {
+ orco_sizeu++;
}
- else {
- return NULL;
+ if (dl->flag & DL_CYCL_V) {
+ orco_sizev++;
}
-}
+ for (int i = 0; i < 4; i++) {
+ /* find uv based on vertex index into grid array */
+ r_uv[i][0] = (quad[i] / dl->nr) / (float)orco_sizev;
+ r_uv[i][1] = (quad[i] % dl->nr) / (float)orco_sizeu;
-static void displist_vertbuf_attr_set_tri_pos_normals_and_uv(
+ /* cyclic correction */
+ if ((i == 1 || i == 2) && r_uv[i][0] == 0.0f) {
+ r_uv[i][0] = 1.0f;
+ }
+ if ((i == 0 || i == 1) && r_uv[i][1] == 0.0f) {
+ r_uv[i][1] = 1.0f;
+ }
+ }
+}
+
+static void displist_vertbuf_attr_set_tri_pos_nor_uv(
GPUVertBufRaw *pos_step, GPUVertBufRaw *nor_step, GPUVertBufRaw *uv_step,
const float v1[3], const float v2[3], const float v3[3],
const float n1[3], const float n2[3], const float n3[3],
const float uv1[2], const float uv2[2], const float uv3[2])
{
- copy_v3_v3(GPU_vertbuf_raw_step(pos_step), v1);
- copy_v3_v3(GPU_vertbuf_raw_step(nor_step), n1);
- copy_v2_v2(GPU_vertbuf_raw_step(uv_step), uv1);
-
- copy_v3_v3(GPU_vertbuf_raw_step(pos_step), v2);
- copy_v3_v3(GPU_vertbuf_raw_step(nor_step), n2);
- copy_v2_v2(GPU_vertbuf_raw_step(uv_step), uv2);
+ if (pos_step->size != 0) {
+ copy_v3_v3(GPU_vertbuf_raw_step(pos_step), v1);
+ copy_v3_v3(GPU_vertbuf_raw_step(pos_step), v2);
+ copy_v3_v3(GPU_vertbuf_raw_step(pos_step), v3);
+
+ *(GPUPackedNormal *)GPU_vertbuf_raw_step(nor_step) = GPU_normal_convert_i10_v3(n1);
+ *(GPUPackedNormal *)GPU_vertbuf_raw_step(nor_step) = GPU_normal_convert_i10_v3(n2);
+ *(GPUPackedNormal *)GPU_vertbuf_raw_step(nor_step) = GPU_normal_convert_i10_v3(n3);
+ }
- copy_v3_v3(GPU_vertbuf_raw_step(pos_step), v3);
- copy_v3_v3(GPU_vertbuf_raw_step(nor_step), n3);
- copy_v2_v2(GPU_vertbuf_raw_step(uv_step), uv3);
+ if (uv_step->size != 0) {
+ normal_float_to_short_v2(GPU_vertbuf_raw_step(uv_step), uv1);
+ normal_float_to_short_v2(GPU_vertbuf_raw_step(uv_step), uv2);
+ normal_float_to_short_v2(GPU_vertbuf_raw_step(uv_step), uv3);
+ }
}
-GPUBatch **DRW_displist_batch_calc_tri_pos_normals_and_uv_split_by_material(ListBase *lb, uint gpumat_array_len)
+void DRW_displist_vertbuf_create_pos_and_nor_and_uv_tess(
+ ListBase *lb,
+ GPUVertBuf *vbo_pos_nor, GPUVertBuf *vbo_uv)
{
- static GPUVertFormat shaded_triangles_format = { 0 };
+ static GPUVertFormat format_pos_nor = { 0 };
+ static GPUVertFormat format_uv = { 0 };
static struct { uint pos, nor, uv; } attr_id;
-
- if (shaded_triangles_format.attr_len == 0) {
+ if (format_pos_nor.attr_len == 0) {
/* initialize vertex format */
- attr_id.pos = GPU_vertformat_attr_add(&shaded_triangles_format, "pos", GPU_COMP_F32, 3, GPU_FETCH_FLOAT);
- attr_id.nor = GPU_vertformat_attr_add(&shaded_triangles_format, "nor", GPU_COMP_F32, 3, GPU_FETCH_FLOAT);
- attr_id.uv = GPU_vertformat_attr_add(&shaded_triangles_format, "u", GPU_COMP_F32, 2, GPU_FETCH_FLOAT);
+ attr_id.pos = GPU_vertformat_attr_add(&format_pos_nor, "pos", GPU_COMP_F32, 3, GPU_FETCH_FLOAT);
+ attr_id.nor = GPU_vertformat_attr_add(&format_pos_nor, "nor", GPU_COMP_I10, 3, GPU_FETCH_INT_TO_FLOAT_UNIT);
+ /* UVs are in [0..1] range. We can compress them. */
+ attr_id.uv = GPU_vertformat_attr_add(&format_uv, "u", GPU_COMP_I16, 2, GPU_FETCH_INT_TO_FLOAT_UNIT);
}
- GPUBatch **shaded_triangles = MEM_mallocN(sizeof(*shaded_triangles) * gpumat_array_len, __func__);
-
- GPUVertBuf **vbo = BLI_array_alloca(vbo, gpumat_array_len);
- uint *vbo_len_capacity = BLI_array_alloca(vbo_len_capacity, gpumat_array_len);
+ int vbo_len_capacity = curve_render_surface_tri_len_get(lb) * 3;
- GPUVertBufRaw *pos_step, *nor_step, *uv_step;
- pos_step = BLI_array_alloca(pos_step, gpumat_array_len);
- nor_step = BLI_array_alloca(nor_step, gpumat_array_len);
- uv_step = BLI_array_alloca(uv_step, gpumat_array_len);
-
- /* Create each vertex buffer */
- for (int i = 0; i < gpumat_array_len; i++) {
- vbo[i] = GPU_vertbuf_create_with_format(&shaded_triangles_format);
- vbo_len_capacity[i] = 0;
- }
+ GPUVertBufRaw pos_step = {0};
+ GPUVertBufRaw nor_step = {0};
+ GPUVertBufRaw uv_step = {0};
- /* Calc `vbo_len_capacity` */
- for (const DispList *dl = lb->first; dl; dl = dl->next) {
- vbo_len_capacity[dl->col] += dl_tri_len(dl) * 3;
+ if (DRW_TEST_ASSIGN_VBO(vbo_pos_nor)) {
+ GPU_vertbuf_init_with_format(vbo_pos_nor, &format_pos_nor);
+ GPU_vertbuf_data_alloc(vbo_pos_nor, vbo_len_capacity);
+ GPU_vertbuf_attr_get_raw_data(vbo_pos_nor, attr_id.pos, &pos_step);
+ GPU_vertbuf_attr_get_raw_data(vbo_pos_nor, attr_id.nor, &nor_step);
}
-
- /* Alloc each vertex buffer and get each raw data */
- for (int i = 0; i < gpumat_array_len; i++) {
- GPU_vertbuf_data_alloc(vbo[i], vbo_len_capacity[i]);
- GPU_vertbuf_attr_get_raw_data(vbo[i], attr_id.pos, &pos_step[i]);
- GPU_vertbuf_attr_get_raw_data(vbo[i], attr_id.nor, &nor_step[i]);
- GPU_vertbuf_attr_get_raw_data(vbo[i], attr_id.uv, &uv_step[i]);
+ if (DRW_TEST_ASSIGN_VBO(vbo_uv)) {
+ GPU_vertbuf_init_with_format(vbo_uv, &format_uv);
+ GPU_vertbuf_data_alloc(vbo_uv, vbo_len_capacity);
+ GPU_vertbuf_attr_get_raw_data(vbo_uv, attr_id.uv, &uv_step);
}
BKE_displist_normals_add(lb);
for (const DispList *dl = lb->first; dl; dl = dl->next) {
if (ELEM(dl->type, DL_INDEX3, DL_INDEX4, DL_SURF)) {
- const int col = dl->col;
const float(*verts)[3] = (float(*)[3])dl->verts;
const float(*nors)[3] = (float(*)[3])dl->nors;
const int *idx = dl->index;
@@ -401,12 +427,14 @@ GPUBatch **DRW_displist_batch_calc_tri_pos_normals_and_uv_split_by_material(List
uv[0][1] = uv[1][1] = uv[2][1] = 0.0f;
const int i_end = dl->parts;
for (int i = 0; i < i_end; i++, idx += 3) {
- uv[0][0] = idx[0] / x_max;
- uv[1][0] = idx[2] / x_max;
- uv[2][0] = idx[1] / x_max;
+ if (vbo_uv) {
+ uv[0][0] = idx[0] / x_max;
+ uv[1][0] = idx[2] / x_max;
+ uv[2][0] = idx[1] / x_max;
+ }
- displist_vertbuf_attr_set_tri_pos_normals_and_uv(
- &pos_step[col], &nor_step[col], &uv_step[col],
+ displist_vertbuf_attr_set_tri_pos_nor_uv(
+ &pos_step, &nor_step, &uv_step,
verts[idx[0]], verts[idx[2]], verts[idx[1]],
dl->nors, dl->nors, dl->nors,
uv[0], uv[1], uv[2]);
@@ -440,39 +468,18 @@ GPUBatch **DRW_displist_batch_calc_tri_pos_normals_and_uv_split_by_material(List
}
for (; b < dl->nr; b++) {
- int orco_sizeu = dl->nr - 1;
- int orco_sizev = dl->parts - 1;
-
- /* exception as handled in convertblender.c too */
- if (dl->flag & DL_CYCL_U) {
- orco_sizeu++;
- }
- if (dl->flag & DL_CYCL_V) {
- orco_sizev++;
- }
-
- for (int i = 0; i < 4; i++) {
- /* find uv based on vertex index into grid array */
- uv[i][0] = (quad[i] / dl->nr) / (float)orco_sizev;
- uv[i][1] = (quad[i] % dl->nr) / (float)orco_sizeu;
-
- /* cyclic correction */
- if ((i == 1 || i == 2) && uv[i][0] == 0.0f) {
- uv[i][0] = 1.0f;
- }
- if ((i == 0 || i == 1) && uv[i][1] == 0.0f) {
- uv[i][1] = 1.0f;
- }
+ if (vbo_uv) {
+ surf_uv_quad(dl, quad, uv);
}
- displist_vertbuf_attr_set_tri_pos_normals_and_uv(
- &pos_step[col], &nor_step[col], &uv_step[col],
+ displist_vertbuf_attr_set_tri_pos_nor_uv(
+ &pos_step, &nor_step, &uv_step,
verts[quad[0]], verts[quad[1]], verts[quad[2]],
nors[quad[0]], nors[quad[1]], nors[quad[2]],
uv[0], uv[1], uv[2]);
- displist_vertbuf_attr_set_tri_pos_normals_and_uv(
- &pos_step[col], &nor_step[col], &uv_step[col],
+ displist_vertbuf_attr_set_tri_pos_nor_uv(
+ &pos_step, &nor_step, &uv_step,
verts[quad[0]], verts[quad[2]], verts[quad[3]],
nors[quad[0]], nors[quad[2]], nors[quad[3]],
uv[0], uv[2], uv[3]);
@@ -491,15 +498,15 @@ GPUBatch **DRW_displist_batch_calc_tri_pos_normals_and_uv_split_by_material(List
const int i_end = dl->parts;
for (int i = 0; i < i_end; i++, idx += 4) {
- displist_vertbuf_attr_set_tri_pos_normals_and_uv(
- &pos_step[col], &nor_step[col], &uv_step[col],
+ displist_vertbuf_attr_set_tri_pos_nor_uv(
+ &pos_step, &nor_step, &uv_step,
verts[idx[0]], verts[idx[1]], verts[idx[2]],
nors[idx[0]], nors[idx[1]], nors[idx[2]],
uv[0], uv[1], uv[2]);
if (idx[2] != idx[3]) {
- displist_vertbuf_attr_set_tri_pos_normals_and_uv(
- &pos_step[col], &nor_step[col], &uv_step[col],
+ displist_vertbuf_attr_set_tri_pos_nor_uv(
+ &pos_step, &nor_step, &uv_step,
verts[idx[0]], verts[idx[2]], verts[idx[3]],
nors[idx[0]], nors[idx[2]], nors[idx[3]],
uv[0], uv[2], uv[3]);
@@ -508,14 +515,12 @@ GPUBatch **DRW_displist_batch_calc_tri_pos_normals_and_uv_split_by_material(List
}
}
}
-
- for (int i = 0; i < gpumat_array_len; i++) {
- uint vbo_len_used = GPU_vertbuf_raw_used(&pos_step[i]);
- if (vbo_len_capacity[i] != vbo_len_used) {
- GPU_vertbuf_data_resize(vbo[i], vbo_len_used);
- }
- shaded_triangles[i] = GPU_batch_create_ex(GPU_PRIM_TRIS, vbo[i], NULL, GPU_BATCH_OWNS_VBO);
+#ifdef DEBUG
+ if (pos_step.size != 0) {
+ BLI_assert(vbo_len_capacity == GPU_vertbuf_raw_used(&pos_step));
}
-
- return shaded_triangles;
+ if (uv_step.size != 0) {
+ BLI_assert(vbo_len_capacity == GPU_vertbuf_raw_used(&uv_step));
+ }
+#endif
}
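For orientation, a rough caller-side sketch of how the signature change plays out: the old functions created and returned buffers (or batches), while the new DRW_displist_*_create_* functions only fill buffers the caller already owns. The cache struct and function below are illustrative stand-ins, not the real curve batch cache code; the headers are assumed.

/* Hypothetical caller; ExampleCache is illustrative, not the real CurveBatchCache. */
#include "DNA_listBase.h"      /* ListBase */
#include "GPU_batch.h"         /* GPUVertBuf, GPUIndexBuf */
#include "draw_cache_impl.h"   /* assumed to declare the DRW_displist_* functions */

struct ExampleCache {
	GPUVertBuf *pos_nor;    /* created and owned by the caller's cache elsewhere */
	GPUIndexBuf *surf_tris; /* created and owned by the caller's cache elsewhere */
};

static void example_fill_cache(ListBase *lb, struct ExampleCache *cache)
{
	/* Old style (removed by this commit): the function allocated and returned the buffer:
	 *   cache->pos_nor = DRW_displist_vertbuf_calc_pos_with_normals(lb, NULL);
	 * New style: the caller provides the buffers and the functions fill them in place. */
	DRW_displist_vertbuf_create_pos_and_nor(lb, cache->pos_nor);
	DRW_displist_indexbuf_create_triangles_in_order(lb, cache->surf_tris);
}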