git.blender.org/blender.git

Diffstat (limited to 'source/blender/draw/intern/draw_cache_impl_displist.c')
-rw-r--r--  source/blender/draw/intern/draw_cache_impl_displist.c  |  505
1 file changed, 0 insertions(+), 505 deletions(-)
diff --git a/source/blender/draw/intern/draw_cache_impl_displist.c b/source/blender/draw/intern/draw_cache_impl_displist.c
index ee3d16b2830..96c088c3ee9 100644
--- a/source/blender/draw/intern/draw_cache_impl_displist.c
+++ b/source/blender/draw/intern/draw_cache_impl_displist.c
@@ -9,7 +9,6 @@
* \note DispList may be removed soon! This is a utility for object types that use render.
*/
-#include "BLI_alloca.h"
#include "BLI_edgehash.h"
#include "BLI_listbase.h"
#include "BLI_math_vector.h"
@@ -19,7 +18,6 @@
#include "DNA_scene_types.h"
#include "BKE_displist.h"
-#include "BKE_displist_tangent.h"
#include "GPU_batch.h"
#include "GPU_capabilities.h"
@@ -112,53 +110,6 @@ static void displist_indexbufbuilder_set(
}
}
-static int displist_indexbufbuilder_tess_set(
- SetTriIndicesFn *set_tri_indices,
- SetTriIndicesFn *set_quad_tri_indices, /* meh, find a better solution. */
- void *thunk,
- const DispList *dl,
- const int ofs)
-{
- int v_idx = ofs;
- if (ELEM(dl->type, DL_INDEX3, DL_INDEX4, DL_SURF)) {
- if (dl->type == DL_INDEX3) {
- for (int i = 0; i < dl->parts; i++) {
- set_tri_indices(thunk, v_idx + 0, v_idx + 1, v_idx + 2);
- v_idx += 3;
- }
- }
- else if (dl->type == DL_SURF) {
- for (int a = 0; a < dl->parts; a++) {
- if ((dl->flag & DL_CYCL_V) == 0 && a == dl->parts - 1) {
- break;
- }
- int b = (dl->flag & DL_CYCL_U) ? 0 : 1;
- for (; b < dl->nr; b++) {
- set_quad_tri_indices(thunk, v_idx + 0, v_idx + 1, v_idx + 2);
- set_quad_tri_indices(thunk, v_idx + 3, v_idx + 4, v_idx + 5);
- v_idx += 6;
- }
- }
- }
- else {
- BLI_assert(dl->type == DL_INDEX4);
- const int *idx = dl->index;
- for (int i = 0; i < dl->parts; i++, idx += 4) {
- if (idx[2] != idx[3]) {
- set_quad_tri_indices(thunk, v_idx + 0, v_idx + 1, v_idx + 2);
- set_quad_tri_indices(thunk, v_idx + 3, v_idx + 4, v_idx + 5);
- v_idx += 6;
- }
- else {
- set_tri_indices(thunk, v_idx + 0, v_idx + 1, v_idx + 2);
- v_idx += 3;
- }
- }
- }
- }
- return v_idx;
-}
-
void DRW_displist_vertbuf_create_pos_and_nor(ListBase *lb, GPUVertBuf *vbo, const Scene *scene)
{
const bool do_hq_normals = (scene->r.perf_flag & SCE_PERF_HQ_NORMALS) != 0 ||
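
The branching in the removed displist_indexbufbuilder_tess_set() maps one-to-one onto a triangle count. A minimal sketch mirroring those branches (displist_tri_len is a hypothetical helper, not a Blender function):

    /* How many triangles displist_indexbufbuilder_tess_set() emitted
     * for one DispList; sketch only. */
    static int displist_tri_len(const DispList *dl)
    {
      if (dl->type == DL_INDEX3) {
        return dl->parts; /* One triangle per index triple. */
      }
      if (dl->type == DL_SURF) {
        /* Two triangles per grid quad; a cyclic direction adds one
         * wrap-around row or column of quads. */
        const int quads_u = (dl->flag & DL_CYCL_U) ? dl->nr : dl->nr - 1;
        const int quads_v = (dl->flag & DL_CYCL_V) ? dl->parts : dl->parts - 1;
        return quads_u * quads_v * 2;
      }
      if (dl->type == DL_INDEX4) {
        int tri_len = 0;
        const int *idx = dl->index;
        for (int i = 0; i < dl->parts; i++, idx += 4) {
          /* idx[2] == idx[3] marks a triangle, otherwise a quad. */
          tri_len += (idx[2] != idx[3]) ? 2 : 1;
        }
        return tri_len;
      }
      return 0;
    }
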
@@ -273,35 +224,6 @@ void DRW_displist_indexbuf_create_triangles_in_order(ListBase *lb, GPUIndexBuf *
GPU_indexbuf_build_in_place(&elb, ibo);
}
-void DRW_displist_indexbuf_create_triangles_loop_split_by_material(ListBase *lb,
- GPUIndexBuf **ibo_mats,
- uint mat_len)
-{
- GPUIndexBufBuilder *elb = BLI_array_alloca(elb, mat_len);
-
- const int tri_len = curve_render_surface_tri_len_get(lb);
-
- /* Init each index buffer builder */
- for (int i = 0; i < mat_len; i++) {
- GPU_indexbuf_init(&elb[i], GPU_PRIM_TRIS, tri_len * 3, tri_len * 3);
- }
-
- /* calc each index buffer builder */
- uint v_idx = 0;
- LISTBASE_FOREACH (const DispList *, dl, lb) {
- v_idx = displist_indexbufbuilder_tess_set((SetTriIndicesFn *)GPU_indexbuf_add_tri_verts,
- (SetTriIndicesFn *)GPU_indexbuf_add_tri_verts,
- &elb[dl->col],
- dl,
- v_idx);
- }
-
- /* build each indexbuf */
- for (int i = 0; i < mat_len; i++) {
- GPU_indexbuf_build_in_place(&elb[i], ibo_mats[i]);
- }
-}
-
static void set_overlay_wires_tri_indices(void *thunk, uint v1, uint v2, uint v3)
{
GPUIndexBufBuilder *eld = (GPUIndexBufBuilder *)thunk;
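
Removing the function above also removed the file's only BLI_array_alloca() call, which is why the BLI_alloca.h include could go in the first hunk. Roughly (simplified sketch from memory; see BLI_alloca.h for the real macro), it stack-allocates a runtime-sized array that is freed automatically on return:

    /* Approximate shape of the macro the removed code relied on. */
    #define BLI_array_alloca(arr, realsize) \
      alloca(sizeof(*(arr)) * (realsize))

So `GPUIndexBufBuilder *elb = BLI_array_alloca(elb, mat_len);` gave one index-buffer builder per material without a heap allocation, and `&elb[dl->col]` routed each DispList's triangles to its material's buffer.
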
@@ -335,433 +257,6 @@ void DRW_displist_indexbuf_create_lines_in_order(ListBase *lb, GPUIndexBuf *ibo)
GPU_indexbuf_build_in_place(&elb, ibo);
}
-static void surf_uv_quad(const DispList *dl, const uint quad[4], float r_uv[4][2])
-{
- int orco_sizeu = dl->nr - 1;
- int orco_sizev = dl->parts - 1;
-
- /* exception as handled in convertblender.c too */
- if (dl->flag & DL_CYCL_U) {
- orco_sizeu++;
- }
- if (dl->flag & DL_CYCL_V) {
- orco_sizev++;
- }
-
- for (int i = 0; i < 4; i++) {
- /* NOTE: For some reason the shading U and V are swapped compared to the
- * one described in the surface format. */
- /* find uv based on vertex index into grid array */
- r_uv[i][0] = (quad[i] / dl->nr) / (float)orco_sizev;
- r_uv[i][1] = (quad[i] % dl->nr) / (float)orco_sizeu;
-
- /* cyclic correction */
- if (ELEM(i, 1, 2) && r_uv[i][0] == 0.0f) {
- r_uv[i][0] = 1.0f;
- }
- if (ELEM(i, 0, 1) && r_uv[i][1] == 0.0f) {
- r_uv[i][1] = 1.0f;
- }
- }
-}
-
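
A worked example of the mapping above, with hypothetical numbers: take a non-cyclic grid with dl->nr = 3 and dl->parts = 2, so orco_sizeu = 2 and orco_sizev = 1. Vertex index 4 sits at row 4 / 3 = 1 and column 4 % 3 = 1, giving:

    /* r_uv[i][0] = row / orco_sizev    = 1 / 1.0f = 1.0f
     * r_uv[i][1] = column / orco_sizeu = 1 / 2.0f = 0.5f */

The ELEM() fix-ups only matter on cyclic grids: a wrapped quad corner recomputes to row or column 0, so its coordinate comes out 0.0f where the seam needs 1.0f.
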
-static void displist_vertbuf_attr_set_nor(GPUVertBufRaw *step,
- const GPUNormal *n1,
- const GPUNormal *n2,
- const GPUNormal *n3,
- const bool do_hq_normals)
-{
- if (do_hq_normals) {
- copy_v3_v3_short(GPU_vertbuf_raw_step(step), n1->high);
- copy_v3_v3_short(GPU_vertbuf_raw_step(step), n2->high);
- copy_v3_v3_short(GPU_vertbuf_raw_step(step), n3->high);
- }
- else {
- *(GPUPackedNormal *)GPU_vertbuf_raw_step(step) = n1->low;
- *(GPUPackedNormal *)GPU_vertbuf_raw_step(step) = n2->low;
- *(GPUPackedNormal *)GPU_vertbuf_raw_step(step) = n3->low;
- }
-}
-
-static void displist_vertbuf_attr_set_tri_pos_nor_uv(GPUVertBufRaw *pos_step,
- GPUVertBufRaw *nor_step,
- GPUVertBufRaw *uv_step,
- GPUVertBufRaw *tan_step,
- const float v1[3],
- const float v2[3],
- const float v3[3],
- const GPUNormal *n1,
- const GPUNormal *n2,
- const GPUNormal *n3,
- const GPUNormal *t1,
- const GPUNormal *t2,
- const GPUNormal *t3,
- const float uv1[2],
- const float uv2[2],
- const float uv3[2],
- const bool do_hq_normals)
-{
- if (pos_step->size != 0) {
- copy_v3_v3(GPU_vertbuf_raw_step(pos_step), v1);
- copy_v3_v3(GPU_vertbuf_raw_step(pos_step), v2);
- copy_v3_v3(GPU_vertbuf_raw_step(pos_step), v3);
- displist_vertbuf_attr_set_nor(nor_step, n1, n2, n3, do_hq_normals);
- }
- if (uv_step->size != 0) {
- normal_float_to_short_v2(GPU_vertbuf_raw_step(uv_step), uv1);
- normal_float_to_short_v2(GPU_vertbuf_raw_step(uv_step), uv2);
- normal_float_to_short_v2(GPU_vertbuf_raw_step(uv_step), uv3);
- }
- if (tan_step->size != 0) {
- displist_vertbuf_attr_set_nor(tan_step, t1, t2, t3, do_hq_normals);
- }
-}
-
-#define SURFACE_QUAD_ITER_BEGIN(dl) \
- { \
- uint quad[4]; \
- int quad_index = 0; \
- int max_v = (dl->flag & DL_CYCL_V) ? dl->parts : (dl->parts - 1); \
- int max_u = (dl->flag & DL_CYCL_U) ? dl->nr : (dl->nr - 1); \
- for (int v = 0; v < max_v; v++) { \
- quad[3] = dl->nr * v; \
- quad[0] = quad[3] + 1; \
- quad[2] = quad[3] + dl->nr; \
- quad[1] = quad[0] + dl->nr; \
- /* Cyclic wrap */ \
- if (v == dl->parts - 1) { \
- quad[1] -= dl->parts * dl->nr; \
- quad[2] -= dl->parts * dl->nr; \
- } \
- for (int u = 0; u < max_u; u++, quad_index++) { \
- /* Cyclic wrap */ \
- if (u == dl->nr - 1) { \
- quad[0] -= dl->nr; \
- quad[1] -= dl->nr; \
- }
-
-#define SURFACE_QUAD_ITER_END \
- quad[2] = quad[1]; \
- quad[1]++; \
- quad[3] = quad[0]; \
- quad[0]++; \
- } \
- } \
- }
-
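
Unrolled for readability, the macro pair above is equivalent to a plain nested loop over the grid quads (for_each_surf_quad is a sketch, not a Blender function; the macros instead maintain the quad[] corners incrementally):

    static void for_each_surf_quad(const DispList *dl,
                                   void (*fn)(void *thunk, const uint quad[4], int quad_index),
                                   void *thunk)
    {
      const int max_v = (dl->flag & DL_CYCL_V) ? dl->parts : dl->parts - 1;
      const int max_u = (dl->flag & DL_CYCL_U) ? dl->nr : dl->nr - 1;
      int quad_index = 0;
      for (int v = 0; v < max_v; v++) {
        for (int u = 0; u < max_u; u++, quad_index++) {
          /* Corner indices into the parts-by-nr vertex grid; the modulo
           * implements the cyclic wrap the macros do by subtraction. */
          const uint quad[4] = {
              (uint)(v * dl->nr + ((u + 1) % dl->nr)),
              (uint)(((v + 1) % dl->parts) * dl->nr + ((u + 1) % dl->nr)),
              (uint)(((v + 1) % dl->parts) * dl->nr + u),
              (uint)(v * dl->nr + u),
          };
          fn(thunk, quad, quad_index);
        }
      }
    }
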
-static void displist_surf_fnors_ensure(const DispList *dl, float (**fnors)[3])
-{
- int u_len = dl->nr - ((dl->flag & DL_CYCL_U) ? 0 : 1);
- int v_len = dl->parts - ((dl->flag & DL_CYCL_V) ? 0 : 1);
- const float(*verts)[3] = (const float(*)[3])dl->verts;
- float(*nor_flat)[3] = MEM_mallocN(sizeof(float[3]) * u_len * v_len, __func__);
- *fnors = nor_flat;
-
- SURFACE_QUAD_ITER_BEGIN (dl) {
- normal_quad_v3(*nor_flat, verts[quad[0]], verts[quad[1]], verts[quad[2]], verts[quad[3]]);
- nor_flat++;
- }
- SURFACE_QUAD_ITER_END
-}
-
-void DRW_displist_vertbuf_create_loop_pos_and_nor_and_uv_and_tan(ListBase *lb,
- GPUVertBuf *vbo_pos_nor,
- GPUVertBuf *vbo_uv,
- GPUVertBuf *vbo_tan,
- const Scene *scene)
-{
- const bool do_hq_normals = (scene->r.perf_flag & SCE_PERF_HQ_NORMALS) != 0 ||
- GPU_use_hq_normals_workaround();
-
- static GPUVertFormat format_pos_nor = {0};
- static GPUVertFormat format_pos_nor_hq = {0};
- static GPUVertFormat format_uv = {0};
- static GPUVertFormat format_tan = {0};
- static GPUVertFormat format_tan_hq = {0};
- static struct {
- uint pos, nor, uv, tan;
- uint pos_hq, nor_hq, tan_hq;
- } attr_id;
- if (format_pos_nor.attr_len == 0) {
- /* initialize vertex format */
- attr_id.pos = GPU_vertformat_attr_add(
- &format_pos_nor, "pos", GPU_COMP_F32, 3, GPU_FETCH_FLOAT);
- attr_id.nor = GPU_vertformat_attr_add(
- &format_pos_nor, "nor", GPU_COMP_I10, 3, GPU_FETCH_INT_TO_FLOAT_UNIT);
- attr_id.pos_hq = GPU_vertformat_attr_add(
- &format_pos_nor_hq, "pos", GPU_COMP_F32, 3, GPU_FETCH_FLOAT);
- attr_id.nor_hq = GPU_vertformat_attr_add(
- &format_pos_nor_hq, "nor", GPU_COMP_I16, 3, GPU_FETCH_INT_TO_FLOAT_UNIT);
-
- /* UVs are in [0..1] range. We can compress them. */
- attr_id.uv = GPU_vertformat_attr_add(
- &format_uv, "u", GPU_COMP_I16, 2, GPU_FETCH_INT_TO_FLOAT_UNIT);
- GPU_vertformat_alias_add(&format_uv, "au");
-
- attr_id.tan = GPU_vertformat_attr_add(
- &format_tan, "t", GPU_COMP_I10, 4, GPU_FETCH_INT_TO_FLOAT_UNIT);
- GPU_vertformat_alias_add(&format_tan, "at");
- attr_id.tan_hq = GPU_vertformat_attr_add(
- &format_tan_hq, "t", GPU_COMP_I16, 3, GPU_FETCH_INT_TO_FLOAT_UNIT);
- GPU_vertformat_alias_add(&format_tan_hq, "at");
- }
- uint pos_id = do_hq_normals ? attr_id.pos_hq : attr_id.pos;
- uint nor_id = do_hq_normals ? attr_id.nor_hq : attr_id.nor;
- uint tan_id = do_hq_normals ? attr_id.tan_hq : attr_id.tan;
-
- int vbo_len_capacity = curve_render_surface_tri_len_get(lb) * 3;
-
- GPUVertBufRaw pos_step = {0};
- GPUVertBufRaw nor_step = {0};
- GPUVertBufRaw uv_step = {0};
- GPUVertBufRaw tan_step = {0};
-
-#define DRW_TEST_ASSIGN_VBO(v) (v = (DRW_vbo_requested(v) ? (v) : NULL))
-
- if (DRW_TEST_ASSIGN_VBO(vbo_pos_nor)) {
- GPU_vertbuf_init_with_format(vbo_pos_nor,
- do_hq_normals ? &format_pos_nor_hq : &format_pos_nor);
- GPU_vertbuf_data_alloc(vbo_pos_nor, vbo_len_capacity);
- GPU_vertbuf_attr_get_raw_data(vbo_pos_nor, pos_id, &pos_step);
- GPU_vertbuf_attr_get_raw_data(vbo_pos_nor, nor_id, &nor_step);
- }
- if (DRW_TEST_ASSIGN_VBO(vbo_uv)) {
- GPU_vertbuf_init_with_format(vbo_uv, &format_uv);
- GPU_vertbuf_data_alloc(vbo_uv, vbo_len_capacity);
- GPU_vertbuf_attr_get_raw_data(vbo_uv, attr_id.uv, &uv_step);
- }
- if (DRW_TEST_ASSIGN_VBO(vbo_tan)) {
- GPU_vertbuf_init_with_format(vbo_tan, do_hq_normals ? &format_tan_hq : &format_tan);
- GPU_vertbuf_data_alloc(vbo_tan, vbo_len_capacity);
- GPU_vertbuf_attr_get_raw_data(vbo_tan, tan_id, &tan_step);
- }
-
-#undef DRW_TEST_ASSIGN_VBO
-
- BKE_displist_normals_add(lb);
-
- LISTBASE_FOREACH (const DispList *, dl, lb) {
- const bool is_smooth = (dl->rt & CU_SMOOTH) != 0;
- if (ELEM(dl->type, DL_INDEX3, DL_INDEX4, DL_SURF)) {
- const float(*verts)[3] = (const float(*)[3])dl->verts;
- const float(*nors)[3] = (const float(*)[3])dl->nors;
- const int *idx = dl->index;
- float uv[4][2];
-
- if (dl->type == DL_INDEX3) {
- /* Currently 'DL_INDEX3' is always a flat surface with a single normal. */
- GPUNormal tangent_packed;
- GPUNormal normal_packed;
- GPU_normal_convert_v3(&normal_packed, dl->nors, do_hq_normals);
- if (vbo_tan) {
- float tan[4];
- float(*tan_ptr)[4] = &tan;
- BKE_displist_tangent_calc(dl, NULL, &tan_ptr);
- GPU_normal_convert_v3(&tangent_packed, tan, do_hq_normals);
- normal_float_to_short_v3(tangent_packed.high, tan);
- }
- else {
- if (do_hq_normals) {
- tangent_packed.high[0] = 0;
- tangent_packed.high[1] = 0;
- tangent_packed.high[2] = 0;
- }
- else {
- tangent_packed.low = (GPUPackedNormal){0, 0, 0, 1};
- }
- }
-
- const float x_max = (float)(dl->nr - 1);
- uv[0][1] = uv[1][1] = uv[2][1] = 0.0f;
- const int i_end = dl->parts;
- for (int i = 0; i < i_end; i++, idx += 3) {
- if (vbo_uv) {
- uv[0][0] = idx[0] / x_max;
- uv[1][0] = idx[1] / x_max;
- uv[2][0] = idx[2] / x_max;
- }
-
- displist_vertbuf_attr_set_tri_pos_nor_uv(&pos_step,
- &nor_step,
- &uv_step,
- &tan_step,
- verts[idx[0]],
- verts[idx[2]],
- verts[idx[1]],
- &normal_packed,
- &normal_packed,
- &normal_packed,
- &tangent_packed,
- &tangent_packed,
- &tangent_packed,
- uv[0],
- uv[2],
- uv[1],
- do_hq_normals);
- }
- }
- else if (dl->type == DL_SURF) {
- float(*tangents)[4] = NULL;
- float(*fnors)[3] = NULL;
-
- if (!is_smooth) {
- displist_surf_fnors_ensure(dl, &fnors);
- }
-
- if (vbo_tan) {
- BKE_displist_tangent_calc(dl, fnors, &tangents);
- }
-
- SURFACE_QUAD_ITER_BEGIN (dl) {
- if (vbo_uv) {
- surf_uv_quad(dl, quad, uv);
- }
- GPUNormal pnors_quad[4];
- GPUNormal ptans_quad[4];
-
- if (is_smooth) {
- for (int j = 0; j < 4; j++) {
- GPU_normal_convert_v3(&pnors_quad[j], nors[quad[j]], do_hq_normals);
- }
- }
- else {
- GPU_normal_convert_v3(&pnors_quad[0], fnors[quad_index], do_hq_normals);
- pnors_quad[1] = pnors_quad[2] = pnors_quad[3] = pnors_quad[0];
- }
-
- if (vbo_tan) {
- for (int j = 0; j < 4; j++) {
- float *tan = tangents[quad_index * 4 + j];
- GPU_normal_convert_v3(&ptans_quad[j], tan, do_hq_normals);
- }
- }
-
- displist_vertbuf_attr_set_tri_pos_nor_uv(&pos_step,
- &nor_step,
- &uv_step,
- &tan_step,
- verts[quad[2]],
- verts[quad[0]],
- verts[quad[1]],
- &pnors_quad[2],
- &pnors_quad[0],
- &pnors_quad[1],
- &ptans_quad[2],
- &ptans_quad[0],
- &ptans_quad[1],
- uv[2],
- uv[0],
- uv[1],
- do_hq_normals);
-
- displist_vertbuf_attr_set_tri_pos_nor_uv(&pos_step,
- &nor_step,
- &uv_step,
- &tan_step,
- verts[quad[0]],
- verts[quad[2]],
- verts[quad[3]],
- &pnors_quad[0],
- &pnors_quad[2],
- &pnors_quad[3],
- &ptans_quad[0],
- &ptans_quad[2],
- &ptans_quad[3],
- uv[0],
- uv[2],
- uv[3],
- do_hq_normals);
- }
- SURFACE_QUAD_ITER_END
-
- MEM_SAFE_FREE(tangents);
- MEM_SAFE_FREE(fnors);
- }
- else {
- BLI_assert(dl->type == DL_INDEX4);
- uv[0][0] = uv[0][1] = uv[1][0] = uv[3][1] = 0.0f;
- uv[1][1] = uv[2][0] = uv[2][1] = uv[3][0] = 1.0f;
-
- const int i_end = dl->parts;
- for (int i = 0; i < i_end; i++, idx += 4) {
- const bool is_tri = idx[2] != idx[3];
-
- GPUNormal ptan = {0};
- GPUNormal pnors_idx[4];
- if (is_smooth) {
- int idx_len = is_tri ? 3 : 4;
- for (int j = 0; j < idx_len; j++) {
- GPU_normal_convert_v3(&pnors_idx[j], nors[idx[j]], do_hq_normals);
- }
- }
- else {
- float nor_flat[3];
- if (is_tri) {
- normal_tri_v3(nor_flat, verts[idx[0]], verts[idx[1]], verts[idx[2]]);
- }
- else {
- normal_quad_v3(nor_flat, verts[idx[0]], verts[idx[1]], verts[idx[2]], verts[idx[3]]);
- }
- GPU_normal_convert_v3(&pnors_idx[0], nor_flat, do_hq_normals);
- pnors_idx[1] = pnors_idx[2] = pnors_idx[3] = pnors_idx[0];
- }
-
- displist_vertbuf_attr_set_tri_pos_nor_uv(&pos_step,
- &nor_step,
- &uv_step,
- &tan_step,
- verts[idx[0]],
- verts[idx[2]],
- verts[idx[1]],
- &pnors_idx[0],
- &pnors_idx[2],
- &pnors_idx[1],
- &ptan,
- &ptan,
- &ptan,
- uv[0],
- uv[2],
- uv[1],
- do_hq_normals);
-
- if (is_tri) {
- displist_vertbuf_attr_set_tri_pos_nor_uv(&pos_step,
- &nor_step,
- &uv_step,
- &tan_step,
- verts[idx[2]],
- verts[idx[0]],
- verts[idx[3]],
- &pnors_idx[2],
- &pnors_idx[0],
- &pnors_idx[3],
- &ptan,
- &ptan,
- &ptan,
- uv[2],
- uv[0],
- uv[3],
- do_hq_normals);
- }
- }
- }
- }
- }
- /* Resize and finish. */
- if (pos_step.size != 0) {
- int vbo_len_used = GPU_vertbuf_raw_used(&pos_step);
- if (vbo_len_used < vbo_len_capacity) {
- GPU_vertbuf_data_resize(vbo_pos_nor, vbo_len_used);
- }
- }
- if (uv_step.size != 0) {
- int vbo_len_used = GPU_vertbuf_raw_used(&uv_step);
- if (vbo_len_used < vbo_len_capacity) {
- GPU_vertbuf_data_resize(vbo_uv, vbo_len_used);
- }
- }
-}
-
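
The removed function followed the same fill pattern as DRW_displist_vertbuf_create_pos_and_nor() above: allocate the buffer at worst-case capacity, write through per-attribute raw steps, then shrink to the used size. Condensed into a minimal sketch (fill_positions_sketch, format, and pos_id are placeholders, not Blender API):

    static void fill_positions_sketch(GPUVertBuf *vbo,
                                      const GPUVertFormat *format,
                                      uint pos_id,
                                      const float (*positions)[3],
                                      int pos_len,
                                      int capacity)
    {
      GPUVertBufRaw pos_step = {0};
      GPU_vertbuf_init_with_format(vbo, format);
      GPU_vertbuf_data_alloc(vbo, capacity); /* Worst-case estimate. */
      GPU_vertbuf_attr_get_raw_data(vbo, pos_id, &pos_step);
      for (int i = 0; i < pos_len; i++) {
        /* Each raw_step() call returns the next slot and advances. */
        copy_v3_v3(GPU_vertbuf_raw_step(&pos_step), positions[i]);
      }
      const int used = GPU_vertbuf_raw_used(&pos_step);
      if (used < capacity) {
        GPU_vertbuf_data_resize(vbo, used); /* Trim the unused tail. */
      }
    }
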
/* Edge detection/adjacency. */
#define NO_EDGE INT_MAX
static void set_edge_adjacency_lines_indices(