Welcome to mirror list, hosted at ThFree Co, Russian Federation.

git.blender.org/blender.git - Unnamed repository; edit this file 'description' to name the repository.
summaryrefslogtreecommitdiff
path: root/source
diff options
context:
space:
mode:
Diffstat (limited to 'source')
-rw-r--r--source/blender/blenkernel/BKE_customdata.h1
-rw-r--r--source/blender/blenkernel/BKE_pbvh.h18
-rw-r--r--source/blender/blenkernel/intern/pbvh.c257
-rw-r--r--source/blender/blenkernel/intern/pbvh.cc2
-rw-r--r--source/blender/blenkernel/intern/pbvh_bmesh.c6
-rw-r--r--source/blender/blenkernel/intern/pbvh_intern.h2
-rw-r--r--source/blender/blenlib/BLI_math_vec_types.hh9
-rw-r--r--source/blender/draw/CMakeLists.txt3
-rw-r--r--source/blender/draw/DRW_pbvh.h98
-rw-r--r--source/blender/draw/engines/basic/basic_engine.c2
-rw-r--r--source/blender/draw/engines/eevee/eevee_materials.c9
-rw-r--r--source/blender/draw/engines/overlay/overlay_facing.cc2
-rw-r--r--source/blender/draw/engines/overlay/overlay_fade.cc2
-rw-r--r--source/blender/draw/engines/overlay/overlay_mode_transfer.cc2
-rw-r--r--source/blender/draw/engines/overlay/overlay_sculpt.cc2
-rw-r--r--source/blender/draw/engines/overlay/overlay_wireframe.cc2
-rw-r--r--source/blender/draw/engines/workbench/workbench_engine.c8
-rw-r--r--source/blender/draw/intern/DRW_render.h22
-rw-r--r--source/blender/draw/intern/draw_attributes.h18
-rw-r--r--source/blender/draw/intern/draw_cache.c2
-rw-r--r--source/blender/draw/intern/draw_cache_extract.hh16
-rw-r--r--source/blender/draw/intern/draw_cache_impl.h1
-rw-r--r--source/blender/draw/intern/draw_cache_impl_mesh.cc22
-rw-r--r--source/blender/draw/intern/draw_manager.h10
-rw-r--r--source/blender/draw/intern/draw_manager_data.c146
-rw-r--r--source/blender/draw/intern/draw_pbvh.cc1216
-rw-r--r--source/blender/draw/intern/draw_pbvh.h22
-rw-r--r--source/blender/gpu/CMakeLists.txt2
-rw-r--r--source/blender/gpu/GPU_buffers.h157
-rw-r--r--source/blender/gpu/intern/gpu_buffers.c1475
-rw-r--r--source/blender/gpu/intern/gpu_init_exit.c8
31 files changed, 1706 insertions, 1836 deletions
diff --git a/source/blender/blenkernel/BKE_customdata.h b/source/blender/blenkernel/BKE_customdata.h
index 22e4a2bce87..0e172abd9a2 100644
--- a/source/blender/blenkernel/BKE_customdata.h
+++ b/source/blender/blenkernel/BKE_customdata.h
@@ -450,6 +450,7 @@ int CustomData_get_stencil_layer(const struct CustomData *data, int type);
* if no such active layer is defined.
*/
const char *CustomData_get_active_layer_name(const struct CustomData *data, int type);
+const char *CustomData_get_render_layer_name(const struct CustomData *data, int type);
/**
* Returns name of the default layer of the given type or NULL
diff --git a/source/blender/blenkernel/BKE_pbvh.h b/source/blender/blenkernel/BKE_pbvh.h
index ff2140732cc..9e0884a8c76 100644
--- a/source/blender/blenkernel/BKE_pbvh.h
+++ b/source/blender/blenkernel/BKE_pbvh.h
@@ -16,6 +16,7 @@
/* For embedding CCGKey in iterator. */
#include "BKE_attribute.h"
#include "BKE_ccg.h"
+#include "DNA_customdata_types.h"
#ifdef __cplusplus
extern "C" {
@@ -27,7 +28,6 @@ struct CCGElem;
struct CCGKey;
struct CustomData;
struct DMFlagMat;
-struct GPU_PBVH_Buffers;
struct IsectRayPrecalc;
struct MLoop;
struct MLoopTri;
@@ -36,7 +36,9 @@ struct MVert;
struct Mesh;
struct MeshElemMap;
struct PBVH;
+struct PBVHBatches;
struct PBVHNode;
+struct PBVH_GPU_Args;
struct SubdivCCG;
struct TaskParallelSettings;
struct Image;
@@ -98,6 +100,12 @@ typedef struct PBVHPixelsNode {
void *node_data;
} PBVHPixelsNode;
+typedef struct PBVHAttrReq {
+ char name[MAX_CUSTOMDATA_LAYER_NAME];
+ eAttrDomain domain;
+ eCustomDataType type;
+} PBVHAttrReq;
+
typedef enum {
PBVH_Leaf = 1 << 0,
@@ -348,9 +356,13 @@ void BKE_pbvh_draw_cb(PBVH *pbvh,
bool update_only_visible,
PBVHFrustumPlanes *update_frustum,
PBVHFrustumPlanes *draw_frustum,
- void (*draw_fn)(void *user_data, struct GPU_PBVH_Buffers *buffers),
+ void (*draw_fn)(void *user_data,
+ struct PBVHBatches *batches,
+ struct PBVH_GPU_Args *args),
void *user_data,
- bool full_render);
+ bool full_render,
+ PBVHAttrReq *attrs,
+ int attrs_num);
void BKE_pbvh_draw_debug_cb(PBVH *pbvh,
void (*draw_fn)(PBVHNode *node,
diff --git a/source/blender/blenkernel/intern/pbvh.c b/source/blender/blenkernel/intern/pbvh.c
index 1d65e958e1c..a1de8e40fe3 100644
--- a/source/blender/blenkernel/intern/pbvh.c
+++ b/source/blender/blenkernel/intern/pbvh.c
@@ -25,9 +25,9 @@
#include "BKE_pbvh.h"
#include "BKE_subdiv_ccg.h"
-#include "PIL_time.h"
+#include "DRW_pbvh.h"
-#include "GPU_buffers.h"
+#include "PIL_time.h"
#include "bmesh.h"
@@ -513,6 +513,80 @@ static void pbvh_build(PBVH *pbvh, BB *cb, BBC *prim_bbc, int totprim)
build_sub(pbvh, 0, cb, prim_bbc, 0, totprim);
}
+static void pbvh_draw_args_init(PBVH *pbvh, PBVH_GPU_Args *args, PBVHNode *node)
+{
+ memset((void *)args, 0, sizeof(*args));
+
+ args->pbvh_type = pbvh->header.type;
+ args->mesh_verts_num = pbvh->totvert;
+ args->mesh_grids_num = pbvh->totgrid;
+ args->node = node;
+
+ BKE_pbvh_node_num_verts(pbvh, node, NULL, &args->node_verts_num);
+
+ args->grid_hidden = pbvh->grid_hidden;
+ args->face_sets_color_default = pbvh->face_sets_color_default;
+ args->face_sets_color_seed = pbvh->face_sets_color_seed;
+ args->mvert = pbvh->verts;
+ args->mloop = pbvh->mloop;
+ args->mpoly = pbvh->mpoly;
+ args->mlooptri = pbvh->looptri;
+
+ if (ELEM(pbvh->header.type, PBVH_FACES, PBVH_GRIDS)) {
+ args->hide_poly = pbvh->pdata ?
+ CustomData_get_layer_named(pbvh->pdata, CD_PROP_BOOL, ".hide_poly") :
+ NULL;
+ }
+
+ switch (pbvh->header.type) {
+ case PBVH_FACES:
+ args->mesh_faces_num = pbvh->mesh->totpoly;
+ args->vdata = pbvh->vdata;
+ args->ldata = pbvh->ldata;
+ args->pdata = pbvh->pdata;
+ args->totprim = node->totprim;
+ args->me = pbvh->mesh;
+ args->mpoly = pbvh->mpoly;
+ args->vert_normals = pbvh->vert_normals;
+
+ args->prim_indices = node->prim_indices;
+ args->face_sets = pbvh->face_sets;
+ break;
+ case PBVH_GRIDS:
+ args->vdata = pbvh->vdata;
+ args->ldata = pbvh->ldata;
+ args->pdata = pbvh->pdata;
+ args->ccg_key = pbvh->gridkey;
+ args->me = pbvh->mesh;
+ args->totprim = node->totprim;
+ args->grid_indices = node->prim_indices;
+ args->subdiv_ccg = pbvh->subdiv_ccg;
+ args->face_sets = pbvh->face_sets;
+ args->mpoly = pbvh->mpoly;
+
+ args->mesh_grids_num = pbvh->totgrid;
+ args->grids = pbvh->grids;
+ args->gridfaces = pbvh->gridfaces;
+ args->grid_flag_mats = pbvh->grid_flag_mats;
+ args->vert_normals = pbvh->vert_normals;
+
+ args->face_sets = pbvh->face_sets;
+ break;
+ case PBVH_BMESH:
+ args->bm = pbvh->header.bm;
+ args->vdata = &args->bm->vdata;
+ args->ldata = &args->bm->ldata;
+ args->pdata = &args->bm->pdata;
+ args->bm_faces = node->bm_faces;
+ args->bm_other_verts = node->bm_other_verts;
+ args->bm_unique_vert = node->bm_unique_verts;
+ args->totprim = BLI_gset_len(node->bm_faces);
+ args->cd_mask_layer = CustomData_get_offset(&pbvh->header.bm->vdata, CD_PAINT_MASK);
+
+ break;
+ }
+}
+
void BKE_pbvh_build_mesh(PBVH *pbvh,
Mesh *mesh,
const MPoly *mpoly,
@@ -645,8 +719,8 @@ void BKE_pbvh_free(PBVH *pbvh)
PBVHNode *node = &pbvh->nodes[i];
if (node->flag & PBVH_Leaf) {
- if (node->draw_buffers) {
- GPU_pbvh_buffers_free(node->draw_buffers);
+ if (node->draw_batches) {
+ DRW_pbvh_node_free(node->draw_batches);
}
if (node->vert_indices) {
MEM_freeN((void *)node->vert_indices);
@@ -693,10 +767,6 @@ void BKE_pbvh_free(PBVH *pbvh)
MEM_SAFE_FREE(pbvh->vert_bitmap);
- if (pbvh->vbo_id) {
- GPU_pbvh_free_format(pbvh->vbo_id);
- }
-
MEM_freeN(pbvh);
}
@@ -988,6 +1058,8 @@ typedef struct PBVHUpdateData {
float (*vnors)[3];
int flag;
bool show_sculpt_face_sets;
+ PBVHAttrReq *attrs;
+ int attrs_num;
} PBVHUpdateData;
static void pbvh_update_normals_clear_task_cb(void *__restrict userdata,
@@ -1240,13 +1312,6 @@ void pbvh_update_BB_redraw(PBVH *pbvh, PBVHNode **nodes, int totnode, int flag)
BLI_task_parallel_range(0, totnode, &data, pbvh_update_BB_redraw_task_cb, &settings);
}
-static int pbvh_get_buffers_update_flags(PBVH *UNUSED(pbvh))
-{
- int update_flags = GPU_PBVH_BUFFERS_SHOW_VCOL | GPU_PBVH_BUFFERS_SHOW_MASK |
- GPU_PBVH_BUFFERS_SHOW_SCULPT_FACE_SETS;
- return update_flags;
-}
-
bool BKE_pbvh_get_color_layer(const Mesh *me, CustomDataLayer **r_layer, eAttrDomain *r_attr)
{
CustomDataLayer *layer = BKE_id_attributes_active_color_get((ID *)me);
@@ -1283,128 +1348,29 @@ static void pbvh_update_draw_buffer_cb(void *__restrict userdata,
PBVHNode *node = data->nodes[n];
if (node->flag & PBVH_RebuildDrawBuffers) {
- switch (pbvh->header.type) {
- case PBVH_GRIDS: {
- bool smooth = node->totprim > 0 ?
- pbvh->grid_flag_mats[node->prim_indices[0]].flag & ME_SMOOTH :
- false;
+ PBVH_GPU_Args args;
+ pbvh_draw_args_init(pbvh, &args, node);
- node->draw_buffers = GPU_pbvh_grid_buffers_build(node->totprim, pbvh->grid_hidden, smooth);
- break;
- }
- case PBVH_FACES:
- node->draw_buffers = GPU_pbvh_mesh_buffers_build(
- pbvh->mesh, pbvh->looptri, node->prim_indices, node->totprim);
- break;
- case PBVH_BMESH:
- node->draw_buffers = GPU_pbvh_bmesh_buffers_build(pbvh->flags &
- PBVH_DYNTOPO_SMOOTH_SHADING);
- break;
- }
+ node->draw_batches = DRW_pbvh_node_create(&args);
}
if (node->flag & PBVH_UpdateDrawBuffers) {
node->debug_draw_gen++;
- const int update_flags = pbvh_get_buffers_update_flags(pbvh);
- switch (pbvh->header.type) {
- case PBVH_GRIDS:
- GPU_pbvh_grid_buffers_update(pbvh->vbo_id,
- node->draw_buffers,
- pbvh->subdiv_ccg,
- pbvh->grids,
- pbvh->grid_flag_mats,
- node->prim_indices,
- node->totprim,
- pbvh->face_sets,
- pbvh->face_sets_color_seed,
- pbvh->face_sets_color_default,
- &pbvh->gridkey,
- update_flags);
- break;
- case PBVH_FACES: {
- /* Pass vertices separately because they may be not be the same as the mesh's vertices,
- * and pass normals separately because they are managed by the PBVH. */
- GPU_pbvh_mesh_buffers_update(
- pbvh->vbo_id,
- node->draw_buffers,
- pbvh->mesh,
- pbvh->verts,
- CustomData_get_layer(pbvh->vdata, CD_PAINT_MASK),
- CustomData_get_layer_named(pbvh->pdata, CD_PROP_INT32, ".sculpt_face_set"),
- pbvh->face_sets_color_seed,
- pbvh->face_sets_color_default,
- update_flags,
- pbvh->vert_normals);
- break;
- }
- case PBVH_BMESH:
- GPU_pbvh_bmesh_buffers_update(pbvh->vbo_id,
- node->draw_buffers,
- pbvh->header.bm,
- node->bm_faces,
- node->bm_unique_verts,
- node->bm_other_verts,
- update_flags);
- break;
+ if (node->draw_batches) {
+ PBVH_GPU_Args args;
+
+ pbvh_draw_args_init(pbvh, &args, node);
+ DRW_pbvh_node_update(node->draw_batches, &args);
}
}
}
void pbvh_free_draw_buffers(PBVH *pbvh, PBVHNode *node)
{
- if (node->draw_buffers) {
- pbvh->draw_cache_invalid = true;
-
- GPU_pbvh_buffers_free(node->draw_buffers);
- node->draw_buffers = NULL;
- }
-}
-
-static void pbvh_check_draw_layout(PBVH *pbvh)
-{
- const CustomData *vdata;
- const CustomData *ldata;
-
- if (!pbvh->vbo_id) {
- pbvh->vbo_id = GPU_pbvh_make_format();
- }
-
- switch (pbvh->header.type) {
- case PBVH_BMESH:
- if (!pbvh->header.bm) {
- /* BMesh hasn't been created yet */
- return;
- }
-
- vdata = &pbvh->header.bm->vdata;
- ldata = &pbvh->header.bm->ldata;
- break;
- case PBVH_FACES:
- vdata = pbvh->vdata;
- ldata = pbvh->ldata;
- break;
- case PBVH_GRIDS:
- ldata = vdata = NULL;
- break;
- }
-
- /* Rebuild all draw buffers if attribute layout changed.
- *
- * NOTE: The optimization where we only send active attributes
- * to the GPU in workbench mode is disabled due to bugs
- * (there's no guarantee there isn't another EEVEE viewport which would
- * free the draw buffers and corrupt the draw cache).
- */
- if (GPU_pbvh_attribute_names_update(pbvh->header.type, pbvh->vbo_id, vdata, ldata, false)) {
- /* attribute layout changed; force rebuild */
- for (int i = 0; i < pbvh->totnode; i++) {
- PBVHNode *node = pbvh->nodes + i;
-
- if (node->flag & PBVH_Leaf) {
- node->flag |= PBVH_RebuildDrawBuffers | PBVH_UpdateDrawBuffers | PBVH_UpdateRedraw;
- }
- }
+ if (node->draw_batches) {
+ DRW_pbvh_node_free(node->draw_batches);
+ node->draw_batches = NULL;
}
}
@@ -1412,10 +1378,6 @@ static void pbvh_update_draw_buffers(PBVH *pbvh, PBVHNode **nodes, int totnode,
{
const CustomData *vdata;
- if (!pbvh->vbo_id) {
- pbvh->vbo_id = GPU_pbvh_make_format();
- }
-
switch (pbvh->header.type) {
case PBVH_BMESH:
if (!pbvh->header.bm) {
@@ -1441,14 +1403,11 @@ static void pbvh_update_draw_buffers(PBVH *pbvh, PBVHNode **nodes, int totnode,
if (node->flag & PBVH_RebuildDrawBuffers) {
pbvh_free_draw_buffers(pbvh, node);
}
- else if ((node->flag & PBVH_UpdateDrawBuffers) && node->draw_buffers) {
- if (pbvh->header.type == PBVH_GRIDS) {
- GPU_pbvh_grid_buffers_update_free(
- node->draw_buffers, pbvh->grid_flag_mats, node->prim_indices);
- }
- else if (pbvh->header.type == PBVH_BMESH) {
- GPU_pbvh_bmesh_buffers_update_free(node->draw_buffers);
- }
+ else if ((node->flag & PBVH_UpdateDrawBuffers) && node->draw_batches) {
+ PBVH_GPU_Args args;
+
+ pbvh_draw_args_init(pbvh, &args, node);
+ DRW_pbvh_update_pre(node->draw_batches, &args);
}
}
}
@@ -1468,7 +1427,10 @@ static void pbvh_update_draw_buffers(PBVH *pbvh, PBVHNode **nodes, int totnode,
if (node->flag & PBVH_UpdateDrawBuffers) {
/* Flush buffers uses OpenGL, so not in parallel. */
- GPU_pbvh_buffers_update_flush(node->draw_buffers);
+
+ if (node->draw_batches) {
+ DRW_pbvh_node_gpu_flush(node->draw_batches);
+ }
}
node->flag &= ~(PBVH_RebuildDrawBuffers | PBVH_UpdateDrawBuffers);
@@ -2810,6 +2772,8 @@ void BKE_pbvh_face_sets_color_set(PBVH *pbvh, int seed, int color_default)
typedef struct PBVHDrawSearchData {
PBVHFrustumPlanes *frustum;
int accum_update_flag;
+ PBVHAttrReq *attrs;
+ int attrs_num;
} PBVHDrawSearchData;
static bool pbvh_draw_search_cb(PBVHNode *node, void *data_v)
@@ -2827,9 +2791,11 @@ void BKE_pbvh_draw_cb(PBVH *pbvh,
bool update_only_visible,
PBVHFrustumPlanes *update_frustum,
PBVHFrustumPlanes *draw_frustum,
- void (*draw_fn)(void *user_data, GPU_PBVH_Buffers *buffers),
+ void (*draw_fn)(void *user_data, PBVHBatches *batches, PBVH_GPU_Args *args),
void *user_data,
- bool UNUSED(full_render))
+ bool UNUSED(full_render),
+ PBVHAttrReq *attrs,
+ int attrs_num)
{
PBVHNode **nodes;
int totnode;
@@ -2840,7 +2806,8 @@ void BKE_pbvh_draw_cb(PBVH *pbvh,
/* Search for nodes that need updates. */
if (update_only_visible) {
/* Get visible nodes with draw updates. */
- PBVHDrawSearchData data = {.frustum = update_frustum, .accum_update_flag = 0};
+ PBVHDrawSearchData data = {
+ .frustum = update_frustum, .accum_update_flag = 0, attrs, attrs_num};
BKE_pbvh_search_gather(pbvh, pbvh_draw_search_cb, &data, &nodes, &totnode);
update_flag = data.accum_update_flag;
}
@@ -2852,8 +2819,6 @@ void BKE_pbvh_draw_cb(PBVH *pbvh,
update_flag = PBVH_RebuildDrawBuffers | PBVH_UpdateDrawBuffers;
}
- pbvh_check_draw_layout(pbvh);
-
/* Update draw buffers. */
if (totnode != 0 && (update_flag & (PBVH_RebuildDrawBuffers | PBVH_UpdateDrawBuffers))) {
pbvh_update_draw_buffers(pbvh, nodes, totnode, update_flag);
@@ -2864,10 +2829,14 @@ void BKE_pbvh_draw_cb(PBVH *pbvh,
PBVHDrawSearchData draw_data = {.frustum = draw_frustum, .accum_update_flag = 0};
BKE_pbvh_search_gather(pbvh, pbvh_draw_search_cb, &draw_data, &nodes, &totnode);
+ PBVH_GPU_Args args;
+
for (int i = 0; i < totnode; i++) {
PBVHNode *node = nodes[i];
if (!(node->flag & PBVH_FullyHidden)) {
- draw_fn(user_data, node->draw_buffers);
+ pbvh_draw_args_init(pbvh, &args, node);
+
+ draw_fn(user_data, node->draw_batches, &args);
}
}
diff --git a/source/blender/blenkernel/intern/pbvh.cc b/source/blender/blenkernel/intern/pbvh.cc
index b2f32a38f23..03382de34db 100644
--- a/source/blender/blenkernel/intern/pbvh.cc
+++ b/source/blender/blenkernel/intern/pbvh.cc
@@ -29,8 +29,6 @@
#include "PIL_time.h"
-#include "GPU_buffers.h"
-
#include "bmesh.h"
#include "atomic_ops.h"
diff --git a/source/blender/blenkernel/intern/pbvh_bmesh.c b/source/blender/blenkernel/intern/pbvh_bmesh.c
index 81dbff980d5..de908adac79 100644
--- a/source/blender/blenkernel/intern/pbvh_bmesh.c
+++ b/source/blender/blenkernel/intern/pbvh_bmesh.c
@@ -17,7 +17,7 @@
#include "BKE_ccg.h"
#include "BKE_pbvh.h"
-#include "GPU_buffers.h"
+#include "DRW_pbvh.h"
#include "bmesh.h"
#include "pbvh_intern.h"
@@ -347,8 +347,8 @@ static void pbvh_bmesh_node_split(PBVH *pbvh, const BBC *bbc_array, int node_ind
n->bm_other_verts = NULL;
n->layer_disp = NULL;
- if (n->draw_buffers) {
- pbvh_free_draw_buffers(pbvh, n);
+ if (n->draw_batches) {
+ DRW_pbvh_node_free(n->draw_batches);
}
n->flag &= ~PBVH_Leaf;
diff --git a/source/blender/blenkernel/intern/pbvh_intern.h b/source/blender/blenkernel/intern/pbvh_intern.h
index 8ab56839f9c..bdfd3ad3d09 100644
--- a/source/blender/blenkernel/intern/pbvh_intern.h
+++ b/source/blender/blenkernel/intern/pbvh_intern.h
@@ -33,7 +33,7 @@ struct MeshElemMap;
* union'd structs */
struct PBVHNode {
/* Opaque handle for drawing code */
- struct GPU_PBVH_Buffers *draw_buffers;
+ struct PBVHBatches *draw_batches;
/* Voxel bounds */
BB vb;
diff --git a/source/blender/blenlib/BLI_math_vec_types.hh b/source/blender/blenlib/BLI_math_vec_types.hh
index c19317867a9..5916193894b 100644
--- a/source/blender/blenlib/BLI_math_vec_types.hh
+++ b/source/blender/blenlib/BLI_math_vec_types.hh
@@ -556,6 +556,11 @@ template<typename T, int Size> struct vec_base : public vec_struct_base<T, Size>
}
};
+using char3 = blender::vec_base<int8_t, 3>;
+
+using uchar3 = blender::vec_base<uint8_t, 3>;
+using uchar4 = blender::vec_base<uint8_t, 4>;
+
using int2 = vec_base<int32_t, 2>;
using int3 = vec_base<int32_t, 3>;
using int4 = vec_base<int32_t, 4>;
@@ -564,7 +569,11 @@ using uint2 = vec_base<uint32_t, 2>;
using uint3 = vec_base<uint32_t, 3>;
using uint4 = vec_base<uint32_t, 4>;
+using short3 = blender::vec_base<int16_t, 3>;
+
using ushort2 = vec_base<uint16_t, 2>;
+using ushort3 = blender::vec_base<uint16_t, 3>;
+using ushort4 = blender::vec_base<uint16_t, 4>;
using float2 = vec_base<float, 2>;
using float3 = vec_base<float, 3>;
diff --git a/source/blender/draw/CMakeLists.txt b/source/blender/draw/CMakeLists.txt
index 45a7e3410eb..2acff89ce7e 100644
--- a/source/blender/draw/CMakeLists.txt
+++ b/source/blender/draw/CMakeLists.txt
@@ -94,6 +94,7 @@ set(SRC
intern/draw_manager_shader.c
intern/draw_manager_text.c
intern/draw_manager_texture.c
+ intern/draw_pbvh.cc
intern/draw_select_buffer.c
intern/draw_shader.cc
intern/draw_texture_pool.cc
@@ -207,6 +208,7 @@ set(SRC
DRW_engine.h
DRW_select_buffer.h
+ DRW_pbvh.h
intern/DRW_gpu_wrapper.hh
intern/DRW_render.h
intern/draw_attributes.h
@@ -229,6 +231,7 @@ set(SRC
intern/draw_manager_profiling.h
intern/draw_manager_testing.h
intern/draw_manager_text.h
+ intern/draw_pbvh.h
intern/draw_pass.hh
intern/draw_resource.cc
intern/draw_resource.hh
diff --git a/source/blender/draw/DRW_pbvh.h b/source/blender/draw/DRW_pbvh.h
new file mode 100644
index 00000000000..ffd4b92d87b
--- /dev/null
+++ b/source/blender/draw/DRW_pbvh.h
@@ -0,0 +1,98 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later
+ * Copyright 2022 Blender Foundation. */
+
+/** \file
+ * \ingroup draw
+ */
+
+#pragma once
+
+/* Needed for BKE_ccg.h. */
+#include "BLI_assert.h"
+#include "BLI_bitmap.h"
+
+#include "BKE_ccg.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+struct GPUViewport;
+struct PBVHAttrReq;
+struct GPUBatch;
+struct PBVHNode;
+struct GSet;
+struct DMFlagMat;
+struct Object;
+struct Mesh;
+struct MLoopTri;
+struct CustomData;
+struct MVert;
+struct MEdge;
+struct MLoop;
+struct MPoly;
+struct SubdivCCG;
+struct BMesh;
+
+typedef struct PBVHBatches PBVHBatches;
+
+typedef struct PBVH_GPU_Args {
+ int pbvh_type;
+
+ struct BMesh *bm;
+ const struct Mesh *me;
+ const struct MVert *mvert;
+ const struct MLoop *mloop;
+ const struct MPoly *mpoly;
+ int mesh_verts_num, mesh_faces_num, mesh_grids_num;
+ struct CustomData *vdata, *ldata, *pdata;
+ const float (*vert_normals)[3];
+
+ int face_sets_color_seed, face_sets_color_default;
+ int *face_sets; /* for PBVH_FACES and PBVH_GRIDS */
+
+ struct SubdivCCG *subdiv_ccg;
+ const struct DMFlagMat *grid_flag_mats;
+ const int *grid_indices;
+ CCGKey ccg_key;
+ CCGElem **grids;
+ void **gridfaces;
+ BLI_bitmap **grid_hidden;
+
+ int *prim_indices;
+ int totprim;
+
+ bool *hide_poly;
+
+ int node_verts_num;
+
+ const struct MLoopTri *mlooptri;
+ struct PBVHNode *node;
+
+ /* BMesh. */
+ struct GSet *bm_unique_vert, *bm_other_verts, *bm_faces;
+ int cd_mask_layer;
+} PBVH_GPU_Args;
+
+typedef struct PBVHGPUFormat PBVHGPUFormat;
+
+void DRW_pbvh_node_update(PBVHBatches *batches, PBVH_GPU_Args *args);
+void DRW_pbvh_update_pre(PBVHBatches *batches, PBVH_GPU_Args *args);
+
+void DRW_pbvh_node_gpu_flush(PBVHBatches *batches);
+struct PBVHBatches *DRW_pbvh_node_create(PBVH_GPU_Args *args);
+void DRW_pbvh_node_free(PBVHBatches *batches);
+struct GPUBatch *DRW_pbvh_tris_get(PBVHBatches *batches,
+ struct PBVHAttrReq *attrs,
+ int attrs_num,
+ PBVH_GPU_Args *args,
+ int *r_prim_count);
+struct GPUBatch *DRW_pbvh_lines_get(struct PBVHBatches *batches,
+ struct PBVHAttrReq *attrs,
+ int attrs_num,
+ PBVH_GPU_Args *args,
+ int *r_prim_count);
+
+#ifdef __cplusplus
+}
+#endif
diff --git a/source/blender/draw/engines/basic/basic_engine.c b/source/blender/draw/engines/basic/basic_engine.c
index 975d9e299bf..86c565371c1 100644
--- a/source/blender/draw/engines/basic/basic_engine.c
+++ b/source/blender/draw/engines/basic/basic_engine.c
@@ -199,7 +199,7 @@ static void basic_cache_populate(void *vedata, Object *ob)
}
if (use_sculpt_pbvh) {
- DRW_shgroup_call_sculpt(shgrp, ob, false, false);
+ DRW_shgroup_call_sculpt(shgrp, ob, false, false, false, false, false);
}
else {
if (stl->g_data->use_material_slot_selection && BKE_object_supports_material_slots(ob)) {
diff --git a/source/blender/draw/engines/eevee/eevee_materials.c b/source/blender/draw/engines/eevee/eevee_materials.c
index 94f29d64628..1cb630e6d59 100644
--- a/source/blender/draw/engines/eevee/eevee_materials.c
+++ b/source/blender/draw/engines/eevee/eevee_materials.c
@@ -824,14 +824,17 @@ void EEVEE_materials_cache_populate(EEVEE_Data *vedata,
if (use_sculpt_pbvh) {
struct DRWShadingGroup **shgrps_array = BLI_array_alloca(shgrps_array, materials_len);
+ struct GPUMaterial **gpumat_array = BLI_array_alloca(gpumat_array, materials_len);
+ MATCACHE_AS_ARRAY(matcache, shading_gpumat, materials_len, gpumat_array);
+
MATCACHE_AS_ARRAY(matcache, shading_grp, materials_len, shgrps_array);
- DRW_shgroup_call_sculpt_with_materials(shgrps_array, materials_len, ob);
+ DRW_shgroup_call_sculpt_with_materials(shgrps_array, gpumat_array, materials_len, ob);
MATCACHE_AS_ARRAY(matcache, depth_grp, materials_len, shgrps_array);
- DRW_shgroup_call_sculpt_with_materials(shgrps_array, materials_len, ob);
+ DRW_shgroup_call_sculpt_with_materials(shgrps_array, gpumat_array, materials_len, ob);
MATCACHE_AS_ARRAY(matcache, shadow_grp, materials_len, shgrps_array);
- DRW_shgroup_call_sculpt_with_materials(shgrps_array, materials_len, ob);
+ DRW_shgroup_call_sculpt_with_materials(shgrps_array, gpumat_array, materials_len, ob);
}
else {
struct GPUMaterial **gpumat_array = BLI_array_alloca(gpumat_array, materials_len);
diff --git a/source/blender/draw/engines/overlay/overlay_facing.cc b/source/blender/draw/engines/overlay/overlay_facing.cc
index 9a501c8f1bb..e40c32b30fc 100644
--- a/source/blender/draw/engines/overlay/overlay_facing.cc
+++ b/source/blender/draw/engines/overlay/overlay_facing.cc
@@ -48,7 +48,7 @@ void OVERLAY_facing_cache_populate(OVERLAY_Data *vedata, Object *ob)
const bool is_xray = (ob->dtx & OB_DRAW_IN_FRONT) != 0;
if (use_sculpt_pbvh) {
- DRW_shgroup_call_sculpt(pd->facing_grp[is_xray], ob, false, false);
+ DRW_shgroup_call_sculpt(pd->facing_grp[is_xray], ob, false, false, false, false, false);
}
else {
struct GPUBatch *geom = DRW_cache_object_surface_get(ob);
diff --git a/source/blender/draw/engines/overlay/overlay_fade.cc b/source/blender/draw/engines/overlay/overlay_fade.cc
index ee5540d91eb..f99e77b75f2 100644
--- a/source/blender/draw/engines/overlay/overlay_fade.cc
+++ b/source/blender/draw/engines/overlay/overlay_fade.cc
@@ -58,7 +58,7 @@ void OVERLAY_fade_cache_populate(OVERLAY_Data *vedata, Object *ob)
const bool is_xray = (ob->dtx & OB_DRAW_IN_FRONT) != 0;
if (use_sculpt_pbvh) {
- DRW_shgroup_call_sculpt(pd->fade_grp[is_xray], ob, false, false);
+ DRW_shgroup_call_sculpt(pd->fade_grp[is_xray], ob, false, false, false, false, false);
}
else {
struct GPUBatch *geom = DRW_cache_object_surface_get(ob);
diff --git a/source/blender/draw/engines/overlay/overlay_mode_transfer.cc b/source/blender/draw/engines/overlay/overlay_mode_transfer.cc
index b312a12a07b..5222abfc743 100644
--- a/source/blender/draw/engines/overlay/overlay_mode_transfer.cc
+++ b/source/blender/draw/engines/overlay/overlay_mode_transfer.cc
@@ -110,7 +110,7 @@ void OVERLAY_mode_transfer_cache_populate(OVERLAY_Data *vedata, Object *ob)
pd->mode_transfer.any_animated = true;
if (use_sculpt_pbvh) {
- DRW_shgroup_call_sculpt(mode_transfer_grp[is_xray], ob, false, false);
+ DRW_shgroup_call_sculpt(mode_transfer_grp[is_xray], ob, false, false, false, false, false);
}
else {
struct GPUBatch *geom = DRW_cache_object_surface_get(ob);
diff --git a/source/blender/draw/engines/overlay/overlay_sculpt.cc b/source/blender/draw/engines/overlay/overlay_sculpt.cc
index ddad1f06537..8f142d68350 100644
--- a/source/blender/draw/engines/overlay/overlay_sculpt.cc
+++ b/source/blender/draw/engines/overlay/overlay_sculpt.cc
@@ -53,7 +53,7 @@ void OVERLAY_sculpt_cache_populate(OVERLAY_Data *vedata, Object *ob)
}
if (use_pbvh) {
- DRW_shgroup_call_sculpt(pd->sculpt_mask_grp, ob, false, true);
+ DRW_shgroup_call_sculpt(pd->sculpt_mask_grp, ob, false, true, true, false, false);
}
else {
sculpt_overlays = DRW_mesh_batch_cache_get_sculpt_overlays(static_cast<Mesh *>(ob->data));
diff --git a/source/blender/draw/engines/overlay/overlay_wireframe.cc b/source/blender/draw/engines/overlay/overlay_wireframe.cc
index edaa96651b2..bff1bf46923 100644
--- a/source/blender/draw/engines/overlay/overlay_wireframe.cc
+++ b/source/blender/draw/engines/overlay/overlay_wireframe.cc
@@ -302,7 +302,7 @@ void OVERLAY_wireframe_cache_populate(OVERLAY_Data *vedata,
DRW_shgroup_call_no_cull(shgrp, geom, ob);
}
else if (use_sculpt_pbvh) {
- DRW_shgroup_call_sculpt(shgrp, ob, true, false);
+ DRW_shgroup_call_sculpt(shgrp, ob, true, false, false, false, false);
}
else {
DRW_shgroup_call(shgrp, geom, ob);
diff --git a/source/blender/draw/engines/workbench/workbench_engine.c b/source/blender/draw/engines/workbench/workbench_engine.c
index ee9521289d9..36a980bd506 100644
--- a/source/blender/draw/engines/workbench/workbench_engine.c
+++ b/source/blender/draw/engines/workbench/workbench_engine.c
@@ -102,7 +102,11 @@ static void workbench_cache_sculpt_populate(WORKBENCH_PrivateData *wpd,
const bool use_single_drawcall = !ELEM(color_type, V3D_SHADING_MATERIAL_COLOR);
if (use_single_drawcall) {
DRWShadingGroup *grp = workbench_material_setup(wpd, ob, ob->actcol, color_type, NULL);
- DRW_shgroup_call_sculpt(grp, ob, false, false);
+
+ bool use_color = color_type == V3D_SHADING_VERTEX_COLOR;
+ bool use_uv = color_type == V3D_SHADING_TEXTURE_COLOR;
+
+ DRW_shgroup_call_sculpt(grp, ob, false, false, false, use_color, use_uv);
}
else {
const int materials_len = DRW_cache_object_material_count_get(ob);
@@ -110,7 +114,7 @@ static void workbench_cache_sculpt_populate(WORKBENCH_PrivateData *wpd,
for (int i = 0; i < materials_len; i++) {
shgrps[i] = workbench_material_setup(wpd, ob, i + 1, color_type, NULL);
}
- DRW_shgroup_call_sculpt_with_materials(shgrps, materials_len, ob);
+ DRW_shgroup_call_sculpt_with_materials(shgrps, NULL, materials_len, ob);
}
}
diff --git a/source/blender/draw/intern/DRW_render.h b/source/blender/draw/intern/DRW_render.h
index 7b80ffd2b88..1752198c349 100644
--- a/source/blender/draw/intern/DRW_render.h
+++ b/source/blender/draw/intern/DRW_render.h
@@ -398,8 +398,18 @@ void DRW_shgroup_call_instances_with_attrs(DRWShadingGroup *shgroup,
struct GPUBatch *geom,
struct GPUBatch *inst_attributes);
-void DRW_shgroup_call_sculpt(DRWShadingGroup *sh, Object *ob, bool wire, bool mask);
-void DRW_shgroup_call_sculpt_with_materials(DRWShadingGroup **sh, int num_sh, Object *ob);
+void DRW_shgroup_call_sculpt(DRWShadingGroup *shgroup,
+ Object *ob,
+ bool use_wire,
+ bool use_mask,
+ bool use_fset,
+ bool use_color,
+ bool use_uv);
+
+void DRW_shgroup_call_sculpt_with_materials(DRWShadingGroup **shgroups,
+ struct GPUMaterial **gpumats,
+ int num_shgroups,
+ Object *ob);
DRWCallBuffer *DRW_shgroup_call_buffer(DRWShadingGroup *shgroup,
struct GPUVertFormat *format,
@@ -934,6 +944,14 @@ typedef struct DRWContextState {
const DRWContextState *DRW_context_state_get(void);
+struct DRW_Attributes;
+struct DRW_MeshCDMask;
+
+void DRW_mesh_batch_cache_get_attributes(struct Object *object,
+ struct Mesh *me,
+ struct DRW_Attributes **r_attrs,
+ struct DRW_MeshCDMask **r_cd_needed);
+
#ifdef __cplusplus
}
#endif
diff --git a/source/blender/draw/intern/draw_attributes.h b/source/blender/draw/intern/draw_attributes.h
index b577c6c4162..786301d0164 100644
--- a/source/blender/draw/intern/draw_attributes.h
+++ b/source/blender/draw/intern/draw_attributes.h
@@ -16,6 +16,7 @@
#include "BLI_sys_types.h"
#include "BLI_threads.h"
+#include "BLI_utildefines.h"
#include "GPU_shader.h"
#include "GPU_vertex_format.h"
@@ -36,6 +37,23 @@ typedef struct DRW_Attributes {
int num_requests;
} DRW_Attributes;
+typedef struct DRW_MeshCDMask {
+ uint32_t uv : 8;
+ uint32_t tan : 8;
+ uint32_t orco : 1;
+ uint32_t tan_orco : 1;
+ uint32_t sculpt_overlays : 1;
+ /**
+ * Edit uv layer is from the base edit mesh as modifiers could remove it. (see T68857)
+ */
+ uint32_t edit_uv : 1;
+} DRW_MeshCDMask;
+
+/* Keep `DRW_MeshCDMask` struct within a `uint32_t`.
+ * bit-wise and atomic operations are used to compare and update the struct.
+ * See `mesh_cd_layers_type_*` functions. */
+BLI_STATIC_ASSERT(sizeof(DRW_MeshCDMask) <= sizeof(uint32_t), "DRW_MeshCDMask exceeds 32 bits")
+
void drw_attributes_clear(DRW_Attributes *attributes);
void drw_attributes_merge(DRW_Attributes *dst,
diff --git a/source/blender/draw/intern/draw_cache.c b/source/blender/draw/intern/draw_cache.c
index 6da22039c2f..eec15a4668d 100644
--- a/source/blender/draw/intern/draw_cache.c
+++ b/source/blender/draw/intern/draw_cache.c
@@ -3388,7 +3388,7 @@ void DRW_cdlayer_attr_aliases_add(GPUVertFormat *format,
/* Active render layer name. */
if (is_active_render) {
- GPU_vertformat_alias_add(format, base_name);
+ GPU_vertformat_alias_add(format, cl->type == CD_MLOOPUV ? "a" : base_name);
}
/* Active display layer name. */
diff --git a/source/blender/draw/intern/draw_cache_extract.hh b/source/blender/draw/intern/draw_cache_extract.hh
index 6f4a652ce24..4fe360eecd7 100644
--- a/source/blender/draw/intern/draw_cache_extract.hh
+++ b/source/blender/draw/intern/draw_cache_extract.hh
@@ -52,22 +52,6 @@ enum {
DRW_MESH_WEIGHT_STATE_LOCK_RELATIVE = (1 << 2),
};
-struct DRW_MeshCDMask {
- uint32_t uv : 8;
- uint32_t tan : 8;
- uint32_t orco : 1;
- uint32_t tan_orco : 1;
- uint32_t sculpt_overlays : 1;
- /**
- * Edit uv layer is from the base edit mesh as modifiers could remove it. (see T68857)
- */
- uint32_t edit_uv : 1;
-};
-/* Keep `DRW_MeshCDMask` struct within a `uint32_t`.
- * bit-wise and atomic operations are used to compare and update the struct.
- * See `mesh_cd_layers_type_*` functions. */
-BLI_STATIC_ASSERT(sizeof(DRW_MeshCDMask) <= sizeof(uint32_t), "DRW_MeshCDMask exceeds 32 bits")
-
enum eMRIterType {
MR_ITER_LOOPTRI = 1 << 0,
MR_ITER_POLY = 1 << 1,
diff --git a/source/blender/draw/intern/draw_cache_impl.h b/source/blender/draw/intern/draw_cache_impl.h
index 89432b0da83..e49b37b451f 100644
--- a/source/blender/draw/intern/draw_cache_impl.h
+++ b/source/blender/draw/intern/draw_cache_impl.h
@@ -191,6 +191,7 @@ struct GPUBatch **DRW_mesh_batch_cache_get_surface_shaded(struct Object *object,
struct Mesh *me,
struct GPUMaterial **gpumat_array,
uint gpumat_array_len);
+
struct GPUBatch **DRW_mesh_batch_cache_get_surface_texpaint(struct Object *object,
struct Mesh *me);
struct GPUBatch *DRW_mesh_batch_cache_get_surface_texpaint_single(struct Object *object,
diff --git a/source/blender/draw/intern/draw_cache_impl_mesh.cc b/source/blender/draw/intern/draw_cache_impl_mesh.cc
index ed78cbbda39..acab4798ea8 100644
--- a/source/blender/draw/intern/draw_cache_impl_mesh.cc
+++ b/source/blender/draw/intern/draw_cache_impl_mesh.cc
@@ -62,6 +62,7 @@
#include "draw_subdivision.h"
#include "draw_cache_impl.h" /* own include */
+#include "draw_manager.h"
#include "mesh_extractors/extract_mesh.hh"
@@ -981,6 +982,27 @@ GPUBatch *DRW_mesh_batch_cache_get_edit_mesh_analysis(Mesh *me)
return DRW_batch_request(&cache->batch.edit_mesh_analysis);
}
+void DRW_mesh_get_attributes(Object *object,
+ Mesh *me,
+ struct GPUMaterial **gpumat_array,
+ int gpumat_array_len,
+ DRW_Attributes *r_attrs,
+ DRW_MeshCDMask *r_cd_needed)
+{
+ DRW_Attributes attrs_needed;
+ drw_attributes_clear(&attrs_needed);
+ DRW_MeshCDMask cd_needed = mesh_cd_calc_used_gpu_layers(
+ object, me, gpumat_array, gpumat_array_len, &attrs_needed);
+
+ if (r_attrs) {
+ *r_attrs = attrs_needed;
+ }
+
+ if (r_cd_needed) {
+ *r_cd_needed = cd_needed;
+ }
+}
+
GPUBatch **DRW_mesh_batch_cache_get_surface_shaded(Object *object,
Mesh *me,
struct GPUMaterial **gpumat_array,
diff --git a/source/blender/draw/intern/draw_manager.h b/source/blender/draw/intern/draw_manager.h
index 4f71e665390..67d0f79b83e 100644
--- a/source/blender/draw/intern/draw_manager.h
+++ b/source/blender/draw/intern/draw_manager.h
@@ -694,6 +694,16 @@ bool drw_engine_data_engines_data_validate(GPUViewport *viewport, void **engine_
void drw_engine_data_cache_release(GPUViewport *viewport);
void drw_engine_data_free(GPUViewport *viewport);
+struct DRW_Attributes;
+struct DRW_MeshCDMask;
+struct GPUMaterial;
+void DRW_mesh_get_attributes(struct Object *object,
+ struct Mesh *me,
+ struct GPUMaterial **gpumat_array,
+ int gpumat_array_len,
+ struct DRW_Attributes *r_attrs,
+ struct DRW_MeshCDMask *r_cd_needed);
+
void DRW_manager_begin_sync(void);
void DRW_manager_end_sync(void);
diff --git a/source/blender/draw/intern/draw_manager_data.c b/source/blender/draw/intern/draw_manager_data.c
index 820242720c8..3e0708d8b49 100644
--- a/source/blender/draw/intern/draw_manager_data.c
+++ b/source/blender/draw/intern/draw_manager_data.c
@@ -5,7 +5,11 @@
* \ingroup draw
*/
+#include "DRW_pbvh.h"
+
+#include "draw_attributes.h"
#include "draw_manager.h"
+#include "draw_pbvh.h"
#include "BKE_curve.h"
#include "BKE_duplilist.h"
@@ -32,12 +36,12 @@
#include "BLI_listbase.h"
#include "BLI_memblock.h"
#include "BLI_mempool.h"
+#include "BLI_math_bits.h"
#ifdef DRW_DEBUG_CULLING
# include "BLI_math_bits.h"
#endif
-#include "GPU_buffers.h"
#include "GPU_capabilities.h"
#include "GPU_material.h"
#include "GPU_uniform_buffer.h"
@@ -1144,6 +1148,8 @@ typedef struct DRWSculptCallbackData {
bool fast_mode; /* Set by draw manager. Do not init. */
int debug_node_nr;
+ PBVHAttrReq *attrs;
+ int attrs_num;
} DRWSculptCallbackData;
#define SCULPT_DEBUG_COLOR(id) (sculpt_debug_colors[id % 9])
@@ -1159,22 +1165,28 @@ static float sculpt_debug_colors[9][4] = {
{0.7f, 0.2f, 1.0f, 1.0f},
};
-static void sculpt_draw_cb(DRWSculptCallbackData *scd, GPU_PBVH_Buffers *buffers)
+static void sculpt_draw_cb(DRWSculptCallbackData *scd,
+ PBVHBatches *batches,
+ PBVH_GPU_Args *pbvh_draw_args)
{
- if (!buffers) {
+ if (!batches) {
return;
}
- /* Meh... use_mask is a bit misleading here. */
- if (scd->use_mask && !GPU_pbvh_buffers_has_overlays(buffers)) {
- return;
+ int primcount;
+ GPUBatch *geom;
+
+ if (!scd->use_wire) {
+ geom = DRW_pbvh_tris_get(batches, scd->attrs, scd->attrs_num, pbvh_draw_args, &primcount);
+ }
+ else {
+ geom = DRW_pbvh_lines_get(batches, scd->attrs, scd->attrs_num, pbvh_draw_args, &primcount);
}
- GPUBatch *geom = GPU_pbvh_buffers_batch_get(buffers, scd->fast_mode, scd->use_wire);
short index = 0;
if (scd->use_mats) {
- index = GPU_pbvh_buffers_material_index_get(buffers);
+ index = drw_pbvh_material_index_get(batches);
if (index >= scd->num_shading_groups) {
index = 0;
}
@@ -1298,9 +1310,11 @@ static void drw_sculpt_generate_calls(DRWSculptCallbackData *scd)
update_only_visible,
&update_frustum,
&draw_frustum,
- (void (*)(void *, GPU_PBVH_Buffers *))sculpt_draw_cb,
+ (void (*)(void *, PBVHBatches *, PBVH_GPU_Args *))sculpt_draw_cb,
scd,
- scd->use_mats);
+ scd->use_mats,
+ scd->attrs,
+ scd->attrs_num);
if (SCULPT_DEBUG_BUFFERS) {
int debug_node_nr = 0;
@@ -1313,7 +1327,13 @@ static void drw_sculpt_generate_calls(DRWSculptCallbackData *scd)
}
}
-void DRW_shgroup_call_sculpt(DRWShadingGroup *shgroup, Object *ob, bool use_wire, bool use_mask)
+void DRW_shgroup_call_sculpt(DRWShadingGroup *shgroup,
+ Object *ob,
+ bool use_wire,
+ bool use_mask,
+ bool use_fset,
+ bool use_color,
+ bool use_uv)
{
DRWSculptCallbackData scd = {
.ob = ob,
@@ -1323,13 +1343,115 @@ void DRW_shgroup_call_sculpt(DRWShadingGroup *shgroup, Object *ob, bool use_wire
.use_mats = false,
.use_mask = use_mask,
};
+
+ PBVHAttrReq attrs[16];
+ int attrs_num = 0;
+
+ memset(attrs, 0, sizeof(attrs));
+
+ attrs[attrs_num++].type = CD_PBVH_CO_TYPE;
+ attrs[attrs_num++].type = CD_PBVH_NO_TYPE;
+
+ if (use_mask) {
+ attrs[attrs_num++].type = CD_PBVH_MASK_TYPE;
+ }
+
+ if (use_fset) {
+ attrs[attrs_num++].type = CD_PBVH_FSET_TYPE;
+ }
+
+ Mesh *me = BKE_object_get_original_mesh(ob);
+
+ if (use_color) {
+ CustomDataLayer *layer = BKE_id_attributes_active_color_get(&me->id);
+
+ if (layer) {
+ eAttrDomain domain = BKE_id_attribute_domain(&me->id, layer);
+
+ attrs[attrs_num].type = layer->type;
+ attrs[attrs_num].domain = domain;
+
+ BLI_strncpy(attrs[attrs_num].name, layer->name, sizeof(attrs[attrs_num].name));
+ attrs_num++;
+ }
+ }
+
+ if (use_uv) {
+ int layer_i = CustomData_get_active_layer_index(&me->ldata, CD_MLOOPUV);
+ if (layer_i != -1) {
+ CustomDataLayer *layer = me->ldata.layers + layer_i;
+
+ attrs[attrs_num].type = CD_MLOOPUV;
+ attrs[attrs_num].domain = ATTR_DOMAIN_CORNER;
+ BLI_strncpy(attrs[attrs_num].name, layer->name, sizeof(attrs[attrs_num].name));
+
+ attrs_num++;
+ }
+ }
+
+ scd.attrs = attrs;
+ scd.attrs_num = attrs_num;
+
drw_sculpt_generate_calls(&scd);
}
void DRW_shgroup_call_sculpt_with_materials(DRWShadingGroup **shgroups,
+ GPUMaterial **gpumats,
int num_shgroups,
Object *ob)
{
+ DRW_Attributes draw_attrs;
+ DRW_MeshCDMask cd_needed;
+
+ if (gpumats) {
+ DRW_mesh_get_attributes(ob, (Mesh *)ob->data, gpumats, num_shgroups, &draw_attrs, &cd_needed);
+ }
+ else {
+ memset(&draw_attrs, 0, sizeof(draw_attrs));
+ memset(&cd_needed, 0, sizeof(cd_needed));
+ }
+
+ int attrs_num = 2 + draw_attrs.num_requests;
+
+ /* UV maps are not in attribute requests. */
+ attrs_num += count_bits_i(cd_needed.uv);
+
+ PBVHAttrReq *attrs = BLI_array_alloca(attrs, attrs_num);
+
+ memset(attrs, 0, sizeof(PBVHAttrReq) * attrs_num);
+ int attrs_i = 0;
+
+ attrs[attrs_i++].type = CD_PBVH_CO_TYPE;
+ attrs[attrs_i++].type = CD_PBVH_NO_TYPE;
+
+ for (int i = 0; i < draw_attrs.num_requests; i++) {
+ DRW_AttributeRequest *req = draw_attrs.requests + i;
+
+ attrs[attrs_i].type = req->cd_type;
+ attrs[attrs_i].domain = req->domain;
+ BLI_strncpy(attrs[attrs_i].name, req->attribute_name, sizeof(attrs->name));
+ attrs_i++;
+ }
+
+ /* UV maps are not in attribute requests. */
+ Mesh *me = (Mesh *)ob->data;
+
+ for (uint i = 0; i < 32; i++) {
+ if (cd_needed.uv & (1 << i)) {
+ int layer_i = CustomData_get_layer_index_n(&me->ldata, CD_MLOOPUV, i);
+ CustomDataLayer *layer = layer_i != -1 ? me->ldata.layers + layer_i : NULL;
+
+ if (layer) {
+ attrs[attrs_i].type = CD_MLOOPUV;
+ attrs[attrs_i].domain = ATTR_DOMAIN_CORNER;
+ BLI_strncpy(attrs[attrs_i].name, layer->name, sizeof(attrs->name));
+ attrs_i++;
+ }
+ }
+ }
+
+ attrs_num = attrs_i;
+
DRWSculptCallbackData scd = {
.ob = ob,
.shading_groups = shgroups,
@@ -1337,6 +1459,8 @@ void DRW_shgroup_call_sculpt_with_materials(DRWShadingGroup **shgroups,
.use_wire = false,
.use_mats = true,
.use_mask = false,
+ .attrs = attrs,
+ .attrs_num = attrs_num,
};
drw_sculpt_generate_calls(&scd);
}
diff --git a/source/blender/draw/intern/draw_pbvh.cc b/source/blender/draw/intern/draw_pbvh.cc
new file mode 100644
index 00000000000..126ba98d06c
--- /dev/null
+++ b/source/blender/draw/intern/draw_pbvh.cc
@@ -0,0 +1,1216 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later
+ * Copyright 2005 Blender Foundation. All rights reserved. */
+
+/** \file
+ * \ingroup gpu
+ *
+ * PBVH drawing. Embeds GPU meshes inside PBVH nodes,
+ * used by mesh sculpt mode.
+ */
+
+/* Disable optimization for a function (for debugging use only!) */
+#ifdef __clang__
+# define ATTR_NO_OPT __attribute__((optnone))
+#elif defined(_MSC_VER)
+# define ATTR_NO_OPT __pragma(optimize("", off))
+#elif defined(__GNUC__)
+# define ATTR_NO_OPT __attribute__((optimize("O0")))
+#else
+# define ATTR_NO_OPT
+#endif
+
+#include <limits.h>
+#include <stddef.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include "MEM_guardedalloc.h"
+
+#include "BLI_bitmap.h"
+#include "BLI_ghash.h"
+#include "BLI_math_color.h"
+#include "BLI_utildefines.h"
+
+#include "DNA_mesh_types.h"
+#include "DNA_meshdata_types.h"
+
+#include "BKE_DerivedMesh.h"
+#include "BKE_attribute.h"
+#include "BKE_ccg.h"
+#include "BKE_customdata.h"
+#include "BKE_mesh.h"
+#include "BKE_paint.h"
+#include "BKE_pbvh.h"
+#include "BKE_subdiv_ccg.h"
+
+#include "GPU_batch.h"
+
+#include "DRW_engine.h"
+#include "DRW_pbvh.h"
+
+#include "draw_pbvh.h"
+#include "gpu_private.h"
+#include "bmesh.h"
+
+#define MAX_PBVH_BATCH_KEY 512
+#define MAX_PBVH_VBOS 16
+
+#include "BLI_index_range.hh"
+#include "BLI_map.hh"
+#include "BLI_math_vec_types.hh"
+#include "BLI_vector.hh"
+#include <vector>
+
+#include <algorithm>
+#include <string>
+
+using blender::char3;
+using blender::float2;
+using blender::float3;
+using blender::float4;
+using blender::IndexRange;
+using blender::Map;
+using blender::short3;
+using blender::uchar3;
+using blender::uchar4;
+using blender::ushort3;
+using blender::ushort4;
+using blender::Vector;
+
+using string = std::string;
+
+struct PBVHVbo {
+ uint64_t type;
+ eAttrDomain domain;
+ string name;
+ GPUVertBuf *vert_buf = nullptr;
+ string key;
+
+ PBVHVbo(eAttrDomain _domain, uint64_t _type, string _name)
+ : type(_type), domain(_domain), name(_name)
+ {
+ }
+
+ void clear_data()
+ {
+ GPU_vertbuf_clear(vert_buf);
+ }
+
+ string build_key()
+ {
+ char buf[512];
+
+ sprintf(buf, "%d:%d:%s", (int)type, (int)domain, name.c_str());
+
+ key = string(buf);
+ return key;
+ }
+};
+
+struct PBVHBatch {
+ Vector<int> vbos;
+ string key;
+ GPUBatch *tris = nullptr, *lines = nullptr;
+ int tris_count = 0, lines_count = 0;
+
+ void sort_vbos(Vector<PBVHVbo> &master_vbos)
+ {
+ struct cmp {
+ Vector<PBVHVbo> &master_vbos;
+
+ cmp(Vector<PBVHVbo> &_master_vbos) : master_vbos(_master_vbos)
+ {
+ }
+
+ bool operator()(const int &a, const int &b)
+ {
+ return master_vbos[a].key < master_vbos[b].key;
+ }
+ };
+
+ std::sort(vbos.begin(), vbos.end(), cmp(master_vbos));
+ }
+
+ string build_key(Vector<PBVHVbo> &master_vbos)
+ {
+ key = "";
+
+ sort_vbos(master_vbos);
+
+ for (int vbo_i : vbos) {
+ key += master_vbos[vbo_i].key + ":";
+ }
+
+ return key;
+ }
+};
+
+static CustomData *get_cdata(eAttrDomain domain, PBVH_GPU_Args *args)
+{
+ switch (domain) {
+ case ATTR_DOMAIN_POINT:
+ return args->vdata;
+ case ATTR_DOMAIN_CORNER:
+ return args->ldata;
+ case ATTR_DOMAIN_FACE:
+ return args->pdata;
+ default:
+ return nullptr;
+ }
+}
+
+struct PBVHBatches {
+ Vector<PBVHVbo> vbos;
+ Map<string, PBVHBatch> batches;
+ GPUIndexBuf *tri_index = nullptr;
+ GPUIndexBuf *lines_index = nullptr;
+ int faces_count = 0; /* Used by PBVH_BMESH and PBVH_GRIDS */
+ int tris_count = 0, lines_count = 0;
+ bool needs_tri_index = false;
+
+ int material_index = 0;
+
+ int count_faces(PBVH_GPU_Args *args)
+ {
+ int count = 0;
+
+ switch (args->pbvh_type) {
+ case PBVH_FACES: {
+ for (int i = 0; i < args->totprim; i++) {
+ int face_index = args->mlooptri[args->prim_indices[i]].poly;
+
+ if (args->hide_poly && args->hide_poly[face_index]) {
+ continue;
+ }
+
+ count++;
+ }
+ break;
+ }
+ case PBVH_GRIDS: {
+ count = BKE_pbvh_count_grid_quads((BLI_bitmap **)args->grid_hidden,
+ args->grid_indices,
+ args->totprim,
+ args->ccg_key.grid_size);
+
+ break;
+ }
+ case PBVH_BMESH: {
+ GSET_FOREACH_BEGIN (BMFace *, f, args->bm_faces) {
+ if (!BM_elem_flag_test(f, BM_ELEM_HIDDEN)) {
+ count++;
+ }
+ }
+ GSET_FOREACH_END();
+ }
+ }
+
+ return count;
+ }
+
+ PBVHBatches(PBVH_GPU_Args *args)
+ {
+ faces_count = count_faces(args);
+
+ if (args->pbvh_type == PBVH_BMESH) {
+ tris_count = faces_count;
+ }
+ }
+
+ ~PBVHBatches()
+ {
+ for (PBVHBatch &batch : batches.values()) {
+ GPU_BATCH_DISCARD_SAFE(batch.tris);
+ GPU_BATCH_DISCARD_SAFE(batch.lines);
+ }
+
+ for (PBVHVbo &vbo : vbos) {
+ GPU_vertbuf_discard(vbo.vert_buf);
+ }
+
+ GPU_INDEXBUF_DISCARD_SAFE(tri_index);
+ GPU_INDEXBUF_DISCARD_SAFE(lines_index);
+ }
+
+ string build_key(PBVHAttrReq *attrs, int attrs_num)
+ {
+ string key;
+ PBVHBatch batch;
+ Vector<PBVHVbo> vbos;
+
+ for (int i : IndexRange(attrs_num)) {
+ PBVHAttrReq *attr = attrs + i;
+
+ PBVHVbo vbo(attr->domain, attr->type, string(attr->name));
+ vbo.build_key();
+
+ vbos.append(vbo);
+ batch.vbos.append(i);
+ }
+
+ batch.build_key(vbos);
+ return batch.key;
+ }
+
+ bool has_vbo(eAttrDomain domain, int type, string name)
+ {
+ for (PBVHVbo &vbo : vbos) {
+ if (vbo.domain == domain && vbo.type == type && vbo.name == name) {
+ return true;
+ }
+ }
+
+ return false;
+ }
+
+ int get_vbo_index(PBVHVbo *vbo)
+ {
+ for (int i : IndexRange(vbos.size())) {
+ if (vbo == &vbos[i]) {
+ return i;
+ }
+ }
+
+ return -1;
+ }
+
+ PBVHVbo *get_vbo(eAttrDomain domain, int type, string name)
+ {
+ for (PBVHVbo &vbo : vbos) {
+ if (vbo.domain == domain && vbo.type == type && vbo.name == name) {
+ return &vbo;
+ }
+ }
+
+ return nullptr;
+ }
+
+ bool has_batch(PBVHAttrReq *attrs, int attrs_num)
+ {
+ return batches.contains(build_key(attrs, attrs_num));
+ }
+
+ PBVHBatch &ensure_batch(PBVHAttrReq *attrs, int attrs_num, PBVH_GPU_Args *args)
+ {
+ if (!has_batch(attrs, attrs_num)) {
+ create_batch(attrs, attrs_num, args);
+ }
+
+ return batches.lookup(build_key(attrs, attrs_num));
+ }
+
+ void fill_vbo_normal_faces(
+ PBVHVbo &vbo,
+ PBVH_GPU_Args *args,
+ std::function<void(std::function<void(int, int, int, const MLoopTri *)> callback)>
+ foreach_faces,
+ GPUVertBufRaw *access)
+ {
+ float fno[3];
+ short no[3];
+ int last_poly = -1;
+ bool smooth = false;
+
+ foreach_faces([&](int buffer_i, int tri_i, int vertex_i, const MLoopTri *tri) {
+ const MPoly *mp = args->mpoly + tri->poly;
+
+ if (tri->poly != last_poly) {
+ last_poly = tri->poly;
+
+ if (!(mp->flag & ME_SMOOTH)) {
+ smooth = true;
+ BKE_mesh_calc_poly_normal(mp, args->mloop + mp->loopstart, args->mvert, fno);
+ normal_float_to_short_v3(no, fno);
+ }
+ else {
+ smooth = false;
+ }
+ }
+
+ if (!smooth) {
+ normal_float_to_short_v3(no, args->vert_normals[vertex_i]);
+ }
+
+ *static_cast<short3 *>(GPU_vertbuf_raw_step(access)) = no;
+ });
+ }
+
+ void fill_vbo_grids_intern(
+ PBVHVbo &vbo,
+ PBVH_GPU_Args *args,
+ std::function<
+ void(std::function<void(int x, int y, int grid_index, CCGElem *elems[4], int i)> func)>
+ foreach_grids)
+ {
+ uint vert_per_grid = square_i(args->ccg_key.grid_size - 1) * 4;
+ uint vert_count = args->totprim * vert_per_grid;
+
+ int existing_num = GPU_vertbuf_get_vertex_len(vbo.vert_buf);
+ void *existing_data = GPU_vertbuf_get_data(vbo.vert_buf);
+
+ if (existing_data == NULL || existing_num != vert_count) {
+ /* Allocate buffer if not allocated yet or size changed. */
+ GPU_vertbuf_data_alloc(vbo.vert_buf, vert_count);
+ }
+
+ GPUVertBufRaw access;
+ GPU_vertbuf_attr_get_raw_data(vbo.vert_buf, 0, &access);
+
+ switch (vbo.type) {
+ case CD_PBVH_CO_TYPE:
+ foreach_grids([&](int x, int y, int grid_index, CCGElem *elems[4], int i) {
+ float *co = CCG_elem_co(&args->ccg_key, elems[i]);
+
+ *static_cast<float3 *>(GPU_vertbuf_raw_step(&access)) = co;
+ });
+ break;
+
+ case CD_PBVH_NO_TYPE:
+ foreach_grids([&](int x, int y, int grid_index, CCGElem *elems[4], int i) {
+ float3 no(0.0f, 0.0f, 0.0f);
+
+ const bool smooth = args->grid_flag_mats[grid_index].flag & ME_SMOOTH;
+
+ if (smooth) {
+ no = CCG_elem_no(&args->ccg_key, elems[0]);
+ }
+ else {
+ for (int j = 0; j < 4; j++) {
+ no += CCG_elem_no(&args->ccg_key, elems[j]);
+ }
+ }
+
+ normalize_v3(no);
+ short sno[3];
+
+ normal_float_to_short_v3(sno, no);
+
+ *static_cast<short3 *>(GPU_vertbuf_raw_step(&access)) = sno;
+ });
+ break;
+
+ case CD_PBVH_MASK_TYPE:
+ foreach_grids([&](int x, int y, int grid_index, CCGElem *elems[4], int i) {
+ float *mask = CCG_elem_mask(&args->ccg_key, elems[i]);
+
+ *static_cast<uchar *>(GPU_vertbuf_raw_step(&access)) = mask ? (uchar)(*mask * 255.0f) :
+ 255;
+ });
+ break;
+
+ case CD_PBVH_FSET_TYPE: {
+ int *face_sets = args->face_sets;
+
+ if (!face_sets) {
+ uchar white[3] = {UCHAR_MAX, UCHAR_MAX, UCHAR_MAX};
+
+ foreach_grids([&](int x, int y, int grid_index, CCGElem *elems[4], int i) {
+ *static_cast<uchar4 *>(GPU_vertbuf_raw_step(&access)) = white;
+ });
+ }
+ else {
+ foreach_grids([&](int x, int y, int grid_index, CCGElem *elems[4], int i) {
+ uchar face_set_color[3] = {UCHAR_MAX, UCHAR_MAX, UCHAR_MAX};
+
+ if (face_sets) {
+ const int face_index = BKE_subdiv_ccg_grid_to_face_index(args->subdiv_ccg,
+ grid_index);
+ const int fset = face_sets[face_index];
+
+ /* Skip for the default color Face Set to render it white. */
+ if (fset != args->face_sets_color_default) {
+ BKE_paint_face_set_overlay_color_get(
+ fset, args->face_sets_color_seed, face_set_color);
+ }
+ }
+
+ *static_cast<uchar3 *>(GPU_vertbuf_raw_step(&access)) = face_set_color;
+ });
+ }
+ break;
+ }
+ }
+ }
+
+ void fill_vbo_grids(PBVHVbo &vbo, PBVH_GPU_Args *args)
+ {
+ int gridsize = args->ccg_key.grid_size;
+
+ uint totgrid = args->totprim;
+
+ auto foreach_solid =
+ [&](std::function<void(int x, int y, int grid_index, CCGElem *elems[4], int i)> func) {
+ for (int i = 0; i < totgrid; i++) {
+ const int grid_index = args->grid_indices[i];
+
+ CCGElem *grid = args->grids[grid_index];
+
+ for (int y = 0; y < gridsize - 1; y++) {
+ for (int x = 0; x < gridsize - 1; x++) {
+ CCGElem *elems[4] = {
+ CCG_grid_elem(&args->ccg_key, grid, x, y),
+ CCG_grid_elem(&args->ccg_key, grid, x + 1, y),
+ CCG_grid_elem(&args->ccg_key, grid, x + 1, y + 1),
+ CCG_grid_elem(&args->ccg_key, grid, x, y + 1),
+ };
+
+ func(x, y, grid_index, elems, 0);
+ func(x + 1, y, grid_index, elems, 1);
+ func(x + 1, y + 1, grid_index, elems, 2);
+ func(x, y + 1, grid_index, elems, 3);
+ }
+ }
+ }
+ };
+
+ auto foreach_indexed =
+ [&](std::function<void(int x, int y, int grid_index, CCGElem *elems[4], int i)> func) {
+ for (int i = 0; i < totgrid; i++) {
+ const int grid_index = args->grid_indices[i];
+
+ CCGElem *grid = args->grids[grid_index];
+
+ for (int y = 0; y < gridsize; y++) {
+ for (int x = 0; x < gridsize; x++) {
+ CCGElem *elems[4] = {
+ CCG_grid_elem(&args->ccg_key, grid, x, y),
+ CCG_grid_elem(&args->ccg_key, grid, x + 1, y),
+ CCG_grid_elem(&args->ccg_key, grid, x + 1, y + 1),
+ CCG_grid_elem(&args->ccg_key, grid, x, y + 1),
+ };
+
+ func(x, y, grid_index, elems, 0);
+ }
+ }
+ }
+ };
+
+ if (needs_tri_index) {
+ fill_vbo_grids_intern(vbo, args, foreach_indexed);
+ }
+ else {
+ fill_vbo_grids_intern(vbo, args, foreach_solid);
+ }
+ }
+
+ ATTR_NO_OPT
+ void fill_vbo_faces(PBVHVbo &vbo, PBVH_GPU_Args *args)
+ {
+ int totvert = args->totprim * 3;
+
+ auto foreach_faces =
+ [&](std::function<void(int buffer_i, int tri_i, int vertex_i, const MLoopTri *tri)> func) {
+ int buffer_i = 0;
+ const MLoop *mloop = args->mloop;
+
+ for (int i : IndexRange(args->totprim)) {
+ int face_index = args->mlooptri[args->prim_indices[i]].poly;
+
+ if (args->hide_poly && args->hide_poly[face_index]) {
+ continue;
+ }
+
+ const MLoopTri *tri = args->mlooptri + args->prim_indices[i];
+
+ for (int j : IndexRange(3)) {
+ func(buffer_i, j, mloop[tri->tri[j]].v, tri);
+ buffer_i++;
+ }
+ }
+ };
+
+ int existing_num = GPU_vertbuf_get_vertex_len(vbo.vert_buf);
+ void *existing_data = GPU_vertbuf_get_data(vbo.vert_buf);
+
+ if (existing_data == NULL || existing_num != totvert) {
+ /* Allocate buffer if not allocated yet or size changed. */
+ GPU_vertbuf_data_alloc(vbo.vert_buf, totvert);
+ }
+
+ GPUVertBufRaw access;
+ GPU_vertbuf_attr_get_raw_data(vbo.vert_buf, 0, &access);
+
+ switch (vbo.type) {
+ case CD_PBVH_CO_TYPE:
+ foreach_faces([&](int buffer_i, int tri_i, int vertex_i, const MLoopTri *tri) {
+ *static_cast<float3 *>(GPU_vertbuf_raw_step(&access)) = args->mvert[vertex_i].co;
+ });
+ break;
+ case CD_PBVH_NO_TYPE:
+ fill_vbo_normal_faces(vbo, args, foreach_faces, &access);
+ break;
+ case CD_PBVH_MASK_TYPE: {
+ float *mask = static_cast<float *>(CustomData_get_layer(args->vdata, CD_PAINT_MASK));
+
+ foreach_faces([&](int buffer_i, int tri_i, int vertex_i, const MLoopTri *tri) {
+ *static_cast<uchar *>(GPU_vertbuf_raw_step(&access)) = (uchar)(mask[vertex_i] * 255.0f);
+ });
+
+ break;
+ }
+ case CD_PBVH_FSET_TYPE: {
+ int *face_sets = static_cast<int *>(
+ CustomData_get_layer(args->pdata, CD_SCULPT_FACE_SETS));
+ int last_poly = -1;
+ uchar fset_color[3] = {255, 255, 255};
+
+ foreach_faces([&](int buffer_i, int tri_i, int vertex_i, const MLoopTri *tri) {
+ if (last_poly != tri->poly && args->face_sets) {
+ last_poly = tri->poly;
+
+ const int fset = abs(face_sets[tri->poly]);
+
+ if (fset != args->face_sets_color_default) {
+ BKE_paint_face_set_overlay_color_get(fset, args->face_sets_color_seed, fset_color);
+ }
+ else {
+ /* Skip for the default color face set to render it white. */
+ fset_color[0] = fset_color[1] = fset_color[2] = 255;
+ }
+ }
+
+ *static_cast<uchar3 *>(GPU_vertbuf_raw_step(&access)) = fset_color;
+ });
+
+ break;
+ }
+ case CD_MLOOPUV: {
+ MLoopUV *mloopuv = static_cast<MLoopUV *>(
+ CustomData_get_layer_named(args->ldata, CD_MLOOPUV, vbo.name.c_str()));
+
+ foreach_faces([&](int buffer_i, int tri_i, int vertex_i, const MLoopTri *tri) {
+ *static_cast<float2 *>(GPU_vertbuf_raw_step(&access)) = mloopuv[tri->tri[tri_i]].uv;
+ });
+ break;
+ }
+ case CD_PROP_COLOR:
+ if (vbo.domain == ATTR_DOMAIN_POINT) {
+ MPropCol *mpropcol = static_cast<MPropCol *>(
+ CustomData_get_layer_named(args->vdata, CD_PROP_COLOR, vbo.name.c_str()));
+
+ foreach_faces([&](int buffer_i, int tri_i, int vertex_i, const MLoopTri *tri) {
+ ushort color[4];
+ MPropCol *col = mpropcol + vertex_i;
+
+ color[0] = unit_float_to_ushort_clamp(col->color[0]);
+ color[1] = unit_float_to_ushort_clamp(col->color[1]);
+ color[2] = unit_float_to_ushort_clamp(col->color[2]);
+ color[3] = unit_float_to_ushort_clamp(col->color[3]);
+
+ *static_cast<ushort4 *>(GPU_vertbuf_raw_step(&access)) = color;
+ });
+ }
+ else if (vbo.domain == ATTR_DOMAIN_CORNER) {
+ MPropCol *mpropcol = static_cast<MPropCol *>(
+ CustomData_get_layer_named(args->ldata, CD_PROP_COLOR, vbo.name.c_str()));
+
+ foreach_faces([&](int buffer_i, int tri_i, int vertex_i, const MLoopTri *tri) {
+ ushort color[4];
+ MPropCol *col = mpropcol + tri->tri[tri_i];
+
+ color[0] = unit_float_to_ushort_clamp(col->color[0]);
+ color[1] = unit_float_to_ushort_clamp(col->color[1]);
+ color[2] = unit_float_to_ushort_clamp(col->color[2]);
+ color[3] = unit_float_to_ushort_clamp(col->color[3]);
+
+ *static_cast<ushort4 *>(GPU_vertbuf_raw_step(&access)) = color;
+ });
+ }
+ break;
+ case CD_PROP_BYTE_COLOR:
+ if (vbo.domain == ATTR_DOMAIN_POINT) {
+ MLoopCol *mbytecol = static_cast<MLoopCol *>(
+ CustomData_get_layer_named(args->vdata, CD_PROP_BYTE_COLOR, vbo.name.c_str()));
+
+ foreach_faces([&](int buffer_i, int tri_i, int vertex_i, const MLoopTri *tri) {
+ ushort color[4];
+ MLoopCol *col = mbytecol + vertex_i;
+
+ color[0] = unit_float_to_ushort_clamp(BLI_color_from_srgb_table[col->r]);
+ color[1] = unit_float_to_ushort_clamp(BLI_color_from_srgb_table[col->g]);
+ color[2] = unit_float_to_ushort_clamp(BLI_color_from_srgb_table[col->b]);
+ color[3] = col->a * 257;
+
+ *static_cast<ushort4 *>(GPU_vertbuf_raw_step(&access)) = color;
+ });
+ }
+ else if (vbo.domain == ATTR_DOMAIN_CORNER) {
+ MLoopCol *mbytecol = static_cast<MLoopCol *>(
+ CustomData_get_layer_named(args->ldata, CD_PROP_BYTE_COLOR, vbo.name.c_str()));
+
+ foreach_faces([&](int buffer_i, int tri_i, int vertex_i, const MLoopTri *tri) {
+ ushort color[4];
+ MLoopCol *col = mbytecol + tri->tri[tri_i];
+
+ color[0] = unit_float_to_ushort_clamp(BLI_color_from_srgb_table[col->r]);
+ color[1] = unit_float_to_ushort_clamp(BLI_color_from_srgb_table[col->g]);
+ color[2] = unit_float_to_ushort_clamp(BLI_color_from_srgb_table[col->b]);
+ color[3] = col->a * 257;
+
+ *static_cast<ushort4 *>(GPU_vertbuf_raw_step(&access)) = color;
+ });
+ }
+ break;
+ }
+ }
+
+ void gpu_flush()
+ {
+ for (PBVHVbo &vbo : vbos) {
+ if (vbo.vert_buf && GPU_vertbuf_get_data(vbo.vert_buf)) {
+ GPU_vertbuf_use(vbo.vert_buf);
+ }
+ }
+ }
+
+ void update(PBVH_GPU_Args *args)
+ {
+ check_index_buffers(args);
+
+ for (PBVHVbo &vbo : vbos) {
+ fill_vbo(vbo, args);
+ }
+ }
+
+ void fill_vbo_bmesh(PBVHVbo &vbo, PBVH_GPU_Args *args)
+ {
+ auto foreach_bmesh = [&](std::function<void(BMLoop * l)> callback) {
+ GSET_FOREACH_BEGIN (BMFace *, f, args->bm_faces) {
+ if (BM_elem_flag_test(f, BM_ELEM_HIDDEN)) {
+ continue;
+ }
+
+ BMLoop *l = f->l_first;
+ do {
+ callback(l);
+ } while ((l = l->next) != f->l_first);
+ }
+ GSET_FOREACH_END();
+ };
+
+ faces_count = 0;
+ GSET_FOREACH_BEGIN (BMFace *, f, args->bm_faces) {
+ if (BM_elem_flag_test(f, BM_ELEM_HIDDEN)) {
+ continue;
+ }
+
+ BMLoop *l = f->l_first;
+ do {
+ faces_count++;
+ } while ((l = l->next) != f->l_first);
+ }
+ GSET_FOREACH_END();
+ tris_count = faces_count;
+
+ int existing_num = GPU_vertbuf_get_vertex_len(vbo.vert_buf);
+ void *existing_data = GPU_vertbuf_get_data(vbo.vert_buf);
+
+ int vert_count = tris_count * 3;
+
+ if (existing_data == nullptr || existing_num != vert_count) {
+ /* Allocate buffer if not allocated yet or size changed. */
+ GPU_vertbuf_data_alloc(vbo.vert_buf, vert_count);
+ }
+
+ GPUVertBufRaw access;
+ GPU_vertbuf_attr_get_raw_data(vbo.vert_buf, 0, &access);
+
+ switch (vbo.type) {
+ case CD_PBVH_CO_TYPE:
+ foreach_bmesh(
+ [&](BMLoop *l) { *static_cast<float3 *>(GPU_vertbuf_raw_step(&access)) = l->v->co; });
+ break;
+
+ case CD_PBVH_NO_TYPE:
+ foreach_bmesh([&](BMLoop *l) {
+ short no[3];
+ bool smooth = BM_elem_flag_test(l->f, BM_ELEM_SMOOTH);
+
+ normal_float_to_short_v3(no, smooth ? l->v->no : l->f->no);
+ *static_cast<short3 *>(GPU_vertbuf_raw_step(&access)) = no;
+ });
+ break;
+
+ case CD_PBVH_MASK_TYPE: {
+ int cd_mask = args->cd_mask_layer;
+
+ if (cd_mask == -1) {
+ foreach_bmesh(
+ [&](BMLoop *l) { *static_cast<float *>(GPU_vertbuf_raw_step(&access)) = 255; });
+ }
+ else {
+ foreach_bmesh([&](BMLoop *l) {
+ float mask = BM_ELEM_CD_GET_FLOAT(l->v, cd_mask);
+
+ *static_cast<uchar *>(GPU_vertbuf_raw_step(&access)) = (uchar)(mask * 255.0f);
+ });
+ }
+ break;
+ }
+ case CD_PBVH_FSET_TYPE: {
+ uchar3 white(255, 255, 255);
+
+ foreach_bmesh(
+ [&](BMLoop *l) { *static_cast<uchar3 *>(GPU_vertbuf_raw_step(&access)) = white; });
+ }
+ }
+ }
+
+ void fill_vbo(PBVHVbo &vbo, PBVH_GPU_Args *args)
+ {
+ switch (args->pbvh_type) {
+ case PBVH_FACES:
+ fill_vbo_faces(vbo, args);
+ break;
+ case PBVH_GRIDS:
+ fill_vbo_grids(vbo, args);
+ break;
+ case PBVH_BMESH:
+ fill_vbo_bmesh(vbo, args);
+ break;
+ }
+ }
+
+ void create_vbo(eAttrDomain domain, const uint32_t type, string name, PBVH_GPU_Args *args)
+ {
+ PBVHVbo vbo(domain, type, name);
+ GPUVertFormat format;
+
+ bool need_aliases = !ELEM(
+ type, CD_PBVH_CO_TYPE, CD_PBVH_NO_TYPE, CD_PBVH_FSET_TYPE, CD_PBVH_MASK_TYPE);
+
+ GPU_vertformat_clear(&format);
+
+ switch (type) {
+ case CD_PBVH_CO_TYPE:
+ GPU_vertformat_attr_add(&format, "pos", GPU_COMP_F32, 3, GPU_FETCH_FLOAT);
+ break;
+ case CD_PROP_FLOAT3:
+ GPU_vertformat_attr_add(&format, "a", GPU_COMP_F32, 3, GPU_FETCH_FLOAT);
+ need_aliases = true;
+ break;
+ case CD_PBVH_NO_TYPE:
+ GPU_vertformat_attr_add(&format, "nor", GPU_COMP_I16, 3, GPU_FETCH_INT_TO_FLOAT_UNIT);
+ break;
+ case CD_PROP_FLOAT2:
+ GPU_vertformat_attr_add(&format, "a", GPU_COMP_F32, 2, GPU_FETCH_FLOAT);
+ need_aliases = true;
+ break;
+ case CD_MLOOPUV:
+ GPU_vertformat_attr_add(&format, "uvs", GPU_COMP_F32, 2, GPU_FETCH_FLOAT);
+ need_aliases = true;
+ break;
+ case CD_PBVH_FSET_TYPE:
+ GPU_vertformat_attr_add(&format, "fset", GPU_COMP_U8, 3, GPU_FETCH_INT_TO_FLOAT_UNIT);
+ break;
+ case CD_PBVH_MASK_TYPE:
+ GPU_vertformat_attr_add(&format, "msk", GPU_COMP_U8, 1, GPU_FETCH_INT_TO_FLOAT_UNIT);
+ break;
+ case CD_PROP_FLOAT:
+ GPU_vertformat_attr_add(&format, "f", GPU_COMP_F32, 1, GPU_FETCH_FLOAT);
+ need_aliases = true;
+ break;
+ case CD_PROP_COLOR:
+ case CD_PROP_BYTE_COLOR: {
+ GPU_vertformat_attr_add(&format, "c", GPU_COMP_U16, 4, GPU_FETCH_INT_TO_FLOAT_UNIT);
+ need_aliases = true;
+ break;
+ }
+ default:
+ BLI_assert(0);
+ printf("%s: error\n", __func__);
+
+ break;
+ }
+
+ if (need_aliases) {
+ CustomData *cdata = get_cdata(domain, args);
+ int layer_i = cdata ? CustomData_get_named_layer_index(cdata, type, name.c_str()) : -1;
+ CustomDataLayer *layer = layer_i != -1 ? cdata->layers + layer_i : nullptr;
+
+ if (layer) {
+ bool is_render, is_active;
+ const char *prefix = "a";
+
+ if (ELEM(type, CD_PROP_COLOR, CD_PROP_BYTE_COLOR)) {
+ prefix = "c";
+
+ CustomDataLayer *render = BKE_id_attributes_render_color_get(&args->me->id);
+ CustomDataLayer *active = BKE_id_attributes_active_color_get(&args->me->id);
+
+ is_render = render && layer && STREQ(render->name, layer->name);
+ is_active = active && layer && STREQ(active->name, layer->name);
+ }
+ else {
+ switch (type) {
+ case CD_MLOOPUV:
+ prefix = "u";
+ break;
+ default:
+ break;
+ }
+
+ const char *active_name = CustomData_get_active_layer_name(cdata, type);
+ const char *render_name = CustomData_get_render_layer_name(cdata, type);
+
+ is_active = active_name && STREQ(layer->name, active_name);
+ is_render = render_name && STREQ(layer->name, render_name);
+ }
+
+ DRW_cdlayer_attr_aliases_add(&format, prefix, cdata, layer, is_render, is_active);
+ }
+ else {
+ printf("%s: error looking up attribute %s\n", __func__, name.c_str());
+ }
+ }
+
+ vbo.vert_buf = GPU_vertbuf_create_with_format_ex(&format, GPU_USAGE_STATIC);
+ vbo.build_key();
+ fill_vbo(vbo, args);
+
+ vbos.append(vbo);
+ }
+
+ void update_pre(PBVH_GPU_Args *args)
+ {
+ if (args->pbvh_type == PBVH_BMESH) {
+ int count = count_faces(args);
+
+ if (faces_count != count) {
+ for (PBVHVbo &vbo : vbos) {
+ vbo.clear_data();
+ }
+
+ GPU_INDEXBUF_DISCARD_SAFE(tri_index);
+ GPU_INDEXBUF_DISCARD_SAFE(lines_index);
+
+ tri_index = lines_index = nullptr;
+ faces_count = tris_count = count;
+ }
+ }
+ }
+
+ void create_index_faces(PBVH_GPU_Args *args)
+ {
+    /* Calculate the number of edges. */
+ int edge_count = 0;
+ for (int i = 0; i < args->totprim; i++) {
+ const MLoopTri *lt = args->mlooptri + args->prim_indices[i];
+
+ if (args->hide_poly && args->hide_poly[lt->poly]) {
+ continue;
+ }
+ int r_edges[3];
+ BKE_mesh_looptri_get_real_edges(args->me, lt, r_edges);
+
+ if (r_edges[0] != -1) {
+ edge_count++;
+ }
+ if (r_edges[1] != -1) {
+ edge_count++;
+ }
+ if (r_edges[2] != -1) {
+ edge_count++;
+ }
+ }
+
+ GPUIndexBufBuilder elb_lines;
+ GPU_indexbuf_init(&elb_lines, GPU_PRIM_LINES, edge_count * 2, INT_MAX);
+
+ int vertex_i = 0;
+ for (int i = 0; i < args->totprim; i++) {
+ const MLoopTri *lt = args->mlooptri + args->prim_indices[i];
+
+ if (args->hide_poly && args->hide_poly[lt->poly]) {
+ continue;
+ }
+
+ int r_edges[3];
+ BKE_mesh_looptri_get_real_edges(args->me, lt, r_edges);
+
+ if (r_edges[0] != -1) {
+ GPU_indexbuf_add_line_verts(&elb_lines, vertex_i, vertex_i + 1);
+ }
+ if (r_edges[1] != -1) {
+ GPU_indexbuf_add_line_verts(&elb_lines, vertex_i + 1, vertex_i + 2);
+ }
+ if (r_edges[2] != -1) {
+ GPU_indexbuf_add_line_verts(&elb_lines, vertex_i + 2, vertex_i);
+ }
+
+ vertex_i += 3;
+ }
+
+ lines_index = GPU_indexbuf_build(&elb_lines);
+ }
+
+ void create_index_bmesh(PBVH_GPU_Args *args)
+ {
+ GPUIndexBufBuilder elb_lines;
+ GPU_indexbuf_init(&elb_lines, GPU_PRIM_LINES, tris_count * 3 * 2, INT_MAX);
+
+ int v_index = 0;
+ lines_count = 0;
+
+ GSET_FOREACH_BEGIN (BMFace *, f, args->bm_faces) {
+ if (BM_elem_flag_test(f, BM_ELEM_HIDDEN)) {
+ continue;
+ }
+
+ GPU_indexbuf_add_line_verts(&elb_lines, v_index, v_index + 1);
+ GPU_indexbuf_add_line_verts(&elb_lines, v_index + 1, v_index + 2);
+ GPU_indexbuf_add_line_verts(&elb_lines, v_index + 2, v_index);
+
+ lines_count += 3;
+ v_index += 3;
+ }
+ GSET_FOREACH_END();
+
+ lines_index = GPU_indexbuf_build(&elb_lines);
+ }
+
+ void create_index_grids(PBVH_GPU_Args *args)
+ {
+ needs_tri_index = true;
+ int gridsize = args->ccg_key.grid_size;
+ int totgrid = args->totprim;
+
+ for (int i : IndexRange(args->totprim)) {
+ int grid_index = args->grid_indices[i];
+ bool smooth = args->grid_flag_mats[grid_index].flag & ME_SMOOTH;
+ BLI_bitmap *gh = args->grid_hidden[grid_index];
+
+ for (int y = 0; y < gridsize - 1; y++) {
+ for (int x = 0; x < gridsize - 1; x++) {
+ if (gh && paint_is_grid_face_hidden(gh, gridsize, x, y)) {
+ /* Skip hidden faces by just setting smooth to true. */
+ smooth = true;
+ goto outer_loop_break;
+ }
+ }
+ }
+
+ outer_loop_break:
+
+ if (!smooth) {
+ needs_tri_index = false;
+ break;
+ }
+ }
+
+ GPUIndexBufBuilder elb, elb_lines;
+
+ CCGKey *key = &args->ccg_key;
+
+ uint visible_quad_len = BKE_pbvh_count_grid_quads(
+ (BLI_bitmap **)args->grid_hidden, args->grid_indices, totgrid, key->grid_size);
+
+ GPU_indexbuf_init(&elb, GPU_PRIM_TRIS, 2 * visible_quad_len, INT_MAX);
+ GPU_indexbuf_init(
+ &elb_lines, GPU_PRIM_LINES, 2 * totgrid * gridsize * (gridsize - 1), INT_MAX);
+
+ if (needs_tri_index) {
+ uint offset = 0;
+ const uint grid_vert_len = gridsize * gridsize;
+ for (int i = 0; i < totgrid; i++, offset += grid_vert_len) {
+ uint v0, v1, v2, v3;
+ bool grid_visible = false;
+
+ BLI_bitmap *gh = args->grid_hidden[args->grid_indices[i]];
+
+ for (int j = 0; j < gridsize - 1; j++) {
+ for (int k = 0; k < gridsize - 1; k++) {
+ /* Skip hidden grid face */
+ if (gh && paint_is_grid_face_hidden(gh, gridsize, k, j)) {
+ continue;
+ }
+ /* Indices in a Clockwise QUAD disposition. */
+ v0 = offset + j * gridsize + k;
+ v1 = v0 + 1;
+ v2 = v1 + gridsize;
+ v3 = v2 - 1;
+
+ GPU_indexbuf_add_tri_verts(&elb, v0, v2, v1);
+ GPU_indexbuf_add_tri_verts(&elb, v0, v3, v2);
+
+ GPU_indexbuf_add_line_verts(&elb_lines, v0, v1);
+ GPU_indexbuf_add_line_verts(&elb_lines, v0, v3);
+
+ if (j + 2 == gridsize) {
+ GPU_indexbuf_add_line_verts(&elb_lines, v2, v3);
+ }
+ grid_visible = true;
+ }
+
+ if (grid_visible) {
+ GPU_indexbuf_add_line_verts(&elb_lines, v1, v2);
+ }
+ }
+ }
+ }
+ else {
+ uint offset = 0;
+ const uint grid_vert_len = square_uint(gridsize - 1) * 4;
+ for (int i = 0; i < totgrid; i++, offset += grid_vert_len) {
+ bool grid_visible = false;
+ BLI_bitmap *gh = args->grid_hidden[args->grid_indices[i]];
+
+ uint v0, v1, v2, v3;
+ for (int j = 0; j < gridsize - 1; j++) {
+ for (int k = 0; k < gridsize - 1; k++) {
+ /* Skip hidden grid face */
+ if (gh && paint_is_grid_face_hidden(gh, gridsize, k, j)) {
+ continue;
+ }
+ /* VBO data are in a Clockwise QUAD disposition. */
+ v0 = offset + (j * (gridsize - 1) + k) * 4;
+ v1 = v0 + 1;
+ v2 = v0 + 2;
+ v3 = v0 + 3;
+
+ GPU_indexbuf_add_tri_verts(&elb, v0, v2, v1);
+ GPU_indexbuf_add_tri_verts(&elb, v0, v3, v2);
+
+ GPU_indexbuf_add_line_verts(&elb_lines, v0, v1);
+ GPU_indexbuf_add_line_verts(&elb_lines, v0, v3);
+
+ if (j + 2 == gridsize) {
+ GPU_indexbuf_add_line_verts(&elb_lines, v2, v3);
+ }
+ grid_visible = true;
+ }
+
+ if (grid_visible) {
+ GPU_indexbuf_add_line_verts(&elb_lines, v1, v2);
+ }
+ }
+ }
+ }
+
+ tri_index = GPU_indexbuf_build(&elb);
+ lines_index = GPU_indexbuf_build(&elb_lines);
+ }
+
+ void create_index(PBVH_GPU_Args *args)
+ {
+ switch (args->pbvh_type) {
+ case PBVH_FACES:
+ create_index_faces(args);
+ break;
+ case PBVH_BMESH:
+ create_index_bmesh(args);
+ break;
+ case PBVH_GRIDS:
+ create_index_grids(args);
+ break;
+ }
+
+ for (PBVHBatch &batch : batches.values()) {
+ GPU_batch_elembuf_set(batch.tris, tri_index, false);
+ GPU_batch_elembuf_set(batch.lines, lines_index, false);
+ }
+ }
+
+ void check_index_buffers(PBVH_GPU_Args *args)
+ {
+ if (!lines_index) {
+ create_index(args);
+ }
+ }
+
+ void create_batch(PBVHAttrReq *attrs, int attrs_num, PBVH_GPU_Args *args)
+ {
+ check_index_buffers(args);
+
+ PBVHBatch batch;
+
+ batch.tris = GPU_batch_create(GPU_PRIM_TRIS,
+ nullptr,
+ /* can be NULL if buffer is empty */
+ tri_index);
+ batch.tris_count = tris_count;
+
+ if (lines_index) {
+ batch.lines = GPU_batch_create(GPU_PRIM_LINES, nullptr, lines_index);
+ batch.lines_count = lines_count;
+ }
+
+ for (int i : IndexRange(attrs_num)) {
+ PBVHAttrReq *attr = attrs + i;
+
+ if (!has_vbo(attr->domain, (int)attr->type, attr->name)) {
+ create_vbo(attr->domain, (uint32_t)attr->type, attr->name, args);
+ }
+
+ PBVHVbo *vbo = get_vbo(attr->domain, (uint32_t)attr->type, attr->name);
+ int vbo_i = get_vbo_index(vbo);
+
+ batch.vbos.append(vbo_i);
+ GPU_batch_vertbuf_add_ex(batch.tris, vbo->vert_buf, false);
+
+ if (batch.lines) {
+ GPU_batch_vertbuf_add_ex(batch.lines, vbo->vert_buf, false);
+ }
+ }
+
+ batch.build_key(vbos);
+ batches.add(batch.key, batch);
+ }
+};
+
+void DRW_pbvh_node_update(PBVHBatches *batches, PBVH_GPU_Args *args)
+{
+ batches->update(args);
+}
+
+void DRW_pbvh_node_gpu_flush(PBVHBatches *batches)
+{
+ batches->gpu_flush();
+}
+
+PBVHBatches *DRW_pbvh_node_create(PBVH_GPU_Args *args)
+{
+ PBVHBatches *batches = new PBVHBatches(args);
+ return batches;
+}
+
+void DRW_pbvh_node_free(PBVHBatches *batches)
+{
+ delete batches;
+}
+
+GPUBatch *DRW_pbvh_tris_get(PBVHBatches *batches,
+ PBVHAttrReq *attrs,
+ int attrs_num,
+ PBVH_GPU_Args *args,
+ int *r_prim_count)
+{
+ PBVHBatch &batch = batches->ensure_batch(attrs, attrs_num, args);
+
+ *r_prim_count = batch.tris_count;
+
+ return batch.tris;
+}
+
+GPUBatch *DRW_pbvh_lines_get(PBVHBatches *batches,
+ PBVHAttrReq *attrs,
+ int attrs_num,
+ PBVH_GPU_Args *args,
+ int *r_prim_count)
+{
+ PBVHBatch &batch = batches->ensure_batch(attrs, attrs_num, args);
+
+ *r_prim_count = batch.lines_count;
+
+ return batch.lines;
+}
+
+void DRW_pbvh_update_pre(struct PBVHBatches *batches, struct PBVH_GPU_Args *args)
+{
+ batches->update_pre(args);
+}
+
+int drw_pbvh_material_index_get(struct PBVHBatches *batches)
+{
+ return batches->material_index;
+}
diff --git a/source/blender/draw/intern/draw_pbvh.h b/source/blender/draw/intern/draw_pbvh.h
new file mode 100644
index 00000000000..eb3f3d55e0f
--- /dev/null
+++ b/source/blender/draw/intern/draw_pbvh.h
@@ -0,0 +1,22 @@
+#pragma once
+
+#include "DNA_customdata_types.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+struct PBVHBatches;
+
+enum {
+ CD_PBVH_CO_TYPE = CD_NUMTYPES,
+ CD_PBVH_NO_TYPE = CD_NUMTYPES + 1,
+ CD_PBVH_FSET_TYPE = CD_NUMTYPES + 2,
+ CD_PBVH_MASK_TYPE = CD_NUMTYPES + 3
+};
+
+int drw_pbvh_material_index_get(struct PBVHBatches *batches);
+
+#ifdef __cplusplus
+}
+#endif
diff --git a/source/blender/gpu/CMakeLists.txt b/source/blender/gpu/CMakeLists.txt
index 779dc052649..b221a93bf89 100644
--- a/source/blender/gpu/CMakeLists.txt
+++ b/source/blender/gpu/CMakeLists.txt
@@ -47,7 +47,6 @@ set(SRC
intern/gpu_batch.cc
intern/gpu_batch_presets.c
intern/gpu_batch_utils.c
- intern/gpu_buffers.c
intern/gpu_capabilities.cc
intern/gpu_codegen.cc
intern/gpu_compute.cc
@@ -84,7 +83,6 @@ set(SRC
GPU_batch.h
GPU_batch_presets.h
GPU_batch_utils.h
- GPU_buffers.h
GPU_capabilities.h
GPU_common.h
GPU_common_types.h
diff --git a/source/blender/gpu/GPU_buffers.h b/source/blender/gpu/GPU_buffers.h
deleted file mode 100644
index 5cdc5f19540..00000000000
--- a/source/blender/gpu/GPU_buffers.h
+++ /dev/null
@@ -1,157 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later
- * Copyright 2005 Blender Foundation. All rights reserved. */
-
-/** \file
- * \ingroup gpu
- */
-
-#pragma once
-
-#include <stddef.h>
-
-#include "BKE_attribute.h"
-#include "BKE_pbvh.h"
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-struct BMesh;
-struct CCGElem;
-struct CCGKey;
-struct DMFlagMat;
-struct GSet;
-struct TableGSet;
-struct Mesh;
-struct MLoop;
-struct MLoopCol;
-struct MLoopTri;
-struct MPoly;
-struct MPropCol;
-struct MVert;
-struct Mesh;
-struct PBVH;
-struct SubdivCCG;
-struct CustomData;
-
-typedef struct PBVHGPUFormat PBVHGPUFormat;
-
-/**
- * Buffers for drawing from PBVH grids.
- */
-typedef struct GPU_PBVH_Buffers GPU_PBVH_Buffers;
-
-/**
- * Build must be called once before using the other functions,
- * used every time mesh topology changes.
- *
- * Threaded: do not call any functions that use OpenGL calls!
- */
-GPU_PBVH_Buffers *GPU_pbvh_mesh_buffers_build(const struct Mesh *mesh,
- const struct MLoopTri *looptri,
- const int *face_indices,
- int face_indices_len);
-
-/**
- * Threaded: do not call any functions that use OpenGL calls!
- */
-GPU_PBVH_Buffers *GPU_pbvh_grid_buffers_build(int totgrid,
- unsigned int **grid_hidden,
- bool smooth);
-
-/**
- * Threaded: do not call any functions that use OpenGL calls!
- */
-GPU_PBVH_Buffers *GPU_pbvh_bmesh_buffers_build(bool smooth_shading);
-
-/**
- * Free part of data for update. Not thread safe, must run in OpenGL main thread.
- */
-void GPU_pbvh_bmesh_buffers_update_free(GPU_PBVH_Buffers *buffers);
-void GPU_pbvh_grid_buffers_update_free(GPU_PBVH_Buffers *buffers,
- const struct DMFlagMat *grid_flag_mats,
- const int *grid_indices);
-
-/**
- * Update mesh buffers without topology changes. Threaded.
- */
-enum {
- GPU_PBVH_BUFFERS_SHOW_MASK = (1 << 1),
- GPU_PBVH_BUFFERS_SHOW_VCOL = (1 << 2),
- GPU_PBVH_BUFFERS_SHOW_SCULPT_FACE_SETS = (1 << 3),
-};
-
-/**
- * Creates a vertex buffer (coordinate, normal, color) and,
- * if smooth shading, an element index buffer.
- * Threaded: do not call any functions that use OpenGL calls!
- */
-void GPU_pbvh_mesh_buffers_update(PBVHGPUFormat *vbo_id,
- GPU_PBVH_Buffers *buffers,
- const struct Mesh *mesh,
- const struct MVert *mvert,
- const float *vmask,
- const int *sculpt_face_sets,
- const int face_sets_color_seed,
- const int face_sets_color_default,
- const int update_flags,
- const float (*vert_normals)[3]);
-
-bool GPU_pbvh_attribute_names_update(PBVHType pbvh_type,
- PBVHGPUFormat *vbo_id,
- const struct CustomData *vdata,
- const struct CustomData *ldata,
- bool active_attrs_only);
-
-/**
- * Creates a vertex buffer (coordinate, normal, color) and,
- * if smooth shading, an element index buffer.
- * Threaded: do not call any functions that use OpenGL calls!
- */
-void GPU_pbvh_bmesh_buffers_update(PBVHGPUFormat *vbo_id,
- struct GPU_PBVH_Buffers *buffers,
- struct BMesh *bm,
- struct GSet *bm_faces,
- struct GSet *bm_unique_verts,
- struct GSet *bm_other_verts,
- const int update_flags);
-
-/**
- * Threaded: do not call any functions that use OpenGL calls!
- */
-void GPU_pbvh_grid_buffers_update(PBVHGPUFormat *vbo_id,
- GPU_PBVH_Buffers *buffers,
- struct SubdivCCG *subdiv_ccg,
- struct CCGElem **grids,
- const struct DMFlagMat *grid_flag_mats,
- int *grid_indices,
- int totgrid,
- const int *sculpt_face_sets,
- int face_sets_color_seed,
- int face_sets_color_default,
- const struct CCGKey *key,
- int update_flags);
-
-/**
- * Finish update. Not thread safe, must run in OpenGL main
- * thread.
- */
-void GPU_pbvh_buffers_update_flush(GPU_PBVH_Buffers *buffers);
-
-/**
- * Free buffers. Not thread safe, must run in OpenGL main thread.
- */
-void GPU_pbvh_buffers_free(GPU_PBVH_Buffers *buffers);
-
-/** Draw. */
-struct GPUBatch *GPU_pbvh_buffers_batch_get(GPU_PBVH_Buffers *buffers, bool fast, bool wires);
-
-short GPU_pbvh_buffers_material_index_get(GPU_PBVH_Buffers *buffers);
-bool GPU_pbvh_buffers_has_overlays(GPU_PBVH_Buffers *buffers);
-
-PBVHGPUFormat *GPU_pbvh_make_format(void);
-void GPU_pbvh_free_format(PBVHGPUFormat *vbo_id);
-
-#ifdef __cplusplus
-}
-#endif
diff --git a/source/blender/gpu/intern/gpu_buffers.c b/source/blender/gpu/intern/gpu_buffers.c
deleted file mode 100644
index c0527357663..00000000000
--- a/source/blender/gpu/intern/gpu_buffers.c
+++ /dev/null
@@ -1,1475 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later
- * Copyright 2005 Blender Foundation. All rights reserved. */
-
-/** \file
- * \ingroup gpu
- *
- * Mesh drawing using OpenGL VBO (Vertex Buffer Objects)
- */
-
-#include <limits.h>
-#include <stddef.h>
-#include <stdlib.h>
-#include <string.h>
-
-#include "MEM_guardedalloc.h"
-
-#include "BLI_bitmap.h"
-#include "BLI_ghash.h"
-#include "BLI_math_color.h"
-#include "BLI_utildefines.h"
-
-#include "DNA_mesh_types.h"
-#include "DNA_meshdata_types.h"
-
-#include "BKE_DerivedMesh.h"
-#include "BKE_attribute.h"
-#include "BKE_ccg.h"
-#include "BKE_customdata.h"
-#include "BKE_mesh.h"
-#include "BKE_paint.h"
-#include "BKE_pbvh.h"
-#include "BKE_subdiv_ccg.h"
-
-#include "GPU_batch.h"
-#include "GPU_buffers.h"
-
-#include "DRW_engine.h"
-
-#include "gpu_private.h"
-
-#include "bmesh.h"
-
-struct GPU_PBVH_Buffers {
- GPUIndexBuf *index_buf, *index_buf_fast;
- GPUIndexBuf *index_lines_buf, *index_lines_buf_fast;
- GPUVertBuf *vert_buf;
-
- GPUBatch *lines;
- GPUBatch *lines_fast;
- GPUBatch *triangles;
- GPUBatch *triangles_fast;
-
- /* mesh pointers in case buffer allocation fails */
- const MPoly *mpoly;
- const MLoop *mloop;
- const MLoopTri *looptri;
- const MVert *mvert;
-
- const int *face_indices;
- int face_indices_len;
-
- /* grid pointers */
- CCGKey gridkey;
- CCGElem **grids;
- const DMFlagMat *grid_flag_mats;
- BLI_bitmap *const *grid_hidden;
- const int *grid_indices;
- int totgrid;
-
- bool use_bmesh;
- bool clear_bmesh_on_flush;
-
- uint tot_tri, tot_quad;
-
- short material_index;
-
- /* The PBVH ensures that either all faces in the node are
- * smooth-shaded or all faces are flat-shaded */
- bool smooth;
-
- bool show_overlay;
-};
-
-typedef struct GPUAttrRef {
- uchar domain, type;
- ushort cd_offset;
- int layer_idx;
-} GPUAttrRef;
-
-#define MAX_GPU_ATTR 256
-
-typedef struct PBVHGPUFormat {
- GPUVertFormat format;
- uint pos, nor, msk, fset;
- uint col[MAX_GPU_ATTR];
- uint uv[MAX_GPU_ATTR];
- int totcol, totuv;
-
- /* Upload only the active color and UV attributes,
- * used for workbench mode. */
- bool active_attrs_only;
-} PBVHGPUFormat;
-
-PBVHGPUFormat *GPU_pbvh_make_format(void)
-{
- PBVHGPUFormat *vbo_id = MEM_callocN(sizeof(PBVHGPUFormat), "PBVHGPUFormat");
-
- GPU_pbvh_attribute_names_update(PBVH_FACES, vbo_id, NULL, NULL, false);
-
- return vbo_id;
-}
-
-void GPU_pbvh_free_format(PBVHGPUFormat *vbo_id)
-{
- MEM_SAFE_FREE(vbo_id);
-}
-
-static int gpu_pbvh_make_attr_offs(eAttrDomainMask domain_mask,
- eCustomDataMask type_mask,
- const CustomData *vdata,
- const CustomData *edata,
- const CustomData *ldata,
- const CustomData *pdata,
- GPUAttrRef r_cd_attrs[MAX_GPU_ATTR],
- bool active_only,
- int active_type,
- int active_domain,
- const CustomDataLayer *active_layer,
- const CustomDataLayer *render_layer);
-
-/** \} */
-
-/* -------------------------------------------------------------------- */
-/** \name PBVH Utils
- * \{ */
-
-void gpu_pbvh_init()
-{
-}
-
-void gpu_pbvh_exit()
-{
- /* Nothing to do. */
-}
-
-static CustomDataLayer *get_active_layer(const CustomData *cdata, int type)
-{
- int idx = CustomData_get_active_layer_index(cdata, type);
- return idx != -1 ? cdata->layers + idx : NULL;
-}
-
-static CustomDataLayer *get_render_layer(const CustomData *cdata, int type)
-{
- int idx = CustomData_get_render_layer_index(cdata, type);
- return idx != -1 ? cdata->layers + idx : NULL;
-}
-
-/* Allocates a non-initialized buffer to be sent to GPU.
- * Return is false it indicates that the memory map failed. */
-static bool gpu_pbvh_vert_buf_data_set(PBVHGPUFormat *vbo_id,
- GPU_PBVH_Buffers *buffers,
- uint vert_len)
-{
- /* Keep so we can test #GPU_USAGE_DYNAMIC buffer use.
- * Not that format initialization match in both blocks.
- * Do this to keep braces balanced - otherwise indentation breaks. */
-
- if (buffers->vert_buf == NULL) {
- /* Initialize vertex buffer (match 'VertexBufferFormat'). */
- buffers->vert_buf = GPU_vertbuf_create_with_format_ex(&vbo_id->format, GPU_USAGE_STATIC);
- }
- if (GPU_vertbuf_get_data(buffers->vert_buf) == NULL ||
- GPU_vertbuf_get_vertex_len(buffers->vert_buf) != vert_len) {
- /* Allocate buffer if not allocated yet or size changed. */
- GPU_vertbuf_data_alloc(buffers->vert_buf, vert_len);
- }
-
- return GPU_vertbuf_get_data(buffers->vert_buf) != NULL;
-}
-
-static void gpu_pbvh_batch_init(GPU_PBVH_Buffers *buffers, GPUPrimType prim)
-{
- if (buffers->triangles == NULL) {
- buffers->triangles = GPU_batch_create(prim,
- buffers->vert_buf,
- /* can be NULL if buffer is empty */
- buffers->index_buf);
- }
-
- if ((buffers->triangles_fast == NULL) && buffers->index_buf_fast) {
- buffers->triangles_fast = GPU_batch_create(prim, buffers->vert_buf, buffers->index_buf_fast);
- }
-
- if (buffers->lines == NULL) {
- buffers->lines = GPU_batch_create(GPU_PRIM_LINES,
- buffers->vert_buf,
- /* can be NULL if buffer is empty */
- buffers->index_lines_buf);
- }
-
- if ((buffers->lines_fast == NULL) && buffers->index_lines_buf_fast) {
- buffers->lines_fast = GPU_batch_create(
- GPU_PRIM_LINES, buffers->vert_buf, buffers->index_lines_buf_fast);
- }
-}
-
-/** \} */
-
-/* -------------------------------------------------------------------- */
-/** \name Mesh PBVH
- * \{ */
-
-static bool gpu_pbvh_is_looptri_visible(const MLoopTri *lt, const bool *hide_poly)
-{
- return !paint_is_face_hidden(lt, hide_poly);
-}
-
-void GPU_pbvh_mesh_buffers_update(PBVHGPUFormat *vbo_id,
- GPU_PBVH_Buffers *buffers,
- const Mesh *mesh,
- const MVert *mvert,
- const float *vmask,
- const int *sculpt_face_sets,
- int face_sets_color_seed,
- int face_sets_color_default,
- int update_flags,
- const float (*vert_normals)[3])
-{
- GPUAttrRef vcol_refs[MAX_GPU_ATTR];
- GPUAttrRef cd_uvs[MAX_GPU_ATTR];
-
- const bool *hide_poly = (const bool *)CustomData_get_layer_named(
- &mesh->pdata, CD_PROP_BOOL, ".hide_poly");
- const int *material_indices = (const int *)CustomData_get_layer_named(
- &mesh->pdata, CD_PROP_INT32, "material_index");
-
- const CustomDataLayer *actcol = BKE_id_attributes_active_color_get(&mesh->id);
- eAttrDomain actcol_domain = actcol ? BKE_id_attribute_domain(&mesh->id, actcol) :
- ATTR_DOMAIN_AUTO;
-
- const CustomDataLayer *rendercol = BKE_id_attributes_render_color_get(&mesh->id);
-
- int totcol;
-
- if (update_flags & GPU_PBVH_BUFFERS_SHOW_VCOL) {
- totcol = gpu_pbvh_make_attr_offs(ATTR_DOMAIN_MASK_COLOR,
- CD_MASK_COLOR_ALL,
- &mesh->vdata,
- NULL,
- &mesh->ldata,
- NULL,
- vcol_refs,
- vbo_id->active_attrs_only,
- actcol ? actcol->type : 0,
- actcol_domain,
- actcol,
- rendercol);
- }
- else {
- totcol = 0;
- }
-
- int totuv = gpu_pbvh_make_attr_offs(ATTR_DOMAIN_MASK_CORNER,
- CD_MASK_MLOOPUV,
- NULL,
- NULL,
- &mesh->ldata,
- NULL,
- cd_uvs,
- vbo_id->active_attrs_only,
- CD_MLOOPUV,
- ATTR_DOMAIN_CORNER,
- get_active_layer(&mesh->ldata, CD_MLOOPUV),
- get_render_layer(&mesh->ldata, CD_MLOOPUV));
-
- const bool show_mask = vmask && (update_flags & GPU_PBVH_BUFFERS_SHOW_MASK) != 0;
- const bool show_face_sets = sculpt_face_sets &&
- (update_flags & GPU_PBVH_BUFFERS_SHOW_SCULPT_FACE_SETS) != 0;
- bool empty_mask = true;
- bool default_face_set = true;
-
- {
- const int totelem = buffers->tot_tri * 3;
-
- /* Build VBO */
- if (gpu_pbvh_vert_buf_data_set(vbo_id, buffers, totelem)) {
- GPUVertBufRaw pos_step = {0};
- GPUVertBufRaw nor_step = {0};
- GPUVertBufRaw msk_step = {0};
- GPUVertBufRaw fset_step = {0};
- GPUVertBufRaw col_step = {0};
- GPUVertBufRaw uv_step = {0};
-
- GPU_vertbuf_attr_get_raw_data(buffers->vert_buf, vbo_id->pos, &pos_step);
- GPU_vertbuf_attr_get_raw_data(buffers->vert_buf, vbo_id->nor, &nor_step);
- GPU_vertbuf_attr_get_raw_data(buffers->vert_buf, vbo_id->msk, &msk_step);
- GPU_vertbuf_attr_get_raw_data(buffers->vert_buf, vbo_id->fset, &fset_step);
-
- /* calculate normal for each polygon only once */
- uint mpoly_prev = UINT_MAX;
- short no[3] = {0, 0, 0};
-
- if (totuv > 0) {
- for (int uv_i = 0; uv_i < totuv; uv_i++) {
- GPU_vertbuf_attr_get_raw_data(buffers->vert_buf, vbo_id->uv[uv_i], &uv_step);
-
- GPUAttrRef *ref = cd_uvs + uv_i;
- CustomDataLayer *layer = mesh->ldata.layers + ref->layer_idx;
- MLoopUV *muv = layer->data;
-
- for (uint i = 0; i < buffers->face_indices_len; i++) {
- const MLoopTri *lt = &buffers->looptri[buffers->face_indices[i]];
-
- if (!gpu_pbvh_is_looptri_visible(lt, hide_poly)) {
- continue;
- }
-
- for (uint j = 0; j < 3; j++) {
- MLoopUV *muv2 = muv + lt->tri[j];
-
- memcpy(GPU_vertbuf_raw_step(&uv_step), muv2->uv, sizeof(muv2->uv));
- }
- }
- }
- }
-
- for (int col_i = 0; col_i < totcol; col_i++) {
- GPU_vertbuf_attr_get_raw_data(buffers->vert_buf, vbo_id->col[col_i], &col_step);
-
- const MPropCol *pcol = NULL;
- const MLoopCol *mcol = NULL;
-
- GPUAttrRef *ref = vcol_refs + col_i;
- const CustomData *cdata = ref->domain == ATTR_DOMAIN_POINT ? &mesh->vdata : &mesh->ldata;
- const CustomDataLayer *layer = cdata->layers + ref->layer_idx;
-
- bool color_loops = ref->domain == ATTR_DOMAIN_CORNER;
-
- if (layer->type == CD_PROP_COLOR) {
- pcol = (const MPropCol *)layer->data;
- }
- else {
- mcol = (const MLoopCol *)layer->data;
- }
-
- for (uint i = 0; i < buffers->face_indices_len; i++) {
- const MLoopTri *lt = &buffers->looptri[buffers->face_indices[i]];
- const uint vtri[3] = {
- buffers->mloop[lt->tri[0]].v,
- buffers->mloop[lt->tri[1]].v,
- buffers->mloop[lt->tri[2]].v,
- };
-
- if (!gpu_pbvh_is_looptri_visible(lt, hide_poly)) {
- continue;
- }
-
- for (uint j = 0; j < 3; j++) {
- /* Vertex Colors. */
- const uint loop_index = lt->tri[j];
-
- ushort scol[4] = {USHRT_MAX, USHRT_MAX, USHRT_MAX, USHRT_MAX};
-
- if (pcol) {
- const MPropCol *pcol2 = pcol + (color_loops ? loop_index : vtri[j]);
-
- scol[0] = unit_float_to_ushort_clamp(pcol2->color[0]);
- scol[1] = unit_float_to_ushort_clamp(pcol2->color[1]);
- scol[2] = unit_float_to_ushort_clamp(pcol2->color[2]);
- scol[3] = unit_float_to_ushort_clamp(pcol2->color[3]);
- }
- else {
- const MLoopCol *mcol2 = mcol + (color_loops ? loop_index : vtri[j]);
-
- scol[0] = unit_float_to_ushort_clamp(BLI_color_from_srgb_table[mcol2->r]);
- scol[1] = unit_float_to_ushort_clamp(BLI_color_from_srgb_table[mcol2->g]);
- scol[2] = unit_float_to_ushort_clamp(BLI_color_from_srgb_table[mcol2->b]);
- scol[3] = unit_float_to_ushort_clamp(mcol2->a * (1.0f / 255.0f));
- }
-
- memcpy(GPU_vertbuf_raw_step(&col_step), scol, sizeof(scol));
- }
- }
- }
-
- for (uint i = 0; i < buffers->face_indices_len; i++) {
- const MLoopTri *lt = &buffers->looptri[buffers->face_indices[i]];
- const uint vtri[3] = {
- buffers->mloop[lt->tri[0]].v,
- buffers->mloop[lt->tri[1]].v,
- buffers->mloop[lt->tri[2]].v,
- };
-
- if (!gpu_pbvh_is_looptri_visible(lt, hide_poly)) {
- continue;
- }
-
- /* Face normal and mask */
- if (lt->poly != mpoly_prev && !buffers->smooth) {
- const MPoly *mp = &buffers->mpoly[lt->poly];
- float fno[3];
- BKE_mesh_calc_poly_normal(mp, &buffers->mloop[mp->loopstart], mvert, fno);
- normal_float_to_short_v3(no, fno);
- mpoly_prev = lt->poly;
- }
-
- uchar face_set_color[4] = {UCHAR_MAX, UCHAR_MAX, UCHAR_MAX, UCHAR_MAX};
- if (show_face_sets) {
- const int fset = sculpt_face_sets[lt->poly];
- /* Skip for the default color Face Set to render it white. */
- if (fset != face_sets_color_default) {
- BKE_paint_face_set_overlay_color_get(fset, face_sets_color_seed, face_set_color);
- default_face_set = false;
- }
- }
-
- float fmask = 0.0f;
- uchar cmask = 0;
- if (show_mask && !buffers->smooth) {
- fmask = (vmask[vtri[0]] + vmask[vtri[1]] + vmask[vtri[2]]) / 3.0f;
- cmask = (uchar)(fmask * 255);
- }
-
- for (uint j = 0; j < 3; j++) {
- const MVert *v = &mvert[vtri[j]];
- copy_v3_v3(GPU_vertbuf_raw_step(&pos_step), v->co);
-
- if (buffers->smooth) {
- normal_float_to_short_v3(no, vert_normals[vtri[j]]);
- }
- copy_v3_v3_short(GPU_vertbuf_raw_step(&nor_step), no);
-
- if (show_mask && buffers->smooth) {
- cmask = (uchar)(vmask[vtri[j]] * 255);
- }
-
- *(uchar *)GPU_vertbuf_raw_step(&msk_step) = cmask;
- empty_mask = empty_mask && (cmask == 0);
- /* Face Sets. */
- memcpy(GPU_vertbuf_raw_step(&fset_step), face_set_color, sizeof(uchar[3]));
- }
- }
- }
-
- gpu_pbvh_batch_init(buffers, GPU_PRIM_TRIS);
- }
-
- /* Get material index from the first face of this buffer. */
- const MLoopTri *lt = &buffers->looptri[buffers->face_indices[0]];
- buffers->material_index = material_indices ? material_indices[lt->poly] : 0;
-
- buffers->show_overlay = !empty_mask || !default_face_set;
- buffers->mvert = mvert;
-}
-
-GPU_PBVH_Buffers *GPU_pbvh_mesh_buffers_build(const Mesh *mesh,
- const MLoopTri *looptri,
- const int *face_indices,
- const int face_indices_len)
-{
- GPU_PBVH_Buffers *buffers;
- int i, tottri;
- int tot_real_edges = 0;
-
- const MPoly *polys = BKE_mesh_polys(mesh);
- const MLoop *loops = BKE_mesh_loops(mesh);
-
- buffers = MEM_callocN(sizeof(GPU_PBVH_Buffers), "GPU_Buffers");
-
- const bool *hide_poly = (bool *)CustomData_get_layer_named(
- &mesh->pdata, CD_PROP_BOOL, ".hide_poly");
-
- /* smooth or flat for all */
- buffers->smooth = polys[looptri[face_indices[0]].poly].flag & ME_SMOOTH;
-
- buffers->show_overlay = false;
-
- /* Count the number of visible triangles */
- for (i = 0, tottri = 0; i < face_indices_len; i++) {
- const MLoopTri *lt = &looptri[face_indices[i]];
- if (gpu_pbvh_is_looptri_visible(lt, hide_poly)) {
- int r_edges[3];
- BKE_mesh_looptri_get_real_edges(mesh, lt, r_edges);
- for (int j = 0; j < 3; j++) {
- if (r_edges[j] != -1) {
- tot_real_edges++;
- }
- }
- tottri++;
- }
- }
-
- if (tottri == 0) {
- buffers->tot_tri = 0;
-
- buffers->mpoly = polys;
- buffers->mloop = loops;
- buffers->looptri = looptri;
- buffers->face_indices = face_indices;
- buffers->face_indices_len = 0;
-
- return buffers;
- }
-
- /* Fill the only the line buffer. */
- GPUIndexBufBuilder elb_lines;
- GPU_indexbuf_init(&elb_lines, GPU_PRIM_LINES, tot_real_edges, INT_MAX);
- int vert_idx = 0;
-
- for (i = 0; i < face_indices_len; i++) {
- const MLoopTri *lt = &looptri[face_indices[i]];
-
- /* Skip hidden faces */
- if (!gpu_pbvh_is_looptri_visible(lt, hide_poly)) {
- continue;
- }
-
- int r_edges[3];
- BKE_mesh_looptri_get_real_edges(mesh, lt, r_edges);
- if (r_edges[0] != -1) {
- GPU_indexbuf_add_line_verts(&elb_lines, vert_idx * 3 + 0, vert_idx * 3 + 1);
- }
- if (r_edges[1] != -1) {
- GPU_indexbuf_add_line_verts(&elb_lines, vert_idx * 3 + 1, vert_idx * 3 + 2);
- }
- if (r_edges[2] != -1) {
- GPU_indexbuf_add_line_verts(&elb_lines, vert_idx * 3 + 2, vert_idx * 3 + 0);
- }
-
- vert_idx++;
- }
- buffers->index_lines_buf = GPU_indexbuf_build(&elb_lines);
-
- buffers->tot_tri = tottri;
-
- buffers->mpoly = polys;
- buffers->mloop = loops;
- buffers->looptri = looptri;
-
- buffers->face_indices = face_indices;
- buffers->face_indices_len = face_indices_len;
-
- return buffers;
-}
-
-/** \} */
-
-/* -------------------------------------------------------------------- */
-/** \name Grid PBVH
- * \{ */
-
-static void gpu_pbvh_grid_fill_index_buffers(GPU_PBVH_Buffers *buffers,
- SubdivCCG *UNUSED(subdiv_ccg),
- const int *UNUSED(face_sets),
- const int *grid_indices,
- uint visible_quad_len,
- int totgrid,
- int gridsize)
-{
- GPUIndexBufBuilder elb, elb_lines;
- GPUIndexBufBuilder elb_fast, elb_lines_fast;
-
- GPU_indexbuf_init(&elb, GPU_PRIM_TRIS, 2 * visible_quad_len, INT_MAX);
- GPU_indexbuf_init(&elb_fast, GPU_PRIM_TRIS, 2 * totgrid, INT_MAX);
- GPU_indexbuf_init(&elb_lines, GPU_PRIM_LINES, 2 * totgrid * gridsize * (gridsize - 1), INT_MAX);
- GPU_indexbuf_init(&elb_lines_fast, GPU_PRIM_LINES, 4 * totgrid, INT_MAX);
-
- if (buffers->smooth) {
- uint offset = 0;
- const uint grid_vert_len = gridsize * gridsize;
- for (int i = 0; i < totgrid; i++, offset += grid_vert_len) {
- uint v0, v1, v2, v3;
- bool grid_visible = false;
-
- BLI_bitmap *gh = buffers->grid_hidden[grid_indices[i]];
-
- for (int j = 0; j < gridsize - 1; j++) {
- for (int k = 0; k < gridsize - 1; k++) {
- /* Skip hidden grid face */
- if (gh && paint_is_grid_face_hidden(gh, gridsize, k, j)) {
- continue;
- }
- /* Indices in a Clockwise QUAD disposition. */
- v0 = offset + j * gridsize + k;
- v1 = v0 + 1;
- v2 = v1 + gridsize;
- v3 = v2 - 1;
-
- GPU_indexbuf_add_tri_verts(&elb, v0, v2, v1);
- GPU_indexbuf_add_tri_verts(&elb, v0, v3, v2);
-
- GPU_indexbuf_add_line_verts(&elb_lines, v0, v1);
- GPU_indexbuf_add_line_verts(&elb_lines, v0, v3);
-
- if (j + 2 == gridsize) {
- GPU_indexbuf_add_line_verts(&elb_lines, v2, v3);
- }
- grid_visible = true;
- }
-
- if (grid_visible) {
- GPU_indexbuf_add_line_verts(&elb_lines, v1, v2);
- }
- }
-
- if (grid_visible) {
- /* Grid corners */
- v0 = offset;
- v1 = offset + gridsize - 1;
- v2 = offset + grid_vert_len - 1;
- v3 = offset + grid_vert_len - gridsize;
-
- GPU_indexbuf_add_tri_verts(&elb_fast, v0, v2, v1);
- GPU_indexbuf_add_tri_verts(&elb_fast, v0, v3, v2);
-
- GPU_indexbuf_add_line_verts(&elb_lines_fast, v0, v1);
- GPU_indexbuf_add_line_verts(&elb_lines_fast, v1, v2);
- GPU_indexbuf_add_line_verts(&elb_lines_fast, v2, v3);
- GPU_indexbuf_add_line_verts(&elb_lines_fast, v3, v0);
- }
- }
- }
- else {
- uint offset = 0;
- const uint grid_vert_len = square_uint(gridsize - 1) * 4;
- for (int i = 0; i < totgrid; i++, offset += grid_vert_len) {
- bool grid_visible = false;
- BLI_bitmap *gh = buffers->grid_hidden[grid_indices[i]];
-
- uint v0, v1, v2, v3;
- for (int j = 0; j < gridsize - 1; j++) {
- for (int k = 0; k < gridsize - 1; k++) {
- /* Skip hidden grid face */
- if (gh && paint_is_grid_face_hidden(gh, gridsize, k, j)) {
- continue;
- }
- /* VBO data are in a Clockwise QUAD disposition. */
- v0 = offset + (j * (gridsize - 1) + k) * 4;
- v1 = v0 + 1;
- v2 = v0 + 2;
- v3 = v0 + 3;
-
- GPU_indexbuf_add_tri_verts(&elb, v0, v2, v1);
- GPU_indexbuf_add_tri_verts(&elb, v0, v3, v2);
-
- GPU_indexbuf_add_line_verts(&elb_lines, v0, v1);
- GPU_indexbuf_add_line_verts(&elb_lines, v0, v3);
-
- if (j + 2 == gridsize) {
- GPU_indexbuf_add_line_verts(&elb_lines, v2, v3);
- }
- grid_visible = true;
- }
-
- if (grid_visible) {
- GPU_indexbuf_add_line_verts(&elb_lines, v1, v2);
- }
- }
-
- if (grid_visible) {
- /* Grid corners */
- v0 = offset;
- v1 = offset + (gridsize - 1) * 4 - 3;
- v2 = offset + grid_vert_len - 2;
- v3 = offset + grid_vert_len - (gridsize - 1) * 4 + 3;
-
- GPU_indexbuf_add_tri_verts(&elb_fast, v0, v2, v1);
- GPU_indexbuf_add_tri_verts(&elb_fast, v0, v3, v2);
-
- GPU_indexbuf_add_line_verts(&elb_lines_fast, v0, v1);
- GPU_indexbuf_add_line_verts(&elb_lines_fast, v1, v2);
- GPU_indexbuf_add_line_verts(&elb_lines_fast, v2, v3);
- GPU_indexbuf_add_line_verts(&elb_lines_fast, v3, v0);
- }
- }
- }
-
- buffers->index_buf = GPU_indexbuf_build(&elb);
- buffers->index_buf_fast = GPU_indexbuf_build(&elb_fast);
- buffers->index_lines_buf = GPU_indexbuf_build(&elb_lines);
- buffers->index_lines_buf_fast = GPU_indexbuf_build(&elb_lines_fast);
-}
-
-void GPU_pbvh_grid_buffers_update_free(GPU_PBVH_Buffers *buffers,
- const struct DMFlagMat *grid_flag_mats,
- const int *grid_indices)
-{
- const bool smooth = grid_flag_mats[grid_indices[0]].flag & ME_SMOOTH;
-
- if (buffers->smooth != smooth) {
- buffers->smooth = smooth;
- GPU_BATCH_DISCARD_SAFE(buffers->triangles);
- GPU_BATCH_DISCARD_SAFE(buffers->triangles_fast);
- GPU_BATCH_DISCARD_SAFE(buffers->lines);
- GPU_BATCH_DISCARD_SAFE(buffers->lines_fast);
-
- GPU_INDEXBUF_DISCARD_SAFE(buffers->index_buf);
- GPU_INDEXBUF_DISCARD_SAFE(buffers->index_buf_fast);
- GPU_INDEXBUF_DISCARD_SAFE(buffers->index_lines_buf);
- GPU_INDEXBUF_DISCARD_SAFE(buffers->index_lines_buf_fast);
- }
-}
-
-void GPU_pbvh_grid_buffers_update(PBVHGPUFormat *vbo_id,
- GPU_PBVH_Buffers *buffers,
- SubdivCCG *subdiv_ccg,
- CCGElem **grids,
- const struct DMFlagMat *grid_flag_mats,
- int *grid_indices,
- int totgrid,
- const int *sculpt_face_sets,
- const int face_sets_color_seed,
- const int face_sets_color_default,
- const struct CCGKey *key,
- const int update_flags)
-{
- const bool show_mask = (update_flags & GPU_PBVH_BUFFERS_SHOW_MASK) != 0;
- const bool show_vcol = (update_flags & GPU_PBVH_BUFFERS_SHOW_VCOL) != 0;
- const bool show_face_sets = sculpt_face_sets &&
- (update_flags & GPU_PBVH_BUFFERS_SHOW_SCULPT_FACE_SETS) != 0;
- bool empty_mask = true;
- bool default_face_set = true;
-
- int i, j, k, x, y;
-
- /* Build VBO */
- const int has_mask = key->has_mask;
-
- uint vert_per_grid = (buffers->smooth) ? key->grid_area : (square_i(key->grid_size - 1) * 4);
- uint vert_count = totgrid * vert_per_grid;
-
- if (buffers->index_buf == NULL) {
- uint visible_quad_len = BKE_pbvh_count_grid_quads(
- (BLI_bitmap **)buffers->grid_hidden, grid_indices, totgrid, key->grid_size);
-
- /* totally hidden node, return here to avoid BufferData with zero below. */
- if (visible_quad_len == 0) {
- return;
- }
-
- gpu_pbvh_grid_fill_index_buffers(buffers,
- subdiv_ccg,
- sculpt_face_sets,
- grid_indices,
- visible_quad_len,
- totgrid,
- key->grid_size);
- }
-
- uint vbo_index_offset = 0;
- /* Build VBO */
- if (gpu_pbvh_vert_buf_data_set(vbo_id, buffers, vert_count)) {
- GPUIndexBufBuilder elb_lines;
-
- if (buffers->index_lines_buf == NULL) {
- GPU_indexbuf_init(&elb_lines, GPU_PRIM_LINES, totgrid * key->grid_area * 2, vert_count);
- }
-
- for (i = 0; i < totgrid; i++) {
- const int grid_index = grid_indices[i];
- CCGElem *grid = grids[grid_index];
- int vbo_index = vbo_index_offset;
-
- uchar face_set_color[4] = {UCHAR_MAX, UCHAR_MAX, UCHAR_MAX, UCHAR_MAX};
-
- if (show_face_sets && subdiv_ccg && sculpt_face_sets) {
- const int face_index = BKE_subdiv_ccg_grid_to_face_index(subdiv_ccg, grid_index);
-
- const int fset = sculpt_face_sets[face_index];
- /* Skip for the default color Face Set to render it white. */
- if (fset != face_sets_color_default) {
- BKE_paint_face_set_overlay_color_get(fset, face_sets_color_seed, face_set_color);
- default_face_set = false;
- }
- }
-
- if (buffers->smooth) {
- for (y = 0; y < key->grid_size; y++) {
- for (x = 0; x < key->grid_size; x++) {
- CCGElem *elem = CCG_grid_elem(key, grid, x, y);
- GPU_vertbuf_attr_set(
- buffers->vert_buf, vbo_id->pos, vbo_index, CCG_elem_co(key, elem));
-
- short no_short[3];
- normal_float_to_short_v3(no_short, CCG_elem_no(key, elem));
- GPU_vertbuf_attr_set(buffers->vert_buf, vbo_id->nor, vbo_index, no_short);
-
- if (has_mask && show_mask) {
- float fmask = *CCG_elem_mask(key, elem);
- uchar cmask = (uchar)(fmask * 255);
- GPU_vertbuf_attr_set(buffers->vert_buf, vbo_id->msk, vbo_index, &cmask);
- empty_mask = empty_mask && (cmask == 0);
- }
-
- if (show_vcol) {
- const ushort vcol[4] = {USHRT_MAX, USHRT_MAX, USHRT_MAX, USHRT_MAX};
- GPU_vertbuf_attr_set(buffers->vert_buf, vbo_id->col[0], vbo_index, &vcol);
- }
-
- GPU_vertbuf_attr_set(buffers->vert_buf, vbo_id->fset, vbo_index, &face_set_color);
-
- vbo_index += 1;
- }
- }
- vbo_index_offset += key->grid_area;
- }
- else {
- for (j = 0; j < key->grid_size - 1; j++) {
- for (k = 0; k < key->grid_size - 1; k++) {
- CCGElem *elems[4] = {
- CCG_grid_elem(key, grid, k, j),
- CCG_grid_elem(key, grid, k + 1, j),
- CCG_grid_elem(key, grid, k + 1, j + 1),
- CCG_grid_elem(key, grid, k, j + 1),
- };
- float *co[4] = {
- CCG_elem_co(key, elems[0]),
- CCG_elem_co(key, elems[1]),
- CCG_elem_co(key, elems[2]),
- CCG_elem_co(key, elems[3]),
- };
-
- float fno[3];
- short no_short[3];
- /* NOTE: Clockwise indices ordering, that's why we invert order here. */
- normal_quad_v3(fno, co[3], co[2], co[1], co[0]);
- normal_float_to_short_v3(no_short, fno);
-
- GPU_vertbuf_attr_set(buffers->vert_buf, vbo_id->pos, vbo_index + 0, co[0]);
- GPU_vertbuf_attr_set(buffers->vert_buf, vbo_id->nor, vbo_index + 0, no_short);
- GPU_vertbuf_attr_set(buffers->vert_buf, vbo_id->pos, vbo_index + 1, co[1]);
- GPU_vertbuf_attr_set(buffers->vert_buf, vbo_id->nor, vbo_index + 1, no_short);
- GPU_vertbuf_attr_set(buffers->vert_buf, vbo_id->pos, vbo_index + 2, co[2]);
- GPU_vertbuf_attr_set(buffers->vert_buf, vbo_id->nor, vbo_index + 2, no_short);
- GPU_vertbuf_attr_set(buffers->vert_buf, vbo_id->pos, vbo_index + 3, co[3]);
- GPU_vertbuf_attr_set(buffers->vert_buf, vbo_id->nor, vbo_index + 3, no_short);
-
- if (has_mask && show_mask) {
- float fmask = (*CCG_elem_mask(key, elems[0]) + *CCG_elem_mask(key, elems[1]) +
- *CCG_elem_mask(key, elems[2]) + *CCG_elem_mask(key, elems[3])) *
- 0.25f;
- uchar cmask = (uchar)(fmask * 255);
- GPU_vertbuf_attr_set(buffers->vert_buf, vbo_id->msk, vbo_index + 0, &cmask);
- GPU_vertbuf_attr_set(buffers->vert_buf, vbo_id->msk, vbo_index + 1, &cmask);
- GPU_vertbuf_attr_set(buffers->vert_buf, vbo_id->msk, vbo_index + 2, &cmask);
- GPU_vertbuf_attr_set(buffers->vert_buf, vbo_id->msk, vbo_index + 3, &cmask);
- empty_mask = empty_mask && (cmask == 0);
- }
-
- const ushort vcol[4] = {USHRT_MAX, USHRT_MAX, USHRT_MAX, USHRT_MAX};
- GPU_vertbuf_attr_set(buffers->vert_buf, vbo_id->col[0], vbo_index + 0, &vcol);
- GPU_vertbuf_attr_set(buffers->vert_buf, vbo_id->col[0], vbo_index + 1, &vcol);
- GPU_vertbuf_attr_set(buffers->vert_buf, vbo_id->col[0], vbo_index + 2, &vcol);
- GPU_vertbuf_attr_set(buffers->vert_buf, vbo_id->col[0], vbo_index + 3, &vcol);
-
- GPU_vertbuf_attr_set(buffers->vert_buf, vbo_id->fset, vbo_index + 0, &face_set_color);
- GPU_vertbuf_attr_set(buffers->vert_buf, vbo_id->fset, vbo_index + 1, &face_set_color);
- GPU_vertbuf_attr_set(buffers->vert_buf, vbo_id->fset, vbo_index + 2, &face_set_color);
- GPU_vertbuf_attr_set(buffers->vert_buf, vbo_id->fset, vbo_index + 3, &face_set_color);
-
- vbo_index += 4;
- }
- }
- vbo_index_offset += square_i(key->grid_size - 1) * 4;
- }
- }
-
- gpu_pbvh_batch_init(buffers, GPU_PRIM_TRIS);
- }
-
- /* Get material index from the first face of this buffer. */
- buffers->material_index = grid_flag_mats[grid_indices[0]].mat_nr;
-
- buffers->grids = grids;
- buffers->grid_indices = grid_indices;
- buffers->totgrid = totgrid;
- buffers->grid_flag_mats = grid_flag_mats;
- buffers->gridkey = *key;
- buffers->show_overlay = !empty_mask || !default_face_set;
-}
-
-GPU_PBVH_Buffers *GPU_pbvh_grid_buffers_build(int totgrid, BLI_bitmap **grid_hidden, bool smooth)
-{
- GPU_PBVH_Buffers *buffers;
-
- buffers = MEM_callocN(sizeof(GPU_PBVH_Buffers), "GPU_Buffers");
- buffers->grid_hidden = grid_hidden;
- buffers->totgrid = totgrid;
- buffers->smooth = smooth;
-
- buffers->show_overlay = false;
-
- return buffers;
-}
-
-#undef FILL_QUAD_BUFFER
-
-/** \} */
-
-/* -------------------------------------------------------------------- */
-/** \name BMesh PBVH
- * \{ */
-
-/* Output a BMVert into a VertexBufferFormat array at v_index. */
-static void gpu_bmesh_vert_to_buffer_copy(PBVHGPUFormat *vbo_id,
- BMVert *v,
- GPUVertBuf *vert_buf,
- int v_index,
- const float fno[3],
- const float *fmask,
- const int cd_vert_mask_offset,
- const bool show_mask,
- const bool show_vcol,
- bool *empty_mask)
-{
- /* Vertex should always be visible if it's used by a visible face. */
- BLI_assert(!BM_elem_flag_test(v, BM_ELEM_HIDDEN));
-
- /* Set coord, normal, and mask */
- GPU_vertbuf_attr_set(vert_buf, vbo_id->pos, v_index, v->co);
-
- short no_short[3];
- normal_float_to_short_v3(no_short, fno ? fno : v->no);
- GPU_vertbuf_attr_set(vert_buf, vbo_id->nor, v_index, no_short);
-
- if (show_mask) {
- float effective_mask = fmask ? *fmask : BM_ELEM_CD_GET_FLOAT(v, cd_vert_mask_offset);
- uchar cmask = (uchar)(effective_mask * 255);
- GPU_vertbuf_attr_set(vert_buf, vbo_id->msk, v_index, &cmask);
- *empty_mask = *empty_mask && (cmask == 0);
- }
-
- if (show_vcol) {
- const ushort vcol[4] = {USHRT_MAX, USHRT_MAX, USHRT_MAX, USHRT_MAX};
- GPU_vertbuf_attr_set(vert_buf, vbo_id->col[0], v_index, &vcol);
- }
-
- /* Add default face sets color to avoid artifacts. */
- const uchar face_set[3] = {UCHAR_MAX, UCHAR_MAX, UCHAR_MAX};
- GPU_vertbuf_attr_set(vert_buf, vbo_id->fset, v_index, &face_set);
-}
-
-/* Return the total number of vertices that don't have BM_ELEM_HIDDEN set */
-static int gpu_bmesh_vert_visible_count(GSet *bm_unique_verts, GSet *bm_other_verts)
-{
- GSetIterator gs_iter;
- int totvert = 0;
-
- GSET_ITER (gs_iter, bm_unique_verts) {
- BMVert *v = BLI_gsetIterator_getKey(&gs_iter);
- if (!BM_elem_flag_test(v, BM_ELEM_HIDDEN)) {
- totvert++;
- }
- }
- GSET_ITER (gs_iter, bm_other_verts) {
- BMVert *v = BLI_gsetIterator_getKey(&gs_iter);
- if (!BM_elem_flag_test(v, BM_ELEM_HIDDEN)) {
- totvert++;
- }
- }
-
- return totvert;
-}
-
-/* Return the total number of visible faces */
-static int gpu_bmesh_face_visible_count(GSet *bm_faces)
-{
- GSetIterator gh_iter;
- int totface = 0;
-
- GSET_ITER (gh_iter, bm_faces) {
- BMFace *f = BLI_gsetIterator_getKey(&gh_iter);
-
- if (!BM_elem_flag_test(f, BM_ELEM_HIDDEN)) {
- totface++;
- }
- }
-
- return totface;
-}
-
-void GPU_pbvh_bmesh_buffers_update_free(GPU_PBVH_Buffers *buffers)
-{
- if (buffers->smooth) {
- /* Smooth needs to recreate index buffer, so we have to invalidate the batch. */
- GPU_BATCH_DISCARD_SAFE(buffers->triangles);
- GPU_BATCH_DISCARD_SAFE(buffers->lines);
- GPU_INDEXBUF_DISCARD_SAFE(buffers->index_lines_buf);
- GPU_INDEXBUF_DISCARD_SAFE(buffers->index_buf);
- }
- else {
- GPU_BATCH_DISCARD_SAFE(buffers->lines);
- GPU_INDEXBUF_DISCARD_SAFE(buffers->index_lines_buf);
- }
-}
-
-void GPU_pbvh_bmesh_buffers_update(PBVHGPUFormat *vbo_id,
- GPU_PBVH_Buffers *buffers,
- BMesh *bm,
- GSet *bm_faces,
- GSet *bm_unique_verts,
- GSet *bm_other_verts,
- const int update_flags)
-{
- const bool show_mask = (update_flags & GPU_PBVH_BUFFERS_SHOW_MASK) != 0;
- const bool show_vcol = (update_flags & GPU_PBVH_BUFFERS_SHOW_VCOL) != 0;
- int tottri, totvert;
- bool empty_mask = true;
- BMFace *f = NULL;
-
- /* Count visible triangles */
- tottri = gpu_bmesh_face_visible_count(bm_faces);
-
- if (buffers->smooth) {
- /* Count visible vertices */
- totvert = gpu_bmesh_vert_visible_count(bm_unique_verts, bm_other_verts);
- }
- else {
- totvert = tottri * 3;
- }
-
- if (!tottri) {
- if (BLI_gset_len(bm_faces) != 0) {
- /* Node is just hidden. */
- }
- else {
- buffers->clear_bmesh_on_flush = true;
- }
- buffers->tot_tri = 0;
- return;
- }
-
- /* TODO: make mask layer optional for bmesh buffer. */
- const int cd_vert_mask_offset = CustomData_get_offset(&bm->vdata, CD_PAINT_MASK);
-
- /* Fill vertex buffer */
- if (!gpu_pbvh_vert_buf_data_set(vbo_id, buffers, totvert)) {
- /* Memory map failed */
- return;
- }
-
- int v_index = 0;
-
- if (buffers->smooth) {
- /* Fill the vertex and triangle buffer in one pass over faces. */
- GPUIndexBufBuilder elb, elb_lines;
- GPU_indexbuf_init(&elb, GPU_PRIM_TRIS, tottri, totvert);
- GPU_indexbuf_init(&elb_lines, GPU_PRIM_LINES, tottri * 3, totvert);
-
- GHash *bm_vert_to_index = BLI_ghash_int_new_ex("bm_vert_to_index", totvert);
-
- GSetIterator gs_iter;
- GSET_ITER (gs_iter, bm_faces) {
- f = BLI_gsetIterator_getKey(&gs_iter);
-
- if (!BM_elem_flag_test(f, BM_ELEM_HIDDEN)) {
- BMVert *v[3];
- BM_face_as_array_vert_tri(f, v);
-
- uint idx[3];
- for (int i = 0; i < 3; i++) {
- void **idx_p;
- if (!BLI_ghash_ensure_p(bm_vert_to_index, v[i], &idx_p)) {
- /* Add vertex to the vertex buffer each time a new one is encountered */
- *idx_p = POINTER_FROM_UINT(v_index);
-
- gpu_bmesh_vert_to_buffer_copy(vbo_id,
- v[i],
- buffers->vert_buf,
- v_index,
- NULL,
- NULL,
- cd_vert_mask_offset,
- show_mask,
- show_vcol,
- &empty_mask);
-
- idx[i] = v_index;
- v_index++;
- }
- else {
- /* Vertex already in the vertex buffer, just get the index. */
- idx[i] = POINTER_AS_UINT(*idx_p);
- }
- }
-
- GPU_indexbuf_add_tri_verts(&elb, idx[0], idx[1], idx[2]);
-
- GPU_indexbuf_add_line_verts(&elb_lines, idx[0], idx[1]);
- GPU_indexbuf_add_line_verts(&elb_lines, idx[1], idx[2]);
- GPU_indexbuf_add_line_verts(&elb_lines, idx[2], idx[0]);
- }
- }
-
- BLI_ghash_free(bm_vert_to_index, NULL, NULL);
-
- buffers->tot_tri = tottri;
- if (buffers->index_buf == NULL) {
- buffers->index_buf = GPU_indexbuf_build(&elb);
- }
- else {
- GPU_indexbuf_build_in_place(&elb, buffers->index_buf);
- }
- buffers->index_lines_buf = GPU_indexbuf_build(&elb_lines);
- }
- else {
- GSetIterator gs_iter;
-
- GPUIndexBufBuilder elb_lines;
- GPU_indexbuf_init(&elb_lines, GPU_PRIM_LINES, tottri * 3, tottri * 3);
-
- GSET_ITER (gs_iter, bm_faces) {
- f = BLI_gsetIterator_getKey(&gs_iter);
-
- BLI_assert(f->len == 3);
-
- if (!BM_elem_flag_test(f, BM_ELEM_HIDDEN)) {
- BMVert *v[3];
- float fmask = 0.0f;
- int i;
-
- BM_face_as_array_vert_tri(f, v);
-
- /* Average mask value */
- for (i = 0; i < 3; i++) {
- fmask += BM_ELEM_CD_GET_FLOAT(v[i], cd_vert_mask_offset);
- }
- fmask /= 3.0f;
-
- GPU_indexbuf_add_line_verts(&elb_lines, v_index + 0, v_index + 1);
- GPU_indexbuf_add_line_verts(&elb_lines, v_index + 1, v_index + 2);
- GPU_indexbuf_add_line_verts(&elb_lines, v_index + 2, v_index + 0);
-
- for (i = 0; i < 3; i++) {
- gpu_bmesh_vert_to_buffer_copy(vbo_id,
- v[i],
- buffers->vert_buf,
- v_index++,
- f->no,
- &fmask,
- cd_vert_mask_offset,
- show_mask,
- show_vcol,
- &empty_mask);
- }
- }
- }
-
- buffers->index_lines_buf = GPU_indexbuf_build(&elb_lines);
- buffers->tot_tri = tottri;
- }
-
- /* Get material index from the last face we iterated on. */
- buffers->material_index = (f) ? f->mat_nr : 0;
-
- buffers->show_overlay = !empty_mask;
-
- gpu_pbvh_batch_init(buffers, GPU_PRIM_TRIS);
-}
-
-/** \} */
-
-/* -------------------------------------------------------------------- */
-/** \name Generic
- * \{ */
-
-GPU_PBVH_Buffers *GPU_pbvh_bmesh_buffers_build(bool smooth_shading)
-{
- GPU_PBVH_Buffers *buffers;
-
- buffers = MEM_callocN(sizeof(GPU_PBVH_Buffers), "GPU_Buffers");
- buffers->use_bmesh = true;
- buffers->smooth = smooth_shading;
- buffers->show_overlay = true;
-
- return buffers;
-}
-
-/**
- * Builds a list of attributes from a set of domains and a set of
- * customdata types.
- *
- * \param active_only: Returns only one item, a #GPUAttrRef to active_layer.
- * \param active_layer: #CustomDataLayer to use for the active layer.
- * \param active_layer: #CustomDataLayer to use for the render layer.
- */
-static int gpu_pbvh_make_attr_offs(eAttrDomainMask domain_mask,
- eCustomDataMask type_mask,
- const CustomData *vdata,
- const CustomData *edata,
- const CustomData *ldata,
- const CustomData *pdata,
- GPUAttrRef r_cd_attrs[MAX_GPU_ATTR],
- bool active_only,
- int active_type,
- int active_domain,
- const CustomDataLayer *active_layer,
- const CustomDataLayer *render_layer)
-{
- const CustomData *cdata_active = active_domain == ATTR_DOMAIN_POINT ? vdata : ldata;
-
- if (!cdata_active) {
- return 0;
- }
-
- if (active_only) {
- int idx = active_layer ? active_layer - cdata_active->layers : -1;
-
- if (idx >= 0 && idx < cdata_active->totlayer) {
- r_cd_attrs[0].cd_offset = cdata_active->layers[idx].offset;
- r_cd_attrs[0].domain = active_domain;
- r_cd_attrs[0].type = active_type;
- r_cd_attrs[0].layer_idx = idx;
-
- return 1;
- }
-
- return 0;
- }
-
- const CustomData *datas[4] = {vdata, edata, pdata, ldata};
-
- int count = 0;
- for (eAttrDomain domain = 0; domain < 4; domain++) {
- const CustomData *cdata = datas[domain];
-
- if (!cdata || !((1 << domain) & domain_mask)) {
- continue;
- }
-
- const CustomDataLayer *cl = cdata->layers;
-
- for (int i = 0; count < MAX_GPU_ATTR && i < cdata->totlayer; i++, cl++) {
- if ((CD_TYPE_AS_MASK(cl->type) & type_mask) && !(cl->flag & CD_FLAG_TEMPORARY)) {
- GPUAttrRef *ref = r_cd_attrs + count;
-
- ref->cd_offset = cl->offset;
- ref->type = cl->type;
- ref->layer_idx = i;
- ref->domain = domain;
-
- count++;
- }
- }
- }
-
- /* Ensure render layer is last, draw cache code seems to need this. */
-
- for (int i = 0; i < count; i++) {
- GPUAttrRef *ref = r_cd_attrs + i;
- const CustomData *cdata = datas[ref->domain];
-
- if (cdata->layers + ref->layer_idx == render_layer) {
- SWAP(GPUAttrRef, r_cd_attrs[i], r_cd_attrs[count - 1]);
- break;
- }
- }
-
- return count;
-}
-
-static bool gpu_pbvh_format_equals(PBVHGPUFormat *a, PBVHGPUFormat *b)
-{
- bool bad = false;
-
- bad |= a->active_attrs_only != b->active_attrs_only;
-
- bad |= a->pos != b->pos;
- bad |= a->fset != b->fset;
- bad |= a->msk != b->msk;
- bad |= a->nor != b->nor;
-
- for (int i = 0; i < MIN2(a->totuv, b->totuv); i++) {
- bad |= a->uv[i] != b->uv[i];
- }
-
- for (int i = 0; i < MIN2(a->totcol, b->totcol); i++) {
- bad |= a->col[i] != b->col[i];
- }
-
- bad |= a->totuv != b->totuv;
- bad |= a->totcol != b->totcol;
-
- return !bad;
-}
-
-bool GPU_pbvh_attribute_names_update(PBVHType pbvh_type,
- PBVHGPUFormat *vbo_id,
- const CustomData *vdata,
- const CustomData *ldata,
- bool active_attrs_only)
-{
- const bool active_only = active_attrs_only;
- PBVHGPUFormat old_format = *vbo_id;
-
- GPU_vertformat_clear(&vbo_id->format);
-
- vbo_id->active_attrs_only = active_attrs_only;
-
- if (vbo_id->format.attr_len == 0) {
- vbo_id->pos = GPU_vertformat_attr_add(
- &vbo_id->format, "pos", GPU_COMP_F32, 3, GPU_FETCH_FLOAT);
- vbo_id->nor = GPU_vertformat_attr_add(
- &vbo_id->format, "nor", GPU_COMP_I16, 3, GPU_FETCH_INT_TO_FLOAT_UNIT);
-
- /* TODO: Do not allocate these `.msk` and `.col` when they are not used. */
- vbo_id->msk = GPU_vertformat_attr_add(
- &vbo_id->format, "msk", GPU_COMP_U8, 1, GPU_FETCH_INT_TO_FLOAT_UNIT);
-
- vbo_id->totcol = 0;
- if (pbvh_type == PBVH_FACES) {
- int ci = 0;
-
- Mesh me_query;
-
- BKE_id_attribute_copy_domains_temp(ID_ME, vdata, NULL, ldata, NULL, NULL, &me_query.id);
-
- const CustomDataLayer *active_color_layer = BKE_id_attributes_active_color_get(&me_query.id);
- const CustomDataLayer *render_color_layer = BKE_id_attributes_render_color_get(&me_query.id);
- eAttrDomain active_color_domain = active_color_layer ?
- BKE_id_attribute_domain(&me_query.id,
- active_color_layer) :
- ATTR_DOMAIN_POINT;
-
- GPUAttrRef vcol_layers[MAX_GPU_ATTR];
- int totlayer = gpu_pbvh_make_attr_offs(ATTR_DOMAIN_MASK_COLOR,
- CD_MASK_COLOR_ALL,
- vdata,
- NULL,
- ldata,
- NULL,
- vcol_layers,
- active_only,
- active_color_layer ? active_color_layer->type : -1,
- active_color_domain,
- active_color_layer,
- render_color_layer);
-
- for (int i = 0; i < totlayer; i++) {
- GPUAttrRef *ref = vcol_layers + i;
- const CustomData *cdata = ref->domain == ATTR_DOMAIN_POINT ? vdata : ldata;
-
- const CustomDataLayer *layer = cdata->layers + ref->layer_idx;
-
- if (vbo_id->totcol < MAX_GPU_ATTR) {
- vbo_id->col[ci++] = GPU_vertformat_attr_add(
- &vbo_id->format, "c", GPU_COMP_U16, 4, GPU_FETCH_INT_TO_FLOAT_UNIT);
- vbo_id->totcol++;
-
- bool is_render = render_color_layer == layer;
- bool is_active = active_color_layer == layer;
-
- DRW_cdlayer_attr_aliases_add(&vbo_id->format, "c", cdata, layer, is_render, is_active);
- }
- }
- }
-
- /* ensure at least one vertex color layer */
- if (vbo_id->totcol == 0) {
- vbo_id->col[0] = GPU_vertformat_attr_add(
- &vbo_id->format, "c", GPU_COMP_U16, 4, GPU_FETCH_INT_TO_FLOAT_UNIT);
- vbo_id->totcol = 1;
-
- GPU_vertformat_alias_add(&vbo_id->format, "ac");
- }
-
- vbo_id->fset = GPU_vertformat_attr_add(
- &vbo_id->format, "fset", GPU_COMP_U8, 3, GPU_FETCH_INT_TO_FLOAT_UNIT);
-
- vbo_id->totuv = 0;
- if (pbvh_type == PBVH_FACES && ldata && CustomData_has_layer(ldata, CD_MLOOPUV)) {
- GPUAttrRef uv_layers[MAX_GPU_ATTR];
- const CustomDataLayer *active = NULL, *render = NULL;
-
- active = get_active_layer(ldata, CD_MLOOPUV);
- render = get_render_layer(ldata, CD_MLOOPUV);
-
- int totlayer = gpu_pbvh_make_attr_offs(ATTR_DOMAIN_MASK_CORNER,
- CD_MASK_MLOOPUV,
- NULL,
- NULL,
- ldata,
- NULL,
- uv_layers,
- active_only,
- CD_MLOOPUV,
- ATTR_DOMAIN_CORNER,
- active,
- render);
-
- vbo_id->totuv = totlayer;
-
- for (int i = 0; i < totlayer; i++) {
- GPUAttrRef *ref = uv_layers + i;
-
- vbo_id->uv[i] = GPU_vertformat_attr_add(
- &vbo_id->format, "uvs", GPU_COMP_F32, 2, GPU_FETCH_FLOAT);
-
- const CustomDataLayer *cl = ldata->layers + ref->layer_idx;
- bool is_active = ref->layer_idx == CustomData_get_active_layer_index(ldata, CD_MLOOPUV);
-
- DRW_cdlayer_attr_aliases_add(&vbo_id->format, "u", ldata, cl, cl == render, is_active);
-
- /* Apparently the render attribute is 'a' while active is 'au',
- * at least going by the draw cache extractor code.
- */
- if (cl == render) {
- GPU_vertformat_alias_add(&vbo_id->format, "a");
- }
- }
- }
- }
-
- if (!gpu_pbvh_format_equals(&old_format, vbo_id)) {
- return true;
- }
-
- return false;
-}
-
-GPUBatch *GPU_pbvh_buffers_batch_get(GPU_PBVH_Buffers *buffers, bool fast, bool wires)
-{
- if (wires) {
- return (fast && buffers->lines_fast) ? buffers->lines_fast : buffers->lines;
- }
-
- return (fast && buffers->triangles_fast) ? buffers->triangles_fast : buffers->triangles;
-}
-
-bool GPU_pbvh_buffers_has_overlays(GPU_PBVH_Buffers *buffers)
-{
- return buffers->show_overlay;
-}
-
-short GPU_pbvh_buffers_material_index_get(GPU_PBVH_Buffers *buffers)
-{
- return buffers->material_index;
-}
-
-static void gpu_pbvh_buffers_clear(GPU_PBVH_Buffers *buffers)
-{
- GPU_BATCH_DISCARD_SAFE(buffers->lines);
- GPU_BATCH_DISCARD_SAFE(buffers->lines_fast);
- GPU_BATCH_DISCARD_SAFE(buffers->triangles);
- GPU_BATCH_DISCARD_SAFE(buffers->triangles_fast);
- GPU_INDEXBUF_DISCARD_SAFE(buffers->index_lines_buf_fast);
- GPU_INDEXBUF_DISCARD_SAFE(buffers->index_lines_buf);
- GPU_INDEXBUF_DISCARD_SAFE(buffers->index_buf_fast);
- GPU_INDEXBUF_DISCARD_SAFE(buffers->index_buf);
- GPU_VERTBUF_DISCARD_SAFE(buffers->vert_buf);
-}
-
-void GPU_pbvh_buffers_update_flush(GPU_PBVH_Buffers *buffers)
-{
- /* Free empty bmesh node buffers. */
- if (buffers->clear_bmesh_on_flush) {
- gpu_pbvh_buffers_clear(buffers);
- buffers->clear_bmesh_on_flush = false;
- }
-
- /* Force flushing to the GPU. */
- if (buffers->vert_buf && GPU_vertbuf_get_data(buffers->vert_buf)) {
- GPU_vertbuf_use(buffers->vert_buf);
- }
-}
-
-void GPU_pbvh_buffers_free(GPU_PBVH_Buffers *buffers)
-{
- if (buffers) {
- gpu_pbvh_buffers_clear(buffers);
- MEM_freeN(buffers);
- }
-}
-
-/** \} */
diff --git a/source/blender/gpu/intern/gpu_init_exit.c b/source/blender/gpu/intern/gpu_init_exit.c
index 34b355eefaf..2dbb4b215bb 100644
--- a/source/blender/gpu/intern/gpu_init_exit.c
+++ b/source/blender/gpu/intern/gpu_init_exit.c
@@ -36,18 +36,10 @@ void GPU_init(void)
gpu_codegen_init();
gpu_batch_init();
-
-#ifndef GPU_STANDALONE
- gpu_pbvh_init();
-#endif
}
void GPU_exit(void)
{
-#ifndef GPU_STANDALONE
- gpu_pbvh_exit();
-#endif
-
gpu_batch_exit();
gpu_codegen_exit();