Diffstat (limited to 'source/blender/draw/intern')
-rw-r--r--  source/blender/draw/intern/DRW_render.h              22
-rw-r--r--  source/blender/draw/intern/draw_attributes.h         18
-rw-r--r--  source/blender/draw/intern/draw_cache.c               2
-rw-r--r--  source/blender/draw/intern/draw_cache_extract.hh     16
-rw-r--r--  source/blender/draw/intern/draw_cache_impl.h          1
-rw-r--r--  source/blender/draw/intern/draw_cache_impl_mesh.cc   22
-rw-r--r--  source/blender/draw/intern/draw_manager.h            10
-rw-r--r--  source/blender/draw/intern/draw_manager_data.c      146
-rw-r--r--  source/blender/draw/intern/draw_pbvh.cc            1216
-rw-r--r--  source/blender/draw/intern/draw_pbvh.h               22
10 files changed, 1445 insertions(+), 30 deletions(-)
diff --git a/source/blender/draw/intern/DRW_render.h b/source/blender/draw/intern/DRW_render.h
index 7b80ffd2b88..1752198c349 100644
--- a/source/blender/draw/intern/DRW_render.h
+++ b/source/blender/draw/intern/DRW_render.h
@@ -398,8 +398,18 @@ void DRW_shgroup_call_instances_with_attrs(DRWShadingGroup *shgroup,
struct GPUBatch *geom,
struct GPUBatch *inst_attributes);
-void DRW_shgroup_call_sculpt(DRWShadingGroup *sh, Object *ob, bool wire, bool mask);
-void DRW_shgroup_call_sculpt_with_materials(DRWShadingGroup **sh, int num_sh, Object *ob);
+void DRW_shgroup_call_sculpt(DRWShadingGroup *shgroup,
+ Object *ob,
+ bool use_wire,
+ bool use_mask,
+ bool use_fset,
+ bool use_color,
+ bool use_uv);
+
+void DRW_shgroup_call_sculpt_with_materials(DRWShadingGroup **shgroups,
+ struct GPUMaterial **gpumats,
+ int num_shgroups,
+ Object *ob);
DRWCallBuffer *DRW_shgroup_call_buffer(DRWShadingGroup *shgroup,
struct GPUVertFormat *format,
@@ -934,6 +944,14 @@ typedef struct DRWContextState {
const DRWContextState *DRW_context_state_get(void);
+struct DRW_Attributes;
+struct DRW_MeshCDMask;
+
+void DRW_mesh_batch_cache_get_attributes(struct Object *object,
+ struct Mesh *me,
+ struct DRW_Attributes **r_attrs,
+ struct DRW_MeshCDMask **r_cd_needed);
+
#ifdef __cplusplus
}
#endif
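
A minimal caller sketch for the widened entry points; `shgroup`, `shgroups`,
`gpumats` and `num_shgroups` stand in for an engine's own data and are
illustrative, not part of this patch:

  /* Face-set and color overlays enabled; wire, mask and UVs off. */
  DRW_shgroup_call_sculpt(shgroup, ob, false, false, true, true, false);

  /* Material path: one shading group per material, plus the GPUMaterial
   * array so the needed attributes can be derived from the shaders. */
  DRW_shgroup_call_sculpt_with_materials(shgroups, gpumats, num_shgroups, ob);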
diff --git a/source/blender/draw/intern/draw_attributes.h b/source/blender/draw/intern/draw_attributes.h
index b577c6c4162..786301d0164 100644
--- a/source/blender/draw/intern/draw_attributes.h
+++ b/source/blender/draw/intern/draw_attributes.h
@@ -16,6 +16,7 @@
#include "BLI_sys_types.h"
#include "BLI_threads.h"
+#include "BLI_utildefines.h"
#include "GPU_shader.h"
#include "GPU_vertex_format.h"
@@ -36,6 +37,23 @@ typedef struct DRW_Attributes {
int num_requests;
} DRW_Attributes;
+typedef struct DRW_MeshCDMask {
+ uint32_t uv : 8;
+ uint32_t tan : 8;
+ uint32_t orco : 1;
+ uint32_t tan_orco : 1;
+ uint32_t sculpt_overlays : 1;
+ /**
+ * Edit uv layer is from the base edit mesh as modifiers could remove it. (see T68857)
+ */
+ uint32_t edit_uv : 1;
+} DRW_MeshCDMask;
+
+/* Keep `DRW_MeshCDMask` struct within a `uint32_t`.
+ * bit-wise and atomic operations are used to compare and update the struct.
+ * See `mesh_cd_layers_type_*` functions. */
+BLI_STATIC_ASSERT(sizeof(DRW_MeshCDMask) <= sizeof(uint32_t), "DRW_MeshCDMask exceeds 32 bits")
+
void drw_attributes_clear(DRW_Attributes *attributes);
void drw_attributes_merge(DRW_Attributes *dst,
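
The assert above guards the trick used by the `mesh_cd_layers_type_*`
helpers: the whole bit-field struct is viewed as one `uint32_t`, so compare
and merge become single integer operations. A sketch (helper names here are
illustrative):

  BLI_INLINE bool mesh_cd_mask_equal(DRW_MeshCDMask a, DRW_MeshCDMask b)
  {
    return *(uint32_t *)&a == *(uint32_t *)&b;
  }

  BLI_INLINE void mesh_cd_mask_merge(DRW_MeshCDMask *dst, DRW_MeshCDMask src)
  {
    /* One atomic 32-bit OR merges every bit-field member at once. */
    atomic_fetch_and_or_uint32((uint32_t *)dst, *(uint32_t *)&src);
  }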
diff --git a/source/blender/draw/intern/draw_cache.c b/source/blender/draw/intern/draw_cache.c
index 6da22039c2f..eec15a4668d 100644
--- a/source/blender/draw/intern/draw_cache.c
+++ b/source/blender/draw/intern/draw_cache.c
@@ -3388,7 +3388,7 @@ void DRW_cdlayer_attr_aliases_add(GPUVertFormat *format,
/* Active render layer name. */
if (is_active_render) {
- GPU_vertformat_alias_add(format, base_name);
+ GPU_vertformat_alias_add(format, cl->type == CD_MLOOPUV ? "a" : base_name);
}
/* Active display layer name. */
diff --git a/source/blender/draw/intern/draw_cache_extract.hh b/source/blender/draw/intern/draw_cache_extract.hh
index 6f4a652ce24..4fe360eecd7 100644
--- a/source/blender/draw/intern/draw_cache_extract.hh
+++ b/source/blender/draw/intern/draw_cache_extract.hh
@@ -52,22 +52,6 @@ enum {
DRW_MESH_WEIGHT_STATE_LOCK_RELATIVE = (1 << 2),
};
-struct DRW_MeshCDMask {
- uint32_t uv : 8;
- uint32_t tan : 8;
- uint32_t orco : 1;
- uint32_t tan_orco : 1;
- uint32_t sculpt_overlays : 1;
- /**
- * Edit uv layer is from the base edit mesh as modifiers could remove it. (see T68857)
- */
- uint32_t edit_uv : 1;
-};
-/* Keep `DRW_MeshCDMask` struct within a `uint32_t`.
- * bit-wise and atomic operations are used to compare and update the struct.
- * See `mesh_cd_layers_type_*` functions. */
-BLI_STATIC_ASSERT(sizeof(DRW_MeshCDMask) <= sizeof(uint32_t), "DRW_MeshCDMask exceeds 32 bits")
-
enum eMRIterType {
MR_ITER_LOOPTRI = 1 << 0,
MR_ITER_POLY = 1 << 1,
diff --git a/source/blender/draw/intern/draw_cache_impl.h b/source/blender/draw/intern/draw_cache_impl.h
index 89432b0da83..e49b37b451f 100644
--- a/source/blender/draw/intern/draw_cache_impl.h
+++ b/source/blender/draw/intern/draw_cache_impl.h
@@ -191,6 +191,7 @@ struct GPUBatch **DRW_mesh_batch_cache_get_surface_shaded(struct Object *object,
struct Mesh *me,
struct GPUMaterial **gpumat_array,
uint gpumat_array_len);
+
struct GPUBatch **DRW_mesh_batch_cache_get_surface_texpaint(struct Object *object,
struct Mesh *me);
struct GPUBatch *DRW_mesh_batch_cache_get_surface_texpaint_single(struct Object *object,
diff --git a/source/blender/draw/intern/draw_cache_impl_mesh.cc b/source/blender/draw/intern/draw_cache_impl_mesh.cc
index ed78cbbda39..acab4798ea8 100644
--- a/source/blender/draw/intern/draw_cache_impl_mesh.cc
+++ b/source/blender/draw/intern/draw_cache_impl_mesh.cc
@@ -62,6 +62,7 @@
#include "draw_subdivision.h"
#include "draw_cache_impl.h" /* own include */
+#include "draw_manager.h"
#include "mesh_extractors/extract_mesh.hh"
@@ -981,6 +982,27 @@ GPUBatch *DRW_mesh_batch_cache_get_edit_mesh_analysis(Mesh *me)
return DRW_batch_request(&cache->batch.edit_mesh_analysis);
}
+void DRW_mesh_get_attributes(Object *object,
+ Mesh *me,
+ struct GPUMaterial **gpumat_array,
+ int gpumat_array_len,
+ DRW_Attributes *r_attrs,
+ DRW_MeshCDMask *r_cd_needed)
+{
+ DRW_Attributes attrs_needed;
+ drw_attributes_clear(&attrs_needed);
+ DRW_MeshCDMask cd_needed = mesh_cd_calc_used_gpu_layers(
+ object, me, gpumat_array, gpumat_array_len, &attrs_needed);
+
+ if (r_attrs) {
+ *r_attrs = attrs_needed;
+ }
+
+ if (r_cd_needed) {
+ *r_cd_needed = cd_needed;
+ }
+}
+
GPUBatch **DRW_mesh_batch_cache_get_surface_shaded(Object *object,
Mesh *me,
struct GPUMaterial **gpumat_array,
diff --git a/source/blender/draw/intern/draw_manager.h b/source/blender/draw/intern/draw_manager.h
index 4f71e665390..67d0f79b83e 100644
--- a/source/blender/draw/intern/draw_manager.h
+++ b/source/blender/draw/intern/draw_manager.h
@@ -694,6 +694,16 @@ bool drw_engine_data_engines_data_validate(GPUViewport *viewport, void **engine_
void drw_engine_data_cache_release(GPUViewport *viewport);
void drw_engine_data_free(GPUViewport *viewport);
+struct DRW_Attributes;
+struct DRW_MeshCDMask;
+struct GPUMaterial;
+void DRW_mesh_get_attributes(struct Object *object,
+ struct Mesh *me,
+ struct GPUMaterial **gpumat_array,
+ int gpumat_array_len,
+ struct DRW_Attributes *r_attrs,
+ struct DRW_MeshCDMask *r_cd_needed);
+
void DRW_manager_begin_sync(void);
void DRW_manager_end_sync(void);
diff --git a/source/blender/draw/intern/draw_manager_data.c b/source/blender/draw/intern/draw_manager_data.c
index 820242720c8..3e0708d8b49 100644
--- a/source/blender/draw/intern/draw_manager_data.c
+++ b/source/blender/draw/intern/draw_manager_data.c
@@ -5,7 +5,11 @@
* \ingroup draw
*/
+#include "DRW_pbvh.h"
+
+#include "draw_attributes.h"
#include "draw_manager.h"
+#include "draw_pbvh.h"
#include "BKE_curve.h"
#include "BKE_duplilist.h"
@@ -32,12 +36,12 @@
#include "BLI_listbase.h"
#include "BLI_memblock.h"
#include "BLI_mempool.h"
+#include "BLI_math_bits.h"
#ifdef DRW_DEBUG_CULLING
# include "BLI_math_bits.h"
#endif
-#include "GPU_buffers.h"
#include "GPU_capabilities.h"
#include "GPU_material.h"
#include "GPU_uniform_buffer.h"
@@ -1144,6 +1148,8 @@ typedef struct DRWSculptCallbackData {
bool fast_mode; /* Set by draw manager. Do not init. */
int debug_node_nr;
+ PBVHAttrReq *attrs;
+ int attrs_num;
} DRWSculptCallbackData;
#define SCULPT_DEBUG_COLOR(id) (sculpt_debug_colors[id % 9])
@@ -1159,22 +1165,28 @@ static float sculpt_debug_colors[9][4] = {
{0.7f, 0.2f, 1.0f, 1.0f},
};
-static void sculpt_draw_cb(DRWSculptCallbackData *scd, GPU_PBVH_Buffers *buffers)
+static void sculpt_draw_cb(DRWSculptCallbackData *scd,
+ PBVHBatches *batches,
+ PBVH_GPU_Args *pbvh_draw_args)
{
- if (!buffers) {
+ if (!batches) {
return;
}
- /* Meh... use_mask is a bit misleading here. */
- if (scd->use_mask && !GPU_pbvh_buffers_has_overlays(buffers)) {
- return;
+ int primcount;
+ GPUBatch *geom;
+
+ if (!scd->use_wire) {
+ geom = DRW_pbvh_tris_get(batches, scd->attrs, scd->attrs_num, pbvh_draw_args, &primcount);
+ }
+ else {
+ geom = DRW_pbvh_lines_get(batches, scd->attrs, scd->attrs_num, pbvh_draw_args, &primcount);
}
- GPUBatch *geom = GPU_pbvh_buffers_batch_get(buffers, scd->fast_mode, scd->use_wire);
short index = 0;
if (scd->use_mats) {
- index = GPU_pbvh_buffers_material_index_get(buffers);
+ index = drw_pbvh_material_index_get(batches);
if (index >= scd->num_shading_groups) {
index = 0;
}
@@ -1298,9 +1310,11 @@ static void drw_sculpt_generate_calls(DRWSculptCallbackData *scd)
update_only_visible,
&update_frustum,
&draw_frustum,
- (void (*)(void *, GPU_PBVH_Buffers *))sculpt_draw_cb,
+ (void (*)(void *, PBVHBatches *, PBVH_GPU_Args *))sculpt_draw_cb,
scd,
- scd->use_mats);
+ scd->use_mats,
+ scd->attrs,
+ scd->attrs_num);
if (SCULPT_DEBUG_BUFFERS) {
int debug_node_nr = 0;
@@ -1313,7 +1327,13 @@ static void drw_sculpt_generate_calls(DRWSculptCallbackData *scd)
}
}
-void DRW_shgroup_call_sculpt(DRWShadingGroup *shgroup, Object *ob, bool use_wire, bool use_mask)
+void DRW_shgroup_call_sculpt(DRWShadingGroup *shgroup,
+ Object *ob,
+ bool use_wire,
+ bool use_mask,
+ bool use_fset,
+ bool use_color,
+ bool use_uv)
{
DRWSculptCallbackData scd = {
.ob = ob,
@@ -1323,13 +1343,115 @@ void DRW_shgroup_call_sculpt(DRWShadingGroup *shgroup, Object *ob, bool use_wire
.use_mats = false,
.use_mask = use_mask,
};
+
+ PBVHAttrReq attrs[16];
+ int attrs_num = 0;
+
+ memset(attrs, 0, sizeof(attrs));
+
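+  /* Positions and normals are always requested; the rest are optional overlays. */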
+ attrs[attrs_num++].type = CD_PBVH_CO_TYPE;
+ attrs[attrs_num++].type = CD_PBVH_NO_TYPE;
+
+ if (use_mask) {
+ attrs[attrs_num++].type = CD_PBVH_MASK_TYPE;
+ }
+
+ if (use_fset) {
+ attrs[attrs_num++].type = CD_PBVH_FSET_TYPE;
+ }
+
+ Mesh *me = BKE_object_get_original_mesh(ob);
+
+ if (use_color) {
+ CustomDataLayer *layer = BKE_id_attributes_active_color_get(&me->id);
+
+ if (layer) {
+ eAttrDomain domain = BKE_id_attribute_domain(&me->id, layer);
+
+ attrs[attrs_num].type = layer->type;
+ attrs[attrs_num].domain = domain;
+
+ BLI_strncpy(attrs[attrs_num].name, layer->name, sizeof(attrs[attrs_num].name));
+ attrs_num++;
+ }
+ }
+
+ if (use_uv) {
+ int layer_i = CustomData_get_active_layer_index(&me->ldata, CD_MLOOPUV);
+ if (layer_i != -1) {
+ CustomDataLayer *layer = me->ldata.layers + layer_i;
+
+ attrs[attrs_num].type = CD_MLOOPUV;
+ attrs[attrs_num].domain = ATTR_DOMAIN_CORNER;
+ BLI_strncpy(attrs[attrs_num].name, layer->name, sizeof(attrs[attrs_num].name));
+
+ attrs_num++;
+ }
+ }
+
+ scd.attrs = attrs;
+ scd.attrs_num = attrs_num;
+
drw_sculpt_generate_calls(&scd);
}
void DRW_shgroup_call_sculpt_with_materials(DRWShadingGroup **shgroups,
+ GPUMaterial **gpumats,
int num_shgroups,
Object *ob)
{
+ DRW_Attributes draw_attrs;
+ DRW_MeshCDMask cd_needed;
+
+ if (gpumats) {
+ DRW_mesh_get_attributes(ob, (Mesh *)ob->data, gpumats, num_shgroups, &draw_attrs, &cd_needed);
+ }
+ else {
+ memset(&draw_attrs, 0, sizeof(draw_attrs));
+ memset(&cd_needed, 0, sizeof(cd_needed));
+ }
+
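+  /* The `2` covers the always-present position and normal requests added below. */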
+ int attrs_num = 2 + draw_attrs.num_requests;
+
+ /* UV maps are not in attribute requests. */
+ attrs_num += count_bits_i(cd_needed.uv);
+
+ PBVHAttrReq *attrs = BLI_array_alloca(attrs, attrs_num);
+
+ memset(attrs, 0, sizeof(PBVHAttrReq) * attrs_num);
+ int attrs_i = 0;
+
+ attrs[attrs_i++].type = CD_PBVH_CO_TYPE;
+ attrs[attrs_i++].type = CD_PBVH_NO_TYPE;
+
+ for (int i = 0; i < draw_attrs.num_requests; i++) {
+ DRW_AttributeRequest *req = draw_attrs.requests + i;
+
+ attrs[attrs_i].type = req->cd_type;
+ attrs[attrs_i].domain = req->domain;
+ BLI_strncpy(attrs[attrs_i].name, req->attribute_name, sizeof(attrs->name));
+ attrs_i++;
+ }
+
+ /* UV maps are not in attribute requests. */
+ Mesh *me = (Mesh *)ob->data;
+
+ for (uint i = 0; i < 32; i++) {
+ if (cd_needed.uv & (1 << i)) {
+ int layer_i = CustomData_get_layer_index_n(&me->ldata, CD_MLOOPUV, i);
+ CustomDataLayer *layer = layer_i != -1 ? me->ldata.layers + layer_i : NULL;
+
+ if (layer) {
+ attrs[attrs_i].type = CD_MLOOPUV;
+ attrs[attrs_i].domain = ATTR_DOMAIN_CORNER;
+ BLI_strncpy(attrs[attrs_i].name, layer->name, sizeof(attrs->name));
+ attrs_i++;
+ }
+ }
+ }
+
+ attrs_num = attrs_i;
+
DRWSculptCallbackData scd = {
.ob = ob,
.shading_groups = shgroups,
@@ -1337,6 +1459,8 @@ void DRW_shgroup_call_sculpt_with_materials(DRWShadingGroup **shgroups,
.use_wire = false,
.use_mats = true,
.use_mask = false,
+ .attrs = attrs,
+ .attrs_num = attrs_num,
};
drw_sculpt_generate_calls(&scd);
}
diff --git a/source/blender/draw/intern/draw_pbvh.cc b/source/blender/draw/intern/draw_pbvh.cc
new file mode 100644
index 00000000000..126ba98d06c
--- /dev/null
+++ b/source/blender/draw/intern/draw_pbvh.cc
@@ -0,0 +1,1216 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later
+ * Copyright 2005 Blender Foundation. All rights reserved. */
+
+/** \file
+ * \ingroup draw
+ *
+ * PBVH drawing. Embeds GPU meshes inside PBVH nodes,
+ * used by mesh sculpt mode.
+ */
+
+/* Disable optimization for a function (for debugging use only!) */
+#ifdef __clang__
+# define ATTR_NO_OPT __attribute__((optnone))
+#elif defined(_MSC_VER)
+# define ATTR_NO_OPT __pragma(optimize("", off))
+#elif defined(__GNUC__)
+# define ATTR_NO_OPT __attribute__((optimize("O0")))
+#else
+# define ATTR_NO_OPT
+#endif
+
+#include <limits.h>
+#include <stddef.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include "MEM_guardedalloc.h"
+
+#include "BLI_bitmap.h"
+#include "BLI_ghash.h"
+#include "BLI_math_color.h"
+#include "BLI_utildefines.h"
+
+#include "DNA_mesh_types.h"
+#include "DNA_meshdata_types.h"
+
+#include "BKE_DerivedMesh.h"
+#include "BKE_attribute.h"
+#include "BKE_ccg.h"
+#include "BKE_customdata.h"
+#include "BKE_mesh.h"
+#include "BKE_paint.h"
+#include "BKE_pbvh.h"
+#include "BKE_subdiv_ccg.h"
+
+#include "GPU_batch.h"
+
+#include "DRW_engine.h"
+#include "DRW_pbvh.h"
+
+#include "draw_pbvh.h"
+#include "gpu_private.h"
+#include "bmesh.h"
+
+#define MAX_PBVH_BATCH_KEY 512
+#define MAX_PBVH_VBOS 16
+
+#include "BLI_index_range.hh"
+#include "BLI_map.hh"
+#include "BLI_math_vec_types.hh"
+#include "BLI_vector.hh"
+#include <vector>
+
+#include <algorithm>
+#include <string>
+
+using blender::char3;
+using blender::float2;
+using blender::float3;
+using blender::float4;
+using blender::IndexRange;
+using blender::Map;
+using blender::short3;
+using blender::uchar3;
+using blender::uchar4;
+using blender::ushort3;
+using blender::ushort4;
+using blender::Vector;
+
+using string = std::string;
+
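+/* One vertex buffer holding a single attribute (position, normal, mask,
+ * face set, color or UV) for a PBVH node. */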
+struct PBVHVbo {
+ uint64_t type;
+ eAttrDomain domain;
+ string name;
+ GPUVertBuf *vert_buf = nullptr;
+ string key;
+
+ PBVHVbo(eAttrDomain _domain, uint64_t _type, string _name)
+ : type(_type), domain(_domain), name(_name)
+ {
+ }
+
+ void clear_data()
+ {
+ GPU_vertbuf_clear(vert_buf);
+ }
+
+ string build_key()
+ {
+ char buf[512];
+
+ sprintf(buf, "%d:%d:%s", (int)type, (int)domain, name.c_str());
+
+ key = string(buf);
+ return key;
+ }
+};
+
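+/* A drawable batch: indices into the owning PBVHBatches' VBO list plus the
+ * triangle and line GPU batches built from them. */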
+struct PBVHBatch {
+ Vector<int> vbos;
+ string key;
+ GPUBatch *tris = nullptr, *lines = nullptr;
+ int tris_count = 0, lines_count = 0;
+
+ void sort_vbos(Vector<PBVHVbo> &master_vbos)
+ {
+ struct cmp {
+ Vector<PBVHVbo> &master_vbos;
+
+ cmp(Vector<PBVHVbo> &_master_vbos) : master_vbos(_master_vbos)
+ {
+ }
+
+ bool operator()(const int &a, const int &b)
+ {
+ return master_vbos[a].key < master_vbos[b].key;
+ }
+ };
+
+ std::sort(vbos.begin(), vbos.end(), cmp(master_vbos));
+ }
+
+ string build_key(Vector<PBVHVbo> &master_vbos)
+ {
+ key = "";
+
+ sort_vbos(master_vbos);
+
+ for (int vbo_i : vbos) {
+ key += master_vbos[vbo_i].key + ":";
+ }
+
+ return key;
+ }
+};
+
+static CustomData *get_cdata(eAttrDomain domain, PBVH_GPU_Args *args)
+{
+ switch (domain) {
+ case ATTR_DOMAIN_POINT:
+ return args->vdata;
+ case ATTR_DOMAIN_CORNER:
+ return args->ldata;
+ case ATTR_DOMAIN_FACE:
+ return args->pdata;
+ default:
+ return nullptr;
+ }
+}
+
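+/* Per-PBVH-node GPU data: attribute VBOs, shared index buffers, and the
+ * batches (keyed by requested attribute set) that reference them. */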
+struct PBVHBatches {
+ Vector<PBVHVbo> vbos;
+ Map<string, PBVHBatch> batches;
+ GPUIndexBuf *tri_index = nullptr;
+ GPUIndexBuf *lines_index = nullptr;
+ int faces_count = 0; /* Used by PBVH_BMESH and PBVH_GRIDS */
+ int tris_count = 0, lines_count = 0;
+ bool needs_tri_index = false;
+
+ int material_index = 0;
+
+ int count_faces(PBVH_GPU_Args *args)
+ {
+ int count = 0;
+
+ switch (args->pbvh_type) {
+ case PBVH_FACES: {
+ for (int i = 0; i < args->totprim; i++) {
+ int face_index = args->mlooptri[args->prim_indices[i]].poly;
+
+ if (args->hide_poly && args->hide_poly[face_index]) {
+ continue;
+ }
+
+ count++;
+ }
+ break;
+ }
+ case PBVH_GRIDS: {
+ count = BKE_pbvh_count_grid_quads((BLI_bitmap **)args->grid_hidden,
+ args->grid_indices,
+ args->totprim,
+ args->ccg_key.grid_size);
+
+ break;
+ }
+ case PBVH_BMESH: {
+ GSET_FOREACH_BEGIN (BMFace *, f, args->bm_faces) {
+ if (!BM_elem_flag_test(f, BM_ELEM_HIDDEN)) {
+ count++;
+ }
+ }
+ GSET_FOREACH_END();
+ }
+ }
+
+ return count;
+ }
+
+ PBVHBatches(PBVH_GPU_Args *args)
+ {
+ faces_count = count_faces(args);
+
+ if (args->pbvh_type == PBVH_BMESH) {
+ tris_count = faces_count;
+ }
+ }
+
+ ~PBVHBatches()
+ {
+ for (PBVHBatch &batch : batches.values()) {
+ GPU_BATCH_DISCARD_SAFE(batch.tris);
+ GPU_BATCH_DISCARD_SAFE(batch.lines);
+ }
+
+ for (PBVHVbo &vbo : vbos) {
+ GPU_vertbuf_discard(vbo.vert_buf);
+ }
+
+ GPU_INDEXBUF_DISCARD_SAFE(tri_index);
+ GPU_INDEXBUF_DISCARD_SAFE(lines_index);
+ }
+
+ string build_key(PBVHAttrReq *attrs, int attrs_num)
+ {
+ string key;
+ PBVHBatch batch;
+ Vector<PBVHVbo> vbos;
+
+ for (int i : IndexRange(attrs_num)) {
+ PBVHAttrReq *attr = attrs + i;
+
+ PBVHVbo vbo(attr->domain, attr->type, string(attr->name));
+ vbo.build_key();
+
+ vbos.append(vbo);
+ batch.vbos.append(i);
+ }
+
+ batch.build_key(vbos);
+ return batch.key;
+ }
+
+ bool has_vbo(eAttrDomain domain, int type, string name)
+ {
+ for (PBVHVbo &vbo : vbos) {
+ if (vbo.domain == domain && vbo.type == type && vbo.name == name) {
+ return true;
+ }
+ }
+
+ return false;
+ }
+
+ int get_vbo_index(PBVHVbo *vbo)
+ {
+ for (int i : IndexRange(vbos.size())) {
+ if (vbo == &vbos[i]) {
+ return i;
+ }
+ }
+
+ return -1;
+ }
+
+ PBVHVbo *get_vbo(eAttrDomain domain, int type, string name)
+ {
+ for (PBVHVbo &vbo : vbos) {
+ if (vbo.domain == domain && vbo.type == type && vbo.name == name) {
+ return &vbo;
+ }
+ }
+
+ return nullptr;
+ }
+
+ bool has_batch(PBVHAttrReq *attrs, int attrs_num)
+ {
+ return batches.contains(build_key(attrs, attrs_num));
+ }
+
+ PBVHBatch &ensure_batch(PBVHAttrReq *attrs, int attrs_num, PBVH_GPU_Args *args)
+ {
+ if (!has_batch(attrs, attrs_num)) {
+ create_batch(attrs, attrs_num, args);
+ }
+
+ return batches.lookup(build_key(attrs, attrs_num));
+ }
+
+ void fill_vbo_normal_faces(
+ PBVHVbo &vbo,
+ PBVH_GPU_Args *args,
+ std::function<void(std::function<void(int, int, int, const MLoopTri *)> callback)>
+ foreach_faces,
+ GPUVertBufRaw *access)
+ {
+ float fno[3];
+ short no[3];
+ int last_poly = -1;
+ bool smooth = false;
+
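+    /* NOTE: `smooth` is set while the current poly is flat-shaded: its face
+     * normal is computed once when the poly changes and reused for every
+     * corner; smooth polys take the per-vertex normal below instead. */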
+ foreach_faces([&](int buffer_i, int tri_i, int vertex_i, const MLoopTri *tri) {
+ const MPoly *mp = args->mpoly + tri->poly;
+
+ if (tri->poly != last_poly) {
+ last_poly = tri->poly;
+
+ if (!(mp->flag & ME_SMOOTH)) {
+ smooth = true;
+ BKE_mesh_calc_poly_normal(mp, args->mloop + mp->loopstart, args->mvert, fno);
+ normal_float_to_short_v3(no, fno);
+ }
+ else {
+ smooth = false;
+ }
+ }
+
+ if (!smooth) {
+ normal_float_to_short_v3(no, args->vert_normals[vertex_i]);
+ }
+
+ *static_cast<short3 *>(GPU_vertbuf_raw_step(access)) = no;
+ });
+ }
+
+ void fill_vbo_grids_intern(
+ PBVHVbo &vbo,
+ PBVH_GPU_Args *args,
+ std::function<
+ void(std::function<void(int x, int y, int grid_index, CCGElem *elems[4], int i)> func)>
+ foreach_grids)
+ {
+ uint vert_per_grid = square_i(args->ccg_key.grid_size - 1) * 4;
+ uint vert_count = args->totprim * vert_per_grid;
+
+ int existing_num = GPU_vertbuf_get_vertex_len(vbo.vert_buf);
+ void *existing_data = GPU_vertbuf_get_data(vbo.vert_buf);
+
+ if (existing_data == NULL || existing_num != vert_count) {
+ /* Allocate buffer if not allocated yet or size changed. */
+ GPU_vertbuf_data_alloc(vbo.vert_buf, vert_count);
+ }
+
+ GPUVertBufRaw access;
+ GPU_vertbuf_attr_get_raw_data(vbo.vert_buf, 0, &access);
+
+ switch (vbo.type) {
+ case CD_PBVH_CO_TYPE:
+ foreach_grids([&](int x, int y, int grid_index, CCGElem *elems[4], int i) {
+ float *co = CCG_elem_co(&args->ccg_key, elems[i]);
+
+ *static_cast<float3 *>(GPU_vertbuf_raw_step(&access)) = co;
+ });
+ break;
+
+ case CD_PBVH_NO_TYPE:
+ foreach_grids([&](int x, int y, int grid_index, CCGElem *elems[4], int i) {
+ float3 no(0.0f, 0.0f, 0.0f);
+
+ const bool smooth = args->grid_flag_mats[grid_index].flag & ME_SMOOTH;
+
+ if (smooth) {
+ no = CCG_elem_no(&args->ccg_key, elems[0]);
+ }
+ else {
+ for (int j = 0; j < 4; j++) {
+ no += CCG_elem_no(&args->ccg_key, elems[j]);
+ }
+ }
+
+ normalize_v3(no);
+ short sno[3];
+
+ normal_float_to_short_v3(sno, no);
+
+ *static_cast<short3 *>(GPU_vertbuf_raw_step(&access)) = sno;
+ });
+ break;
+
+ case CD_PBVH_MASK_TYPE:
+ foreach_grids([&](int x, int y, int grid_index, CCGElem *elems[4], int i) {
+ float *mask = CCG_elem_mask(&args->ccg_key, elems[i]);
+
+ *static_cast<uchar *>(GPU_vertbuf_raw_step(&access)) = mask ? (uchar)(*mask * 255.0f) :
+ 255;
+ });
+ break;
+
+ case CD_PBVH_FSET_TYPE: {
+ int *face_sets = args->face_sets;
+
+ if (!face_sets) {
+ uchar white[3] = {UCHAR_MAX, UCHAR_MAX, UCHAR_MAX};
+
+ foreach_grids([&](int x, int y, int grid_index, CCGElem *elems[4], int i) {
+          *static_cast<uchar3 *>(GPU_vertbuf_raw_step(&access)) = white;
+ });
+ }
+ else {
+ foreach_grids([&](int x, int y, int grid_index, CCGElem *elems[4], int i) {
+ uchar face_set_color[3] = {UCHAR_MAX, UCHAR_MAX, UCHAR_MAX};
+
+ if (face_sets) {
+ const int face_index = BKE_subdiv_ccg_grid_to_face_index(args->subdiv_ccg,
+ grid_index);
+ const int fset = face_sets[face_index];
+
+ /* Skip for the default color Face Set to render it white. */
+ if (fset != args->face_sets_color_default) {
+ BKE_paint_face_set_overlay_color_get(
+ fset, args->face_sets_color_seed, face_set_color);
+ }
+ }
+
+ *static_cast<uchar3 *>(GPU_vertbuf_raw_step(&access)) = face_set_color;
+ });
+ }
+ break;
+ }
+ }
+ }
+
+ void fill_vbo_grids(PBVHVbo &vbo, PBVH_GPU_Args *args)
+ {
+ int gridsize = args->ccg_key.grid_size;
+
+ uint totgrid = args->totprim;
+
+ auto foreach_solid =
+ [&](std::function<void(int x, int y, int grid_index, CCGElem *elems[4], int i)> func) {
+ for (int i = 0; i < totgrid; i++) {
+ const int grid_index = args->grid_indices[i];
+
+ CCGElem *grid = args->grids[grid_index];
+
+ for (int y = 0; y < gridsize - 1; y++) {
+ for (int x = 0; x < gridsize - 1; x++) {
+ CCGElem *elems[4] = {
+ CCG_grid_elem(&args->ccg_key, grid, x, y),
+ CCG_grid_elem(&args->ccg_key, grid, x + 1, y),
+ CCG_grid_elem(&args->ccg_key, grid, x + 1, y + 1),
+ CCG_grid_elem(&args->ccg_key, grid, x, y + 1),
+ };
+
+ func(x, y, grid_index, elems, 0);
+ func(x + 1, y, grid_index, elems, 1);
+ func(x + 1, y + 1, grid_index, elems, 2);
+ func(x, y + 1, grid_index, elems, 3);
+ }
+ }
+ }
+ };
+
+ auto foreach_indexed =
+ [&](std::function<void(int x, int y, int grid_index, CCGElem *elems[4], int i)> func) {
+ for (int i = 0; i < totgrid; i++) {
+ const int grid_index = args->grid_indices[i];
+
+ CCGElem *grid = args->grids[grid_index];
+
+ for (int y = 0; y < gridsize; y++) {
+ for (int x = 0; x < gridsize; x++) {
+ CCGElem *elems[4] = {
+ CCG_grid_elem(&args->ccg_key, grid, x, y),
+ CCG_grid_elem(&args->ccg_key, grid, x + 1, y),
+ CCG_grid_elem(&args->ccg_key, grid, x + 1, y + 1),
+ CCG_grid_elem(&args->ccg_key, grid, x, y + 1),
+ };
+
+ func(x, y, grid_index, elems, 0);
+ }
+ }
+ }
+ };
+
+ if (needs_tri_index) {
+ fill_vbo_grids_intern(vbo, args, foreach_indexed);
+ }
+ else {
+ fill_vbo_grids_intern(vbo, args, foreach_solid);
+ }
+ }
+
+  void fill_vbo_faces(PBVHVbo &vbo, PBVH_GPU_Args *args)
+ {
+ int totvert = args->totprim * 3;
+
+ auto foreach_faces =
+ [&](std::function<void(int buffer_i, int tri_i, int vertex_i, const MLoopTri *tri)> func) {
+ int buffer_i = 0;
+ const MLoop *mloop = args->mloop;
+
+ for (int i : IndexRange(args->totprim)) {
+ int face_index = args->mlooptri[args->prim_indices[i]].poly;
+
+ if (args->hide_poly && args->hide_poly[face_index]) {
+ continue;
+ }
+
+ const MLoopTri *tri = args->mlooptri + args->prim_indices[i];
+
+ for (int j : IndexRange(3)) {
+ func(buffer_i, j, mloop[tri->tri[j]].v, tri);
+ buffer_i++;
+ }
+ }
+ };
+
+ int existing_num = GPU_vertbuf_get_vertex_len(vbo.vert_buf);
+ void *existing_data = GPU_vertbuf_get_data(vbo.vert_buf);
+
+ if (existing_data == NULL || existing_num != totvert) {
+ /* Allocate buffer if not allocated yet or size changed. */
+ GPU_vertbuf_data_alloc(vbo.vert_buf, totvert);
+ }
+
+ GPUVertBufRaw access;
+ GPU_vertbuf_attr_get_raw_data(vbo.vert_buf, 0, &access);
+
+ switch (vbo.type) {
+ case CD_PBVH_CO_TYPE:
+ foreach_faces([&](int buffer_i, int tri_i, int vertex_i, const MLoopTri *tri) {
+ *static_cast<float3 *>(GPU_vertbuf_raw_step(&access)) = args->mvert[vertex_i].co;
+ });
+ break;
+ case CD_PBVH_NO_TYPE:
+ fill_vbo_normal_faces(vbo, args, foreach_faces, &access);
+ break;
+ case CD_PBVH_MASK_TYPE: {
+        float *mask = static_cast<float *>(CustomData_get_layer(args->vdata, CD_PAINT_MASK));
+
+        foreach_faces([&](int buffer_i, int tri_i, int vertex_i, const MLoopTri *tri) {
+          /* The mask layer may be absent; fall back to fully unmasked. */
+          *static_cast<uchar *>(GPU_vertbuf_raw_step(&access)) =
+              mask ? (uchar)(mask[vertex_i] * 255.0f) : 255;
+        });
+
+ break;
+ }
+ case CD_PBVH_FSET_TYPE: {
+ int *face_sets = static_cast<int *>(
+ CustomData_get_layer(args->pdata, CD_SCULPT_FACE_SETS));
+ int last_poly = -1;
+ uchar fset_color[3] = {255, 255, 255};
+
+ foreach_faces([&](int buffer_i, int tri_i, int vertex_i, const MLoopTri *tri) {
+        if (last_poly != tri->poly && face_sets) {
+ last_poly = tri->poly;
+
+ const int fset = abs(face_sets[tri->poly]);
+
+ if (fset != args->face_sets_color_default) {
+ BKE_paint_face_set_overlay_color_get(fset, args->face_sets_color_seed, fset_color);
+ }
+ else {
+ /* Skip for the default color face set to render it white. */
+ fset_color[0] = fset_color[1] = fset_color[2] = 255;
+ }
+ }
+
+ *static_cast<uchar3 *>(GPU_vertbuf_raw_step(&access)) = fset_color;
+ });
+
+ break;
+ }
+ case CD_MLOOPUV: {
+ MLoopUV *mloopuv = static_cast<MLoopUV *>(
+ CustomData_get_layer_named(args->ldata, CD_MLOOPUV, vbo.name.c_str()));
+
+ foreach_faces([&](int buffer_i, int tri_i, int vertex_i, const MLoopTri *tri) {
+ *static_cast<float2 *>(GPU_vertbuf_raw_step(&access)) = mloopuv[tri->tri[tri_i]].uv;
+ });
+ break;
+ }
+ case CD_PROP_COLOR:
+ if (vbo.domain == ATTR_DOMAIN_POINT) {
+ MPropCol *mpropcol = static_cast<MPropCol *>(
+ CustomData_get_layer_named(args->vdata, CD_PROP_COLOR, vbo.name.c_str()));
+
+ foreach_faces([&](int buffer_i, int tri_i, int vertex_i, const MLoopTri *tri) {
+ ushort color[4];
+ MPropCol *col = mpropcol + vertex_i;
+
+ color[0] = unit_float_to_ushort_clamp(col->color[0]);
+ color[1] = unit_float_to_ushort_clamp(col->color[1]);
+ color[2] = unit_float_to_ushort_clamp(col->color[2]);
+ color[3] = unit_float_to_ushort_clamp(col->color[3]);
+
+ *static_cast<ushort4 *>(GPU_vertbuf_raw_step(&access)) = color;
+ });
+ }
+ else if (vbo.domain == ATTR_DOMAIN_CORNER) {
+ MPropCol *mpropcol = static_cast<MPropCol *>(
+ CustomData_get_layer_named(args->ldata, CD_PROP_COLOR, vbo.name.c_str()));
+
+ foreach_faces([&](int buffer_i, int tri_i, int vertex_i, const MLoopTri *tri) {
+ ushort color[4];
+ MPropCol *col = mpropcol + tri->tri[tri_i];
+
+ color[0] = unit_float_to_ushort_clamp(col->color[0]);
+ color[1] = unit_float_to_ushort_clamp(col->color[1]);
+ color[2] = unit_float_to_ushort_clamp(col->color[2]);
+ color[3] = unit_float_to_ushort_clamp(col->color[3]);
+
+ *static_cast<ushort4 *>(GPU_vertbuf_raw_step(&access)) = color;
+ });
+ }
+ break;
+ case CD_PROP_BYTE_COLOR:
+ if (vbo.domain == ATTR_DOMAIN_POINT) {
+ MLoopCol *mbytecol = static_cast<MLoopCol *>(
+ CustomData_get_layer_named(args->vdata, CD_PROP_BYTE_COLOR, vbo.name.c_str()));
+
+ foreach_faces([&](int buffer_i, int tri_i, int vertex_i, const MLoopTri *tri) {
+ ushort color[4];
+ MLoopCol *col = mbytecol + vertex_i;
+
+ color[0] = unit_float_to_ushort_clamp(BLI_color_from_srgb_table[col->r]);
+ color[1] = unit_float_to_ushort_clamp(BLI_color_from_srgb_table[col->g]);
+ color[2] = unit_float_to_ushort_clamp(BLI_color_from_srgb_table[col->b]);
+ color[3] = col->a * 257;
+
+ *static_cast<ushort4 *>(GPU_vertbuf_raw_step(&access)) = color;
+ });
+ }
+ else if (vbo.domain == ATTR_DOMAIN_CORNER) {
+ MLoopCol *mbytecol = static_cast<MLoopCol *>(
+ CustomData_get_layer_named(args->ldata, CD_PROP_BYTE_COLOR, vbo.name.c_str()));
+
+ foreach_faces([&](int buffer_i, int tri_i, int vertex_i, const MLoopTri *tri) {
+ ushort color[4];
+ MLoopCol *col = mbytecol + tri->tri[tri_i];
+
+ color[0] = unit_float_to_ushort_clamp(BLI_color_from_srgb_table[col->r]);
+ color[1] = unit_float_to_ushort_clamp(BLI_color_from_srgb_table[col->g]);
+ color[2] = unit_float_to_ushort_clamp(BLI_color_from_srgb_table[col->b]);
+ color[3] = col->a * 257;
+
+ *static_cast<ushort4 *>(GPU_vertbuf_raw_step(&access)) = color;
+ });
+ }
+ break;
+ }
+ }
+
+ void gpu_flush()
+ {
+ for (PBVHVbo &vbo : vbos) {
+ if (vbo.vert_buf && GPU_vertbuf_get_data(vbo.vert_buf)) {
+ GPU_vertbuf_use(vbo.vert_buf);
+ }
+ }
+ }
+
+ void update(PBVH_GPU_Args *args)
+ {
+ check_index_buffers(args);
+
+ for (PBVHVbo &vbo : vbos) {
+ fill_vbo(vbo, args);
+ }
+ }
+
+ void fill_vbo_bmesh(PBVHVbo &vbo, PBVH_GPU_Args *args)
+ {
+ auto foreach_bmesh = [&](std::function<void(BMLoop * l)> callback) {
+ GSET_FOREACH_BEGIN (BMFace *, f, args->bm_faces) {
+ if (BM_elem_flag_test(f, BM_ELEM_HIDDEN)) {
+ continue;
+ }
+
+ BMLoop *l = f->l_first;
+ do {
+ callback(l);
+ } while ((l = l->next) != f->l_first);
+ }
+ GSET_FOREACH_END();
+ };
+
+    /* Recount visible faces: in PBVH_BMESH every face is a triangle. */
+    faces_count = 0;
+    GSET_FOREACH_BEGIN (BMFace *, f, args->bm_faces) {
+      if (!BM_elem_flag_test(f, BM_ELEM_HIDDEN)) {
+        faces_count++;
+      }
+    }
+    GSET_FOREACH_END();
+ tris_count = faces_count;
+
+ int existing_num = GPU_vertbuf_get_vertex_len(vbo.vert_buf);
+ void *existing_data = GPU_vertbuf_get_data(vbo.vert_buf);
+
+ int vert_count = tris_count * 3;
+
+ if (existing_data == nullptr || existing_num != vert_count) {
+ /* Allocate buffer if not allocated yet or size changed. */
+ GPU_vertbuf_data_alloc(vbo.vert_buf, vert_count);
+ }
+
+ GPUVertBufRaw access;
+ GPU_vertbuf_attr_get_raw_data(vbo.vert_buf, 0, &access);
+
+ switch (vbo.type) {
+ case CD_PBVH_CO_TYPE:
+ foreach_bmesh(
+ [&](BMLoop *l) { *static_cast<float3 *>(GPU_vertbuf_raw_step(&access)) = l->v->co; });
+ break;
+
+ case CD_PBVH_NO_TYPE:
+ foreach_bmesh([&](BMLoop *l) {
+ short no[3];
+ bool smooth = BM_elem_flag_test(l->f, BM_ELEM_SMOOTH);
+
+ normal_float_to_short_v3(no, smooth ? l->v->no : l->f->no);
+ *static_cast<short3 *>(GPU_vertbuf_raw_step(&access)) = no;
+ });
+ break;
+
+ case CD_PBVH_MASK_TYPE: {
+ int cd_mask = args->cd_mask_layer;
+
+ if (cd_mask == -1) {
+          foreach_bmesh(
+              [&](BMLoop *l) { *static_cast<uchar *>(GPU_vertbuf_raw_step(&access)) = 255; });
+ }
+ else {
+ foreach_bmesh([&](BMLoop *l) {
+ float mask = BM_ELEM_CD_GET_FLOAT(l->v, cd_mask);
+
+ *static_cast<uchar *>(GPU_vertbuf_raw_step(&access)) = (uchar)(mask * 255.0f);
+ });
+ }
+ break;
+ }
+ case CD_PBVH_FSET_TYPE: {
+ uchar3 white(255, 255, 255);
+
+ foreach_bmesh(
+ [&](BMLoop *l) { *static_cast<uchar3 *>(GPU_vertbuf_raw_step(&access)) = white; });
+ }
+ }
+ }
+
+ void fill_vbo(PBVHVbo &vbo, PBVH_GPU_Args *args)
+ {
+ switch (args->pbvh_type) {
+ case PBVH_FACES:
+ fill_vbo_faces(vbo, args);
+ break;
+ case PBVH_GRIDS:
+ fill_vbo_grids(vbo, args);
+ break;
+ case PBVH_BMESH:
+ fill_vbo_bmesh(vbo, args);
+ break;
+ }
+ }
+
+ void create_vbo(eAttrDomain domain, const uint32_t type, string name, PBVH_GPU_Args *args)
+ {
+ PBVHVbo vbo(domain, type, name);
+ GPUVertFormat format;
+
+ bool need_aliases = !ELEM(
+ type, CD_PBVH_CO_TYPE, CD_PBVH_NO_TYPE, CD_PBVH_FSET_TYPE, CD_PBVH_MASK_TYPE);
+
+ GPU_vertformat_clear(&format);
+
+ switch (type) {
+ case CD_PBVH_CO_TYPE:
+ GPU_vertformat_attr_add(&format, "pos", GPU_COMP_F32, 3, GPU_FETCH_FLOAT);
+ break;
+ case CD_PROP_FLOAT3:
+ GPU_vertformat_attr_add(&format, "a", GPU_COMP_F32, 3, GPU_FETCH_FLOAT);
+ need_aliases = true;
+ break;
+ case CD_PBVH_NO_TYPE:
+ GPU_vertformat_attr_add(&format, "nor", GPU_COMP_I16, 3, GPU_FETCH_INT_TO_FLOAT_UNIT);
+ break;
+ case CD_PROP_FLOAT2:
+ GPU_vertformat_attr_add(&format, "a", GPU_COMP_F32, 2, GPU_FETCH_FLOAT);
+ need_aliases = true;
+ break;
+ case CD_MLOOPUV:
+ GPU_vertformat_attr_add(&format, "uvs", GPU_COMP_F32, 2, GPU_FETCH_FLOAT);
+ need_aliases = true;
+ break;
+ case CD_PBVH_FSET_TYPE:
+ GPU_vertformat_attr_add(&format, "fset", GPU_COMP_U8, 3, GPU_FETCH_INT_TO_FLOAT_UNIT);
+ break;
+ case CD_PBVH_MASK_TYPE:
+ GPU_vertformat_attr_add(&format, "msk", GPU_COMP_U8, 1, GPU_FETCH_INT_TO_FLOAT_UNIT);
+ break;
+ case CD_PROP_FLOAT:
+ GPU_vertformat_attr_add(&format, "f", GPU_COMP_F32, 1, GPU_FETCH_FLOAT);
+ need_aliases = true;
+ break;
+ case CD_PROP_COLOR:
+ case CD_PROP_BYTE_COLOR: {
+ GPU_vertformat_attr_add(&format, "c", GPU_COMP_U16, 4, GPU_FETCH_INT_TO_FLOAT_UNIT);
+ need_aliases = true;
+ break;
+ }
+ default:
+ BLI_assert(0);
+ printf("%s: error\n", __func__);
+
+ break;
+ }
+
+ if (need_aliases) {
+ CustomData *cdata = get_cdata(domain, args);
+ int layer_i = cdata ? CustomData_get_named_layer_index(cdata, type, name.c_str()) : -1;
+ CustomDataLayer *layer = layer_i != -1 ? cdata->layers + layer_i : nullptr;
+
+ if (layer) {
+ bool is_render, is_active;
+ const char *prefix = "a";
+
+ if (ELEM(type, CD_PROP_COLOR, CD_PROP_BYTE_COLOR)) {
+ prefix = "c";
+
+ CustomDataLayer *render = BKE_id_attributes_render_color_get(&args->me->id);
+ CustomDataLayer *active = BKE_id_attributes_active_color_get(&args->me->id);
+
+ is_render = render && layer && STREQ(render->name, layer->name);
+ is_active = active && layer && STREQ(active->name, layer->name);
+ }
+ else {
+ switch (type) {
+ case CD_MLOOPUV:
+ prefix = "u";
+ break;
+ default:
+ break;
+ }
+
+ const char *active_name = CustomData_get_active_layer_name(cdata, type);
+ const char *render_name = CustomData_get_render_layer_name(cdata, type);
+
+ is_active = active_name && STREQ(layer->name, active_name);
+ is_render = render_name && STREQ(layer->name, render_name);
+ }
+
+ DRW_cdlayer_attr_aliases_add(&format, prefix, cdata, layer, is_render, is_active);
+ }
+ else {
+ printf("%s: error looking up attribute %s\n", __func__, name.c_str());
+ }
+ }
+
+ vbo.vert_buf = GPU_vertbuf_create_with_format_ex(&format, GPU_USAGE_STATIC);
+ vbo.build_key();
+ fill_vbo(vbo, args);
+
+ vbos.append(vbo);
+ }
+
+ void update_pre(PBVH_GPU_Args *args)
+ {
+ if (args->pbvh_type == PBVH_BMESH) {
+ int count = count_faces(args);
+
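+      /* Topology changed: clear CPU-side VBO data and drop the index
+       * buffers so the next update rebuilds everything. */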
+ if (faces_count != count) {
+ for (PBVHVbo &vbo : vbos) {
+ vbo.clear_data();
+ }
+
+ GPU_INDEXBUF_DISCARD_SAFE(tri_index);
+ GPU_INDEXBUF_DISCARD_SAFE(lines_index);
+
+ tri_index = lines_index = nullptr;
+ faces_count = tris_count = count;
+ }
+ }
+ }
+
+ void create_index_faces(PBVH_GPU_Args *args)
+ {
+    /* Calculate the number of edges. */
+ int edge_count = 0;
+ for (int i = 0; i < args->totprim; i++) {
+ const MLoopTri *lt = args->mlooptri + args->prim_indices[i];
+
+ if (args->hide_poly && args->hide_poly[lt->poly]) {
+ continue;
+ }
+ int r_edges[3];
+ BKE_mesh_looptri_get_real_edges(args->me, lt, r_edges);
+
+ if (r_edges[0] != -1) {
+ edge_count++;
+ }
+ if (r_edges[1] != -1) {
+ edge_count++;
+ }
+ if (r_edges[2] != -1) {
+ edge_count++;
+ }
+ }
+
+ GPUIndexBufBuilder elb_lines;
+ GPU_indexbuf_init(&elb_lines, GPU_PRIM_LINES, edge_count * 2, INT_MAX);
+
+ int vertex_i = 0;
+ for (int i = 0; i < args->totprim; i++) {
+ const MLoopTri *lt = args->mlooptri + args->prim_indices[i];
+
+ if (args->hide_poly && args->hide_poly[lt->poly]) {
+ continue;
+ }
+
+ int r_edges[3];
+ BKE_mesh_looptri_get_real_edges(args->me, lt, r_edges);
+
+ if (r_edges[0] != -1) {
+ GPU_indexbuf_add_line_verts(&elb_lines, vertex_i, vertex_i + 1);
+ }
+ if (r_edges[1] != -1) {
+ GPU_indexbuf_add_line_verts(&elb_lines, vertex_i + 1, vertex_i + 2);
+ }
+ if (r_edges[2] != -1) {
+ GPU_indexbuf_add_line_verts(&elb_lines, vertex_i + 2, vertex_i);
+ }
+
+ vertex_i += 3;
+ }
+
+ lines_index = GPU_indexbuf_build(&elb_lines);
+ }
+
+ void create_index_bmesh(PBVH_GPU_Args *args)
+ {
+ GPUIndexBufBuilder elb_lines;
+ GPU_indexbuf_init(&elb_lines, GPU_PRIM_LINES, tris_count * 3 * 2, INT_MAX);
+
+ int v_index = 0;
+ lines_count = 0;
+
+ GSET_FOREACH_BEGIN (BMFace *, f, args->bm_faces) {
+ if (BM_elem_flag_test(f, BM_ELEM_HIDDEN)) {
+ continue;
+ }
+
+ GPU_indexbuf_add_line_verts(&elb_lines, v_index, v_index + 1);
+ GPU_indexbuf_add_line_verts(&elb_lines, v_index + 1, v_index + 2);
+ GPU_indexbuf_add_line_verts(&elb_lines, v_index + 2, v_index);
+
+ lines_count += 3;
+ v_index += 3;
+ }
+ GSET_FOREACH_END();
+
+ lines_index = GPU_indexbuf_build(&elb_lines);
+ }
+
+ void create_index_grids(PBVH_GPU_Args *args)
+ {
+ needs_tri_index = true;
+ int gridsize = args->ccg_key.grid_size;
+ int totgrid = args->totprim;
+
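+    /* A shared-vertex index buffer is only usable when every grid is
+     * smooth-shaded; a single flat-shaded grid forces the duplicated
+     * corner-vertex layout instead. */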
+ for (int i : IndexRange(args->totprim)) {
+ int grid_index = args->grid_indices[i];
+ bool smooth = args->grid_flag_mats[grid_index].flag & ME_SMOOTH;
+ BLI_bitmap *gh = args->grid_hidden[grid_index];
+
+ for (int y = 0; y < gridsize - 1; y++) {
+ for (int x = 0; x < gridsize - 1; x++) {
+ if (gh && paint_is_grid_face_hidden(gh, gridsize, x, y)) {
+ /* Skip hidden faces by just setting smooth to true. */
+ smooth = true;
+ goto outer_loop_break;
+ }
+ }
+ }
+
+ outer_loop_break:
+
+ if (!smooth) {
+ needs_tri_index = false;
+ break;
+ }
+ }
+
+ GPUIndexBufBuilder elb, elb_lines;
+
+ CCGKey *key = &args->ccg_key;
+
+ uint visible_quad_len = BKE_pbvh_count_grid_quads(
+ (BLI_bitmap **)args->grid_hidden, args->grid_indices, totgrid, key->grid_size);
+
+ GPU_indexbuf_init(&elb, GPU_PRIM_TRIS, 2 * visible_quad_len, INT_MAX);
+ GPU_indexbuf_init(
+ &elb_lines, GPU_PRIM_LINES, 2 * totgrid * gridsize * (gridsize - 1), INT_MAX);
+
+ if (needs_tri_index) {
+ uint offset = 0;
+ const uint grid_vert_len = gridsize * gridsize;
+ for (int i = 0; i < totgrid; i++, offset += grid_vert_len) {
+ uint v0, v1, v2, v3;
+ bool grid_visible = false;
+
+ BLI_bitmap *gh = args->grid_hidden[args->grid_indices[i]];
+
+ for (int j = 0; j < gridsize - 1; j++) {
+ for (int k = 0; k < gridsize - 1; k++) {
+ /* Skip hidden grid face */
+ if (gh && paint_is_grid_face_hidden(gh, gridsize, k, j)) {
+ continue;
+ }
+ /* Indices in a Clockwise QUAD disposition. */
+ v0 = offset + j * gridsize + k;
+ v1 = v0 + 1;
+ v2 = v1 + gridsize;
+ v3 = v2 - 1;
+
+ GPU_indexbuf_add_tri_verts(&elb, v0, v2, v1);
+ GPU_indexbuf_add_tri_verts(&elb, v0, v3, v2);
+
+ GPU_indexbuf_add_line_verts(&elb_lines, v0, v1);
+ GPU_indexbuf_add_line_verts(&elb_lines, v0, v3);
+
+ if (j + 2 == gridsize) {
+ GPU_indexbuf_add_line_verts(&elb_lines, v2, v3);
+ }
+ grid_visible = true;
+ }
+
+ if (grid_visible) {
+ GPU_indexbuf_add_line_verts(&elb_lines, v1, v2);
+ }
+ }
+ }
+ }
+ else {
+ uint offset = 0;
+ const uint grid_vert_len = square_uint(gridsize - 1) * 4;
+ for (int i = 0; i < totgrid; i++, offset += grid_vert_len) {
+ bool grid_visible = false;
+ BLI_bitmap *gh = args->grid_hidden[args->grid_indices[i]];
+
+ uint v0, v1, v2, v3;
+ for (int j = 0; j < gridsize - 1; j++) {
+ for (int k = 0; k < gridsize - 1; k++) {
+ /* Skip hidden grid face */
+ if (gh && paint_is_grid_face_hidden(gh, gridsize, k, j)) {
+ continue;
+ }
+ /* VBO data are in a Clockwise QUAD disposition. */
+ v0 = offset + (j * (gridsize - 1) + k) * 4;
+ v1 = v0 + 1;
+ v2 = v0 + 2;
+ v3 = v0 + 3;
+
+ GPU_indexbuf_add_tri_verts(&elb, v0, v2, v1);
+ GPU_indexbuf_add_tri_verts(&elb, v0, v3, v2);
+
+ GPU_indexbuf_add_line_verts(&elb_lines, v0, v1);
+ GPU_indexbuf_add_line_verts(&elb_lines, v0, v3);
+
+ if (j + 2 == gridsize) {
+ GPU_indexbuf_add_line_verts(&elb_lines, v2, v3);
+ }
+ grid_visible = true;
+ }
+
+ if (grid_visible) {
+ GPU_indexbuf_add_line_verts(&elb_lines, v1, v2);
+ }
+ }
+ }
+ }
+
+ tri_index = GPU_indexbuf_build(&elb);
+ lines_index = GPU_indexbuf_build(&elb_lines);
+ }
+
+ void create_index(PBVH_GPU_Args *args)
+ {
+ switch (args->pbvh_type) {
+ case PBVH_FACES:
+ create_index_faces(args);
+ break;
+ case PBVH_BMESH:
+ create_index_bmesh(args);
+ break;
+ case PBVH_GRIDS:
+ create_index_grids(args);
+ break;
+ }
+
+ for (PBVHBatch &batch : batches.values()) {
+ GPU_batch_elembuf_set(batch.tris, tri_index, false);
+ GPU_batch_elembuf_set(batch.lines, lines_index, false);
+ }
+ }
+
+ void check_index_buffers(PBVH_GPU_Args *args)
+ {
+ if (!lines_index) {
+ create_index(args);
+ }
+ }
+
+ void create_batch(PBVHAttrReq *attrs, int attrs_num, PBVH_GPU_Args *args)
+ {
+ check_index_buffers(args);
+
+ PBVHBatch batch;
+
+ batch.tris = GPU_batch_create(GPU_PRIM_TRIS,
+ nullptr,
+ /* can be NULL if buffer is empty */
+ tri_index);
+ batch.tris_count = tris_count;
+
+ if (lines_index) {
+ batch.lines = GPU_batch_create(GPU_PRIM_LINES, nullptr, lines_index);
+ batch.lines_count = lines_count;
+ }
+
+ for (int i : IndexRange(attrs_num)) {
+ PBVHAttrReq *attr = attrs + i;
+
+ if (!has_vbo(attr->domain, (int)attr->type, attr->name)) {
+ create_vbo(attr->domain, (uint32_t)attr->type, attr->name, args);
+ }
+
+ PBVHVbo *vbo = get_vbo(attr->domain, (uint32_t)attr->type, attr->name);
+ int vbo_i = get_vbo_index(vbo);
+
+ batch.vbos.append(vbo_i);
+ GPU_batch_vertbuf_add_ex(batch.tris, vbo->vert_buf, false);
+
+ if (batch.lines) {
+ GPU_batch_vertbuf_add_ex(batch.lines, vbo->vert_buf, false);
+ }
+ }
+
+ batch.build_key(vbos);
+ batches.add(batch.key, batch);
+ }
+};
+
+void DRW_pbvh_node_update(PBVHBatches *batches, PBVH_GPU_Args *args)
+{
+ batches->update(args);
+}
+
+void DRW_pbvh_node_gpu_flush(PBVHBatches *batches)
+{
+ batches->gpu_flush();
+}
+
+PBVHBatches *DRW_pbvh_node_create(PBVH_GPU_Args *args)
+{
+ PBVHBatches *batches = new PBVHBatches(args);
+ return batches;
+}
+
+void DRW_pbvh_node_free(PBVHBatches *batches)
+{
+ delete batches;
+}
+
+GPUBatch *DRW_pbvh_tris_get(PBVHBatches *batches,
+ PBVHAttrReq *attrs,
+ int attrs_num,
+ PBVH_GPU_Args *args,
+ int *r_prim_count)
+{
+ PBVHBatch &batch = batches->ensure_batch(attrs, attrs_num, args);
+
+ *r_prim_count = batch.tris_count;
+
+ return batch.tris;
+}
+
+GPUBatch *DRW_pbvh_lines_get(PBVHBatches *batches,
+ PBVHAttrReq *attrs,
+ int attrs_num,
+ PBVH_GPU_Args *args,
+ int *r_prim_count)
+{
+ PBVHBatch &batch = batches->ensure_batch(attrs, attrs_num, args);
+
+ *r_prim_count = batch.lines_count;
+
+ return batch.lines;
+}
+
+void DRW_pbvh_update_pre(struct PBVHBatches *batches, struct PBVH_GPU_Args *args)
+{
+ batches->update_pre(args);
+}
+
+int drw_pbvh_material_index_get(struct PBVHBatches *batches)
+{
+ return batches->material_index;
+}
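
A sketch of the per-node lifecycle this C API implies; `args`, `attrs` and
`attrs_num` are assumed to be prepared by the PBVH draw code:

  PBVHBatches *batches = DRW_pbvh_node_create(args);

  DRW_pbvh_update_pre(batches, args);  /* React to BMesh topology changes. */
  DRW_pbvh_node_update(batches, args); /* (Re)fill VBOs, build index buffers. */

  int primcount;
  GPUBatch *tris = DRW_pbvh_tris_get(batches, attrs, attrs_num, args, &primcount);

  DRW_pbvh_node_gpu_flush(batches); /* Upload any CPU-side VBO data. */
  /* ... submit `tris` through a shading group ... */
  DRW_pbvh_node_free(batches);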
diff --git a/source/blender/draw/intern/draw_pbvh.h b/source/blender/draw/intern/draw_pbvh.h
new file mode 100644
index 00000000000..eb3f3d55e0f
--- /dev/null
+++ b/source/blender/draw/intern/draw_pbvh.h
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+
+#pragma once
+
+#include "DNA_customdata_types.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+struct PBVHBatches;
+
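+/* Attribute type codes past CD_NUMTYPES: fake CustomData types used to
+ * request PBVH-internal data (positions, normals, face sets, masks) through
+ * the same PBVHAttrReq mechanism as real layers. */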
+enum {
+ CD_PBVH_CO_TYPE = CD_NUMTYPES,
+ CD_PBVH_NO_TYPE = CD_NUMTYPES + 1,
+ CD_PBVH_FSET_TYPE = CD_NUMTYPES + 2,
+ CD_PBVH_MASK_TYPE = CD_NUMTYPES + 3
+};
+
+int drw_pbvh_material_index_get(struct PBVHBatches *batches);
+
+#ifdef __cplusplus
+}
+#endif