git.blender.org/blender.git
Diffstat (limited to 'source/blender/draw/intern')
-rw-r--r--  source/blender/draw/intern/DRW_gpu_wrapper.hh | 102
-rw-r--r--  source/blender/draw/intern/DRW_render.h | 29
-rw-r--r--  source/blender/draw/intern/draw_attributes.cc | 7
-rw-r--r--  source/blender/draw/intern/draw_attributes.h | 26
-rw-r--r--  source/blender/draw/intern/draw_cache.c | 46
-rw-r--r--  source/blender/draw/intern/draw_cache.h | 19
-rw-r--r--  source/blender/draw/intern/draw_cache_extract.hh | 19
-rw-r--r--  source/blender/draw/intern/draw_cache_extract_mesh.cc | 7
-rw-r--r--  source/blender/draw/intern/draw_cache_extract_mesh_render_data.cc | 37
-rw-r--r--  source/blender/draw/intern/draw_cache_impl.h | 16
-rw-r--r--  source/blender/draw/intern/draw_cache_impl_curve.cc | 45
-rw-r--r--  source/blender/draw/intern/draw_cache_impl_curves.cc | 184
-rw-r--r--  source/blender/draw/intern/draw_cache_impl_gpencil.cc (renamed from source/blender/draw/intern/draw_cache_impl_gpencil.c) | 265
-rw-r--r--  source/blender/draw/intern/draw_cache_impl_lattice.c | 8
-rw-r--r--  source/blender/draw/intern/draw_cache_impl_mesh.cc | 113
-rw-r--r--  source/blender/draw/intern/draw_cache_impl_particles.c | 47
-rw-r--r--  source/blender/draw/intern/draw_cache_impl_pointcloud.cc | 408
-rw-r--r--  source/blender/draw/intern/draw_cache_impl_subdivision.cc | 105
-rw-r--r--  source/blender/draw/intern/draw_cache_impl_volume.cc (renamed from source/blender/draw/intern/draw_cache_impl_volume.c) | 57
-rw-r--r--  source/blender/draw/intern/draw_command.cc | 34
-rw-r--r--  source/blender/draw/intern/draw_command.hh | 41
-rw-r--r--  source/blender/draw/intern/draw_common.c | 17
-rw-r--r--  source/blender/draw/intern/draw_common.h | 9
-rw-r--r--  source/blender/draw/intern/draw_common_shader_shared.h | 5
-rw-r--r--  source/blender/draw/intern/draw_curves.cc | 57
-rw-r--r--  source/blender/draw/intern/draw_curves_private.h | 10
-rw-r--r--  source/blender/draw/intern/draw_debug.cc | 12
-rw-r--r--  source/blender/draw/intern/draw_defines.h | 4
-rw-r--r--  source/blender/draw/intern/draw_fluid.c | 2
-rw-r--r--  source/blender/draw/intern/draw_hair.cc | 27
-rw-r--r--  source/blender/draw/intern/draw_hair_private.h | 5
-rw-r--r--  source/blender/draw/intern/draw_instance_data.c | 139
-rw-r--r--  source/blender/draw/intern/draw_instance_data.h | 8
-rw-r--r--  source/blender/draw/intern/draw_manager.c | 185
-rw-r--r--  source/blender/draw/intern/draw_manager.cc | 34
-rw-r--r--  source/blender/draw/intern/draw_manager.h | 28
-rw-r--r--  source/blender/draw/intern/draw_manager.hh | 32
-rw-r--r--  source/blender/draw/intern/draw_manager_data.cc (renamed from source/blender/draw/intern/draw_manager_data.c) | 811
-rw-r--r--  source/blender/draw/intern/draw_manager_exec.c | 24
-rw-r--r--  source/blender/draw/intern/draw_manager_profiling.c | 48
-rw-r--r--  source/blender/draw/intern/draw_manager_shader.c | 63
-rw-r--r--  source/blender/draw/intern/draw_manager_text.cc (renamed from source/blender/draw/intern/draw_manager_text.c) | 89
-rw-r--r--  source/blender/draw/intern/draw_pass.hh | 46
-rw-r--r--  source/blender/draw/intern/draw_pbvh.cc | 1301
-rw-r--r--  source/blender/draw/intern/draw_pbvh.h | 24
-rw-r--r--  source/blender/draw/intern/draw_pointcloud.cc | 103
-rw-r--r--  source/blender/draw/intern/draw_pointcloud_private.hh | 19
-rw-r--r--  source/blender/draw/intern/draw_resource.cc | 90
-rw-r--r--  source/blender/draw/intern/draw_resource.hh | 4
-rw-r--r--  source/blender/draw/intern/draw_shader.cc | 7
-rw-r--r--  source/blender/draw/intern/draw_shader_shared.h | 64
-rw-r--r--  source/blender/draw/intern/draw_view.cc | 110
-rw-r--r--  source/blender/draw/intern/draw_view.hh | 32
-rw-r--r--  source/blender/draw/intern/draw_volume.cc | 4
-rw-r--r--  source/blender/draw/intern/mesh_extractors/extract_mesh.hh | 18
-rw-r--r--  source/blender/draw/intern/mesh_extractors/extract_mesh_ibo_edituv.cc | 98
-rw-r--r--  source/blender/draw/intern/mesh_extractors/extract_mesh_ibo_fdots.cc | 12
-rw-r--r--  source/blender/draw/intern/mesh_extractors/extract_mesh_ibo_lines.cc | 37
-rw-r--r--  source/blender/draw/intern/mesh_extractors/extract_mesh_ibo_lines_adjacency.cc | 32
-rw-r--r--  source/blender/draw/intern/mesh_extractors/extract_mesh_ibo_lines_paint_mask.cc | 35
-rw-r--r--  source/blender/draw/intern/mesh_extractors/extract_mesh_ibo_points.cc | 32
-rw-r--r--  source/blender/draw/intern/mesh_extractors/extract_mesh_ibo_tris.cc | 14
-rw-r--r--  source/blender/draw/intern/mesh_extractors/extract_mesh_vbo_attributes.cc | 48
-rw-r--r--  source/blender/draw/intern/mesh_extractors/extract_mesh_vbo_edge_fac.cc | 22
-rw-r--r--  source/blender/draw/intern/mesh_extractors/extract_mesh_vbo_edit_data.cc | 20
-rw-r--r--  source/blender/draw/intern/mesh_extractors/extract_mesh_vbo_edituv_data.cc | 8
-rw-r--r--  source/blender/draw/intern/mesh_extractors/extract_mesh_vbo_edituv_stretch_angle.cc | 12
-rw-r--r--  source/blender/draw/intern/mesh_extractors/extract_mesh_vbo_edituv_stretch_area.cc | 8
-rw-r--r--  source/blender/draw/intern/mesh_extractors/extract_mesh_vbo_fdots_edituv_data.cc | 6
-rw-r--r--  source/blender/draw/intern/mesh_extractors/extract_mesh_vbo_fdots_nor.cc | 16
-rw-r--r--  source/blender/draw/intern/mesh_extractors/extract_mesh_vbo_fdots_pos.cc | 12
-rw-r--r--  source/blender/draw/intern/mesh_extractors/extract_mesh_vbo_fdots_uv.cc | 12
-rw-r--r--  source/blender/draw/intern/mesh_extractors/extract_mesh_vbo_lnor.cc | 16
-rw-r--r--  source/blender/draw/intern/mesh_extractors/extract_mesh_vbo_mesh_analysis.cc | 22
-rw-r--r--  source/blender/draw/intern/mesh_extractors/extract_mesh_vbo_orco.cc | 8
-rw-r--r--  source/blender/draw/intern/mesh_extractors/extract_mesh_vbo_pos_nor.cc | 34
-rw-r--r--  source/blender/draw/intern/mesh_extractors/extract_mesh_vbo_sculpt_data.cc | 16
-rw-r--r--  source/blender/draw/intern/mesh_extractors/extract_mesh_vbo_select_idx.cc | 48
-rw-r--r--  source/blender/draw/intern/mesh_extractors/extract_mesh_vbo_skin_roots.cc | 4
-rw-r--r--  source/blender/draw/intern/mesh_extractors/extract_mesh_vbo_tan.cc | 10
-rw-r--r--  source/blender/draw/intern/mesh_extractors/extract_mesh_vbo_uv.cc | 8
-rw-r--r--  source/blender/draw/intern/mesh_extractors/extract_mesh_vbo_weights.cc | 6
-rw-r--r--  source/blender/draw/intern/shaders/common_debug_print_lib.glsl | 4
-rw-r--r--  source/blender/draw/intern/shaders/common_fxaa_lib.glsl | 2
-rw-r--r--  source/blender/draw/intern/shaders/common_globals_lib.glsl | 148
-rw-r--r--  source/blender/draw/intern/shaders/common_gpencil_lib.glsl | 88
-rw-r--r--  source/blender/draw/intern/shaders/common_intersect_lib.glsl | 14
-rw-r--r--  source/blender/draw/intern/shaders/common_pointcloud_lib.glsl | 78
-rw-r--r--  source/blender/draw/intern/shaders/common_shape_lib.glsl | 2
-rw-r--r--  source/blender/draw/intern/shaders/common_smaa_lib.glsl | 37
-rw-r--r--  source/blender/draw/intern/shaders/common_view_clipping_lib.glsl | 12
-rw-r--r--  source/blender/draw/intern/shaders/common_view_lib.glsl | 75
-rw-r--r--  source/blender/draw/intern/shaders/draw_debug_info.hh | 1
-rw-r--r--  source/blender/draw/intern/shaders/draw_debug_print_display_vert.glsl | 3
-rw-r--r--  source/blender/draw/intern/shaders/draw_object_infos_info.hh | 12
-rw-r--r--  source/blender/draw/intern/shaders/draw_view_info.hh | 34
96 files changed, 4066 insertions(+), 2086 deletions(-)
diff --git a/source/blender/draw/intern/DRW_gpu_wrapper.hh b/source/blender/draw/intern/DRW_gpu_wrapper.hh
index 890cd588527..3be50d471e2 100644
--- a/source/blender/draw/intern/DRW_gpu_wrapper.hh
+++ b/source/blender/draw/intern/DRW_gpu_wrapper.hh
@@ -31,6 +31,10 @@
* discarding all data inside it.
* Data can be accessed using the [] operator.
*
+ * `draw::StorageVectorBuffer<T, len>`
+ * Same as `StorageArrayBuffer` but has a length counter and acts like a `blender::Vector` you can
+ * clear and append to.
+ *
* `draw::StorageBuffer<T>`
* A storage buffer object class inheriting from T.
* Data can be accessed just like a normal T object.
@@ -313,8 +317,8 @@ class UniformBuffer : public T, public detail::UniformCommon<T, 1, false> {
template<
/** Type of the values stored in this uniform buffer. */
typename T,
- /** The number of values that can be stored in this uniform buffer. */
- int64_t len,
+ /** The number of values that can be stored in this storage buffer at creation. */
+ int64_t len = 16u / sizeof(T),
/** True if created on device and no host memory is allocated. */
bool device_only = false>
class StorageArrayBuffer : public detail::StorageCommon<T, len, device_only> {
@@ -357,6 +361,71 @@ class StorageArrayBuffer : public detail::StorageCommon<T, len, device_only> {
}
return this->data_[index];
}
+
+ int64_t size() const
+ {
+ return this->len_;
+ }
+};
+
+template<
+ /** Type of the values stored in this uniform buffer. */
+ typename T,
+ /** The number of values that can be stored in this storage buffer at creation. */
+ int64_t len = 16u / sizeof(T)>
+class StorageVectorBuffer : public StorageArrayBuffer<T, len, false> {
+ private:
+ /* Number of items, not the allocated length. */
+ int64_t item_len_ = 0;
+
+ public:
+ StorageVectorBuffer(const char *name = nullptr) : StorageArrayBuffer<T, len, false>(name){};
+ ~StorageVectorBuffer(){};
+
+ /**
+ * Sets the item count to zero but does not free memory or resize the buffer.
+ */
+ void clear()
+ {
+ item_len_ = 0;
+ }
+
+ /**
+ * Insert a new element at the end of the vector.
+ * This might cause a reallocation when the capacity is exceeded.
+ *
+ * This is similar to std::vector::push_back.
+ */
+ void append(const T &value)
+ {
+ this->append_as(value);
+ }
+ void append(T &&value)
+ {
+ this->append_as(std::move(value));
+ }
+ template<typename... ForwardT> void append_as(ForwardT &&...value)
+ {
+ if (item_len_ >= this->len_) {
+ size_t size = power_of_2_max_u(item_len_ + 1);
+ this->resize(size);
+ }
+ T *ptr = &this->data_[item_len_++];
+ new (ptr) T(std::forward<ForwardT>(value)...);
+ }
+
+ int64_t size() const
+ {
+ return item_len_;
+ }
+
+ bool is_empty() const
+ {
+ return this->size() == 0;
+ }
+
+ /* Avoid confusion with the other clear. */
+ void clear_to_zero() = delete;
};
template<
@@ -855,6 +924,35 @@ class TextureFromPool : public Texture, NonMovable {
GPUTexture *stencil_view() = delete;
};
+class TextureRef : public Texture {
+ public:
+ TextureRef() = default;
+
+ ~TextureRef()
+ {
+ this->tx_ = nullptr;
+ }
+
+ void wrap(GPUTexture *tex)
+ {
+ this->tx_ = tex;
+ }
+
+ /** Remove methods that are forbidden with this type of texture. */
+ bool ensure_1d(int, int, eGPUTextureFormat, float *) = delete;
+ bool ensure_1d_array(int, int, int, eGPUTextureFormat, float *) = delete;
+ bool ensure_2d(int, int, int, eGPUTextureFormat, float *) = delete;
+ bool ensure_2d_array(int, int, int, int, eGPUTextureFormat, float *) = delete;
+ bool ensure_3d(int, int, int, int, eGPUTextureFormat, float *) = delete;
+ bool ensure_cube(int, int, eGPUTextureFormat, float *) = delete;
+ bool ensure_cube_array(int, int, int, eGPUTextureFormat, float *) = delete;
+ void filter_mode(bool) = delete;
+ void free() = delete;
+ GPUTexture *mip_view(int) = delete;
+ GPUTexture *layer_view(int) = delete;
+ GPUTexture *stencil_view() = delete;
+};
+
/**
* Dummy type to bind texture as image.
* It is just a GPUTexture in disguise.
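
`TextureRef`, added above, wraps a `GPUTexture` that is owned elsewhere so it can be passed through the same `Texture` interface without ever allocating or freeing it. A short sketch of the intended pattern, using a hypothetical owner class for illustration:

  class HypotheticalEngine {
    draw::TextureRef depth_tx_;

   public:
    void begin_sync(GPUTexture *viewport_depth_tx)
    {
      /* Only the pointer is stored; the viewport keeps ownership and the
       * destructor simply drops the reference instead of freeing it. */
      depth_tx_.wrap(viewport_depth_tx);
    }
  };

Deleting the `ensure_*()` and `free()` methods turns misuse into a compile-time error rather than a runtime leak or double free.
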
diff --git a/source/blender/draw/intern/DRW_render.h b/source/blender/draw/intern/DRW_render.h
index b49203d85f6..b9444c58191 100644
--- a/source/blender/draw/intern/DRW_render.h
+++ b/source/blender/draw/intern/DRW_render.h
@@ -207,10 +207,6 @@ struct GPUShader *DRW_shader_create_with_lib_ex(const char *vert,
const char *lib,
const char *defines,
const char *name);
-struct GPUShader *DRW_shader_create_compute_with_shaderlib(const char *comp,
- const DRWShaderLibrary *lib,
- const char *defines,
- const char *name);
struct GPUShader *DRW_shader_create_with_shaderlib_ex(const char *vert,
const char *geom,
const char *frag,
@@ -402,8 +398,18 @@ void DRW_shgroup_call_instances_with_attrs(DRWShadingGroup *shgroup,
struct GPUBatch *geom,
struct GPUBatch *inst_attributes);
-void DRW_shgroup_call_sculpt(DRWShadingGroup *sh, Object *ob, bool wire, bool mask);
-void DRW_shgroup_call_sculpt_with_materials(DRWShadingGroup **sh, int num_sh, Object *ob);
+void DRW_shgroup_call_sculpt(DRWShadingGroup *shgroup,
+ Object *ob,
+ bool use_wire,
+ bool use_mask,
+ bool use_fset,
+ bool use_color,
+ bool use_uv);
+
+void DRW_shgroup_call_sculpt_with_materials(DRWShadingGroup **shgroups,
+ struct GPUMaterial **gpumats,
+ int num_shgroups,
+ Object *ob);
DRWCallBuffer *DRW_shgroup_call_buffer(DRWShadingGroup *shgroup,
struct GPUVertFormat *format,
@@ -677,8 +683,6 @@ const DRWView *DRW_view_get_active(void);
* \note planes must be in world space.
*/
void DRW_view_clip_planes_set(DRWView *view, float (*planes)[4], int plane_len);
-void DRW_view_camtexco_set(DRWView *view, float texco[4]);
-void DRW_view_camtexco_get(const DRWView *view, float r_texco[4]);
/* For all getters, if view is NULL, default view is assumed. */
@@ -734,7 +738,6 @@ void DRW_culling_frustum_planes_get(const DRWView *view, float planes[6][4]);
const float *DRW_viewport_size_get(void);
const float *DRW_viewport_invert_size_get(void);
-const float *DRW_viewport_screenvecs_get(void);
const float *DRW_viewport_pixelsize_get(void);
struct DefaultFramebufferList *DRW_viewport_framebuffer_list_get(void);
@@ -938,6 +941,14 @@ typedef struct DRWContextState {
const DRWContextState *DRW_context_state_get(void);
+struct DRW_Attributes;
+struct DRW_MeshCDMask;
+
+void DRW_mesh_batch_cache_get_attributes(struct Object *object,
+ struct Mesh *me,
+ struct DRW_Attributes **r_attrs,
+ struct DRW_MeshCDMask **r_cd_needed);
+
#ifdef __cplusplus
}
#endif
diff --git a/source/blender/draw/intern/draw_attributes.cc b/source/blender/draw/intern/draw_attributes.cc
index 011d72e9e8f..cc7c9959850 100644
--- a/source/blender/draw/intern/draw_attributes.cc
+++ b/source/blender/draw/intern/draw_attributes.cc
@@ -44,13 +44,10 @@ void drw_attributes_clear(DRW_Attributes *attributes)
memset(attributes, 0, sizeof(DRW_Attributes));
}
-void drw_attributes_merge(DRW_Attributes *dst,
- const DRW_Attributes *src,
- ThreadMutex *render_mutex)
+void drw_attributes_merge(DRW_Attributes *dst, const DRW_Attributes *src, std::mutex &render_mutex)
{
- BLI_mutex_lock(render_mutex);
+ std::lock_guard lock{render_mutex};
drw_attributes_merge_requests(src, dst);
- BLI_mutex_unlock(render_mutex);
}
bool drw_attributes_overlap(const DRW_Attributes *a, const DRW_Attributes *b)
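
The change above swaps the C-style `ThreadMutex` lock/unlock pair for C++ RAII locking. A minimal sketch of the same pattern in isolation (the wrapper function name is hypothetical):

  #include <mutex>

  static std::mutex render_mutex;

  static void merge_locked(DRW_Attributes *dst, const DRW_Attributes *src)
  {
    /* The mutex is released automatically when `lock` leaves scope, including
     * on early returns, so no explicit unlock can be forgotten. */
    std::lock_guard lock{render_mutex};
    drw_attributes_merge_requests(src, dst);
  }
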
diff --git a/source/blender/draw/intern/draw_attributes.h b/source/blender/draw/intern/draw_attributes.h
index b577c6c4162..00621c711bf 100644
--- a/source/blender/draw/intern/draw_attributes.h
+++ b/source/blender/draw/intern/draw_attributes.h
@@ -9,6 +9,10 @@
#pragma once
+#ifdef __cplusplus
+# include <mutex>
+#endif
+
#include "DNA_customdata_types.h"
#include "DNA_meshdata_types.h"
@@ -16,6 +20,7 @@
#include "BLI_sys_types.h"
#include "BLI_threads.h"
+#include "BLI_utildefines.h"
#include "GPU_shader.h"
#include "GPU_vertex_format.h"
@@ -36,11 +41,30 @@ typedef struct DRW_Attributes {
int num_requests;
} DRW_Attributes;
+typedef struct DRW_MeshCDMask {
+ uint32_t uv : 8;
+ uint32_t tan : 8;
+ uint32_t orco : 1;
+ uint32_t tan_orco : 1;
+ uint32_t sculpt_overlays : 1;
+ /**
+ * Edit uv layer is from the base edit mesh as modifiers could remove it. (see T68857)
+ */
+ uint32_t edit_uv : 1;
+} DRW_MeshCDMask;
+
+/* Keep `DRW_MeshCDMask` struct within a `uint32_t`.
+ * bit-wise and atomic operations are used to compare and update the struct.
+ * See `mesh_cd_layers_type_*` functions. */
+BLI_STATIC_ASSERT(sizeof(DRW_MeshCDMask) <= sizeof(uint32_t), "DRW_MeshCDMask exceeds 32 bits")
+
void drw_attributes_clear(DRW_Attributes *attributes);
+#ifdef __cplusplus
void drw_attributes_merge(DRW_Attributes *dst,
const DRW_Attributes *src,
- ThreadMutex *render_mutex);
+ std::mutex &render_mutex);
+#endif
/* Return true if all requests in b are in a. */
bool drw_attributes_overlap(const DRW_Attributes *a, const DRW_Attributes *b);
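
The static assert above is what allows `DRW_MeshCDMask` to be compared and updated as a single 32-bit word. A hedged illustration of that kind of whole-struct test; the helper names below are made up for the sketch, the real logic lives in the `mesh_cd_layers_type_*` functions referenced by the comment:

  #include <cstring>

  /* Reinterpret the bit-field struct as its 32-bit backing word. */
  static uint32_t cd_mask_as_u32(const DRW_MeshCDMask &mask)
  {
    uint32_t word = 0;
    std::memcpy(&word, &mask, sizeof(mask));
    return word;
  }

  /* True when every layer requested by `required` is already in `current`. */
  static bool cd_mask_contains(const DRW_MeshCDMask &current, const DRW_MeshCDMask &required)
  {
    return (cd_mask_as_u32(current) & cd_mask_as_u32(required)) == cd_mask_as_u32(required);
  }
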
diff --git a/source/blender/draw/intern/draw_cache.c b/source/blender/draw/intern/draw_cache.c
index 6537490c06c..00ac2563c43 100644
--- a/source/blender/draw/intern/draw_cache.c
+++ b/source/blender/draw/intern/draw_cache.c
@@ -899,8 +899,6 @@ GPUBatch *DRW_cache_object_surface_get(Object *ob)
switch (ob->type) {
case OB_MESH:
return DRW_cache_mesh_surface_get(ob);
- case OB_POINTCLOUD:
- return DRW_cache_pointcloud_surface_get(ob);
default:
return NULL;
}
@@ -959,8 +957,6 @@ GPUBatch **DRW_cache_object_surface_material_get(struct Object *ob,
switch (ob->type) {
case OB_MESH:
return DRW_cache_mesh_surface_shaded_get(ob, gpumat_array, gpumat_array_len);
- case OB_POINTCLOUD:
- return DRW_cache_pointcloud_surface_shaded_get(ob, gpumat_array, gpumat_array_len);
default:
return NULL;
}
@@ -2423,7 +2419,7 @@ static float x_axis_name[4][2] = {
{-0.9f * S_X, 1.0f * S_Y},
{1.0f * S_X, -1.0f * S_Y},
};
-#define X_LEN (sizeof(x_axis_name) / (sizeof(float[2])))
+#define X_LEN (sizeof(x_axis_name) / sizeof(float[2]))
#undef S_X
#undef S_Y
@@ -2437,7 +2433,7 @@ static float y_axis_name[6][2] = {
{0.0f * S_X, -0.1f * S_Y},
{0.0f * S_X, -1.0f * S_Y},
};
-#define Y_LEN (sizeof(y_axis_name) / (sizeof(float[2])))
+#define Y_LEN (sizeof(y_axis_name) / sizeof(float[2]))
#undef S_X
#undef S_Y
@@ -2455,7 +2451,7 @@ static float z_axis_name[10][2] = {
{-1.00f * S_X, -1.00f * S_Y},
{1.00f * S_X, -1.00f * S_Y},
};
-#define Z_LEN (sizeof(z_axis_name) / (sizeof(float[2])))
+#define Z_LEN (sizeof(z_axis_name) / sizeof(float[2]))
#undef S_X
#undef S_Y
@@ -2482,7 +2478,7 @@ static float axis_marker[8][2] = {
{-S_X, 0.0f}
#endif
};
-#define MARKER_LEN (sizeof(axis_marker) / (sizeof(float[2])))
+#define MARKER_LEN (sizeof(axis_marker) / sizeof(float[2]))
#define MARKER_FILL_LAYER 6
#undef S_X
#undef S_Y
@@ -2889,6 +2885,12 @@ GPUBatch *DRW_cache_mesh_surface_mesh_analysis_get(Object *ob)
return DRW_mesh_batch_cache_get_edit_mesh_analysis(ob->data);
}
+GPUBatch *DRW_cache_mesh_surface_viewer_attribute_get(Object *ob)
+{
+ BLI_assert(ob->type == OB_MESH);
+ return DRW_mesh_batch_cache_get_surface_viewer_attribute(ob->data);
+}
+
/** \} */
/* -------------------------------------------------------------------- */
@@ -2902,6 +2904,13 @@ GPUBatch *DRW_cache_curve_edge_wire_get(Object *ob)
return DRW_curve_batch_cache_get_wire_edge(cu);
}
+GPUBatch *DRW_cache_curve_edge_wire_viewer_attribute_get(Object *ob)
+{
+ BLI_assert(ob->type == OB_CURVES_LEGACY);
+ struct Curve *cu = ob->data;
+ return DRW_curve_batch_cache_get_wire_edge_viewer_attribute(cu);
+}
+
GPUBatch *DRW_cache_curve_edge_normal_get(Object *ob)
{
BLI_assert(ob->type == OB_CURVES_LEGACY);
@@ -2993,18 +3002,6 @@ GPUBatch *DRW_cache_lattice_vert_overlay_get(Object *ob)
/** \name PointCloud
* \{ */
-GPUBatch *DRW_cache_pointcloud_get_dots(Object *object)
-{
- BLI_assert(object->type == OB_POINTCLOUD);
- return DRW_pointcloud_batch_cache_get_dots(object);
-}
-
-GPUBatch *DRW_cache_pointcloud_surface_get(Object *object)
-{
- BLI_assert(object->type == OB_POINTCLOUD);
- return DRW_pointcloud_batch_cache_get_surface(object);
-}
-
/** \} */
/* -------------------------------------------------------------------- */
@@ -3289,6 +3286,9 @@ void drw_batch_cache_generate_requested(Object *ob)
case OB_CURVES:
DRW_curves_batch_cache_create_requested(ob);
break;
+ case OB_POINTCLOUD:
+ DRW_pointcloud_batch_cache_create_requested(ob);
+ break;
/* TODO: all cases. */
default:
break;
@@ -3339,7 +3339,9 @@ void DRW_batch_cache_free_old(Object *ob, int ctime)
case OB_CURVES:
DRW_curves_batch_cache_free_old((Curves *)ob->data, ctime);
break;
- /* TODO: all cases. */
+ case OB_POINTCLOUD:
+ DRW_pointcloud_batch_cache_free_old((PointCloud *)ob->data, ctime);
+ break;
default:
break;
}
@@ -3369,7 +3371,7 @@ void DRW_cdlayer_attr_aliases_add(GPUVertFormat *format,
/* Active render layer name. */
if (is_active_render) {
- GPU_vertformat_alias_add(format, base_name);
+ GPU_vertformat_alias_add(format, cl->type == CD_MLOOPUV ? "a" : base_name);
}
/* Active display layer name. */
diff --git a/source/blender/draw/intern/draw_cache.h b/source/blender/draw/intern/draw_cache.h
index 4e8788ada08..772cf3b12a5 100644
--- a/source/blender/draw/intern/draw_cache.h
+++ b/source/blender/draw/intern/draw_cache.h
@@ -13,6 +13,7 @@ extern "C" {
struct GPUBatch;
struct GPUMaterial;
+struct GPUVertBuf;
struct ModifierData;
struct Object;
struct PTCacheEdit;
@@ -170,10 +171,12 @@ struct GPUBatch *DRW_cache_mesh_surface_sculptcolors_get(struct Object *ob);
struct GPUBatch *DRW_cache_mesh_surface_weights_get(struct Object *ob);
struct GPUBatch *DRW_cache_mesh_surface_mesh_analysis_get(struct Object *ob);
struct GPUBatch *DRW_cache_mesh_face_wireframe_get(struct Object *ob);
+struct GPUBatch *DRW_cache_mesh_surface_viewer_attribute_get(struct Object *ob);
/* Curve */
struct GPUBatch *DRW_cache_curve_edge_wire_get(struct Object *ob);
+struct GPUBatch *DRW_cache_curve_edge_wire_viewer_attribute_get(struct Object *ob);
/* edit-mode */
@@ -222,11 +225,6 @@ struct GPUBatch **DRW_cache_curves_surface_shaded_get(struct Object *ob,
struct GPUBatch *DRW_cache_curves_face_wireframe_get(struct Object *ob);
struct GPUBatch *DRW_cache_curves_edge_detection_get(struct Object *ob, bool *r_is_manifold);
-/* PointCloud */
-
-struct GPUBatch *DRW_cache_pointcloud_get_dots(struct Object *obj);
-struct GPUBatch *DRW_cache_pointcloud_surface_get(struct Object *obj);
-
/* Volume */
typedef struct DRWVolumeGrid {
@@ -254,14 +252,17 @@ struct GPUBatch *DRW_cache_volume_selection_surface_get(struct Object *ob);
/* GPencil */
-struct GPUBatch *DRW_cache_gpencil_strokes_get(struct Object *ob, int cfra);
-struct GPUBatch *DRW_cache_gpencil_fills_get(struct Object *ob, int cfra);
+struct GPUBatch *DRW_cache_gpencil_get(struct Object *ob, int cfra);
+struct GPUVertBuf *DRW_cache_gpencil_position_buffer_get(struct Object *ob, int cfra);
+struct GPUVertBuf *DRW_cache_gpencil_color_buffer_get(struct Object *ob, int cfra);
struct GPUBatch *DRW_cache_gpencil_edit_lines_get(struct Object *ob, int cfra);
struct GPUBatch *DRW_cache_gpencil_edit_points_get(struct Object *ob, int cfra);
struct GPUBatch *DRW_cache_gpencil_edit_curve_handles_get(struct Object *ob, int cfra);
struct GPUBatch *DRW_cache_gpencil_edit_curve_points_get(struct Object *ob, int cfra);
-struct GPUBatch *DRW_cache_gpencil_sbuffer_stroke_get(struct Object *ob);
-struct GPUBatch *DRW_cache_gpencil_sbuffer_fill_get(struct Object *ob);
+struct GPUBatch *DRW_cache_gpencil_sbuffer_get(struct Object *ob, bool show_fill);
+struct GPUVertBuf *DRW_cache_gpencil_sbuffer_position_buffer_get(struct Object *ob,
+ bool show_fill);
+struct GPUVertBuf *DRW_cache_gpencil_sbuffer_color_buffer_get(struct Object *ob, bool show_fill);
int DRW_gpencil_material_count_get(struct bGPdata *gpd);
struct GPUBatch *DRW_cache_gpencil_face_wireframe_get(struct Object *ob);
diff --git a/source/blender/draw/intern/draw_cache_extract.hh b/source/blender/draw/intern/draw_cache_extract.hh
index 203da22406c..4fe360eecd7 100644
--- a/source/blender/draw/intern/draw_cache_extract.hh
+++ b/source/blender/draw/intern/draw_cache_extract.hh
@@ -52,22 +52,6 @@ enum {
DRW_MESH_WEIGHT_STATE_LOCK_RELATIVE = (1 << 2),
};
-struct DRW_MeshCDMask {
- uint32_t uv : 8;
- uint32_t tan : 8;
- uint32_t orco : 1;
- uint32_t tan_orco : 1;
- uint32_t sculpt_overlays : 1;
- /**
- * Edit uv layer is from the base edit mesh as modifiers could remove it. (see T68857)
- */
- uint32_t edit_uv : 1;
-};
-/* Keep `DRW_MeshCDMask` struct within a `uint32_t`.
- * bit-wise and atomic operations are used to compare and update the struct.
- * See `mesh_cd_layers_type_*` functions. */
-BLI_STATIC_ASSERT(sizeof(DRW_MeshCDMask) <= sizeof(uint32_t), "DRW_MeshCDMask exceeds 32 bits")
-
enum eMRIterType {
MR_ITER_LOOPTRI = 1 << 0,
MR_ITER_POLY = 1 << 1,
@@ -130,6 +114,7 @@ struct MeshBufferList {
GPUVertBuf *poly_idx;
GPUVertBuf *fdot_idx;
GPUVertBuf *attr[GPU_MAX_ATTR];
+ GPUVertBuf *attr_viewer;
} vbo;
/* Index Buffers:
* Only need to be updated when topology changes. */
@@ -191,6 +176,7 @@ struct MeshBatchList {
/* Same as wire_loops but only has uvs. */
GPUBatch *wire_loops_uvs;
GPUBatch *sculpt_overlays;
+ GPUBatch *surface_viewer_attribute;
};
#define MBC_BATCH_LEN (sizeof(MeshBatchList) / sizeof(void *))
@@ -228,6 +214,7 @@ enum DRWBatchFlag {
MBC_WIRE_LOOPS = (1u << MBC_BATCH_INDEX(wire_loops)),
MBC_WIRE_LOOPS_UVS = (1u << MBC_BATCH_INDEX(wire_loops_uvs)),
MBC_SCULPT_OVERLAYS = (1u << MBC_BATCH_INDEX(sculpt_overlays)),
+ MBC_VIEWER_ATTRIBUTE_OVERLAY = (1u << MBC_BATCH_INDEX(surface_viewer_attribute)),
MBC_SURFACE_PER_MAT = (1u << MBC_BATCH_LEN),
};
ENUM_OPERATORS(DRWBatchFlag, MBC_SURFACE_PER_MAT);
diff --git a/source/blender/draw/intern/draw_cache_extract_mesh.cc b/source/blender/draw/intern/draw_cache_extract_mesh.cc
index b1d1631cb6d..f533904f355 100644
--- a/source/blender/draw/intern/draw_cache_extract_mesh.cc
+++ b/source/blender/draw/intern/draw_cache_extract_mesh.cc
@@ -110,7 +110,7 @@ class ExtractorRunDatas : public Vector<ExtractorRunData> {
uint iter_types_len() const
{
const eMRIterType iter_type = iter_types();
- uint bits = static_cast<uint>(iter_type);
+ uint bits = uint(iter_type);
return count_bits_i(bits);
}
@@ -204,7 +204,7 @@ BLI_INLINE void extract_init(const MeshRenderData *mr,
run_data.buffer = mesh_extract_buffer_get(extractor, mbuflist);
run_data.data_offset = data_offset;
extractor->init(mr, cache, run_data.buffer, POINTER_OFFSET(data_stack, data_offset));
- data_offset += (uint32_t)extractor->data_size;
+ data_offset += uint32_t(extractor->data_size);
}
}
@@ -640,6 +640,7 @@ void mesh_buffer_cache_create_requested(struct TaskGraph *task_graph,
for (int i = 0; i < GPU_MAX_ATTR; i++) {
EXTRACT_ADD_REQUESTED(vbo, attr[i]);
}
+ EXTRACT_ADD_REQUESTED(vbo, attr_viewer);
EXTRACT_ADD_REQUESTED(ibo, tris);
if (DRW_ibo_requested(mbuflist->ibo.lines_loose)) {
@@ -685,7 +686,7 @@ void mesh_buffer_cache_create_requested(struct TaskGraph *task_graph,
MeshRenderData *mr = mesh_render_data_create(
object, me, is_editmode, is_paint_mode, is_mode_active, obmat, do_final, do_uvedit, ts);
mr->use_hide = use_hide;
- mr->use_subsurf_fdots = mr->me && mr->me->runtime.subsurf_face_dot_tags != nullptr;
+ mr->use_subsurf_fdots = mr->me && mr->me->runtime->subsurf_face_dot_tags != nullptr;
mr->use_final_mesh = do_final;
#ifdef DEBUG_TIME
diff --git a/source/blender/draw/intern/draw_cache_extract_mesh_render_data.cc b/source/blender/draw/intern/draw_cache_extract_mesh_render_data.cc
index eea19cbebf3..f606701ed09 100644
--- a/source/blender/draw/intern/draw_cache_extract_mesh_render_data.cc
+++ b/source/blender/draw/intern/draw_cache_extract_mesh_render_data.cc
@@ -18,6 +18,7 @@
#include "BKE_editmesh.h"
#include "BKE_editmesh_cache.h"
#include "BKE_mesh.h"
+#include "BKE_mesh_runtime.h"
#include "GPU_batch.h"
@@ -329,28 +330,10 @@ void mesh_render_data_update_looptris(MeshRenderData *mr,
const eMRIterType iter_type,
const eMRDataType data_flag)
{
- Mesh *me = mr->me;
if (mr->extract_type != MR_EXTRACT_BMESH) {
/* Mesh */
if ((iter_type & MR_ITER_LOOPTRI) || (data_flag & MR_DATA_LOOPTRI)) {
- /* NOTE(@campbellbarton): It's possible to skip allocating tessellation,
- * the tessellation can be calculated as part of the iterator, see: P2188.
- * The overall advantage is small (around 1%), so keep this as-is. */
- mr->mlooptri = static_cast<MLoopTri *>(
- MEM_mallocN(sizeof(*mr->mlooptri) * mr->tri_len, "MR_DATATYPE_LOOPTRI"));
- if (mr->poly_normals != nullptr) {
- BKE_mesh_recalc_looptri_with_normals(mr->mloop,
- mr->mpoly,
- mr->mvert,
- me->totloop,
- me->totpoly,
- mr->mlooptri,
- mr->poly_normals);
- }
- else {
- BKE_mesh_recalc_looptri(
- mr->mloop, mr->mpoly, mr->mvert, me->totloop, me->totpoly, mr->mlooptri);
- }
+ mr->mlooptri = BKE_mesh_runtime_looptri_ensure(mr->me);
}
}
else {
@@ -366,7 +349,7 @@ void mesh_render_data_update_normals(MeshRenderData *mr, const eMRDataType data_
{
Mesh *me = mr->me;
const bool is_auto_smooth = (me->flag & ME_AUTOSMOOTH) != 0;
- const float split_angle = is_auto_smooth ? me->smoothresh : (float)M_PI;
+ const float split_angle = is_auto_smooth ? me->smoothresh : float(M_PI);
if (mr->extract_type != MR_EXTRACT_BMESH) {
/* Mesh */
@@ -480,7 +463,7 @@ MeshRenderData *mesh_render_data_create(Object *object,
mr->bm = me->edit_mesh->bm;
mr->edit_bmesh = me->edit_mesh;
mr->me = (do_final) ? editmesh_eval_final : editmesh_eval_cage;
- mr->edit_data = is_mode_active ? mr->me->runtime.edit_data : nullptr;
+ mr->edit_data = is_mode_active ? mr->me->runtime->edit_data : nullptr;
if (mr->edit_data) {
EditMeshData *emd = mr->edit_data;
@@ -516,8 +499,8 @@ MeshRenderData *mesh_render_data_create(Object *object,
/* Use bmesh directly when the object is in edit mode unchanged by any modifiers.
* For non-final UVs, always use original bmesh since the UV editor does not support
* using the cage mesh with deformed coordinates. */
- if ((is_mode_active && mr->me->runtime.is_original_bmesh &&
- mr->me->runtime.wrapper_type == ME_WRAPPER_TYPE_BMESH) ||
+ if ((is_mode_active && mr->me->runtime->is_original_bmesh &&
+ mr->me->runtime->wrapper_type == ME_WRAPPER_TYPE_BMESH) ||
(do_uvedit && !do_final)) {
mr->extract_type = MR_EXTRACT_BMESH;
}
@@ -586,6 +569,13 @@ MeshRenderData *mesh_render_data_create(Object *object,
CustomData_get_layer_named(&me->edata, CD_PROP_BOOL, ".hide_edge"));
mr->hide_poly = static_cast<const bool *>(
CustomData_get_layer_named(&me->pdata, CD_PROP_BOOL, ".hide_poly"));
+
+ mr->select_vert = static_cast<const bool *>(
+ CustomData_get_layer_named(&me->vdata, CD_PROP_BOOL, ".select_vert"));
+ mr->select_edge = static_cast<const bool *>(
+ CustomData_get_layer_named(&me->edata, CD_PROP_BOOL, ".select_edge"));
+ mr->select_poly = static_cast<const bool *>(
+ CustomData_get_layer_named(&me->pdata, CD_PROP_BOOL, ".select_poly"));
}
else {
/* #BMesh */
@@ -605,7 +595,6 @@ MeshRenderData *mesh_render_data_create(Object *object,
void mesh_render_data_free(MeshRenderData *mr)
{
- MEM_SAFE_FREE(mr->mlooptri);
MEM_SAFE_FREE(mr->loop_normals);
/* Loose geometry are owned by #MeshBufferCache. */
diff --git a/source/blender/draw/intern/draw_cache_impl.h b/source/blender/draw/intern/draw_cache_impl.h
index 7f7d0a7613f..5aa2203ca68 100644
--- a/source/blender/draw/intern/draw_cache_impl.h
+++ b/source/blender/draw/intern/draw_cache_impl.h
@@ -80,6 +80,7 @@ void DRW_batch_cache_free_old(struct Object *ob, int ctime);
*/
void DRW_mesh_batch_cache_free_old(struct Mesh *me, int ctime);
void DRW_curves_batch_cache_free_old(struct Curves *curves, int ctime);
+void DRW_pointcloud_batch_cache_free_old(struct PointCloud *pointcloud, int ctime);
/** \} */
@@ -100,6 +101,7 @@ void DRW_curve_batch_cache_create_requested(struct Object *ob, const struct Scen
int DRW_curve_material_count_get(struct Curve *cu);
struct GPUBatch *DRW_curve_batch_cache_get_wire_edge(struct Curve *cu);
+struct GPUBatch *DRW_curve_batch_cache_get_wire_edge_viewer_attribute(struct Curve *cu);
struct GPUBatch *DRW_curve_batch_cache_get_normal_edge(struct Curve *cu);
struct GPUBatch *DRW_curve_batch_cache_get_edit_edges(struct Curve *cu);
struct GPUBatch *DRW_curve_batch_cache_get_edit_verts(struct Curve *cu);
@@ -130,7 +132,7 @@ int DRW_curves_material_count_get(struct Curves *curves);
* \return A pointer to location where the texture will be
* stored, which will be filled by #DRW_shgroup_curves_create_sub.
*/
-struct GPUTexture **DRW_curves_texture_for_evaluated_attribute(struct Curves *curves,
+struct GPUVertBuf **DRW_curves_texture_for_evaluated_attribute(struct Curves *curves,
const char *name,
bool *r_is_point_domain);
@@ -146,11 +148,11 @@ void DRW_curves_batch_cache_create_requested(struct Object *ob);
int DRW_pointcloud_material_count_get(struct PointCloud *pointcloud);
+struct GPUVertBuf **DRW_pointcloud_evaluated_attribute(struct PointCloud *pointcloud,
+ const char *name);
struct GPUBatch *DRW_pointcloud_batch_cache_get_dots(struct Object *ob);
-struct GPUBatch *DRW_pointcloud_batch_cache_get_surface(struct Object *ob);
-struct GPUBatch **DRW_cache_pointcloud_surface_shaded_get(struct Object *ob,
- struct GPUMaterial **gpumat_array,
- uint gpumat_array_len);
+
+void DRW_pointcloud_batch_cache_create_requested(struct Object *ob);
/** \} */
@@ -189,6 +191,7 @@ struct GPUBatch **DRW_mesh_batch_cache_get_surface_shaded(struct Object *object,
struct Mesh *me,
struct GPUMaterial **gpumat_array,
uint gpumat_array_len);
+
struct GPUBatch **DRW_mesh_batch_cache_get_surface_texpaint(struct Object *object,
struct Mesh *me);
struct GPUBatch *DRW_mesh_batch_cache_get_surface_texpaint_single(struct Object *object,
@@ -198,6 +201,7 @@ struct GPUBatch *DRW_mesh_batch_cache_get_surface_vertpaint(struct Object *objec
struct GPUBatch *DRW_mesh_batch_cache_get_surface_sculpt(struct Object *object, struct Mesh *me);
struct GPUBatch *DRW_mesh_batch_cache_get_surface_weights(struct Mesh *me);
struct GPUBatch *DRW_mesh_batch_cache_get_sculpt_overlays(struct Mesh *me);
+struct GPUBatch *DRW_mesh_batch_cache_get_surface_viewer_attribute(struct Mesh *me);
/** \} */
@@ -275,8 +279,6 @@ struct GPUVertBuf *DRW_mesh_batch_cache_pos_vertbuf_get(struct Mesh *me);
int DRW_mesh_material_count_get(const struct Object *object, const struct Mesh *me);
-/* See 'common_globals_lib.glsl' for duplicate defines. */
-
/* Edit mesh bitflags (is this the right place?) */
enum {
VFLAG_VERT_ACTIVE = 1 << 0,
diff --git a/source/blender/draw/intern/draw_cache_impl_curve.cc b/source/blender/draw/intern/draw_cache_impl_curve.cc
index 695c348d8e2..6188b1e0544 100644
--- a/source/blender/draw/intern/draw_cache_impl_curve.cc
+++ b/source/blender/draw/intern/draw_cache_impl_curve.cc
@@ -10,6 +10,7 @@
#include "MEM_guardedalloc.h"
#include "BLI_array.hh"
+#include "BLI_color.hh"
#include "BLI_listbase.h"
#include "BLI_math_vec_types.hh"
#include "BLI_math_vector.h"
@@ -38,6 +39,7 @@
#include "draw_cache_impl.h" /* own include */
using blender::Array;
+using blender::ColorGeometry4f;
using blender::float3;
using blender::IndexRange;
using blender::Span;
@@ -296,6 +298,7 @@ static int curve_render_data_normal_len_get(const CurveRenderData *rdata)
struct CurveBatchCache {
struct {
GPUVertBuf *curves_pos;
+ GPUVertBuf *attr_viewer;
} ordered;
struct {
@@ -314,6 +317,7 @@ struct CurveBatchCache {
struct {
GPUBatch *curves;
+ GPUBatch *curves_viewer_attribute;
/* control handles and vertices */
GPUBatch *edit_edges;
GPUBatch *edit_verts;
@@ -474,6 +478,31 @@ static void curve_create_curves_pos(CurveRenderData *rdata, GPUVertBuf *vbo_curv
GPU_vertbuf_attr_fill(vbo_curves_pos, attr_id.pos, positions.data());
}
+static void curve_create_attribute(CurveRenderData *rdata, GPUVertBuf *vbo_attr)
+{
+ if (rdata->curve_eval == nullptr) {
+ return;
+ }
+
+ static GPUVertFormat format = {0};
+ if (format.attr_len == 0) {
+ GPU_vertformat_attr_add(&format, "attribute_value", GPU_COMP_F32, 4, GPU_FETCH_FLOAT);
+ }
+
+ const int vert_len = curve_render_data_wire_verts_len_get(rdata);
+ GPU_vertbuf_init_with_format(vbo_attr, &format);
+ GPU_vertbuf_data_alloc(vbo_attr, vert_len);
+
+ const blender::bke::CurvesGeometry &curves = blender::bke::CurvesGeometry::wrap(
+ rdata->curve_eval->geometry);
+ curves.ensure_can_interpolate_to_evaluated();
+ const blender::VArraySpan<ColorGeometry4f> colors = curves.attributes().lookup<ColorGeometry4f>(
+ ".viewer", ATTR_DOMAIN_POINT);
+ ColorGeometry4f *vbo_data = static_cast<ColorGeometry4f *>(GPU_vertbuf_get_data(vbo_attr));
+ curves.interpolate_to_evaluated(colors,
+ blender::MutableSpan<ColorGeometry4f>{vbo_data, vert_len});
+}
+
static void curve_create_curves_lines(CurveRenderData *rdata, GPUIndexBuf *ibo_curve_lines)
{
if (rdata->curve_eval == nullptr) {
@@ -769,6 +798,12 @@ GPUBatch *DRW_curve_batch_cache_get_wire_edge(Curve *cu)
return DRW_batch_request(&cache->batch.curves);
}
+GPUBatch *DRW_curve_batch_cache_get_wire_edge_viewer_attribute(Curve *cu)
+{
+ CurveBatchCache *cache = curve_batch_cache_get(cu);
+ return DRW_batch_request(&cache->batch.curves_viewer_attribute);
+}
+
GPUBatch *DRW_curve_batch_cache_get_normal_edge(Curve *cu)
{
CurveBatchCache *cache = curve_batch_cache_get(cu);
@@ -810,6 +845,11 @@ void DRW_curve_batch_cache_create_requested(Object *ob, const struct Scene *scen
DRW_ibo_request(cache->batch.curves, &cache->ibo.curves_lines);
DRW_vbo_request(cache->batch.curves, &cache->ordered.curves_pos);
}
+ if (DRW_batch_requested(cache->batch.curves_viewer_attribute, GPU_PRIM_LINE_STRIP)) {
+ DRW_ibo_request(cache->batch.curves_viewer_attribute, &cache->ibo.curves_lines);
+ DRW_vbo_request(cache->batch.curves_viewer_attribute, &cache->ordered.curves_pos);
+ DRW_vbo_request(cache->batch.curves_viewer_attribute, &cache->ordered.attr_viewer);
+ }
/* Edit mode */
if (DRW_batch_requested(cache->batch.edit_edges, GPU_PRIM_LINES)) {
@@ -833,6 +873,8 @@ void DRW_curve_batch_cache_create_requested(Object *ob, const struct Scene *scen
/* Generate MeshRenderData flags */
int mr_flag = 0;
DRW_ADD_FLAG_FROM_VBO_REQUEST(mr_flag, cache->ordered.curves_pos, CU_DATATYPE_WIRE);
+ DRW_ADD_FLAG_FROM_VBO_REQUEST(
+ mr_flag, cache->ordered.attr_viewer, CU_DATATYPE_WIRE | CU_DATATYPE_OVERLAY);
DRW_ADD_FLAG_FROM_IBO_REQUEST(mr_flag, cache->ibo.curves_lines, CU_DATATYPE_WIRE);
DRW_ADD_FLAG_FROM_VBO_REQUEST(mr_flag, cache->edit.pos, CU_DATATYPE_OVERLAY);
@@ -851,6 +893,9 @@ void DRW_curve_batch_cache_create_requested(Object *ob, const struct Scene *scen
if (DRW_vbo_requested(cache->ordered.curves_pos)) {
curve_create_curves_pos(rdata, cache->ordered.curves_pos);
}
+ if (DRW_vbo_requested(cache->ordered.attr_viewer)) {
+ curve_create_attribute(rdata, cache->ordered.attr_viewer);
+ }
if (DRW_ibo_requested(cache->ibo.curves_lines)) {
curve_create_curves_lines(rdata, cache->ibo.curves_lines);
}
diff --git a/source/blender/draw/intern/draw_cache_impl_curves.cc b/source/blender/draw/intern/draw_cache_impl_curves.cc
index 3bca17d9c56..0322d048fa5 100644
--- a/source/blender/draw/intern/draw_cache_impl_curves.cc
+++ b/source/blender/draw/intern/draw_cache_impl_curves.cc
@@ -14,9 +14,9 @@
#include "BLI_listbase.h"
#include "BLI_math_base.h"
#include "BLI_math_vec_types.hh"
-#include "BLI_math_vector.h"
#include "BLI_math_vector.hh"
#include "BLI_span.hh"
+#include "BLI_task.hh"
#include "BLI_utildefines.h"
#include "DNA_curves_types.h"
@@ -60,7 +60,7 @@ struct CurvesBatchCache {
* some locking would be necessary because multiple objects can use the same curves data with
* different materials, etc. This is a placeholder to make multi-threading easier in the future.
*/
- ThreadMutex render_mutex;
+ std::mutex render_mutex;
};
static bool curves_batch_cache_valid(const Curves &curves)
@@ -74,15 +74,13 @@ static void curves_batch_cache_init(Curves &curves)
CurvesBatchCache *cache = static_cast<CurvesBatchCache *>(curves.batch_cache);
if (!cache) {
- cache = MEM_cnew<CurvesBatchCache>(__func__);
+ cache = MEM_new<CurvesBatchCache>(__func__);
curves.batch_cache = cache;
}
else {
- memset(cache, 0, sizeof(*cache));
+ cache->curves_cache = {};
}
- BLI_mutex_init(&cache->render_mutex);
-
cache->is_dirty = false;
}
@@ -90,13 +88,11 @@ static void curves_discard_attributes(CurvesEvalCache &curves_cache)
{
for (const int i : IndexRange(GPU_MAX_ATTR)) {
GPU_VERTBUF_DISCARD_SAFE(curves_cache.proc_attributes_buf[i]);
- DRW_TEXTURE_FREE_SAFE(curves_cache.proc_attributes_tex[i]);
}
for (const int i : IndexRange(MAX_HAIR_SUBDIV)) {
for (const int j : IndexRange(GPU_MAX_ATTR)) {
GPU_VERTBUF_DISCARD_SAFE(curves_cache.final[i].attributes_buf[j]);
- DRW_TEXTURE_FREE_SAFE(curves_cache.final[i].attributes_tex[j]);
}
drw_attributes_clear(&curves_cache.final[i].attr_used);
@@ -108,17 +104,13 @@ static void curves_batch_cache_clear_data(CurvesEvalCache &curves_cache)
/* TODO: more granular update tagging. */
GPU_VERTBUF_DISCARD_SAFE(curves_cache.proc_point_buf);
GPU_VERTBUF_DISCARD_SAFE(curves_cache.proc_length_buf);
- DRW_TEXTURE_FREE_SAFE(curves_cache.point_tex);
- DRW_TEXTURE_FREE_SAFE(curves_cache.length_tex);
+ GPU_VERTBUF_DISCARD_SAFE(curves_cache.data_edit_points);
GPU_VERTBUF_DISCARD_SAFE(curves_cache.proc_strand_buf);
GPU_VERTBUF_DISCARD_SAFE(curves_cache.proc_strand_seg_buf);
- DRW_TEXTURE_FREE_SAFE(curves_cache.strand_tex);
- DRW_TEXTURE_FREE_SAFE(curves_cache.strand_seg_tex);
for (const int i : IndexRange(MAX_HAIR_SUBDIV)) {
GPU_VERTBUF_DISCARD_SAFE(curves_cache.final[i].proc_buf);
- DRW_TEXTURE_FREE_SAFE(curves_cache.final[i].proc_tex);
for (const int j : IndexRange(MAX_THICKRES)) {
GPU_BATCH_DISCARD_SAFE(curves_cache.final[i].proc_hairs[j]);
}
@@ -171,9 +163,8 @@ void DRW_curves_batch_cache_dirty_tag(Curves *curves, int mode)
void DRW_curves_batch_cache_free(Curves *curves)
{
curves_batch_cache_clear(*curves);
- CurvesBatchCache *cache = static_cast<CurvesBatchCache *>(curves->batch_cache);
- BLI_mutex_end(&cache->render_mutex);
- MEM_SAFE_FREE(curves->batch_cache);
+ MEM_delete(static_cast<CurvesBatchCache *>(curves->batch_cache));
+ curves->batch_cache = nullptr;
}
void DRW_curves_batch_cache_free_old(Curves *curves, int ctime)
@@ -225,43 +216,43 @@ static void curves_batch_cache_fill_segments_proc_pos(
MutableSpan<PositionAndParameter> posTime_data,
MutableSpan<float> hairLength_data)
{
+ using namespace blender;
/* TODO: use hair radius layer if available. */
- const int curve_num = curves_id.geometry.curve_num;
- const blender::bke::CurvesGeometry &curves = blender::bke::CurvesGeometry::wrap(
- curves_id.geometry);
- Span<float3> positions = curves.positions();
+ const bke::CurvesGeometry &curves = bke::CurvesGeometry::wrap(curves_id.geometry);
+ const Span<float3> positions = curves.positions();
- for (const int i_curve : IndexRange(curve_num)) {
- const IndexRange points = curves.points_for_curve(i_curve);
+ threading::parallel_for(curves.curves_range(), 1024, [&](const IndexRange range) {
+ for (const int i_curve : range) {
+ const IndexRange points = curves.points_for_curve(i_curve);
- Span<float3> curve_positions = positions.slice(points);
- MutableSpan<PositionAndParameter> curve_posTime_data = posTime_data.slice(points);
+ Span<float3> curve_positions = positions.slice(points);
+ MutableSpan<PositionAndParameter> curve_posTime_data = posTime_data.slice(points);
- float total_len = 0.0f;
- for (const int i_point : curve_positions.index_range()) {
- if (i_point > 0) {
- total_len += blender::math::distance(curve_positions[i_point - 1],
- curve_positions[i_point]);
- }
- curve_posTime_data[i_point].position = curve_positions[i_point];
- curve_posTime_data[i_point].parameter = total_len;
- }
- hairLength_data[i_curve] = total_len;
-
- /* Assign length value. */
- if (total_len > 0.0f) {
- const float factor = 1.0f / total_len;
- /* Divide by total length to have a [0-1] number. */
+ float total_len = 0.0f;
for (const int i_point : curve_positions.index_range()) {
- curve_posTime_data[i_point].parameter *= factor;
+ if (i_point > 0) {
+ total_len += math::distance(curve_positions[i_point - 1], curve_positions[i_point]);
+ }
+ curve_posTime_data[i_point].position = curve_positions[i_point];
+ curve_posTime_data[i_point].parameter = total_len;
+ }
+ hairLength_data[i_curve] = total_len;
+
+ /* Assign length value. */
+ if (total_len > 0.0f) {
+ const float factor = 1.0f / total_len;
+ /* Divide by total length to have a [0-1] number. */
+ for (const int i_point : curve_positions.index_range()) {
+ curve_posTime_data[i_point].parameter *= factor;
+ }
}
}
- }
+ });
}
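
The rewritten loop above distributes curves over threads with `blender::threading::parallel_for`, using a grain size of 1024 curves per task. A stripped-down sketch of the same pattern with placeholder per-curve work (the function name is hypothetical):

  #include "BLI_task.hh"

  using namespace blender;

  static void compute_point_counts(const bke::CurvesGeometry &curves, MutableSpan<int> r_counts)
  {
    /* Each worker receives a contiguous IndexRange of curve indices. A grain
     * size of 1024 keeps scheduling overhead low for cheap per-curve work. */
    threading::parallel_for(curves.curves_range(), 1024, [&](const IndexRange range) {
      for (const int i_curve : range) {
        r_counts[i_curve] = int(curves.points_for_curve(i_curve).size());
      }
    });
  }
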
static void curves_batch_cache_ensure_procedural_pos(const Curves &curves,
CurvesEvalCache &cache,
- GPUMaterial *gpu_material)
+ GPUMaterial *UNUSED(gpu_material))
{
if (cache.proc_point_buf == nullptr || DRW_vbo_requested(cache.proc_point_buf)) {
/* Initialize vertex format. */
@@ -274,7 +265,7 @@ static void curves_batch_cache_ensure_procedural_pos(const Curves &curves,
GPU_vertbuf_data_alloc(cache.proc_point_buf, cache.point_len);
MutableSpan posTime_data{
- reinterpret_cast<PositionAndParameter *>(GPU_vertbuf_get_data(cache.proc_point_buf)),
+ static_cast<PositionAndParameter *>(GPU_vertbuf_get_data(cache.proc_point_buf)),
cache.point_len};
GPUVertFormat length_format = {0};
@@ -284,25 +275,47 @@ static void curves_batch_cache_ensure_procedural_pos(const Curves &curves,
&length_format, GPU_USAGE_STATIC | GPU_USAGE_FLAG_BUFFER_TEXTURE_ONLY);
GPU_vertbuf_data_alloc(cache.proc_length_buf, cache.strands_len);
- MutableSpan hairLength_data{
- reinterpret_cast<float *>(GPU_vertbuf_get_data(cache.proc_length_buf)), cache.strands_len};
+ MutableSpan hairLength_data{static_cast<float *>(GPU_vertbuf_get_data(cache.proc_length_buf)),
+ cache.strands_len};
curves_batch_cache_fill_segments_proc_pos(curves, posTime_data, hairLength_data);
-
- /* Create vbo immediately to bind to texture buffer. */
- GPU_vertbuf_use(cache.proc_point_buf);
- cache.point_tex = GPU_texture_create_from_vertbuf("hair_point", cache.proc_point_buf);
}
+}
- if (gpu_material && cache.proc_length_buf != nullptr && cache.length_tex) {
- ListBase gpu_attrs = GPU_material_attributes(gpu_material);
- LISTBASE_FOREACH (GPUMaterialAttribute *, attr, &gpu_attrs) {
- if (attr->type == CD_HAIRLENGTH) {
- GPU_vertbuf_use(cache.proc_length_buf);
- cache.length_tex = GPU_texture_create_from_vertbuf("hair_length", cache.proc_length_buf);
- break;
+static void curves_batch_cache_ensure_data_edit_points(const Curves &curves_id,
+ CurvesEvalCache &cache)
+{
+ using namespace blender;
+ const bke::CurvesGeometry &curves = bke::CurvesGeometry::wrap(curves_id.geometry);
+
+ static GPUVertFormat format_data = {0};
+ uint data = GPU_vertformat_attr_add(&format_data, "data", GPU_COMP_U8, 1, GPU_FETCH_INT);
+ GPU_vertbuf_init_with_format(cache.data_edit_points, &format_data);
+ GPU_vertbuf_data_alloc(cache.data_edit_points, curves.points_num());
+
+ VArray<float> selection;
+ switch (curves_id.selection_domain) {
+ case ATTR_DOMAIN_POINT:
+ selection = curves.selection_point_float();
+ for (const int point_i : selection.index_range()) {
+ uint8_t vflag = 0;
+ const float point_selection = selection[point_i];
+ SET_FLAG_FROM_TEST(vflag, (point_selection > 0.0f), VFLAG_VERT_SELECTED);
+ GPU_vertbuf_attr_set(cache.data_edit_points, data, point_i, &vflag);
}
- }
+ break;
+ case ATTR_DOMAIN_CURVE:
+ selection = curves.selection_curve_float();
+ for (const int curve_i : curves.curves_range()) {
+ uint8_t vflag = 0;
+ const float curve_selection = selection[curve_i];
+ SET_FLAG_FROM_TEST(vflag, (curve_selection > 0.0f), VFLAG_VERT_SELECTED);
+ const IndexRange points = curves.points_for_curve(curve_i);
+ for (const int point_i : points) {
+ GPU_vertbuf_attr_set(cache.data_edit_points, data, point_i, &vflag);
+ }
+ }
+ break;
}
}
@@ -318,7 +331,7 @@ static void curves_batch_cache_ensure_procedural_final_attr(CurvesEvalCache &cac
const GPUVertFormat *format,
const int subdiv,
const int index,
- const char *name)
+ const char *UNUSED(name))
{
CurvesEvalFinalCache &final_cache = cache.final[subdiv];
final_cache.attributes_buf[index] = GPU_vertbuf_create_with_format_ex(
@@ -328,12 +341,6 @@ static void curves_batch_cache_ensure_procedural_final_attr(CurvesEvalCache &cac
/* Those are points! not line segments. */
GPU_vertbuf_data_alloc(final_cache.attributes_buf[index],
final_cache.strands_res * cache.strands_len);
-
- /* Create vbo immediately to bind to texture buffer. */
- GPU_vertbuf_use(final_cache.attributes_buf[index]);
-
- final_cache.attributes_tex[index] = GPU_texture_create_from_vertbuf(
- name, final_cache.attributes_buf[index]);
}
static void curves_batch_ensure_attribute(const Curves &curves,
@@ -342,8 +349,8 @@ static void curves_batch_ensure_attribute(const Curves &curves,
const int subdiv,
const int index)
{
+ using namespace blender;
GPU_VERTBUF_DISCARD_SAFE(cache.proc_attributes_buf[index]);
- DRW_TEXTURE_FREE_SAFE(cache.proc_attributes_tex[index]);
char sampler_name[32];
drw_curves_get_attribute_sampler_name(request.attribute_name, sampler_name);
@@ -361,15 +368,15 @@ static void curves_batch_ensure_attribute(const Curves &curves,
request.domain == ATTR_DOMAIN_POINT ? curves.geometry.point_num :
curves.geometry.curve_num);
- const blender::bke::AttributeAccessor attributes =
- blender::bke::CurvesGeometry::wrap(curves.geometry).attributes();
+ const bke::AttributeAccessor attributes =
+ bke::CurvesGeometry::wrap(curves.geometry).attributes();
/* TODO(@kevindietrich): float4 is used for scalar attributes as the implicit conversion done
* by OpenGL to vec4 for a scalar `s` will produce a `vec4(s, 0, 0, 1)`. However, following
* the Blender convention, it should be `vec4(s, s, s, 1)`. This could be resolved using a
* similar texture state swizzle to map the attribute correctly as for volume attributes, so we
* can control the conversion ourselves. */
- blender::VArray<ColorGeometry4f> attribute = attributes.lookup_or_default<ColorGeometry4f>(
+ VArray<ColorGeometry4f> attribute = attributes.lookup_or_default<ColorGeometry4f>(
request.attribute_name, request.domain, {0.0f, 0.0f, 0.0f, 1.0f});
MutableSpan<ColorGeometry4f> vbo_span{
@@ -378,13 +385,9 @@ static void curves_batch_ensure_attribute(const Curves &curves,
attribute.materialize(vbo_span);
- GPU_vertbuf_use(attr_vbo);
- cache.proc_attributes_tex[index] = GPU_texture_create_from_vertbuf(sampler_name, attr_vbo);
-
/* Existing final data may have been for a different attribute (with a different name or domain),
* free the data. */
GPU_VERTBUF_DISCARD_SAFE(cache.final[subdiv].attributes_buf[index]);
- DRW_TEXTURE_FREE_SAFE(cache.final[subdiv].attributes_tex[index]);
/* Ensure final data for points. */
if (request.domain == ATTR_DOMAIN_POINT) {
@@ -430,19 +433,11 @@ static void curves_batch_cache_ensure_procedural_strand_data(Curves &curves,
GPU_vertbuf_attr_get_raw_data(cache.proc_strand_seg_buf, seg_id, &seg_step);
curves_batch_cache_fill_strands_data(curves, data_step, seg_step);
-
- /* Create vbo immediately to bind to texture buffer. */
- GPU_vertbuf_use(cache.proc_strand_buf);
- cache.strand_tex = GPU_texture_create_from_vertbuf("curves_strand", cache.proc_strand_buf);
-
- GPU_vertbuf_use(cache.proc_strand_seg_buf);
- cache.strand_seg_tex = GPU_texture_create_from_vertbuf("curves_strand_seg",
- cache.proc_strand_seg_buf);
}
static void curves_batch_cache_ensure_procedural_final_points(CurvesEvalCache &cache, int subdiv)
{
- /* Same format as point_tex. */
+ /* Same format as proc_point_buf. */
GPUVertFormat format = {0};
GPU_vertformat_attr_add(&format, "pos", GPU_COMP_F32, 4, GPU_FETCH_FLOAT);
@@ -453,12 +448,6 @@ static void curves_batch_cache_ensure_procedural_final_points(CurvesEvalCache &c
/* Those are points! not line segments. */
GPU_vertbuf_data_alloc(cache.final[subdiv].proc_buf,
cache.final[subdiv].strands_res * cache.strands_len);
-
- /* Create vbo immediately to bind to texture buffer. */
- GPU_vertbuf_use(cache.final[subdiv].proc_buf);
-
- cache.final[subdiv].proc_tex = GPU_texture_create_from_vertbuf("hair_proc",
- cache.final[subdiv].proc_buf);
}
static void curves_batch_cache_fill_segments_indices(const Curves &curves,
@@ -516,7 +505,6 @@ static bool curves_ensure_attributes(const Curves &curves,
GPUMaterial *gpu_material,
int subdiv)
{
- ThreadMutex *render_mutex = &cache.render_mutex;
const CustomData *cd_curve = &curves.geometry.curve_data;
const CustomData *cd_point = &curves.geometry.point_data;
CurvesEvalFinalCache &final_cache = cache.curves_cache.final[subdiv];
@@ -548,11 +536,10 @@ static bool curves_ensure_attributes(const Curves &curves,
/* Some new attributes have been added, free all and start over. */
for (const int i : IndexRange(GPU_MAX_ATTR)) {
GPU_VERTBUF_DISCARD_SAFE(cache.curves_cache.proc_attributes_buf[i]);
- DRW_TEXTURE_FREE_SAFE(cache.curves_cache.proc_attributes_tex[i]);
}
- drw_attributes_merge(&final_cache.attr_used, &attrs_needed, render_mutex);
+ drw_attributes_merge(&final_cache.attr_used, &attrs_needed, cache.render_mutex);
}
- drw_attributes_merge(&final_cache.attr_used_over_time, &attrs_needed, render_mutex);
+ drw_attributes_merge(&final_cache.attr_used_over_time, &attrs_needed, cache.render_mutex);
}
bool need_tf_update = false;
@@ -596,7 +583,7 @@ bool curves_ensure_procedural_data(Curves *curves,
}
/* Refreshed if active layer or custom data changes. */
- if ((*r_hair_cache)->strand_tex == nullptr) {
+ if ((*r_hair_cache)->proc_strand_buf == nullptr) {
curves_batch_cache_ensure_procedural_strand_data(*curves, cache.curves_cache);
}
@@ -651,10 +638,10 @@ static void request_attribute(Curves &curves, const char *name)
drw_attributes_add_request(
&attributes, name, type, CustomData_get_named_layer(&custom_data, type, name), domain);
- drw_attributes_merge(&final_cache.attr_used, &attributes, &cache.render_mutex);
+ drw_attributes_merge(&final_cache.attr_used, &attributes, cache.render_mutex);
}
-GPUTexture **DRW_curves_texture_for_evaluated_attribute(Curves *curves,
+GPUVertBuf **DRW_curves_texture_for_evaluated_attribute(Curves *curves,
const char *name,
bool *r_is_point_domain)
{
@@ -680,10 +667,10 @@ GPUTexture **DRW_curves_texture_for_evaluated_attribute(Curves *curves,
switch (final_cache.attr_used.requests[request_i].domain) {
case ATTR_DOMAIN_POINT:
*r_is_point_domain = true;
- return &final_cache.attributes_tex[request_i];
+ return &final_cache.attributes_buf[request_i];
case ATTR_DOMAIN_CURVE:
*r_is_point_domain = false;
- return &cache.curves_cache.proc_attributes_tex[request_i];
+ return &cache.curves_cache.proc_attributes_buf[request_i];
default:
BLI_assert_unreachable();
return nullptr;
@@ -697,9 +684,14 @@ void DRW_curves_batch_cache_create_requested(Object *ob)
if (DRW_batch_requested(cache.edit_points, GPU_PRIM_POINTS)) {
DRW_vbo_request(cache.edit_points, &cache.curves_cache.proc_point_buf);
+ DRW_vbo_request(cache.edit_points, &cache.curves_cache.data_edit_points);
}
if (DRW_vbo_requested(cache.curves_cache.proc_point_buf)) {
curves_batch_cache_ensure_procedural_pos(*curves, cache.curves_cache, nullptr);
}
+
+ if (DRW_vbo_requested(cache.curves_cache.data_edit_points)) {
+ curves_batch_cache_ensure_data_edit_points(*curves, cache.curves_cache);
+ }
}
diff --git a/source/blender/draw/intern/draw_cache_impl_gpencil.c b/source/blender/draw/intern/draw_cache_impl_gpencil.cc
index aecf4b2f004..6860fae744b 100644
--- a/source/blender/draw/intern/draw_cache_impl_gpencil.c
+++ b/source/blender/draw/intern/draw_cache_impl_gpencil.cc
@@ -23,12 +23,14 @@
#include "DEG_depsgraph_query.h"
#include "BLI_hash.h"
+#include "BLI_math_vec_types.hh"
#include "BLI_polyfill_2d.h"
#include "draw_cache.h"
#include "draw_cache_impl.h"
#include "../engines/gpencil/gpencil_defines.h"
+#include "../engines/gpencil/gpencil_shader_shared.h"
#define BEZIER_HANDLE (1 << 3)
#define COLOR_SHIFT 5
@@ -41,11 +43,13 @@ typedef struct GpencilBatchCache {
/** Instancing Data */
GPUVertBuf *vbo;
GPUVertBuf *vbo_col;
- /** Fill Topology */
+ /** Indices in material order, then stroke order with fill first.
+ * Strokes can be individually rendered using `gps->runtime.stroke_start` and
+ * `gps->runtime.fill_start`. */
GPUIndexBuf *ibo;
- /** Instancing Batches */
- GPUBatch *stroke_batch;
- GPUBatch *fill_batch;
+ /** Batches */
+ GPUBatch *geom_batch;
+ /** Stroke lines only */
GPUBatch *lines_batch;
/** Edit Mode */
@@ -97,7 +101,8 @@ static GpencilBatchCache *gpencil_batch_cache_init(Object *ob, int cfra)
GpencilBatchCache *cache = gpd->runtime.gpencil_cache;
if (!cache) {
- cache = gpd->runtime.gpencil_cache = MEM_callocN(sizeof(*cache), __func__);
+ cache = gpd->runtime.gpencil_cache = (GpencilBatchCache *)MEM_callocN(sizeof(*cache),
+ __func__);
}
else {
memset(cache, 0, sizeof(*cache));
@@ -116,8 +121,7 @@ static void gpencil_batch_cache_clear(GpencilBatchCache *cache)
}
GPU_BATCH_DISCARD_SAFE(cache->lines_batch);
- GPU_BATCH_DISCARD_SAFE(cache->fill_batch);
- GPU_BATCH_DISCARD_SAFE(cache->stroke_batch);
+ GPU_BATCH_DISCARD_SAFE(cache->geom_batch);
GPU_VERTBUF_DISCARD_SAFE(cache->vbo);
GPU_VERTBUF_DISCARD_SAFE(cache->vbo_col);
GPU_INDEXBUF_DISCARD_SAFE(cache->ibo);
@@ -172,9 +176,10 @@ void DRW_gpencil_batch_cache_free(bGPdata *gpd)
/* MUST match the format below. */
typedef struct gpStrokeVert {
- int32_t mat, stroke_id, point_id, packed_asp_hard_rot;
/** Position and thickness packed in the same attribute. */
float pos[3], thickness;
+ /** Material Index, Stroke Index, Point Index, Packed aspect + hardness + rotation. */
+ int32_t mat, stroke_id, point_id, packed_asp_hard_rot;
/** UV and strength packed in the same attribute. */
float uv_fill[2], u_stroke, strength;
} gpStrokeVert;
@@ -183,12 +188,9 @@ static GPUVertFormat *gpencil_stroke_format(void)
{
static GPUVertFormat format = {0};
if (format.attr_len == 0) {
- GPU_vertformat_attr_add(&format, "ma", GPU_COMP_I32, 4, GPU_FETCH_INT);
GPU_vertformat_attr_add(&format, "pos", GPU_COMP_F32, 4, GPU_FETCH_FLOAT);
+ GPU_vertformat_attr_add(&format, "ma", GPU_COMP_I32, 4, GPU_FETCH_INT);
GPU_vertformat_attr_add(&format, "uv", GPU_COMP_F32, 4, GPU_FETCH_FLOAT);
- /* IMPORTANT: This means having only 4 attributes
- * to fit into GPU module limit of 16 attributes. */
- GPU_vertformat_multiload_enable(&format, 4);
}
return &format;
}
@@ -238,9 +240,6 @@ static GPUVertFormat *gpencil_color_format(void)
if (format.attr_len == 0) {
GPU_vertformat_attr_add(&format, "col", GPU_COMP_F32, 4, GPU_FETCH_FLOAT);
GPU_vertformat_attr_add(&format, "fcol", GPU_COMP_F32, 4, GPU_FETCH_FLOAT);
- /* IMPORTANT: This means having only 4 attributes
- * to fit into GPU module limit of 16 attributes. */
- GPU_vertformat_multiload_enable(&format, 4);
}
return &format;
}
@@ -295,7 +294,8 @@ BLI_INLINE int32_t pack_rotation_aspect_hardness(float rot, float asp, float har
return packed;
}
-static void gpencil_buffer_add_point(gpStrokeVert *verts,
+static void gpencil_buffer_add_point(GPUIndexBufBuilder *ibo,
+ gpStrokeVert *verts,
gpColorVert *cols,
const bGPDstroke *gps,
const bGPDspoint *pt,
@@ -315,11 +315,11 @@ static void gpencil_buffer_add_point(gpStrokeVert *verts,
/* Encode fill opacity defined by opacity modifier in vertex color alpha. If
* no opacity modifier, the value will always be 1.0f. The opacity factor can be any
* value between 0.0f and 2.0f. */
- col->fcol[3] = (((int)(col->fcol[3] * 10000.0f)) * 10.0f) + gps->fill_opacity_fac;
+ col->fcol[3] = ((int)(col->fcol[3] * 10000.0f) * 10.0f) + gps->fill_opacity_fac;
vert->strength = (round_cap0) ? pt->strength : -pt->strength;
vert->u_stroke = pt->uv_fac;
- vert->stroke_id = gps->runtime.stroke_start;
+ vert->stroke_id = gps->runtime.vertex_start;
vert->point_id = v;
vert->thickness = max_ff(0.0f, gps->thickness * pt->pressure) * (round_cap1 ? 1.0f : -1.0f);
/* Tag endpoint material to -1 so they get discarded by vertex shader. */
@@ -329,27 +329,36 @@ static void gpencil_buffer_add_point(gpStrokeVert *verts,
vert->packed_asp_hard_rot = pack_rotation_aspect_hardness(
pt->uv_rot, aspect_ratio, gps->hardeness);
+
+ if (!is_endpoint) {
+ /* Issue a quad per point. */
+ /* The attribute loading uses a different shader and will undo this bit packing. */
+ int v_mat = (v << GP_VERTEX_ID_SHIFT) | GP_IS_STROKE_VERTEX_BIT;
+ GPU_indexbuf_add_tri_verts(ibo, v_mat + 0, v_mat + 1, v_mat + 2);
+ GPU_indexbuf_add_tri_verts(ibo, v_mat + 2, v_mat + 1, v_mat + 3);
+ }
}
-static void gpencil_buffer_add_stroke(gpStrokeVert *verts,
+static void gpencil_buffer_add_stroke(GPUIndexBufBuilder *ibo,
+ gpStrokeVert *verts,
gpColorVert *cols,
const bGPDstroke *gps)
{
const bGPDspoint *pts = gps->points;
int pts_len = gps->totpoints;
bool is_cyclic = gpencil_stroke_is_cyclic(gps);
- int v = gps->runtime.stroke_start;
+ int v = gps->runtime.vertex_start;
/* First point for adjacency (not drawn). */
int adj_idx = (is_cyclic) ? (pts_len - 1) : min_ii(pts_len - 1, 1);
- gpencil_buffer_add_point(verts, cols, gps, &pts[adj_idx], v++, true);
+ gpencil_buffer_add_point(ibo, verts, cols, gps, &pts[adj_idx], v++, true);
for (int i = 0; i < pts_len; i++) {
- gpencil_buffer_add_point(verts, cols, gps, &pts[i], v++, false);
+ gpencil_buffer_add_point(ibo, verts, cols, gps, &pts[i], v++, false);
}
/* Draw line to first point to complete the loop for cyclic strokes. */
if (is_cyclic) {
- gpencil_buffer_add_point(verts, cols, gps, &pts[0], v, false);
+ gpencil_buffer_add_point(ibo, verts, cols, gps, &pts[0], v, false);
/* UV factor needs to be adjusted for the last point to not be equal to the UV factor of the
* first point. It should be the factor of the last point plus the distance from the last point
* to the first.
@@ -360,16 +369,20 @@ static void gpencil_buffer_add_stroke(gpStrokeVert *verts,
}
/* Last adjacency point (not drawn). */
adj_idx = (is_cyclic) ? 1 : max_ii(0, pts_len - 2);
- gpencil_buffer_add_point(verts, cols, gps, &pts[adj_idx], v++, true);
+ gpencil_buffer_add_point(ibo, verts, cols, gps, &pts[adj_idx], v++, true);
}
static void gpencil_buffer_add_fill(GPUIndexBufBuilder *ibo, const bGPDstroke *gps)
{
int tri_len = gps->tot_triangles;
- int v = gps->runtime.stroke_start;
+ int v = gps->runtime.vertex_start + 1;
for (int i = 0; i < tri_len; i++) {
uint *tri = gps->triangles[i].verts;
- GPU_indexbuf_add_tri_verts(ibo, v + tri[0], v + tri[1], v + tri[2]);
+ /* The attribute loading uses a different shader and will undo this bit packing. */
+ GPU_indexbuf_add_tri_verts(ibo,
+ (v + tri[0]) << GP_VERTEX_ID_SHIFT,
+ (v + tri[1]) << GP_VERTEX_ID_SHIFT,
+ (v + tri[2]) << GP_VERTEX_ID_SHIFT);
}
}
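
Both index paths above encode more than a plain vertex index: the point id is shifted up by GP_VERTEX_ID_SHIFT, the bits below the shift carry the quad corner (added as +0..+3), and stroke vertices are tagged with GP_IS_STROKE_VERTEX_BIT so the attribute-loading shader can tell them apart from fill vertices. A minimal sketch of that packing, assuming only the two macros from gpencil_shader_shared.h and assuming the stroke bit sits above the corner bits, which is how the indices above are built (the decode lives in the shader and is not part of this patch):

/* Sketch only: mirrors the packing done when emitting indices above. */
static uint32_t gpencil_pack_stroke_index(uint32_t point_id, uint32_t quad_corner)
{
  /* The corner lands in the bits below GP_VERTEX_ID_SHIFT, matching the +0..+3 above. */
  return ((point_id << GP_VERTEX_ID_SHIFT) | GP_IS_STROKE_VERTEX_BIT) + quad_corner;
}

static uint32_t gpencil_pack_fill_index(uint32_t vert_id)
{
  return vert_id << GP_VERTEX_ID_SHIFT;
}
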
@@ -379,10 +392,10 @@ static void gpencil_stroke_iter_cb(bGPDlayer *UNUSED(gpl),
void *thunk)
{
gpIterData *iter = (gpIterData *)thunk;
- gpencil_buffer_add_stroke(iter->verts, iter->cols, gps);
if (gps->tot_triangles > 0) {
gpencil_buffer_add_fill(&iter->ibo, gps);
}
+ gpencil_buffer_add_stroke(&iter->ibo, iter->verts, iter->cols, gps);
}
static void gpencil_object_verts_count_cb(bGPDlayer *UNUSED(gpl),
@@ -391,12 +404,15 @@ static void gpencil_object_verts_count_cb(bGPDlayer *UNUSED(gpl),
void *thunk)
{
gpIterData *iter = (gpIterData *)thunk;
-
- /* Store first index offset */
- gps->runtime.stroke_start = iter->vert_len;
+ int stroke_vert_len = gps->totpoints + gpencil_stroke_is_cyclic(gps);
+ gps->runtime.vertex_start = iter->vert_len;
+ /* Add additional padding at the start and end. */
+ iter->vert_len += 1 + stroke_vert_len + 1;
+ /* Store first index offset. */
gps->runtime.fill_start = iter->tri_len;
- iter->vert_len += gps->totpoints + 2 + gpencil_stroke_is_cyclic(gps);
iter->tri_len += gps->tot_triangles;
+ gps->runtime.stroke_start = iter->tri_len;
+ iter->tri_len += stroke_vert_len * 2;
}
static void gpencil_batches_ensure(Object *ob, GpencilBatchCache *cache, int cfra)
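
The counting callback above reserves, per stroke, one padding vertex on each side of the drawn points (for adjacency) plus index space for the fill triangles followed by two triangles (one quad) per drawn point. A small illustrative helper restating that bookkeeping (the names are made up for the sketch, not taken from the patch):

struct StrokeCounts {
  int vert_len; /* Vertices added to the VBO (one pad on each side). */
  int tri_len;  /* Triangles added to the IBO (fill first, then stroke quads). */
};

static StrokeCounts gpencil_stroke_counts(int totpoints, bool is_cyclic, int tot_triangles)
{
  const int stroke_vert_len = totpoints + (is_cyclic ? 1 : 0);
  StrokeCounts counts;
  counts.vert_len = 1 + stroke_vert_len + 1; /* Padding at the start and end. */
  counts.tri_len = tot_triangles +           /* Fill triangles first...        */
                   stroke_vert_len * 2;      /* ...then 2 tris (a quad) per point. */
  return counts;
}
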
@@ -406,7 +422,7 @@ static void gpencil_batches_ensure(Object *ob, GpencilBatchCache *cache, int cfr
if (cache->vbo == NULL) {
/* Should be discarded together. */
BLI_assert(cache->vbo == NULL && cache->ibo == NULL);
- BLI_assert(cache->fill_batch == NULL && cache->stroke_batch == NULL);
+ BLI_assert(cache->geom_batch == NULL);
/* TODO/PERF: Could be changed to only do it if needed.
* For now it's simpler to assume we always need it
* since multiple viewports may or may not need it.
@@ -415,29 +431,29 @@ static void gpencil_batches_ensure(Object *ob, GpencilBatchCache *cache, int cfr
bool do_onion = true;
/* First count how many vertices and triangles are needed for the whole object. */
- gpIterData iter = {
- .gpd = gpd,
- .verts = NULL,
- .ibo = {0},
- .vert_len = 1, /* Start at 1 for the gl_InstanceID trick to work (see vert shader). */
- .tri_len = 0,
- .curve_len = 0,
- };
+ gpIterData iter = {};
+ iter.gpd = gpd;
+ iter.verts = NULL;
+ iter.ibo = {0};
+ iter.vert_len = 0;
+ iter.tri_len = 0;
+ iter.curve_len = 0;
BKE_gpencil_visible_stroke_advanced_iter(
NULL, ob, NULL, gpencil_object_verts_count_cb, &iter, do_onion, cfra);
+ GPUUsageType vbo_flag = GPU_USAGE_STATIC | GPU_USAGE_FLAG_BUFFER_TEXTURE_ONLY;
/* Create VBOs. */
GPUVertFormat *format = gpencil_stroke_format();
GPUVertFormat *format_col = gpencil_color_format();
- cache->vbo = GPU_vertbuf_create_with_format(format);
- cache->vbo_col = GPU_vertbuf_create_with_format(format_col);
+ cache->vbo = GPU_vertbuf_create_with_format_ex(format, vbo_flag);
+ cache->vbo_col = GPU_vertbuf_create_with_format_ex(format_col, vbo_flag);
/* Add extra space at the end of the buffer because of quad load. */
GPU_vertbuf_data_alloc(cache->vbo, iter.vert_len + 2);
GPU_vertbuf_data_alloc(cache->vbo_col, iter.vert_len + 2);
iter.verts = (gpStrokeVert *)GPU_vertbuf_get_data(cache->vbo);
iter.cols = (gpColorVert *)GPU_vertbuf_get_data(cache->vbo_col);
/* Create IBO. */
- GPU_indexbuf_init(&iter.ibo, GPU_PRIM_TRIS, iter.tri_len, iter.vert_len);
+ GPU_indexbuf_init(&iter.ibo, GPU_PRIM_TRIS, iter.tri_len, 0xFFFFFFFFu);
/* Fill buffers with data. */
BKE_gpencil_visible_stroke_advanced_iter(
@@ -452,33 +468,39 @@ static void gpencil_batches_ensure(Object *ob, GpencilBatchCache *cache, int cfr
/* Finish the IBO. */
cache->ibo = GPU_indexbuf_build(&iter.ibo);
-
/* Create the batches */
- cache->fill_batch = GPU_batch_create(GPU_PRIM_TRIS, cache->vbo, cache->ibo);
- GPU_batch_vertbuf_add(cache->fill_batch, cache->vbo_col);
- cache->stroke_batch = GPU_batch_create(GPU_PRIM_TRI_STRIP, gpencil_dummy_buffer_get(), NULL);
- GPU_batch_instbuf_add_ex(cache->stroke_batch, cache->vbo, 0);
- GPU_batch_instbuf_add_ex(cache->stroke_batch, cache->vbo_col, 0);
+ cache->geom_batch = GPU_batch_create(GPU_PRIM_TRIS, cache->vbo, cache->ibo);
+ /* Allow creation of buffer texture. */
+ GPU_vertbuf_use(cache->vbo);
+ GPU_vertbuf_use(cache->vbo_col);
gpd->flag &= ~GP_DATA_CACHE_IS_DIRTY;
cache->is_dirty = false;
}
}
-GPUBatch *DRW_cache_gpencil_strokes_get(Object *ob, int cfra)
+GPUBatch *DRW_cache_gpencil_get(Object *ob, int cfra)
+{
+ GpencilBatchCache *cache = gpencil_batch_cache_get(ob, cfra);
+ gpencil_batches_ensure(ob, cache, cfra);
+
+ return cache->geom_batch;
+}
+
+GPUVertBuf *DRW_cache_gpencil_position_buffer_get(Object *ob, int cfra)
{
GpencilBatchCache *cache = gpencil_batch_cache_get(ob, cfra);
gpencil_batches_ensure(ob, cache, cfra);
- return cache->stroke_batch;
+ return cache->vbo;
}
-GPUBatch *DRW_cache_gpencil_fills_get(Object *ob, int cfra)
+GPUVertBuf *DRW_cache_gpencil_color_buffer_get(Object *ob, int cfra)
{
GpencilBatchCache *cache = gpencil_batch_cache_get(ob, cfra);
gpencil_batches_ensure(ob, cache, cfra);
- return cache->fill_batch;
+ return cache->vbo_col;
}
static void gpencil_lines_indices_cb(bGPDlayer *UNUSED(gpl),
@@ -489,7 +511,7 @@ static void gpencil_lines_indices_cb(bGPDlayer *UNUSED(gpl),
gpIterData *iter = (gpIterData *)thunk;
int pts_len = gps->totpoints + gpencil_stroke_is_cyclic(gps);
- int start = gps->runtime.stroke_start + 1;
+ int start = gps->runtime.vertex_start + 1;
int end = start + pts_len;
for (int i = start; i < end; i++) {
GPU_indexbuf_add_generic_vert(&iter->ibo, i);
@@ -508,10 +530,9 @@ GPUBatch *DRW_cache_gpencil_face_wireframe_get(Object *ob)
if (cache->lines_batch == NULL) {
GPUVertBuf *vbo = cache->vbo;
- gpIterData iter = {
- .gpd = ob->data,
- .ibo = {0},
- };
+ gpIterData iter = {};
+ iter.gpd = (bGPdata *)ob->data;
+ iter.ibo = {0};
uint vert_len = GPU_vertbuf_get_vertex_len(vbo);
GPU_indexbuf_init_ex(&iter.ibo, GPU_PRIM_LINE_STRIP, vert_len, vert_len);
@@ -540,7 +561,7 @@ bGPDstroke *DRW_cache_gpencil_sbuffer_stroke_data_get(Object *ob)
Brush *brush = gpd->runtime.sbuffer_brush;
/* Convert the sbuffer to a bGPDstroke. */
if (gpd->runtime.sbuffer_gps == NULL) {
- bGPDstroke *gps = MEM_callocN(sizeof(*gps), "bGPDstroke sbuffer");
+ bGPDstroke *gps = (bGPDstroke *)MEM_callocN(sizeof(*gps), "bGPDstroke sbuffer");
gps->totpoints = gpd->runtime.sbuffer_used;
gps->mat_nr = max_ii(0, gpd->runtime.matid - 1);
gps->flag = gpd->runtime.sbuffer_sflag;
@@ -553,7 +574,9 @@ bGPDstroke *DRW_cache_gpencil_sbuffer_stroke_data_get(Object *ob)
gps->tot_triangles = max_ii(0, gpd->runtime.sbuffer_used - 2);
gps->caps[0] = gps->caps[1] = GP_STROKE_CAP_ROUND;
- gps->runtime.stroke_start = 1; /* Add one for the adjacency index. */
+ gps->runtime.vertex_start = 0;
+ gps->runtime.fill_start = 0;
+ gps->runtime.stroke_start = 0;
copy_v4_v4(gps->vert_color_fill, gpd->runtime.vert_color_fill);
/* Caps. */
gps->caps[0] = gps->caps[1] = (short)brush->gpencil_settings->caps_type;
@@ -563,17 +586,17 @@ bGPDstroke *DRW_cache_gpencil_sbuffer_stroke_data_get(Object *ob)
return gpd->runtime.sbuffer_gps;
}
-static void gpencil_sbuffer_stroke_ensure(bGPdata *gpd, bool do_stroke, bool do_fill)
+static void gpencil_sbuffer_stroke_ensure(bGPdata *gpd, bool do_fill)
{
- tGPspoint *tpoints = gpd->runtime.sbuffer;
+ tGPspoint *tpoints = (tGPspoint *)gpd->runtime.sbuffer;
bGPDstroke *gps = gpd->runtime.sbuffer_gps;
int vert_len = gpd->runtime.sbuffer_used;
/* DRW_cache_gpencil_sbuffer_stroke_data_get needs to have been called previously. */
BLI_assert(gps != NULL);
- if (do_stroke && (gpd->runtime.sbuffer_stroke_batch == NULL)) {
- gps->points = MEM_mallocN(vert_len * sizeof(*gps->points), __func__);
+ if (gpd->runtime.sbuffer_batch == NULL) {
+ gps->points = (bGPDspoint *)MEM_mallocN(vert_len * sizeof(*gps->points), __func__);
const DRWContextState *draw_ctx = DRW_context_state_get();
Scene *scene = draw_ctx->scene;
@@ -589,43 +612,32 @@ static void gpencil_sbuffer_stroke_ensure(bGPdata *gpd, bool do_stroke, bool do_
for (int i = 0; i < vert_len; i++) {
ED_gpencil_tpoint_to_point(region, origin, &tpoints[i], &gps->points[i]);
- mul_m4_v3(ob->imat, &gps->points[i].x);
+ mul_m4_v3(ob->world_to_object, &gps->points[i].x);
bGPDspoint *pt = &gps->points[i];
copy_v4_v4(pt->vert_color, tpoints[i].vert_color);
}
/* Calc uv data along the stroke. */
BKE_gpencil_stroke_uv_update(gps);
+ int tri_len = gps->tot_triangles + (gps->totpoints + gpencil_stroke_is_cyclic(gps)) * 2;
+ /* Create IBO. */
+ GPUIndexBufBuilder ibo_builder;
+ GPU_indexbuf_init(&ibo_builder, GPU_PRIM_TRIS, tri_len, 0xFFFFFFFFu);
/* Create VBO. */
+ GPUUsageType vbo_flag = GPU_USAGE_STATIC | GPU_USAGE_FLAG_BUFFER_TEXTURE_ONLY;
GPUVertFormat *format = gpencil_stroke_format();
GPUVertFormat *format_color = gpencil_color_format();
- GPUVertBuf *vbo = GPU_vertbuf_create_with_format(format);
- GPUVertBuf *vbo_col = GPU_vertbuf_create_with_format(format_color);
- /* Add extra space at the end (and start) of the buffer because of quad load and cyclic. */
+ GPUVertBuf *vbo = GPU_vertbuf_create_with_format_ex(format, vbo_flag);
+ GPUVertBuf *vbo_col = GPU_vertbuf_create_with_format_ex(format_color, vbo_flag);
+ /* Add extra space at the start and end of the buffer because of quad load and cyclic. */
GPU_vertbuf_data_alloc(vbo, 1 + vert_len + 1 + 2);
GPU_vertbuf_data_alloc(vbo_col, 1 + vert_len + 1 + 2);
gpStrokeVert *verts = (gpStrokeVert *)GPU_vertbuf_get_data(vbo);
gpColorVert *cols = (gpColorVert *)GPU_vertbuf_get_data(vbo_col);
- /* Fill buffers with data. */
- gpencil_buffer_add_stroke(verts, cols, gps);
-
- GPUBatch *batch = GPU_batch_create(GPU_PRIM_TRI_STRIP, gpencil_dummy_buffer_get(), NULL);
- GPU_batch_instbuf_add_ex(batch, vbo, true);
- GPU_batch_instbuf_add_ex(batch, vbo_col, true);
-
- gpd->runtime.sbuffer_stroke_batch = batch;
-
- MEM_freeN(gps->points);
- }
-
- if (do_fill && (gpd->runtime.sbuffer_fill_batch == NULL)) {
- /* Create IBO. */
- GPUIndexBufBuilder ibo_builder;
- GPU_indexbuf_init(&ibo_builder, GPU_PRIM_TRIS, gps->tot_triangles, vert_len);
-
- if (gps->tot_triangles > 0) {
- float(*tpoints2d)[2] = MEM_mallocN(sizeof(*tpoints2d) * vert_len, __func__);
+ /* Create fill indices. */
+ if (do_fill && gps->tot_triangles > 0) {
+ float(*tpoints2d)[2] = (float(*)[2])MEM_mallocN(sizeof(*tpoints2d) * vert_len, __func__);
/* Triangulate in 2D. */
for (int i = 0; i < vert_len; i++) {
copy_v2_v2(tpoints2d[i], tpoints[i].m_xy);
@@ -633,51 +645,72 @@ static void gpencil_sbuffer_stroke_ensure(bGPdata *gpd, bool do_stroke, bool do_
/* Compute directly inside the IBO data buffer. */
/* OPTI: This is a bottleneck if the stroke is very long. */
BLI_polyfill_calc(tpoints2d, (uint)vert_len, 0, (uint(*)[3])ibo_builder.data);
- /* Add stroke start offset. */
+ /* Add stroke start offset and shift. */
for (int i = 0; i < gps->tot_triangles * 3; i++) {
- ibo_builder.data[i] += gps->runtime.stroke_start;
+ ibo_builder.data[i] = (ibo_builder.data[i] + 1) << GP_VERTEX_ID_SHIFT;
}
/* HACK since we didn't use the builder API to avoid another malloc and copy,
* we need to set the number of indices manually. */
ibo_builder.index_len = gps->tot_triangles * 3;
+ ibo_builder.index_min = 0;
+ /* For this case, do not allow index compaction to avoid yet another preprocessing step. */
+ ibo_builder.index_max = 0xFFFFFFFFu - 1u;
+
+ gps->runtime.stroke_start = gps->tot_triangles;
MEM_freeN(tpoints2d);
}
- GPUIndexBuf *ibo = GPU_indexbuf_build(&ibo_builder);
- GPUVertBuf *vbo = gpd->runtime.sbuffer_stroke_batch->inst[0];
- GPUVertBuf *vbo_col = gpd->runtime.sbuffer_stroke_batch->inst[1];
+ /* Fill buffers with data. */
+ gpencil_buffer_add_stroke(&ibo_builder, verts, cols, gps);
- GPUBatch *batch = GPU_batch_create_ex(GPU_PRIM_TRIS, vbo, ibo, GPU_BATCH_OWNS_INDEX);
- GPU_batch_vertbuf_add(batch, vbo_col);
+ GPUBatch *batch = GPU_batch_create_ex(GPU_PRIM_TRIS,
+ gpencil_dummy_buffer_get(),
+ GPU_indexbuf_build(&ibo_builder),
+ GPU_BATCH_OWNS_INDEX);
- gpd->runtime.sbuffer_fill_batch = batch;
+ gpd->runtime.sbuffer_position_buf = vbo;
+ gpd->runtime.sbuffer_color_buf = vbo_col;
+ gpd->runtime.sbuffer_batch = batch;
+
+ MEM_freeN(gps->points);
}
}
-GPUBatch *DRW_cache_gpencil_sbuffer_stroke_get(Object *ob)
+GPUBatch *DRW_cache_gpencil_sbuffer_get(Object *ob, bool show_fill)
{
bGPdata *gpd = (bGPdata *)ob->data;
- gpencil_sbuffer_stroke_ensure(gpd, true, false);
+ /* Fill batch also needs the stroke batch to be created (vbo is shared). */
+ gpencil_sbuffer_stroke_ensure(gpd, show_fill);
- return gpd->runtime.sbuffer_stroke_batch;
+ return gpd->runtime.sbuffer_batch;
}
-GPUBatch *DRW_cache_gpencil_sbuffer_fill_get(Object *ob)
+GPUVertBuf *DRW_cache_gpencil_sbuffer_position_buffer_get(Object *ob, bool show_fill)
{
bGPdata *gpd = (bGPdata *)ob->data;
/* Fill batch also needs the stroke batch to be created (vbo is shared). */
- gpencil_sbuffer_stroke_ensure(gpd, true, true);
+ gpencil_sbuffer_stroke_ensure(gpd, show_fill);
- return gpd->runtime.sbuffer_fill_batch;
+ return gpd->runtime.sbuffer_position_buf;
+}
+
+GPUVertBuf *DRW_cache_gpencil_sbuffer_color_buffer_get(Object *ob, bool show_fill)
+{
+ bGPdata *gpd = (bGPdata *)ob->data;
+ /* Fill batch also needs the stroke batch to be created (vbo is shared). */
+ gpencil_sbuffer_stroke_ensure(gpd, show_fill);
+
+ return gpd->runtime.sbuffer_color_buf;
}
void DRW_cache_gpencil_sbuffer_clear(Object *ob)
{
bGPdata *gpd = (bGPdata *)ob->data;
MEM_SAFE_FREE(gpd->runtime.sbuffer_gps);
- GPU_BATCH_DISCARD_SAFE(gpd->runtime.sbuffer_fill_batch);
- GPU_BATCH_DISCARD_SAFE(gpd->runtime.sbuffer_stroke_batch);
+ GPU_BATCH_DISCARD_SAFE(gpd->runtime.sbuffer_batch);
+ GPU_VERTBUF_DISCARD_SAFE(gpd->runtime.sbuffer_position_buf);
+ GPU_VERTBUF_DISCARD_SAFE(gpd->runtime.sbuffer_color_buf);
}
/** \} */
@@ -728,7 +761,7 @@ static void gpencil_edit_stroke_iter_cb(bGPDlayer *gpl,
{
gpEditIterData *iter = (gpEditIterData *)thunk;
const int v_len = gps->totpoints;
- const int v = gps->runtime.stroke_start + 1;
+ const int v = gps->runtime.vertex_start + 1;
MDeformVert *dvert = ((iter->vgindex > -1) && gps->dvert) ? gps->dvert : NULL;
gpEditVert *vert_ptr = iter->verts + v;
@@ -743,9 +776,12 @@ static void gpencil_edit_stroke_iter_cb(bGPDlayer *gpl,
vert_ptr->weight = gpencil_point_edit_weight(dvert, i, iter->vgindex);
vert_ptr++;
}
- /* Draw line to first point to complete the loop for cyclic strokes. */
- vert_ptr->vflag = sflag | gpencil_point_edit_flag(layer_lock, &gps->points[0], 0, v_len);
- vert_ptr->weight = gpencil_point_edit_weight(dvert, 0, iter->vgindex);
+
+ if (gpencil_stroke_is_cyclic(gps)) {
+ /* Draw line to first point to complete the loop for cyclic strokes. */
+ vert_ptr->vflag = sflag | gpencil_point_edit_flag(layer_lock, &gps->points[0], 0, v_len);
+ vert_ptr->weight = gpencil_point_edit_weight(dvert, 0, iter->vgindex);
+ }
}
static void gpencil_edit_curve_stroke_count_cb(bGPDlayer *gpl,
@@ -876,14 +912,13 @@ static void gpencil_edit_batches_ensure(Object *ob, GpencilBatchCache *cache, in
/* Curve Handles and Points for Editing. */
if (cache->edit_curve_vbo == NULL) {
- gpIterData iterdata = {
- .gpd = gpd,
- .verts = NULL,
- .ibo = {0},
- .vert_len = 0,
- .tri_len = 0,
- .curve_len = 0,
- };
+ gpIterData iterdata = {};
+ iterdata.gpd = gpd;
+ iterdata.verts = NULL;
+ iterdata.ibo = {0};
+ iterdata.vert_len = 0;
+ iterdata.tri_len = 0;
+ iterdata.curve_len = 0;
/* Create VBO. */
GPUVertFormat *format = gpencil_edit_curve_format();
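
With the instancing batches gone, grease pencil geometry is now drawn from a single triangle batch, while positions and colors are exposed as vertex buffers (created with GPU_USAGE_FLAG_BUFFER_TEXTURE_ONLY) for the shader to fetch from. A hypothetical engine-side call site, not part of this patch, showing how the three new getters fit together:

GPUBatch *geom = DRW_cache_gpencil_get(ob, cfra);
GPUVertBuf *pos = DRW_cache_gpencil_position_buffer_get(ob, cfra);
GPUVertBuf *col = DRW_cache_gpencil_color_buffer_get(ob, cfra);
/* `pos` and `col` are then bound by the engine as buffer textures;
 * `geom` carries the packed index buffer described in the hunks above. */
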
diff --git a/source/blender/draw/intern/draw_cache_impl_lattice.c b/source/blender/draw/intern/draw_cache_impl_lattice.c
index 0f12e78d60e..eb94f46fe50 100644
--- a/source/blender/draw/intern/draw_cache_impl_lattice.c
+++ b/source/blender/draw/intern/draw_cache_impl_lattice.c
@@ -231,7 +231,7 @@ static bool lattice_batch_cache_valid(Lattice *lt)
if ((cache->dims.u_len != lt->pntsu) || (cache->dims.v_len != lt->pntsv) ||
(cache->dims.w_len != lt->pntsw) ||
- ((cache->show_only_outside != ((lt->flag & LT_OUTSIDE) != 0)))) {
+ (cache->show_only_outside != ((lt->flag & LT_OUTSIDE) != 0))) {
return false;
}
@@ -367,11 +367,11 @@ static GPUIndexBuf *lattice_batch_cache_get_edges(LatticeRenderData *rdata,
#define LATT_INDEX(u, v, w) ((((w)*rdata->dims.v_len + (v)) * rdata->dims.u_len) + (u))
for (int w = 0; w < rdata->dims.w_len; w++) {
- int wxt = (ELEM(w, 0, rdata->dims.w_len - 1));
+ int wxt = ELEM(w, 0, rdata->dims.w_len - 1);
for (int v = 0; v < rdata->dims.v_len; v++) {
- int vxt = (ELEM(v, 0, rdata->dims.v_len - 1));
+ int vxt = ELEM(v, 0, rdata->dims.v_len - 1);
for (int u = 0; u < rdata->dims.u_len; u++) {
- int uxt = (ELEM(u, 0, rdata->dims.u_len - 1));
+ int uxt = ELEM(u, 0, rdata->dims.u_len - 1);
if (w && ((uxt || vxt) || !rdata->show_only_outside)) {
GPU_indexbuf_add_line_verts(&elb, LATT_INDEX(u, v, w - 1), LATT_INDEX(u, v, w));
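
For reference, the LATT_INDEX macro used in the loops above flattens a (u, v, w) lattice coordinate into a linear vertex index; the same expression written out as a function:

static int latt_index(const LatticeRenderData *rdata, int u, int v, int w)
{
  /* w varies slowest, u fastest — identical to the #define above. */
  return ((w * rdata->dims.v_len + v) * rdata->dims.u_len) + u;
}
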
diff --git a/source/blender/draw/intern/draw_cache_impl_mesh.cc b/source/blender/draw/intern/draw_cache_impl_mesh.cc
index c22382b3e09..031de3e4ef2 100644
--- a/source/blender/draw/intern/draw_cache_impl_mesh.cc
+++ b/source/blender/draw/intern/draw_cache_impl_mesh.cc
@@ -62,6 +62,7 @@
#include "draw_subdivision.h"
#include "draw_cache_impl.h" /* own include */
+#include "draw_manager.h"
#include "mesh_extractors/extract_mesh.hh"
@@ -104,7 +105,8 @@ static constexpr DRWBatchFlag batches_that_use_buffer(const int buffer_index)
MBC_EDIT_EDGES | MBC_EDIT_VNOR | MBC_EDIT_LNOR | MBC_EDIT_MESH_ANALYSIS |
MBC_EDIT_SELECTION_VERTS | MBC_EDIT_SELECTION_EDGES | MBC_EDIT_SELECTION_FACES |
MBC_ALL_VERTS | MBC_ALL_EDGES | MBC_LOOSE_EDGES | MBC_EDGE_DETECTION |
- MBC_WIRE_EDGES | MBC_WIRE_LOOPS | MBC_SCULPT_OVERLAYS | MBC_SURFACE_PER_MAT;
+ MBC_WIRE_EDGES | MBC_WIRE_LOOPS | MBC_SCULPT_OVERLAYS | MBC_VIEWER_ATTRIBUTE_OVERLAY |
+ MBC_SURFACE_PER_MAT;
case BUFFER_INDEX(vbo.lnor):
return MBC_SURFACE | MBC_EDIT_LNOR | MBC_WIRE_LOOPS | MBC_SURFACE_PER_MAT;
case BUFFER_INDEX(vbo.edge_fac):
@@ -166,9 +168,12 @@ static constexpr DRWBatchFlag batches_that_use_buffer(const int buffer_index)
case BUFFER_INDEX(vbo.attr[13]):
case BUFFER_INDEX(vbo.attr[14]):
return MBC_SURFACE | MBC_SURFACE_PER_MAT;
+ case BUFFER_INDEX(vbo.attr_viewer):
+ return MBC_VIEWER_ATTRIBUTE_OVERLAY;
case BUFFER_INDEX(ibo.tris):
return MBC_SURFACE | MBC_SURFACE_WEIGHTS | MBC_EDIT_TRIANGLES | MBC_EDIT_LNOR |
- MBC_EDIT_MESH_ANALYSIS | MBC_EDIT_SELECTION_FACES | MBC_SCULPT_OVERLAYS;
+ MBC_EDIT_MESH_ANALYSIS | MBC_EDIT_SELECTION_FACES | MBC_SCULPT_OVERLAYS |
+ MBC_VIEWER_ATTRIBUTE_OVERLAY;
case BUFFER_INDEX(ibo.lines):
return MBC_EDIT_EDGES | MBC_EDIT_SELECTION_EDGES | MBC_ALL_EDGES | MBC_WIRE_EDGES;
case BUFFER_INDEX(ibo.lines_loose):
@@ -236,7 +241,7 @@ BLI_INLINE void mesh_cd_layers_type_clear(DRW_MeshCDMask *a)
*((uint32_t *)a) = 0;
}
-static void mesh_cd_calc_edit_uv_layer(const Mesh *UNUSED(me), DRW_MeshCDMask *cd_used)
+static void mesh_cd_calc_edit_uv_layer(const Mesh * /*me*/, DRW_MeshCDMask *cd_used)
{
cd_used->edit_uv = 1;
}
@@ -549,22 +554,13 @@ BLI_INLINE void mesh_batch_cache_add_request(MeshBatchCache *cache, DRWBatchFlag
static bool mesh_batch_cache_valid(Object *object, Mesh *me)
{
- MeshBatchCache *cache = static_cast<MeshBatchCache *>(me->runtime.batch_cache);
+ MeshBatchCache *cache = static_cast<MeshBatchCache *>(me->runtime->batch_cache);
if (cache == nullptr) {
return false;
}
- if (object->sculpt && object->sculpt->pbvh) {
- if (cache->pbvh_is_drawing != BKE_pbvh_is_drawing(object->sculpt->pbvh)) {
- return false;
- }
-
- if (BKE_pbvh_is_drawing(object->sculpt->pbvh) &&
- BKE_pbvh_draw_cache_invalid(object->sculpt->pbvh)) {
- return false;
- }
- }
+ /* Note: PBVH draw data should not be checked here. */
if (cache->is_editmode != (me->edit_mesh != nullptr)) {
return false;
@@ -583,11 +579,11 @@ static bool mesh_batch_cache_valid(Object *object, Mesh *me)
static void mesh_batch_cache_init(Object *object, Mesh *me)
{
- MeshBatchCache *cache = static_cast<MeshBatchCache *>(me->runtime.batch_cache);
+ MeshBatchCache *cache = static_cast<MeshBatchCache *>(me->runtime->batch_cache);
if (!cache) {
- me->runtime.batch_cache = MEM_cnew<MeshBatchCache>(__func__);
- cache = static_cast<MeshBatchCache *>(me->runtime.batch_cache);
+ me->runtime->batch_cache = MEM_cnew<MeshBatchCache>(__func__);
+ cache = static_cast<MeshBatchCache *>(me->runtime->batch_cache);
}
else {
memset(cache, 0, sizeof(*cache));
@@ -629,7 +625,7 @@ void DRW_mesh_batch_cache_validate(Object *object, Mesh *me)
static MeshBatchCache *mesh_batch_cache_get(Mesh *me)
{
- return static_cast<MeshBatchCache *>(me->runtime.batch_cache);
+ return static_cast<MeshBatchCache *>(me->runtime->batch_cache);
}
static void mesh_batch_cache_check_vertex_group(MeshBatchCache *cache,
@@ -737,7 +733,7 @@ static void mesh_batch_cache_discard_uvedit_select(MeshBatchCache *cache)
void DRW_mesh_batch_cache_dirty_tag(Mesh *me, eMeshBatchDirtyMode mode)
{
- MeshBatchCache *cache = static_cast<MeshBatchCache *>(me->runtime.batch_cache);
+ MeshBatchCache *cache = static_cast<MeshBatchCache *>(me->runtime->batch_cache);
if (cache == nullptr) {
return;
}
@@ -825,7 +821,7 @@ static void mesh_batch_cache_free_subdiv_cache(MeshBatchCache *cache)
static void mesh_batch_cache_clear(Mesh *me)
{
- MeshBatchCache *cache = static_cast<MeshBatchCache *>(me->runtime.batch_cache);
+ MeshBatchCache *cache = static_cast<MeshBatchCache *>(me->runtime->batch_cache);
if (!cache) {
return;
}
@@ -857,7 +853,7 @@ static void mesh_batch_cache_clear(Mesh *me)
void DRW_mesh_batch_cache_free(Mesh *me)
{
mesh_batch_cache_clear(me);
- MEM_SAFE_FREE(me->runtime.batch_cache);
+ MEM_SAFE_FREE(me->runtime->batch_cache);
}
/** \} */
@@ -977,6 +973,27 @@ GPUBatch *DRW_mesh_batch_cache_get_edit_mesh_analysis(Mesh *me)
return DRW_batch_request(&cache->batch.edit_mesh_analysis);
}
+void DRW_mesh_get_attributes(Object *object,
+ Mesh *me,
+ struct GPUMaterial **gpumat_array,
+ int gpumat_array_len,
+ DRW_Attributes *r_attrs,
+ DRW_MeshCDMask *r_cd_needed)
+{
+ DRW_Attributes attrs_needed;
+ drw_attributes_clear(&attrs_needed);
+ DRW_MeshCDMask cd_needed = mesh_cd_calc_used_gpu_layers(
+ object, me, gpumat_array, gpumat_array_len, &attrs_needed);
+
+ if (r_attrs) {
+ *r_attrs = attrs_needed;
+ }
+
+ if (r_cd_needed) {
+ *r_cd_needed = cd_needed;
+ }
+}
+
GPUBatch **DRW_mesh_batch_cache_get_surface_shaded(Object *object,
Mesh *me,
struct GPUMaterial **gpumat_array,
@@ -991,8 +1008,7 @@ GPUBatch **DRW_mesh_batch_cache_get_surface_shaded(Object *object,
BLI_assert(gpumat_array_len == cache->mat_len);
mesh_cd_layers_type_merge(&cache->cd_needed, cd_needed);
- ThreadMutex *mesh_render_mutex = (ThreadMutex *)me->runtime.render_mutex;
- drw_attributes_merge(&cache->attr_needed, &attrs_needed, mesh_render_mutex);
+ drw_attributes_merge(&cache->attr_needed, &attrs_needed, me->runtime->render_mutex);
mesh_batch_cache_request_surface_batches(cache);
return cache->surface_per_mat;
}
@@ -1020,8 +1036,7 @@ GPUBatch *DRW_mesh_batch_cache_get_surface_vertpaint(Object *object, Mesh *me)
DRW_Attributes attrs_needed{};
request_active_and_default_color_attributes(*object, *me, attrs_needed);
- ThreadMutex *mesh_render_mutex = (ThreadMutex *)me->runtime.render_mutex;
- drw_attributes_merge(&cache->attr_needed, &attrs_needed, mesh_render_mutex);
+ drw_attributes_merge(&cache->attr_needed, &attrs_needed, me->runtime->render_mutex);
mesh_batch_cache_request_surface_batches(cache);
return cache->batch.surface;
@@ -1034,8 +1049,7 @@ GPUBatch *DRW_mesh_batch_cache_get_surface_sculpt(Object *object, Mesh *me)
DRW_Attributes attrs_needed{};
request_active_and_default_color_attributes(*object, *me, attrs_needed);
- ThreadMutex *mesh_render_mutex = (ThreadMutex *)me->runtime.render_mutex;
- drw_attributes_merge(&cache->attr_needed, &attrs_needed, mesh_render_mutex);
+ drw_attributes_merge(&cache->attr_needed, &attrs_needed, me->runtime->render_mutex);
mesh_batch_cache_request_surface_batches(cache);
return cache->batch.surface;
@@ -1057,6 +1071,16 @@ GPUBatch *DRW_mesh_batch_cache_get_sculpt_overlays(Mesh *me)
return cache->batch.sculpt_overlays;
}
+GPUBatch *DRW_mesh_batch_cache_get_surface_viewer_attribute(Mesh *me)
+{
+ MeshBatchCache *cache = mesh_batch_cache_get(me);
+
+ mesh_batch_cache_add_request(cache, MBC_VIEWER_ATTRIBUTE_OVERLAY);
+ DRW_batch_request(&cache->batch.surface_viewer_attribute);
+
+ return cache->batch.surface_viewer_attribute;
+}
+
/** \} */
/* ---------------------------------------------------------------------- */
@@ -1264,7 +1288,7 @@ GPUBatch *DRW_mesh_batch_cache_get_surface_edges(Object *object, Mesh *me)
void DRW_mesh_batch_cache_free_old(Mesh *me, int ctime)
{
- MeshBatchCache *cache = static_cast<MeshBatchCache *>(me->runtime.batch_cache);
+ MeshBatchCache *cache = static_cast<MeshBatchCache *>(me->runtime->batch_cache);
if (cache == nullptr) {
return;
@@ -1366,8 +1390,7 @@ void DRW_mesh_batch_cache_create_requested(struct TaskGraph *task_graph,
}
};
#else
- auto assert_deps_valid = [&](DRWBatchFlag UNUSED(batch_flag),
- Span<int> UNUSED(used_buffer_indices)) {};
+ auto assert_deps_valid = [&](DRWBatchFlag /*batch_flag*/, Span<int> /*used_buffer_indices*/) {};
#endif
@@ -1411,8 +1434,6 @@ void DRW_mesh_batch_cache_create_requested(struct TaskGraph *task_graph,
}
}
- ThreadMutex *mesh_render_mutex = (ThreadMutex *)me->runtime.render_mutex;
-
/* Verify that all surface batches have needed attribute layers.
*/
/* TODO(fclem): We could be a bit smarter here and only do it per
@@ -1450,12 +1471,13 @@ void DRW_mesh_batch_cache_create_requested(struct TaskGraph *task_graph,
cache->batch_ready &= ~(MBC_SURFACE);
mesh_cd_layers_type_merge(&cache->cd_used, cache->cd_needed);
- drw_attributes_merge(&cache->attr_used, &cache->attr_needed, mesh_render_mutex);
+ drw_attributes_merge(&cache->attr_used, &cache->attr_needed, me->runtime->render_mutex);
}
mesh_cd_layers_type_merge(&cache->cd_used_over_time, cache->cd_needed);
mesh_cd_layers_type_clear(&cache->cd_needed);
- drw_attributes_merge(&cache->attr_used_over_time, &cache->attr_needed, mesh_render_mutex);
+ drw_attributes_merge(
+ &cache->attr_used_over_time, &cache->attr_needed, me->runtime->render_mutex);
drw_attributes_clear(&cache->attr_needed);
}
@@ -1502,7 +1524,7 @@ void DRW_mesh_batch_cache_create_requested(struct TaskGraph *task_graph,
const bool do_update_sculpt_normals = ob->sculpt && ob->sculpt->pbvh;
if (do_update_sculpt_normals) {
Mesh *mesh = static_cast<Mesh *>(ob->data);
- BKE_pbvh_update_normals(ob->sculpt->pbvh, mesh->runtime.subdiv_ccg);
+ BKE_pbvh_update_normals(ob->sculpt->pbvh, mesh->runtime->subdiv_ccg);
}
cache->batch_ready |= batch_requested;
@@ -1513,8 +1535,8 @@ void DRW_mesh_batch_cache_create_requested(struct TaskGraph *task_graph,
Mesh *editmesh_eval_cage = BKE_object_get_editmesh_eval_cage(ob);
do_cage = editmesh_eval_final != editmesh_eval_cage;
- do_uvcage = !(editmesh_eval_final->runtime.is_original_bmesh &&
- editmesh_eval_final->runtime.wrapper_type == ME_WRAPPER_TYPE_BMESH);
+ do_uvcage = !(editmesh_eval_final->runtime->is_original_bmesh &&
+ editmesh_eval_final->runtime->wrapper_type == ME_WRAPPER_TYPE_BMESH);
}
const bool do_subdivision = BKE_subsurf_modifier_has_gpu_subdiv(me);
@@ -1802,6 +1824,14 @@ void DRW_mesh_batch_cache_create_requested(struct TaskGraph *task_graph,
DRW_vbo_request(cache->batch.edituv_fdots, &mbuflist->vbo.fdots_uv);
DRW_vbo_request(cache->batch.edituv_fdots, &mbuflist->vbo.fdots_edituv_data);
}
+ assert_deps_valid(
+ MBC_VIEWER_ATTRIBUTE_OVERLAY,
+ {BUFFER_INDEX(ibo.tris), BUFFER_INDEX(vbo.pos_nor), BUFFER_INDEX(vbo.attr_viewer)});
+ if (DRW_batch_requested(cache->batch.surface_viewer_attribute, GPU_PRIM_TRIS)) {
+ DRW_ibo_request(cache->batch.surface_viewer_attribute, &mbuflist->ibo.tris);
+ DRW_vbo_request(cache->batch.surface_viewer_attribute, &mbuflist->vbo.pos_nor);
+ DRW_vbo_request(cache->batch.surface_viewer_attribute, &mbuflist->vbo.attr_viewer);
+ }
#ifdef DEBUG
auto assert_final_deps_valid = [&](const int buffer_index) {
@@ -1833,6 +1863,7 @@ void DRW_mesh_batch_cache_create_requested(struct TaskGraph *task_graph,
for (const int i : IndexRange(GPU_MAX_ATTR)) {
assert_final_deps_valid(BUFFER_INDEX(vbo.attr[i]));
}
+ assert_final_deps_valid(BUFFER_INDEX(vbo.attr_viewer));
assert_final_deps_valid(BUFFER_INDEX(ibo.tris));
assert_final_deps_valid(BUFFER_INDEX(ibo.lines));
@@ -1858,7 +1889,7 @@ void DRW_mesh_batch_cache_create_requested(struct TaskGraph *task_graph,
is_editmode,
is_paint_mode,
is_mode_active,
- ob->obmat,
+ ob->object_to_world,
false,
true,
scene,
@@ -1875,7 +1906,7 @@ void DRW_mesh_batch_cache_create_requested(struct TaskGraph *task_graph,
is_editmode,
is_paint_mode,
is_mode_active,
- ob->obmat,
+ ob->object_to_world,
false,
false,
scene,
@@ -1891,7 +1922,7 @@ void DRW_mesh_batch_cache_create_requested(struct TaskGraph *task_graph,
is_editmode,
is_paint_mode,
is_mode_active,
- ob->obmat,
+ ob->object_to_world,
true,
false,
do_cage,
@@ -1912,7 +1943,7 @@ void DRW_mesh_batch_cache_create_requested(struct TaskGraph *task_graph,
is_editmode,
is_paint_mode,
is_mode_active,
- ob->obmat,
+ ob->object_to_world,
true,
false,
scene,
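
The DRW_mesh_get_attributes() helper added in this file only reports which generic attributes and CustomData layers a set of materials requires; it does not request any batches or touch the cache state. A hypothetical call site (not part of this patch):

DRW_Attributes attrs;
DRW_MeshCDMask cd_mask;
/* Either output pointer may be NULL if the caller needs only one of the results. */
DRW_mesh_get_attributes(ob, me, gpumat_array, gpumat_array_len, &attrs, &cd_mask);
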
diff --git a/source/blender/draw/intern/draw_cache_impl_particles.c b/source/blender/draw/intern/draw_cache_impl_particles.c
index 9c1784b1de2..cddab74c46f 100644
--- a/source/blender/draw/intern/draw_cache_impl_particles.c
+++ b/source/blender/draw/intern/draw_cache_impl_particles.c
@@ -173,13 +173,9 @@ static void particle_batch_cache_clear_hair(ParticleHairCache *hair_cache)
/* TODO: more granular update tagging. */
GPU_VERTBUF_DISCARD_SAFE(hair_cache->proc_point_buf);
GPU_VERTBUF_DISCARD_SAFE(hair_cache->proc_length_buf);
- DRW_TEXTURE_FREE_SAFE(hair_cache->point_tex);
- DRW_TEXTURE_FREE_SAFE(hair_cache->length_tex);
GPU_VERTBUF_DISCARD_SAFE(hair_cache->proc_strand_buf);
GPU_VERTBUF_DISCARD_SAFE(hair_cache->proc_strand_seg_buf);
- DRW_TEXTURE_FREE_SAFE(hair_cache->strand_tex);
- DRW_TEXTURE_FREE_SAFE(hair_cache->strand_seg_tex);
for (int i = 0; i < MAX_MTFACE; i++) {
GPU_VERTBUF_DISCARD_SAFE(hair_cache->proc_uv_buf[i]);
@@ -192,7 +188,6 @@ static void particle_batch_cache_clear_hair(ParticleHairCache *hair_cache)
for (int i = 0; i < MAX_HAIR_SUBDIV; i++) {
GPU_VERTBUF_DISCARD_SAFE(hair_cache->final[i].proc_buf);
- DRW_TEXTURE_FREE_SAFE(hair_cache->final[i].proc_tex);
for (int j = 0; j < MAX_THICKRES; j++) {
GPU_BATCH_DISCARD_SAFE(hair_cache->final[i].proc_hairs[j]);
}
@@ -810,7 +805,7 @@ static int particle_batch_cache_fill_strands_data(ParticleSystem *psys,
static void particle_batch_cache_ensure_procedural_final_points(ParticleHairCache *cache,
int subdiv)
{
- /* Same format as point_tex. */
+ /* Same format as proc_point_buf. */
GPUVertFormat format = {0};
GPU_vertformat_attr_add(&format, "pos", GPU_COMP_F32, 4, GPU_FETCH_FLOAT);
@@ -823,12 +818,6 @@ static void particle_batch_cache_ensure_procedural_final_points(ParticleHairCach
/* Those are points! not line segments. */
GPU_vertbuf_data_alloc(cache->final[subdiv].proc_buf,
cache->final[subdiv].strands_res * cache->strands_len);
-
- /* Create vbo immediately to bind to texture buffer. */
- GPU_vertbuf_use(cache->final[subdiv].proc_buf);
-
- cache->final[subdiv].proc_tex = GPU_texture_create_from_vertbuf("part_proc",
- cache->final[subdiv].proc_buf);
}
static void particle_batch_cache_ensure_procedural_strand_data(PTCacheEdit *edit,
@@ -1034,14 +1023,6 @@ static void particle_batch_cache_ensure_procedural_strand_data(PTCacheEdit *edit
MEM_freeN(parent_mcol);
}
- /* Create vbo immediately to bind to texture buffer. */
- GPU_vertbuf_use(cache->proc_strand_buf);
- cache->strand_tex = GPU_texture_create_from_vertbuf("part_strand", cache->proc_strand_buf);
-
- GPU_vertbuf_use(cache->proc_strand_seg_buf);
- cache->strand_seg_tex = GPU_texture_create_from_vertbuf("part_strand_seg",
- cache->proc_strand_seg_buf);
-
for (int i = 0; i < cache->num_uv_layers; i++) {
GPU_vertbuf_use(cache->proc_uv_buf[i]);
cache->uv_tex[i] = GPU_texture_create_from_vertbuf("part_uv", cache->proc_uv_buf[i]);
@@ -1107,7 +1088,7 @@ static void particle_batch_cache_ensure_procedural_indices(PTCacheEdit *edit,
static void particle_batch_cache_ensure_procedural_pos(PTCacheEdit *edit,
ParticleSystem *psys,
ParticleHairCache *cache,
- GPUMaterial *gpu_material)
+ GPUMaterial *UNUSED(gpu_material))
{
if (cache->proc_point_buf == NULL) {
/* initialize vertex format */
@@ -1149,22 +1130,6 @@ static void particle_batch_cache_ensure_procedural_pos(PTCacheEdit *edit,
psys->childcache, child_count, &pos_step, &length_step);
}
}
-
- /* Create vbo immediately to bind to texture buffer. */
- GPU_vertbuf_use(cache->proc_point_buf);
- cache->point_tex = GPU_texture_create_from_vertbuf("part_point", cache->proc_point_buf);
- }
-
- /* Checking hair length separately, only allocating gpu memory when needed. */
- if (gpu_material && cache->proc_length_buf != NULL && cache->length_tex == NULL) {
- ListBase gpu_attrs = GPU_material_attributes(gpu_material);
- LISTBASE_FOREACH (GPUMaterialAttribute *, attr, &gpu_attrs) {
- if (attr->type == CD_HAIRLENGTH) {
- GPU_vertbuf_use(cache->proc_length_buf);
- cache->length_tex = GPU_texture_create_from_vertbuf("hair_length", cache->proc_length_buf);
- break;
- }
- }
}
}
@@ -1381,7 +1346,7 @@ static void particle_batch_cache_ensure_pos(Object *object,
sim.ob = object;
sim.psys = psys;
sim.psmd = psys_get_modifier(object, psys);
- sim.psys->lattice_deform_data = psys_create_lattice_deform_data(&sim);
+ psys_sim_data_init(&sim);
GPU_VERTBUF_DISCARD_SAFE(point_cache->pos);
@@ -1427,6 +1392,8 @@ static void particle_batch_cache_ensure_pos(Object *object,
if (curr_point != psys->totpart) {
GPU_vertbuf_data_resize(point_cache->pos, curr_point);
}
+
+ psys_sim_data_free(&sim);
}
static void drw_particle_update_ptcache_edit(Object *object_eval,
@@ -1722,7 +1689,7 @@ bool particles_ensure_procedural_data(Object *object,
/* Refreshed on combing and simulation. */
if ((*r_hair_cache)->proc_point_buf == NULL ||
- (gpu_material && (*r_hair_cache)->length_tex == NULL)) {
+ (gpu_material && (*r_hair_cache)->proc_length_buf == NULL)) {
ensure_seg_pt_count(source.edit, source.psys, &cache->hair);
particle_batch_cache_ensure_procedural_pos(
source.edit, source.psys, &cache->hair, gpu_material);
@@ -1730,7 +1697,7 @@ bool particles_ensure_procedural_data(Object *object,
}
/* Refreshed if active layer or custom data changes. */
- if ((*r_hair_cache)->strand_tex == NULL) {
+ if ((*r_hair_cache)->proc_strand_buf == NULL) {
particle_batch_cache_ensure_procedural_strand_data(
source.edit, source.psys, source.md, &cache->hair);
}
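
The hunks above drop the wrapper buffer textures (point_tex, strand_tex, length_tex, ...); with them gone, the VBO pointers themselves indicate whether the procedural data has been built. A sketch of the resulting rebuild test, written as an assumed helper rather than code from the patch:

static bool hair_procedural_data_missing(const ParticleHairCache *cache,
                                         const GPUMaterial *gpu_material)
{
  if (cache->proc_point_buf == NULL) {
    return true;
  }
  /* Hair length data is only required when a material actually samples it. */
  return gpu_material != NULL && cache->proc_length_buf == NULL;
}
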
diff --git a/source/blender/draw/intern/draw_cache_impl_pointcloud.cc b/source/blender/draw/intern/draw_cache_impl_pointcloud.cc
index a43b23c8969..ddbfe232361 100644
--- a/source/blender/draw/intern/draw_cache_impl_pointcloud.cc
+++ b/source/blender/draw/intern/draw_cache_impl_pointcloud.cc
@@ -7,7 +7,7 @@
* \brief PointCloud API for render engines
*/
-#include <string.h>
+#include <cstring>
#include "MEM_guardedalloc.h"
@@ -23,28 +23,69 @@
#include "BKE_pointcloud.h"
#include "GPU_batch.h"
+#include "GPU_material.h"
-#include "draw_cache_impl.h" /* own include */
+#include "draw_attributes.h"
+#include "draw_cache_impl.h"
+#include "draw_cache_inline.h"
+#include "draw_pointcloud_private.hh" /* own include */
-/* ---------------------------------------------------------------------- */
-/* PointCloud GPUBatch Cache */
+using namespace blender;
-struct PointCloudBatchCache {
- GPUVertBuf *pos; /* Position and radius. */
- GPUVertBuf *geom; /* Instanced geometry for each point in the cloud (small sphere). */
- GPUIndexBuf *geom_indices;
+/** \} */
+/* -------------------------------------------------------------------- */
+/** \name GPUBatch cache management
+ * \{ */
+
+struct PointCloudEvalCache {
+ /* Dot primitive types. */
GPUBatch *dots;
+ /* Triangle primitive types. */
GPUBatch *surface;
GPUBatch **surface_per_mat;
- /* settings to determine if cache is invalid */
- bool is_dirty;
+ /* Triangles indices to draw the points. */
+ GPUIndexBuf *geom_indices;
+
+ /* Position and radius. */
+ GPUVertBuf *pos_rad;
+ /* Active attribute in 3D view. */
+ GPUVertBuf *attr_viewer;
+ /* Requested attributes */
+ GPUVertBuf *attributes_buf[GPU_MAX_ATTR];
+
+ /** Attributes currently being drawn or about to be drawn. */
+ DRW_Attributes attr_used;
+ /**
+ * Attributes that were used at some point. This is used for garbage collection, to remove
+ * attributes that are not used in shaders anymore due to user edits.
+ */
+ DRW_Attributes attr_used_over_time;
+
+ /**
+ * The last time in seconds that the `attr_used` and `attr_used_over_time` were exactly the same.
+ * If the delta between this time and the current scene time is greater than the timeout set in
+ * user preferences (`U.vbotimeout`) then garbage collection is performed.
+ */
+ int last_attr_matching_time;
int mat_len;
};
-/* GPUBatch cache management. */
+struct PointCloudBatchCache {
+ PointCloudEvalCache eval_cache;
+
+ /* settings to determine if cache is invalid */
+ bool is_dirty;
+
+ /**
+ * The draw cache extraction is currently not multi-threaded for multiple objects, but if it were,
+ * some locking would be necessary because multiple objects can use the same object data with
+ * different materials, etc. This is a placeholder to make multi-threading easier in the future.
+ */
+ std::mutex render_mutex;
+};
static PointCloudBatchCache *pointcloud_batch_cache_get(PointCloud &pointcloud)
{
@@ -58,7 +99,7 @@ static bool pointcloud_batch_cache_valid(PointCloud &pointcloud)
if (cache == nullptr) {
return false;
}
- if (cache->mat_len != DRW_pointcloud_material_count_get(&pointcloud)) {
+ if (cache->eval_cache.mat_len != DRW_pointcloud_material_count_get(&pointcloud)) {
return false;
}
return cache->is_dirty == false;
@@ -69,16 +110,16 @@ static void pointcloud_batch_cache_init(PointCloud &pointcloud)
PointCloudBatchCache *cache = pointcloud_batch_cache_get(pointcloud);
if (!cache) {
- cache = MEM_cnew<PointCloudBatchCache>(__func__);
+ cache = MEM_new<PointCloudBatchCache>(__func__);
pointcloud.batch_cache = cache;
}
else {
- memset(cache, 0, sizeof(*cache));
+ cache->eval_cache = {};
}
- cache->mat_len = DRW_pointcloud_material_count_get(&pointcloud);
- cache->surface_per_mat = static_cast<GPUBatch **>(
- MEM_callocN(sizeof(GPUBatch *) * cache->mat_len, __func__));
+ cache->eval_cache.mat_len = DRW_pointcloud_material_count_get(&pointcloud);
+ cache->eval_cache.surface_per_mat = static_cast<GPUBatch **>(
+ MEM_callocN(sizeof(GPUBatch *) * cache->eval_cache.mat_len, __func__));
cache->is_dirty = false;
}
@@ -98,6 +139,15 @@ void DRW_pointcloud_batch_cache_dirty_tag(PointCloud *pointcloud, int mode)
}
}
+static void pointcloud_discard_attributes(PointCloudBatchCache &cache)
+{
+ for (const int j : IndexRange(GPU_MAX_ATTR)) {
+ GPU_VERTBUF_DISCARD_SAFE(cache.eval_cache.attributes_buf[j]);
+ }
+
+ drw_attributes_clear(&cache.eval_cache.attr_used);
+}
+
static void pointcloud_batch_cache_clear(PointCloud &pointcloud)
{
PointCloudBatchCache *cache = pointcloud_batch_cache_get(pointcloud);
@@ -105,18 +155,20 @@ static void pointcloud_batch_cache_clear(PointCloud &pointcloud)
return;
}
- GPU_BATCH_DISCARD_SAFE(cache->dots);
- GPU_BATCH_DISCARD_SAFE(cache->surface);
- GPU_VERTBUF_DISCARD_SAFE(cache->pos);
- GPU_VERTBUF_DISCARD_SAFE(cache->geom);
- GPU_INDEXBUF_DISCARD_SAFE(cache->geom_indices);
+ GPU_BATCH_DISCARD_SAFE(cache->eval_cache.dots);
+ GPU_BATCH_DISCARD_SAFE(cache->eval_cache.surface);
+ GPU_VERTBUF_DISCARD_SAFE(cache->eval_cache.pos_rad);
+ GPU_VERTBUF_DISCARD_SAFE(cache->eval_cache.attr_viewer);
+ GPU_INDEXBUF_DISCARD_SAFE(cache->eval_cache.geom_indices);
- if (cache->surface_per_mat) {
- for (int i = 0; i < cache->mat_len; i++) {
- GPU_BATCH_DISCARD_SAFE(cache->surface_per_mat[i]);
+ if (cache->eval_cache.surface_per_mat) {
+ for (int i = 0; i < cache->eval_cache.mat_len; i++) {
+ GPU_BATCH_DISCARD_SAFE(cache->eval_cache.surface_per_mat[i]);
}
}
- MEM_SAFE_FREE(cache->surface_per_mat);
+ MEM_SAFE_FREE(cache->eval_cache.surface_per_mat);
+
+ pointcloud_discard_attributes(*cache);
}
void DRW_pointcloud_batch_cache_validate(PointCloud *pointcloud)
@@ -133,32 +185,86 @@ void DRW_pointcloud_batch_cache_free(PointCloud *pointcloud)
MEM_SAFE_FREE(pointcloud->batch_cache);
}
-static void pointcloud_batch_cache_ensure_pos(const PointCloud &pointcloud,
- PointCloudBatchCache &cache)
+void DRW_pointcloud_batch_cache_free_old(PointCloud *pointcloud, int ctime)
{
- using namespace blender;
- if (cache.pos != nullptr) {
+ PointCloudBatchCache *cache = pointcloud_batch_cache_get(*pointcloud);
+ if (!cache) {
return;
}
+ bool do_discard = false;
+
+ if (drw_attributes_overlap(&cache->eval_cache.attr_used_over_time,
+ &cache->eval_cache.attr_used)) {
+ cache->eval_cache.last_attr_matching_time = ctime;
+ }
+
+ if (ctime - cache->eval_cache.last_attr_matching_time > U.vbotimeout) {
+ do_discard = true;
+ }
+
+ drw_attributes_clear(&cache->eval_cache.attr_used_over_time);
+
+ if (do_discard) {
+ pointcloud_discard_attributes(*cache);
+ }
+}
+
+/** \} */
+
+/* -------------------------------------------------------------------- */
+/** \name PointCloud extraction
+ * \{ */
+
+static const uint half_octahedron_tris[4][3] = {
+ {0, 1, 2},
+ {0, 2, 3},
+ {0, 3, 4},
+ {0, 4, 1},
+};
+
+static void pointcloud_extract_indices(const PointCloud &pointcloud, PointCloudBatchCache &cache)
+{
+ /* NOTE: Avoid modulo by non-power-of-two in shader. */
+ uint32_t vertid_max = pointcloud.totpoint * 32;
+ uint32_t index_len = pointcloud.totpoint * ARRAY_SIZE(half_octahedron_tris);
+
+ GPUIndexBufBuilder builder;
+ GPU_indexbuf_init(&builder, GPU_PRIM_TRIS, index_len, vertid_max);
+
+ for (int p = 0; p < pointcloud.totpoint; p++) {
+ for (int i = 0; i < ARRAY_SIZE(half_octahedron_tris); i++) {
+ GPU_indexbuf_add_tri_verts(&builder,
+ half_octahedron_tris[i][0] + p * 32,
+ half_octahedron_tris[i][1] + p * 32,
+ half_octahedron_tris[i][2] + p * 32);
+ }
+ }
+
+ GPU_indexbuf_build_in_place(&builder, cache.eval_cache.geom_indices);
+}
+
+static void pointcloud_extract_position_and_radius(const PointCloud &pointcloud,
+ PointCloudBatchCache &cache)
+{
+ using namespace blender;
+
const bke::AttributeAccessor attributes = pointcloud.attributes();
const VArraySpan<float3> positions = attributes.lookup<float3>("position", ATTR_DOMAIN_POINT);
const VArray<float> radii = attributes.lookup<float>("radius", ATTR_DOMAIN_POINT);
- /* From the opengl wiki:
- * Note that size does not have to exactly match the size used by the vertex shader. If the
- * vertex shader has fewer components than the attribute provides, then the extras are ignored.
- * If the vertex shader has more components than the array provides, the extras are given
- * values from the vector (0, 0, 0, 1) for the missing XYZW components. */
+ static GPUVertFormat format = {0};
+ if (format.attr_len == 0) {
+ GPU_vertformat_attr_add(&format, "pos", GPU_COMP_F32, 4, GPU_FETCH_FLOAT);
+ }
+
+ GPUUsageType usage_flag = GPU_USAGE_STATIC | GPU_USAGE_FLAG_BUFFER_TEXTURE_ONLY;
+ GPU_vertbuf_init_with_format_ex(cache.eval_cache.pos_rad, &format, usage_flag);
+
+ GPU_vertbuf_data_alloc(cache.eval_cache.pos_rad, positions.size());
+ MutableSpan<float4> vbo_data{
+ static_cast<float4 *>(GPU_vertbuf_get_data(cache.eval_cache.pos_rad)), pointcloud.totpoint};
if (radii) {
- static GPUVertFormat format = {0};
- if (format.attr_len == 0) {
- GPU_vertformat_attr_add(&format, "pos", GPU_COMP_F32, 4, GPU_FETCH_FLOAT);
- }
- cache.pos = GPU_vertbuf_create_with_format(&format);
- GPU_vertbuf_data_alloc(cache.pos, positions.size());
const VArraySpan<float> radii_span(radii);
- MutableSpan<float4> vbo_data{static_cast<float4 *>(GPU_vertbuf_get_data(cache.pos)),
- pointcloud.totpoint};
threading::parallel_for(vbo_data.index_range(), 4096, [&](IndexRange range) {
for (const int i : range) {
vbo_data[i].x = positions[i].x;
@@ -170,113 +276,183 @@ static void pointcloud_batch_cache_ensure_pos(const PointCloud &pointcloud,
});
}
else {
- static GPUVertFormat format = {0};
- static uint pos;
- if (format.attr_len == 0) {
- pos = GPU_vertformat_attr_add(&format, "pos", GPU_COMP_F32, 3, GPU_FETCH_FLOAT);
- }
- cache.pos = GPU_vertbuf_create_with_format(&format);
- GPU_vertbuf_data_alloc(cache.pos, positions.size());
- GPU_vertbuf_attr_fill(cache.pos, pos, positions.data());
+ threading::parallel_for(vbo_data.index_range(), 4096, [&](IndexRange range) {
+ for (const int i : range) {
+ vbo_data[i].x = positions[i].x;
+ vbo_data[i].y = positions[i].y;
+ vbo_data[i].z = positions[i].z;
+ vbo_data[i].w = 1.0f;
+ }
+ });
}
}
-static const float half_octahedron_normals[5][3] = {
- {0.0f, 0.0f, 1.0f},
- {1.0f, 0.0f, 0.0f},
- {0.0f, 1.0f, 0.0f},
- {-1.0f, 0.0f, 0.0f},
- {0.0f, -1.0f, 0.0f},
-};
+static void pointcloud_extract_attribute(const PointCloud &pointcloud,
+ PointCloudBatchCache &cache,
+ const DRW_AttributeRequest &request,
+ int index)
+{
+ using namespace blender;
-static const uint half_octahedron_tris[4][3] = {
- {0, 1, 2},
- {0, 2, 3},
- {0, 3, 4},
- {0, 4, 1},
-};
+ GPUVertBuf *&attr_buf = cache.eval_cache.attributes_buf[index];
-static void pointcloud_batch_cache_ensure_geom(PointCloudBatchCache &cache)
-{
- if (cache.geom != nullptr) {
- return;
- }
+ const bke::AttributeAccessor attributes = pointcloud.attributes();
+
+ /* TODO(@kevindietrich): float4 is used for scalar attributes as the implicit conversion done
+ * by OpenGL to vec4 for a scalar `s` will produce a `vec4(s, 0, 0, 1)`. However, following
+ * the Blender convention, it should be `vec4(s, s, s, 1)`. This could be resolved using a
+ * similar texture state swizzle to map the attribute correctly as for volume attributes, so we
+ * can control the conversion ourselves. */
+ VArray<ColorGeometry4f> attribute = attributes.lookup_or_default<ColorGeometry4f>(
+ request.attribute_name, request.domain, {0.0f, 0.0f, 0.0f, 1.0f});
static GPUVertFormat format = {0};
- static uint pos;
if (format.attr_len == 0) {
- pos = GPU_vertformat_attr_add(&format, "pos_inst", GPU_COMP_F32, 3, GPU_FETCH_FLOAT);
- GPU_vertformat_alias_add(&format, "nor");
+ GPU_vertformat_attr_add(&format, "attr", GPU_COMP_F32, 4, GPU_FETCH_FLOAT);
}
+ GPUUsageType usage_flag = GPU_USAGE_STATIC | GPU_USAGE_FLAG_BUFFER_TEXTURE_ONLY;
+ GPU_vertbuf_init_with_format_ex(attr_buf, &format, usage_flag);
+ GPU_vertbuf_data_alloc(attr_buf, pointcloud.totpoint);
- cache.geom = GPU_vertbuf_create_with_format(&format);
- GPU_vertbuf_data_alloc(cache.geom, ARRAY_SIZE(half_octahedron_normals));
-
- GPU_vertbuf_attr_fill(cache.geom, pos, half_octahedron_normals);
+ MutableSpan<ColorGeometry4f> vbo_data{
+ static_cast<ColorGeometry4f *>(GPU_vertbuf_get_data(attr_buf)), pointcloud.totpoint};
+ attribute.materialize(vbo_data);
+}
- GPUIndexBufBuilder builder;
- GPU_indexbuf_init(&builder,
- GPU_PRIM_TRIS,
- ARRAY_SIZE(half_octahedron_tris),
- ARRAY_SIZE(half_octahedron_normals));
+/** \} */
- for (int i = 0; i < ARRAY_SIZE(half_octahedron_tris); i++) {
- GPU_indexbuf_add_tri_verts(&builder, UNPACK3(half_octahedron_tris[i]));
- }
+/* -------------------------------------------------------------------- */
+/** \name Private API
+ * \{ */
- cache.geom_indices = GPU_indexbuf_build(&builder);
+GPUVertBuf *pointcloud_position_and_radius_get(PointCloud *pointcloud)
+{
+ PointCloudBatchCache *cache = pointcloud_batch_cache_get(*pointcloud);
+ DRW_vbo_request(nullptr, &cache->eval_cache.pos_rad);
+ return cache->eval_cache.pos_rad;
}
-GPUBatch *DRW_pointcloud_batch_cache_get_dots(Object *ob)
+GPUBatch **pointcloud_surface_shaded_get(PointCloud *pointcloud,
+ GPUMaterial **gpu_materials,
+ int mat_len)
{
- PointCloud &pointcloud = *static_cast<PointCloud *>(ob->data);
- PointCloudBatchCache *cache = pointcloud_batch_cache_get(pointcloud);
+ PointCloudBatchCache *cache = pointcloud_batch_cache_get(*pointcloud);
+ DRW_Attributes attrs_needed;
+ drw_attributes_clear(&attrs_needed);
+
+ for (GPUMaterial *gpu_material : Span<GPUMaterial *>(gpu_materials, mat_len)) {
+ ListBase gpu_attrs = GPU_material_attributes(gpu_material);
+ LISTBASE_FOREACH (GPUMaterialAttribute *, gpu_attr, &gpu_attrs) {
+ const char *name = gpu_attr->name;
+
+ int layer_index;
+ eCustomDataType type;
+ eAttrDomain domain = ATTR_DOMAIN_POINT;
+ if (!drw_custom_data_match_attribute(&pointcloud->pdata, name, &layer_index, &type)) {
+ continue;
+ }
+
+ drw_attributes_add_request(&attrs_needed, name, type, layer_index, domain);
+ }
+ }
- if (cache->dots == nullptr) {
- pointcloud_batch_cache_ensure_pos(pointcloud, *cache);
- cache->dots = GPU_batch_create(GPU_PRIM_POINTS, cache->pos, nullptr);
+ if (!drw_attributes_overlap(&cache->eval_cache.attr_used, &attrs_needed)) {
+ /* Some new attributes have been added, free all and start over. */
+ for (const int i : IndexRange(GPU_MAX_ATTR)) {
+ GPU_VERTBUF_DISCARD_SAFE(cache->eval_cache.attributes_buf[i]);
+ }
+ drw_attributes_merge(&cache->eval_cache.attr_used, &attrs_needed, cache->render_mutex);
}
+ drw_attributes_merge(&cache->eval_cache.attr_used_over_time, &attrs_needed, cache->render_mutex);
- return cache->dots;
+ DRW_batch_request(&cache->eval_cache.surface_per_mat[0]);
+ return cache->eval_cache.surface_per_mat;
}
-GPUBatch *DRW_pointcloud_batch_cache_get_surface(Object *ob)
+GPUBatch *pointcloud_surface_get(PointCloud *pointcloud)
{
- PointCloud &pointcloud = *static_cast<PointCloud *>(ob->data);
- PointCloudBatchCache *cache = pointcloud_batch_cache_get(pointcloud);
-
- if (cache->surface == nullptr) {
- pointcloud_batch_cache_ensure_pos(pointcloud, *cache);
- pointcloud_batch_cache_ensure_geom(*cache);
+ PointCloudBatchCache *cache = pointcloud_batch_cache_get(*pointcloud);
+ return DRW_batch_request(&cache->eval_cache.surface);
+}
- cache->surface = GPU_batch_create(GPU_PRIM_TRIS, cache->geom, cache->geom_indices);
- GPU_batch_instbuf_add_ex(cache->surface, cache->pos, false);
- }
+/** \} */
- return cache->surface;
-}
+/* -------------------------------------------------------------------- */
+/** \name API
+ * \{ */
-GPUBatch **DRW_cache_pointcloud_surface_shaded_get(Object *ob,
- struct GPUMaterial **UNUSED(gpumat_array),
- uint gpumat_array_len)
+GPUBatch *DRW_pointcloud_batch_cache_get_dots(Object *ob)
{
PointCloud &pointcloud = *static_cast<PointCloud *>(ob->data);
PointCloudBatchCache *cache = pointcloud_batch_cache_get(pointcloud);
- BLI_assert(cache->mat_len == gpumat_array_len);
- UNUSED_VARS(gpumat_array_len);
-
- if (cache->surface_per_mat[0] == nullptr) {
- pointcloud_batch_cache_ensure_pos(pointcloud, *cache);
- pointcloud_batch_cache_ensure_geom(*cache);
+ return DRW_batch_request(&cache->eval_cache.dots);
+}
- cache->surface_per_mat[0] = GPU_batch_create(GPU_PRIM_TRIS, cache->geom, cache->geom_indices);
- GPU_batch_instbuf_add_ex(cache->surface_per_mat[0], cache->pos, false);
+GPUVertBuf **DRW_pointcloud_evaluated_attribute(PointCloud *pointcloud, const char *name)
+{
+ PointCloudBatchCache &cache = *pointcloud_batch_cache_get(*pointcloud);
+
+ int layer_index;
+ eCustomDataType type;
+ eAttrDomain domain = ATTR_DOMAIN_POINT;
+ if (drw_custom_data_match_attribute(&pointcloud->pdata, name, &layer_index, &type)) {
+ DRW_Attributes attributes{};
+ drw_attributes_add_request(&attributes, name, type, layer_index, domain);
+ drw_attributes_merge(&cache.eval_cache.attr_used, &attributes, cache.render_mutex);
}
- return cache->surface_per_mat;
+ int request_i = -1;
+ for (const int i : IndexRange(cache.eval_cache.attr_used.num_requests)) {
+ if (STREQ(cache.eval_cache.attr_used.requests[i].attribute_name, name)) {
+ request_i = i;
+ break;
+ }
+ }
+ if (request_i == -1) {
+ return nullptr;
+ }
+ return &cache.eval_cache.attributes_buf[request_i];
}
int DRW_pointcloud_material_count_get(PointCloud *pointcloud)
{
return max_ii(1, pointcloud->totcol);
}
+
+void DRW_pointcloud_batch_cache_create_requested(Object *ob)
+{
+ PointCloud *pointcloud = static_cast<PointCloud *>(ob->data);
+ PointCloudBatchCache &cache = *pointcloud_batch_cache_get(*pointcloud);
+
+ if (DRW_batch_requested(cache.eval_cache.dots, GPU_PRIM_POINTS)) {
+ DRW_vbo_request(cache.eval_cache.dots, &cache.eval_cache.pos_rad);
+ }
+
+ if (DRW_batch_requested(cache.eval_cache.surface, GPU_PRIM_TRIS)) {
+ DRW_ibo_request(cache.eval_cache.surface, &cache.eval_cache.geom_indices);
+ DRW_vbo_request(cache.eval_cache.surface, &cache.eval_cache.pos_rad);
+ }
+ for (int i = 0; i < cache.eval_cache.mat_len; i++) {
+ if (DRW_batch_requested(cache.eval_cache.surface_per_mat[i], GPU_PRIM_TRIS)) {
+ /* TODO(fclem): Per material ranges. */
+ DRW_ibo_request(cache.eval_cache.surface_per_mat[i], &cache.eval_cache.geom_indices);
+ }
+ }
+ for (int j = 0; j < cache.eval_cache.attr_used.num_requests; j++) {
+ DRW_vbo_request(nullptr, &cache.eval_cache.attributes_buf[j]);
+
+ if (DRW_vbo_requested(cache.eval_cache.attributes_buf[j])) {
+ pointcloud_extract_attribute(*pointcloud, cache, cache.eval_cache.attr_used.requests[j], j);
+ }
+ }
+
+ if (DRW_ibo_requested(cache.eval_cache.geom_indices)) {
+ pointcloud_extract_indices(*pointcloud, cache);
+ }
+
+ if (DRW_vbo_requested(cache.eval_cache.pos_rad)) {
+ pointcloud_extract_position_and_radius(*pointcloud, cache);
+ }
+}
+
+/** \} */
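For context, `DRW_pointcloud_batch_cache_create_requested` follows the draw manager's usual two-phase scheme: during scene sync an engine only requests a batch (cheap), and at the end of sync the cache module converts batch requests into buffer requests and extracts only the buffers that were actually asked for. A hedged sketch of engine-side usage, not part of this patch; the callback names are illustrative and the shading-group setup is elided.

/* Sketch only: how an engine would typically consume the API above. */
static void my_engine_cache_populate(DRWShadingGroup *shgrp, Object *ob)
{
  PointCloud *pointcloud = static_cast<PointCloud *>(ob->data);
  /* Only records the request; no GPU data is built yet. */
  GPUBatch *surface = pointcloud_surface_get(pointcloud);
  DRW_shgroup_call(shgrp, surface, ob);
}

static void my_engine_cache_finish(Object *ob)
{
  /* Builds `pos_rad` and the index buffer only if something requested them. */
  DRW_pointcloud_batch_cache_create_requested(ob);
}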
diff --git a/source/blender/draw/intern/draw_cache_impl_subdivision.cc b/source/blender/draw/intern/draw_cache_impl_subdivision.cc
index ab935809f96..6a9e6c126e9 100644
--- a/source/blender/draw/intern/draw_cache_impl_subdivision.cc
+++ b/source/blender/draw/intern/draw_cache_impl_subdivision.cc
@@ -361,14 +361,14 @@ static GPUVertFormat *get_subdiv_vertex_format()
struct CompressedPatchCoord {
int ptex_face_index;
/* UV coordinate encoded as u << 16 | v, where u and v are quantized on 16-bits. */
- unsigned int encoded_uv;
+ uint encoded_uv;
};
MINLINE CompressedPatchCoord make_patch_coord(int ptex_face_index, float u, float v)
{
CompressedPatchCoord patch_coord = {
ptex_face_index,
- (static_cast<unsigned int>(u * 65535.0f) << 16) | static_cast<unsigned int>(v * 65535.0f),
+ (uint(u * 65535.0f) << 16) | uint(v * 65535.0f),
};
return patch_coord;
}
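The encoding above packs two 16-bit quantized UV values into a single `uint`; the inverse mapping is the usual unpack-and-normalize. A small sketch of the corresponding decode (not part of the patch), assuming the same 65535 scale:

/* Sketch: inverse of make_patch_coord()'s UV packing. */
static void decode_patch_uv(uint encoded_uv, float *r_u, float *r_v)
{
  *r_u = float(encoded_uv >> 16) / 65535.0f;
  *r_v = float(encoded_uv & 0xFFFFu) / 65535.0f;
}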
@@ -635,17 +635,9 @@ void draw_subdiv_cache_free(DRWSubdivCache *cache)
SUBDIV_COARSE_FACE_FLAG_ACTIVE | SUBDIV_COARSE_FACE_FLAG_HIDDEN) \
<< SUBDIV_COARSE_FACE_FLAG_OFFSET)
-static uint32_t compute_coarse_face_flag(BMFace *f, BMFace *efa_act)
+static uint32_t compute_coarse_face_flag_bm(BMFace *f, BMFace *efa_act)
{
- if (f == nullptr) {
- /* May happen during mapped extraction. */
- return 0;
- }
-
uint32_t flag = 0;
- if (BM_elem_flag_test(f, BM_ELEM_SMOOTH)) {
- flag |= SUBDIV_COARSE_FACE_FLAG_SMOOTH;
- }
if (BM_elem_flag_test(f, BM_ELEM_SELECT)) {
flag |= SUBDIV_COARSE_FACE_FLAG_SELECT;
}
@@ -655,8 +647,7 @@ static uint32_t compute_coarse_face_flag(BMFace *f, BMFace *efa_act)
if (f == efa_act) {
flag |= SUBDIV_COARSE_FACE_FLAG_ACTIVE;
}
- const int loopstart = BM_elem_index_get(f->l_first);
- return (uint)(loopstart) | (flag << SUBDIV_COARSE_FACE_FLAG_OFFSET);
+ return flag;
}
static void draw_subdiv_cache_extra_coarse_face_data_bm(BMesh *bm,
@@ -668,7 +659,12 @@ static void draw_subdiv_cache_extra_coarse_face_data_bm(BMesh *bm,
BM_ITER_MESH (f, &iter, bm, BM_FACES_OF_MESH) {
const int index = BM_elem_index_get(f);
- flags_data[index] = compute_coarse_face_flag(f, efa_act);
+ uint32_t flag = compute_coarse_face_flag_bm(f, efa_act);
+ if (BM_elem_flag_test(f, BM_ELEM_SMOOTH)) {
+ flag |= SUBDIV_COARSE_FACE_FLAG_SMOOTH;
+ }
+ const int loopstart = BM_elem_index_get(f->l_first);
+ flags_data[index] = uint(loopstart) | (flag << SUBDIV_COARSE_FACE_FLAG_OFFSET);
}
}
@@ -682,13 +678,13 @@ static void draw_subdiv_cache_extra_coarse_face_data_mesh(const MeshRenderData *
if ((polys[i].flag & ME_SMOOTH) != 0) {
flag |= SUBDIV_COARSE_FACE_FLAG_SMOOTH;
}
- if ((polys[i].flag & ME_FACE_SEL) != 0) {
+ if (mr->select_poly && mr->select_poly[i]) {
flag |= SUBDIV_COARSE_FACE_FLAG_SELECT;
}
if (mr->hide_poly && mr->hide_poly[i]) {
flag |= SUBDIV_COARSE_FACE_FLAG_HIDDEN;
}
- flags_data[i] = (uint)(polys[i].loopstart) | (flag << SUBDIV_COARSE_FACE_FLAG_OFFSET);
+ flags_data[i] = uint(polys[i].loopstart) | (flag << SUBDIV_COARSE_FACE_FLAG_OFFSET);
}
}
@@ -702,9 +698,16 @@ static void draw_subdiv_cache_extra_coarse_face_data_mapped(Mesh *mesh,
return;
}
- for (int i = 0; i < mesh->totpoly; i++) {
+ const Span<MPoly> polys = mesh->polys();
+ for (const int i : polys.index_range()) {
BMFace *f = bm_original_face_get(mr, i);
- flags_data[i] = compute_coarse_face_flag(f, mr->efa_act);
+ /* Selection and hiding from bmesh. */
+ uint32_t flag = (f) ? compute_coarse_face_flag_bm(f, mr->efa_act) : 0;
+ /* Smooth from mesh. */
+ if ((polys[i].flag & ME_SMOOTH) != 0) {
+ flag |= SUBDIV_COARSE_FACE_FLAG_SMOOTH;
+ }
+ flags_data[i] = uint(polys[i].loopstart) | (flag << SUBDIV_COARSE_FACE_FLAG_OFFSET);
}
}
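All three code paths above now write the same packed layout: the face's loop start index in the low bits and the per-face flags shifted up by `SUBDIV_COARSE_FACE_FLAG_OFFSET`. A sketch of how such a word can be unpacked; the mask derivation is an assumption based on that layout, not code from this patch.

/* Sketch: split a packed coarse-face word back into loop start and flags.
 * Assumes the loop start always fits below SUBDIV_COARSE_FACE_FLAG_OFFSET bits. */
static void unpack_coarse_face(uint32_t packed, uint32_t *r_loop_start, uint32_t *r_flags)
{
  const uint32_t loop_start_mask = (1u << SUBDIV_COARSE_FACE_FLAG_OFFSET) - 1u;
  *r_loop_start = packed & loop_start_mask;
  *r_flags = packed >> SUBDIV_COARSE_FACE_FLAG_OFFSET;
}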
@@ -724,7 +727,7 @@ static void draw_subdiv_cache_update_extra_coarse_face_data(DRWSubdivCache *cach
mesh->totpoly);
}
- uint32_t *flags_data = (uint32_t *)(GPU_vertbuf_get_data(cache->extra_coarse_face_data));
+ uint32_t *flags_data = (uint32_t *)GPU_vertbuf_get_data(cache->extra_coarse_face_data);
if (mr->extract_type == MR_EXTRACT_BMESH) {
draw_subdiv_cache_extra_coarse_face_data_bm(cache->bm, mr->efa_act, flags_data);
@@ -825,10 +828,10 @@ static bool draw_subdiv_topology_info_cb(const SubdivForeachContext *foreach_con
/* Set topology information only if we have loops. */
if (num_loops != 0) {
- cache->num_subdiv_edges = (uint)num_edges;
- cache->num_subdiv_loops = (uint)num_loops;
- cache->num_subdiv_verts = (uint)num_verts;
- cache->num_subdiv_quads = (uint)num_polys;
+ cache->num_subdiv_edges = uint(num_edges);
+ cache->num_subdiv_loops = uint(num_loops);
+ cache->num_subdiv_verts = uint(num_verts);
+ cache->num_subdiv_quads = uint(num_polys);
cache->subdiv_polygon_offset = static_cast<int *>(MEM_dupallocN(subdiv_polygon_offset));
}
@@ -900,13 +903,13 @@ static bool draw_subdiv_topology_info_cb(const SubdivForeachContext *foreach_con
}
static void draw_subdiv_vertex_corner_cb(const SubdivForeachContext *foreach_context,
- void *UNUSED(tls),
- const int UNUSED(ptex_face_index),
- const float UNUSED(u),
- const float UNUSED(v),
+ void * /*tls*/,
+ const int /*ptex_face_index*/,
+ const float /*u*/,
+ const float /*v*/,
const int coarse_vertex_index,
- const int UNUSED(coarse_poly_index),
- const int UNUSED(coarse_corner),
+ const int /*coarse_poly_index*/,
+ const int /*coarse_corner*/,
const int subdiv_vertex_index)
{
BLI_assert(coarse_vertex_index != ORIGINDEX_NONE);
@@ -914,26 +917,26 @@ static void draw_subdiv_vertex_corner_cb(const SubdivForeachContext *foreach_con
ctx->vert_origindex_map[subdiv_vertex_index] = coarse_vertex_index;
}
-static void draw_subdiv_vertex_edge_cb(const SubdivForeachContext *UNUSED(foreach_context),
- void *UNUSED(tls_v),
- const int UNUSED(ptex_face_index),
- const float UNUSED(u),
- const float UNUSED(v),
- const int UNUSED(coarse_edge_index),
- const int UNUSED(coarse_poly_index),
- const int UNUSED(coarse_corner),
- const int UNUSED(subdiv_vertex_index))
+static void draw_subdiv_vertex_edge_cb(const SubdivForeachContext * /*foreach_context*/,
+ void * /*tls_v*/,
+ const int /*ptex_face_index*/,
+ const float /*u*/,
+ const float /*v*/,
+ const int /*coarse_edge_index*/,
+ const int /*coarse_poly_index*/,
+ const int /*coarse_corner*/,
+ const int /*subdiv_vertex_index*/)
{
/* Required if SubdivForeachContext.vertex_corner is also set. */
}
static void draw_subdiv_edge_cb(const SubdivForeachContext *foreach_context,
- void *UNUSED(tls),
+ void * /*tls*/,
const int coarse_edge_index,
const int subdiv_edge_index,
- const bool UNUSED(is_loose),
- const int UNUSED(subdiv_v1),
- const int UNUSED(subdiv_v2))
+ const bool /*is_loose*/,
+ const int /*subdiv_v1*/,
+ const int /*subdiv_v2*/)
{
DRWCacheBuildingContext *ctx = (DRWCacheBuildingContext *)(foreach_context->user_data);
@@ -953,13 +956,13 @@ static void draw_subdiv_edge_cb(const SubdivForeachContext *foreach_context,
}
static void draw_subdiv_loop_cb(const SubdivForeachContext *foreach_context,
- void *UNUSED(tls_v),
+ void * /*tls_v*/,
const int ptex_face_index,
const float u,
const float v,
- const int UNUSED(coarse_loop_index),
+ const int /*coarse_loop_index*/,
const int coarse_poly_index,
- const int UNUSED(coarse_corner),
+ const int /*coarse_corner*/,
const int subdiv_loop_index,
const int subdiv_vertex_index,
const int subdiv_edge_index)
@@ -1292,7 +1295,7 @@ static void drw_subdiv_compute_dispatch(const DRWSubdivCache *cache,
uint total_dispatch_size,
const bool has_sculpt_mask = false)
{
- const uint max_res_x = static_cast<uint>(GPU_max_work_group_count(0));
+ const uint max_res_x = uint(GPU_max_work_group_count(0));
const uint dispatch_size = get_dispatch_size(total_dispatch_size);
uint dispatch_rx = dispatch_size;
@@ -1315,7 +1318,7 @@ static void drw_subdiv_compute_dispatch(const DRWSubdivCache *cache,
/* X and Y dimensions may have different limits so the above computation may not be right, but
* even with the standard 64k minimum on all dimensions we still have a lot of room. Therefore,
* we presume it all fits. */
- BLI_assert(dispatch_ry < static_cast<uint>(GPU_max_work_group_count(1)));
+ BLI_assert(dispatch_ry < uint(GPU_max_work_group_count(1)));
draw_subdiv_ubo_update_and_bind(
cache, shader, src_offset, dst_offset, total_dispatch_size, has_sculpt_mask);
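`drw_subdiv_compute_dispatch` clamps the X dimension to `GPU_max_work_group_count(0)` and pushes any overflow into Y; the trailing comment notes that the 64k-per-dimension minimum leaves ample room. A sketch of the general splitting scheme, under the assumption that a plain ceiling division is acceptable; the patch's own helper may compute the split differently.

/* Sketch: split a 1D dispatch count into (x, y) under a per-dimension limit. */
static void split_dispatch_size(uint total, uint max_x, uint *r_x, uint *r_y)
{
  if (total <= max_x) {
    *r_x = total;
    *r_y = 1u;
    return;
  }
  *r_x = max_x;
  *r_y = (total + max_x - 1u) / max_x; /* Ceiling division, so x * y >= total. */
}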
@@ -2033,7 +2036,7 @@ static bool draw_subdiv_create_requested_buffers(Object *ob,
const bool use_hide,
OpenSubdiv_EvaluatorCache *evaluator_cache)
{
- SubsurfRuntimeData *runtime_data = mesh->runtime.subsurf_runtime_data;
+ SubsurfRuntimeData *runtime_data = mesh->runtime->subsurf_runtime_data;
BLI_assert(runtime_data && runtime_data->has_gpu_subdiv);
if (runtime_data->settings.level == 0) {
@@ -2134,7 +2137,7 @@ void DRW_subdivide_loose_geom(DRWSubdivCache *subdiv_cache, MeshBufferCache *cac
const bool is_simple = subdiv_cache->subdiv->settings.is_simple;
const int resolution = subdiv_cache->resolution;
const int resolution_1 = resolution - 1;
- const float inv_resolution_1 = 1.0f / (float)resolution_1;
+ const float inv_resolution_1 = 1.0f / float(resolution_1);
const int num_subdiv_vertices_per_coarse_edge = resolution - 2;
const int num_subdivided_edge = coarse_loose_edge_len *
@@ -2228,13 +2231,13 @@ void DRW_subdivide_loose_geom(DRWSubdivCache *subdiv_cache, MeshBufferCache *cac
blender::Span<DRWSubdivLooseEdge> draw_subdiv_cache_get_loose_edges(const DRWSubdivCache *cache)
{
- return {cache->loose_geom.edges, static_cast<int64_t>(cache->loose_geom.edge_len)};
+ return {cache->loose_geom.edges, int64_t(cache->loose_geom.edge_len)};
}
blender::Span<DRWSubdivLooseVertex> draw_subdiv_cache_get_loose_verts(const DRWSubdivCache *cache)
{
return {cache->loose_geom.verts + cache->loose_geom.edge_len * 2,
- static_cast<int64_t>(cache->loose_geom.vert_len)};
+ int64_t(cache->loose_geom.vert_len)};
}
static OpenSubdiv_EvaluatorCache *g_evaluator_cache = nullptr;
diff --git a/source/blender/draw/intern/draw_cache_impl_volume.c b/source/blender/draw/intern/draw_cache_impl_volume.cc
index 18a4c81514b..ac5e6fa05b9 100644
--- a/source/blender/draw/intern/draw_cache_impl_volume.c
+++ b/source/blender/draw/intern/draw_cache_impl_volume.cc
@@ -7,7 +7,7 @@
* \brief Volume API for render engines
*/
-#include <string.h>
+#include <cstring>
#include "MEM_guardedalloc.h"
@@ -39,7 +39,7 @@ static void volume_batch_cache_clear(Volume *volume);
/* ---------------------------------------------------------------------- */
/* Volume GPUBatch Cache */
-typedef struct VolumeBatchCache {
+struct VolumeBatchCache {
/* 3D textures */
ListBase grids;
@@ -54,22 +54,22 @@ typedef struct VolumeBatchCache {
/* settings to determine if cache is invalid */
bool is_dirty;
-} VolumeBatchCache;
+};
/* GPUBatch cache management. */
static bool volume_batch_cache_valid(Volume *volume)
{
- VolumeBatchCache *cache = volume->batch_cache;
+ VolumeBatchCache *cache = static_cast<VolumeBatchCache *>(volume->batch_cache);
return (cache && cache->is_dirty == false);
}
static void volume_batch_cache_init(Volume *volume)
{
- VolumeBatchCache *cache = volume->batch_cache;
+ VolumeBatchCache *cache = static_cast<VolumeBatchCache *>(volume->batch_cache);
if (!cache) {
- cache = volume->batch_cache = MEM_callocN(sizeof(*cache), __func__);
+ volume->batch_cache = cache = MEM_cnew<VolumeBatchCache>(__func__);
}
else {
memset(cache, 0, sizeof(*cache));
@@ -89,13 +89,13 @@ void DRW_volume_batch_cache_validate(Volume *volume)
static VolumeBatchCache *volume_batch_cache_get(Volume *volume)
{
DRW_volume_batch_cache_validate(volume);
- return volume->batch_cache;
+ return static_cast<VolumeBatchCache *>(volume->batch_cache);
}
void DRW_volume_batch_cache_dirty_tag(Volume *volume, int mode)
{
- VolumeBatchCache *cache = volume->batch_cache;
- if (cache == NULL) {
+ VolumeBatchCache *cache = static_cast<VolumeBatchCache *>(volume->batch_cache);
+ if (cache == nullptr) {
return;
}
switch (mode) {
@@ -109,7 +109,7 @@ void DRW_volume_batch_cache_dirty_tag(Volume *volume, int mode)
static void volume_batch_cache_clear(Volume *volume)
{
- VolumeBatchCache *cache = volume->batch_cache;
+ VolumeBatchCache *cache = static_cast<VolumeBatchCache *>(volume->batch_cache);
if (!cache) {
return;
}
@@ -130,18 +130,18 @@ void DRW_volume_batch_cache_free(Volume *volume)
volume_batch_cache_clear(volume);
MEM_SAFE_FREE(volume->batch_cache);
}
-typedef struct VolumeWireframeUserData {
+struct VolumeWireframeUserData {
Volume *volume;
Scene *scene;
-} VolumeWireframeUserData;
+};
static void drw_volume_wireframe_cb(
void *userdata, const float (*verts)[3], const int (*edges)[2], int totvert, int totedge)
{
- VolumeWireframeUserData *data = userdata;
+ VolumeWireframeUserData *data = static_cast<VolumeWireframeUserData *>(userdata);
Scene *scene = data->scene;
Volume *volume = data->volume;
- VolumeBatchCache *cache = volume->batch_cache;
+ VolumeBatchCache *cache = static_cast<VolumeBatchCache *>(volume->batch_cache);
const bool do_hq_normals = (scene->r.perf_flag & SCE_PERF_HQ_NORMALS) != 0 ||
GPU_use_hq_normals_workaround();
@@ -181,7 +181,7 @@ static void drw_volume_wireframe_cb(
if (volume->display.wireframe_type == VOLUME_WIREFRAME_POINTS) {
/* Create batch. */
cache->face_wire.batch = GPU_batch_create(
- GPU_PRIM_POINTS, cache->face_wire.pos_nor_in_order, NULL);
+ GPU_PRIM_POINTS, cache->face_wire.pos_nor_in_order, nullptr);
}
else {
/* Create edge index buffer. */
@@ -203,15 +203,15 @@ static void drw_volume_wireframe_cb(
GPUBatch *DRW_volume_batch_cache_get_wireframes_face(Volume *volume)
{
if (volume->display.wireframe_type == VOLUME_WIREFRAME_NONE) {
- return NULL;
+ return nullptr;
}
VolumeBatchCache *cache = volume_batch_cache_get(volume);
- if (cache->face_wire.batch == NULL) {
+ if (cache->face_wire.batch == nullptr) {
const VolumeGrid *volume_grid = BKE_volume_grid_active_get_for_read(volume);
- if (volume_grid == NULL) {
- return NULL;
+ if (volume_grid == nullptr) {
+ return nullptr;
}
/* Create wireframe from OpenVDB tree. */
@@ -228,8 +228,8 @@ GPUBatch *DRW_volume_batch_cache_get_wireframes_face(Volume *volume)
static void drw_volume_selection_surface_cb(
void *userdata, float (*verts)[3], int (*tris)[3], int totvert, int tottris)
{
- Volume *volume = userdata;
- VolumeBatchCache *cache = volume->batch_cache;
+ Volume *volume = static_cast<Volume *>(userdata);
+ VolumeBatchCache *cache = static_cast<VolumeBatchCache *>(volume->batch_cache);
static GPUVertFormat format = {0};
static uint pos_id;
@@ -257,10 +257,10 @@ static void drw_volume_selection_surface_cb(
GPUBatch *DRW_volume_batch_cache_get_selection_surface(Volume *volume)
{
VolumeBatchCache *cache = volume_batch_cache_get(volume);
- if (cache->selection_surface == NULL) {
+ if (cache->selection_surface == nullptr) {
const VolumeGrid *volume_grid = BKE_volume_grid_active_get_for_read(volume);
- if (volume_grid == NULL) {
- return NULL;
+ if (volume_grid == nullptr) {
+ return nullptr;
}
BKE_volume_grid_selection_surface(
volume, volume_grid, drw_volume_selection_surface_cb, volume);
@@ -275,15 +275,14 @@ static DRWVolumeGrid *volume_grid_cache_get(const Volume *volume,
const char *name = BKE_volume_grid_name(grid);
/* Return cached grid. */
- DRWVolumeGrid *cache_grid;
- for (cache_grid = cache->grids.first; cache_grid; cache_grid = cache_grid->next) {
+ LISTBASE_FOREACH (DRWVolumeGrid *, cache_grid, &cache->grids) {
if (STREQ(cache_grid->name, name)) {
return cache_grid;
}
}
/* Allocate new grid. */
- cache_grid = MEM_callocN(sizeof(DRWVolumeGrid), __func__);
+ DRWVolumeGrid *cache_grid = MEM_cnew<DRWVolumeGrid>(__func__);
cache_grid->name = BLI_strdup(name);
BLI_addtail(&cache->grids, cache_grid);
@@ -316,7 +315,7 @@ static DRWVolumeGrid *volume_grid_cache_get(const Volume *volume,
dense_grid.voxels);
/* The texture can be null if the resolution along one axis is larger than
* GL_MAX_3D_TEXTURE_SIZE. */
- if (cache_grid->texture != NULL) {
+ if (cache_grid->texture != nullptr) {
GPU_texture_swizzle_set(cache_grid->texture, (channels == 3) ? "rgb1" : "rrr1");
GPU_texture_wrap_mode(cache_grid->texture, false, false);
BKE_volume_dense_float_grid_clear(&dense_grid);
@@ -339,7 +338,7 @@ DRWVolumeGrid *DRW_volume_batch_cache_get_grid(Volume *volume, const VolumeGrid
{
VolumeBatchCache *cache = volume_batch_cache_get(volume);
DRWVolumeGrid *grid = volume_grid_cache_get(volume, volume_grid, cache);
- return (grid->texture != NULL) ? grid : NULL;
+ return (grid->texture != nullptr) ? grid : nullptr;
}
int DRW_volume_material_count_get(Volume *volume)
diff --git a/source/blender/draw/intern/draw_command.cc b/source/blender/draw/intern/draw_command.cc
index ff69885b3b6..6e999815e8d 100644
--- a/source/blender/draw/intern/draw_command.cc
+++ b/source/blender/draw/intern/draw_command.cc
@@ -30,6 +30,11 @@ void ShaderBind::execute(RecordingState &state) const
}
}
+void FramebufferBind::execute() const
+{
+ GPU_framebuffer_bind(framebuffer);
+}
+
void ResourceBind::execute() const
{
if (slot == -1) {
@@ -90,7 +95,7 @@ void DrawMulti::execute(RecordingState &state) const
DrawMultiBuf::DrawGroupBuf &groups = multi_draw_buf->group_buf_;
uint group_index = this->group_first;
- while (group_index != (uint)-1) {
+ while (group_index != uint(-1)) {
const DrawGroup &group = groups[group_index];
if (group.vertex_len > 0) {
@@ -161,7 +166,10 @@ void StateSet::execute(RecordingState &recording_state) const
*/
BLI_assert(DST.state_lock == 0);
- if (!assign_if_different(recording_state.pipeline_state, new_state)) {
+ bool state_changed = assign_if_different(recording_state.pipeline_state, new_state);
+ bool clip_changed = assign_if_different(recording_state.clip_plane_count, clip_plane_count);
+
+ if (!state_changed && !clip_changed) {
return;
}
@@ -185,12 +193,7 @@ void StateSet::execute(RecordingState &recording_state) const
}
/* TODO: this should be part of shader state. */
- if (new_state & DRW_STATE_CLIP_PLANES) {
- GPU_clip_distances(recording_state.view_clip_plane_count);
- }
- else {
- GPU_clip_distances(0);
- }
+ GPU_clip_distances(recording_state.clip_plane_count);
if (new_state & DRW_STATE_IN_FRONT_SELECT) {
/* XXX `GPU_depth_range` is not a perfect solution
@@ -229,6 +232,11 @@ std::string ShaderBind::serialize() const
return std::string(".shader_bind(") + GPU_shader_get_name(shader) + ")";
}
+std::string FramebufferBind::serialize() const
+{
+ return std::string(".framebuffer_bind(") + GPU_framebuffer_get_name(framebuffer) + ")";
+}
+
std::string ResourceBind::serialize() const
{
switch (type) {
@@ -345,9 +353,9 @@ std::string PushConstant::serialize() const
std::string Draw::serialize() const
{
- std::string inst_len = (instance_len == (uint)-1) ? "from_batch" : std::to_string(instance_len);
- std::string vert_len = (vertex_len == (uint)-1) ? "from_batch" : std::to_string(vertex_len);
- std::string vert_first = (vertex_first == (uint)-1) ? "from_batch" :
+ std::string inst_len = (instance_len == uint(-1)) ? "from_batch" : std::to_string(instance_len);
+ std::string vert_len = (vertex_len == uint(-1)) ? "from_batch" : std::to_string(vertex_len);
+ std::string vert_first = (vertex_first == uint(-1)) ? "from_batch" :
std::to_string(vertex_first);
return std::string(".draw(inst_len=") + inst_len + ", vert_len=" + vert_len +
", vert_first=" + vert_first + ", res_id=" + std::to_string(handle.resource_index()) +
@@ -379,7 +387,7 @@ std::string DrawMulti::serialize(std::string line_prefix) const
uint group_len = 0;
uint group_index = this->group_first;
- while (group_index != (uint)-1) {
+ while (group_index != uint(-1)) {
const DrawGroup &grp = groups[group_index];
ss << std::endl << line_prefix << " .group(id=" << group_index << ", len=" << grp.len << ")";
@@ -505,7 +513,7 @@ void DrawCommandBuf::bind(RecordingState &state,
* instance to set the correct resource_id. Workaround is a storage_buf + gl_InstanceID. */
BLI_assert(batch_inst_len == 1);
- if (cmd.vertex_len == (uint)-1) {
+ if (cmd.vertex_len == uint(-1)) {
cmd.vertex_len = batch_vert_len;
}
diff --git a/source/blender/draw/intern/draw_command.hh b/source/blender/draw/intern/draw_command.hh
index 46a9199a267..12c9916ee6d 100644
--- a/source/blender/draw/intern/draw_command.hh
+++ b/source/blender/draw/intern/draw_command.hh
@@ -39,7 +39,7 @@ struct RecordingState {
bool front_facing = true;
bool inverted_view = false;
DRWState pipeline_state = DRW_STATE_NO_DRAW;
- int view_clip_plane_count = 0;
+ int clip_plane_count = 0;
/** Used for gl_BaseInstance workaround. */
GPUStorageBuf *resource_id_buf = nullptr;
@@ -88,6 +88,7 @@ enum class Type : uint8_t {
DispatchIndirect,
Draw,
DrawIndirect,
+ FramebufferBind,
PushConstant,
ResourceBind,
ShaderBind,
@@ -118,6 +119,13 @@ struct ShaderBind {
std::string serialize() const;
};
+struct FramebufferBind {
+ GPUFrameBuffer *framebuffer;
+
+ void execute() const;
+ std::string serialize() const;
+};
+
struct ResourceBind {
eGPUSamplerState sampler;
int slot;
@@ -317,6 +325,7 @@ struct Clear {
struct StateSet {
DRWState new_state;
+ int clip_plane_count;
void execute(RecordingState &state) const;
std::string serialize() const;
@@ -387,7 +396,7 @@ class DrawCommandBuf {
instance_len = instance_len != -1 ? instance_len : 1;
int64_t index = commands.append_and_get_index({});
- headers.append({Type::Draw, static_cast<uint>(index)});
+ headers.append({Type::Draw, uint(index)});
commands[index].draw = {batch, instance_len, vertex_len, vertex_first, handle};
}
@@ -473,10 +482,8 @@ class DrawMultiBuf {
uint vertex_first,
ResourceHandle handle)
{
- /* Unsupported for now. Use PassSimple. */
- BLI_assert(vertex_first == 0 || vertex_first == -1);
- BLI_assert(vertex_len == -1);
- UNUSED_VARS_NDEBUG(vertex_len, vertex_first);
+ /* Custom draw-calls cannot be batched and will produce one group per draw. */
+ const bool custom_group = ((vertex_first != 0 && vertex_first != -1) || vertex_len != -1);
instance_len = instance_len != -1 ? instance_len : 1;
@@ -489,12 +496,18 @@ class DrawMultiBuf {
DrawMulti &cmd = commands.last().draw_multi;
- uint &group_id = group_ids_.lookup_or_add(DrawGroupKey(cmd.uuid, batch), (uint)-1);
+ uint &group_id = group_ids_.lookup_or_add(DrawGroupKey(cmd.uuid, batch), uint(-1));
bool inverted = handle.has_inverted_handedness();
- if (group_id == (uint)-1) {
+ DrawPrototype &draw = prototype_buf_.get_or_resize(prototype_count_++);
+ draw.resource_handle = handle.raw;
+ draw.instance_len = instance_len;
+ draw.group_id = group_id;
+
+ if (group_id == uint(-1) || custom_group) {
uint new_group_id = group_count_++;
+ draw.group_id = new_group_id;
DrawGroup &group = group_buf_.get_or_resize(new_group_id);
group.next = cmd.group_first;
@@ -503,11 +516,16 @@ class DrawMultiBuf {
group.gpu_batch = batch;
group.front_proto_len = 0;
group.back_proto_len = 0;
+ group.vertex_len = vertex_len;
+ group.vertex_first = vertex_first;
+ /* Custom groups are not to be registered in `group_ids_`. */
+ if (!custom_group) {
+ group_id = new_group_id;
+ }
/* For serialization only. */
(inverted ? group.back_proto_len : group.front_proto_len)++;
/* Append to list. */
cmd.group_first = new_group_id;
- group_id = new_group_id;
}
else {
DrawGroup &group = group_buf_[group_id];
@@ -516,11 +534,6 @@ class DrawMultiBuf {
/* For serialization only. */
(inverted ? group.back_proto_len : group.front_proto_len)++;
}
-
- DrawPrototype &draw = prototype_buf_.get_or_resize(prototype_count_++);
- draw.group_id = group_id;
- draw.resource_handle = handle.raw;
- draw.instance_len = instance_len;
}
void bind(RecordingState &state,
diff --git a/source/blender/draw/intern/draw_common.c b/source/blender/draw/intern/draw_common.c
index de56b34df78..f951b702403 100644
--- a/source/blender/draw/intern/draw_common.c
+++ b/source/blender/draw/intern/draw_common.c
@@ -172,20 +172,14 @@ void DRW_globals_update(void)
/* M_SQRT2 to be at least the same size of the old square */
gb->size_vertex = U.pixelsize *
- (max_ff(1.0f, UI_GetThemeValuef(TH_VERTEX_SIZE) * (float)M_SQRT2 / 2.0f));
+ max_ff(1.0f, UI_GetThemeValuef(TH_VERTEX_SIZE) * (float)M_SQRT2 / 2.0f);
gb->size_vertex_gpencil = U.pixelsize * UI_GetThemeValuef(TH_GP_VERTEX_SIZE);
gb->size_face_dot = U.pixelsize * UI_GetThemeValuef(TH_FACEDOT_SIZE);
gb->size_edge = U.pixelsize * (1.0f / 2.0f); /* TODO: Theme. */
gb->size_edge_fix = U.pixelsize * (0.5f + 2.0f * (2.0f * (gb->size_edge * (float)M_SQRT1_2)));
- const float(*screen_vecs)[3] = (float(*)[3])DRW_viewport_screenvecs_get();
- for (int i = 0; i < 2; i++) {
- copy_v3_v3(gb->screen_vecs[i], screen_vecs[i]);
- }
-
gb->pixel_fac = *DRW_viewport_pixelsize_get();
- /* Deprecated, use drw_view.viewport_size instead */
copy_v2_v2(&gb->size_viewport[0], DRW_viewport_size_get());
copy_v2_v2(&gb->size_viewport[2], &gb->size_viewport[0]);
invert_v2(&gb->size_viewport[2]);
@@ -280,10 +274,11 @@ int DRW_object_wire_theme_get(Object *ob, ViewLayer *view_layer, float **r_color
{
const DRWContextState *draw_ctx = DRW_context_state_get();
const bool is_edit = (draw_ctx->object_mode & OB_MODE_EDIT) && (ob->mode & OB_MODE_EDIT);
- const bool active = view_layer->basact &&
- ((ob->base_flag & BASE_FROM_DUPLI) ?
- (DRW_object_get_dupli_parent(ob) == view_layer->basact->object) :
- (view_layer->basact->object == ob));
+ BKE_view_layer_synced_ensure(draw_ctx->scene, view_layer);
+ const Base *base = BKE_view_layer_active_base_get(view_layer);
+ const bool active = base && ((ob->base_flag & BASE_FROM_DUPLI) ?
+ (DRW_object_get_dupli_parent(ob) == base->object) :
+ (base->object == ob));
/* confusing logic here, there are 2 methods of setting the color
* 'colortab[colindex]' and 'theme_id', colindex overrides theme_id.
diff --git a/source/blender/draw/intern/draw_common.h b/source/blender/draw/intern/draw_common.h
index d31c98e0dee..07d245e7dfe 100644
--- a/source/blender/draw/intern/draw_common.h
+++ b/source/blender/draw/intern/draw_common.h
@@ -88,6 +88,14 @@ void DRW_curves_ubos_pool_free(struct CurvesUniformBufPool *pool);
void DRW_curves_update(void);
void DRW_curves_free(void);
+/* draw_pointcloud.cc */
+
+struct DRWShadingGroup *DRW_shgroup_pointcloud_create_sub(struct Object *object,
+ struct DRWShadingGroup *shgrp_parent,
+ struct GPUMaterial *gpu_material);
+void DRW_pointcloud_init(void);
+void DRW_pointcloud_free(void);
+
/* draw_volume.cc */
/**
@@ -132,6 +140,7 @@ struct DRW_Global {
struct GPUTexture *weight_ramp;
struct GPUUniformBuf *view_ubo;
+ struct GPUUniformBuf *clipping_ubo;
};
extern struct DRW_Global G_draw;
diff --git a/source/blender/draw/intern/draw_common_shader_shared.h b/source/blender/draw/intern/draw_common_shader_shared.h
index 57cb7880ce6..9a5fce52c1e 100644
--- a/source/blender/draw/intern/draw_common_shader_shared.h
+++ b/source/blender/draw/intern/draw_common_shader_shared.h
@@ -124,8 +124,7 @@ struct GlobalsUboStorage {
float4 color_uv_shadow;
/* NOTE: Put all color before #UBO_LAST_COLOR. */
- float4 screen_vecs[2]; /* Padded as vec4. */
- float4 size_viewport; /* Packed as vec4. */
+ float4 size_viewport; /* Packed as vec4. */
/* Pack individual float at the end of the buffer to avoid alignment errors */
float size_pixel, pixel_fac;
@@ -228,8 +227,8 @@ BLI_STATIC_ASSERT_ALIGN(GlobalsUboStorage, 16)
# define colorFaceBack globalsBlock.color_face_back
# define colorFaceFront globalsBlock.color_face_front
# define colorUVShadow globalsBlock.color_uv_shadow
-# define screenVecs globalsBlock.screen_vecs
# define sizeViewport globalsBlock.size_viewport.xy
+# define sizeViewportInv globalsBlock.size_viewport.zw
# define sizePixel globalsBlock.size_pixel
# define pixelFac globalsBlock.pixel_fac
# define sizeObjectCenter globalsBlock.size_object_center
diff --git a/source/blender/draw/intern/draw_curves.cc b/source/blender/draw/intern/draw_curves.cc
index a61769e7a63..ee9ed4666e0 100644
--- a/source/blender/draw/intern/draw_curves.cc
+++ b/source/blender/draw/intern/draw_curves.cc
@@ -129,25 +129,25 @@ void DRW_curves_ubos_pool_free(CurvesUniformBufPool *pool)
static void drw_curves_cache_shgrp_attach_resources(DRWShadingGroup *shgrp,
CurvesEvalCache *cache,
- GPUTexture *tex,
+ GPUVertBuf *point_buf,
const int subdiv)
{
- DRW_shgroup_uniform_texture(shgrp, "hairPointBuffer", tex);
- DRW_shgroup_uniform_texture(shgrp, "hairStrandBuffer", cache->strand_tex);
- DRW_shgroup_uniform_texture(shgrp, "hairStrandSegBuffer", cache->strand_seg_tex);
+ DRW_shgroup_buffer_texture(shgrp, "hairPointBuffer", point_buf);
+ DRW_shgroup_buffer_texture(shgrp, "hairStrandBuffer", cache->proc_strand_buf);
+ DRW_shgroup_buffer_texture(shgrp, "hairStrandSegBuffer", cache->proc_strand_seg_buf);
DRW_shgroup_uniform_int(shgrp, "hairStrandsRes", &cache->final[subdiv].strands_res, 1);
}
static void drw_curves_cache_update_compute(CurvesEvalCache *cache,
const int subdiv,
const int strands_len,
- GPUVertBuf *buffer,
- GPUTexture *tex)
+ GPUVertBuf *output_buf,
+ GPUVertBuf *input_buf)
{
GPUShader *shader = curves_eval_shader_get(CURVES_EVAL_CATMULL_ROM);
DRWShadingGroup *shgrp = DRW_shgroup_create(shader, g_tf_pass);
- drw_curves_cache_shgrp_attach_resources(shgrp, cache, tex, subdiv);
- DRW_shgroup_vertex_buffer(shgrp, "posTime", buffer);
+ drw_curves_cache_shgrp_attach_resources(shgrp, cache, input_buf, subdiv);
+ DRW_shgroup_vertex_buffer(shgrp, "posTime", output_buf);
const int max_strands_per_call = GPU_max_work_group_count(0);
int strands_start = 0;
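After attaching the input and output buffers, the update loop (mostly outside this hunk) dispatches the evaluation in chunks of at most `GPU_max_work_group_count(0)` strands. A sketch of that chunking, with the actual dispatch call stubbed out since it is not shown in this diff:

/* Sketch: walk the strands in chunks that respect the work-group count limit.
 * Requires <algorithm> for std::min. */
static void dispatch_in_chunks(int strands_len, int max_strands_per_call)
{
  int strands_start = 0;
  while (strands_start < strands_len) {
    const int batch_len = std::min(strands_len - strands_start, max_strands_per_call);
    /* ... issue one compute / procedural call covering
     *     [strands_start, strands_start + batch_len) ... */
    strands_start += batch_len;
  }
}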
@@ -169,7 +169,7 @@ static void drw_curves_cache_update_compute(CurvesEvalCache *cache, const int su
}
drw_curves_cache_update_compute(
- cache, subdiv, strands_len, cache->final[subdiv].proc_buf, cache->point_tex);
+ cache, subdiv, strands_len, cache->final[subdiv].proc_buf, cache->proc_point_buf);
const DRW_Attributes &attrs = cache->final[subdiv].attr_used;
for (int i = 0; i < attrs.num_requests; i++) {
@@ -182,13 +182,13 @@ static void drw_curves_cache_update_compute(CurvesEvalCache *cache, const int su
subdiv,
strands_len,
cache->final[subdiv].attributes_buf[i],
- cache->proc_attributes_tex[i]);
+ cache->proc_attributes_buf[i]);
}
}
static void drw_curves_cache_update_transform_feedback(CurvesEvalCache *cache,
- GPUVertBuf *vbo,
- GPUTexture *tex,
+ GPUVertBuf *output_buf,
+ GPUVertBuf *input_buf,
const int subdiv,
const int final_points_len)
{
@@ -196,14 +196,14 @@ static void drw_curves_cache_update_transform_feedback(CurvesEvalCache *cache,
DRWShadingGroup *tf_shgrp = nullptr;
if (GPU_transform_feedback_support()) {
- tf_shgrp = DRW_shgroup_transform_feedback_create(tf_shader, g_tf_pass, vbo);
+ tf_shgrp = DRW_shgroup_transform_feedback_create(tf_shader, g_tf_pass, output_buf);
}
else {
tf_shgrp = DRW_shgroup_create(tf_shader, g_tf_pass);
CurvesEvalCall *pr_call = MEM_new<CurvesEvalCall>(__func__);
pr_call->next = g_tf_calls;
- pr_call->vbo = vbo;
+ pr_call->vbo = output_buf;
pr_call->shgrp = tf_shgrp;
pr_call->vert_len = final_points_len;
g_tf_calls = pr_call;
@@ -213,7 +213,7 @@ static void drw_curves_cache_update_transform_feedback(CurvesEvalCache *cache,
}
BLI_assert(tf_shgrp != nullptr);
- drw_curves_cache_shgrp_attach_resources(tf_shgrp, cache, tex, subdiv);
+ drw_curves_cache_shgrp_attach_resources(tf_shgrp, cache, input_buf, subdiv);
DRW_shgroup_call_procedural_points(tf_shgrp, nullptr, final_points_len);
}
@@ -225,7 +225,7 @@ static void drw_curves_cache_update_transform_feedback(CurvesEvalCache *cache, c
}
drw_curves_cache_update_transform_feedback(
- cache, cache->final[subdiv].proc_buf, cache->point_tex, subdiv, final_points_len);
+ cache, cache->final[subdiv].proc_buf, cache->proc_point_buf, subdiv, final_points_len);
const DRW_Attributes &attrs = cache->final[subdiv].attr_used;
for (int i = 0; i < attrs.num_requests; i++) {
@@ -236,7 +236,7 @@ static void drw_curves_cache_update_transform_feedback(CurvesEvalCache *cache, c
drw_curves_cache_update_transform_feedback(cache,
cache->final[subdiv].attributes_buf[i],
- cache->proc_attributes_tex[i],
+ cache->proc_attributes_buf[i],
subdiv,
final_points_len);
}
@@ -346,9 +346,9 @@ DRWShadingGroup *DRW_shgroup_curves_create_sub(Object *object,
1.0f);
}
- DRW_shgroup_uniform_texture(shgrp, "hairPointBuffer", curves_cache->final[subdiv].proc_tex);
- if (curves_cache->length_tex) {
- DRW_shgroup_uniform_texture(shgrp, "hairLen", curves_cache->length_tex);
+ DRW_shgroup_buffer_texture(shgrp, "hairPointBuffer", curves_cache->final[subdiv].proc_buf);
+ if (curves_cache->proc_length_buf) {
+ DRW_shgroup_buffer_texture(shgrp, "hairLen", curves_cache->proc_length_buf);
}
const DRW_Attributes &attrs = curves_cache->final[subdiv].attr_used;
@@ -359,18 +359,18 @@ DRWShadingGroup *DRW_shgroup_curves_create_sub(Object *object,
drw_curves_get_attribute_sampler_name(request.attribute_name, sampler_name);
if (request.domain == ATTR_DOMAIN_CURVE) {
- if (!curves_cache->proc_attributes_tex[i]) {
+ if (!curves_cache->proc_attributes_buf[i]) {
continue;
}
- DRW_shgroup_uniform_texture(shgrp, sampler_name, curves_cache->proc_attributes_tex[i]);
+ DRW_shgroup_buffer_texture(shgrp, sampler_name, curves_cache->proc_attributes_buf[i]);
}
else {
- if (!curves_cache->final[subdiv].attributes_tex[i]) {
+ if (!curves_cache->final[subdiv].attributes_buf[i]) {
continue;
}
- DRW_shgroup_uniform_texture(
- shgrp, sampler_name, curves_cache->final[subdiv].attributes_tex[i]);
+ DRW_shgroup_buffer_texture(
+ shgrp, sampler_name, curves_cache->final[subdiv].attributes_buf[i]);
}
/* Some attributes may not be used in the shader anymore and were not garbage collected yet, so
@@ -390,10 +390,15 @@ DRWShadingGroup *DRW_shgroup_curves_create_sub(Object *object,
DRW_shgroup_uniform_int(shgrp, "hairStrandsRes", &curves_cache->final[subdiv].strands_res, 1);
DRW_shgroup_uniform_int_copy(shgrp, "hairThicknessRes", thickness_res);
DRW_shgroup_uniform_float_copy(shgrp, "hairRadShape", hair_rad_shape);
- DRW_shgroup_uniform_mat4_copy(shgrp, "hairDupliMatrix", object->obmat);
+ DRW_shgroup_uniform_mat4_copy(shgrp, "hairDupliMatrix", object->object_to_world);
DRW_shgroup_uniform_float_copy(shgrp, "hairRadRoot", hair_rad_root);
DRW_shgroup_uniform_float_copy(shgrp, "hairRadTip", hair_rad_tip);
DRW_shgroup_uniform_bool_copy(shgrp, "hairCloseTip", hair_close_tip);
+ if (gpu_material) {
+ /* NOTE: This needs to happen before the draw-call to allow correct attribute extraction
+ * (see T101896). */
+ DRW_shgroup_add_material_resources(shgrp, gpu_material);
+ }
/* TODO(fclem): Until we have a better way to cull the curves and render with orco, bypass
* culling test. */
GPUBatch *geom = curves_cache->final[subdiv].proc_hairs[thickness_res - 1];
diff --git a/source/blender/draw/intern/draw_curves_private.h b/source/blender/draw/intern/draw_curves_private.h
index 31122ed5248..715706fd7a6 100644
--- a/source/blender/draw/intern/draw_curves_private.h
+++ b/source/blender/draw/intern/draw_curves_private.h
@@ -34,7 +34,6 @@ typedef enum CurvesEvalShader {
typedef struct CurvesEvalFinalCache {
/* Output of the subdivision stage: vertex buffer sized to subdiv level. */
GPUVertBuf *proc_buf;
- GPUTexture *proc_tex;
/** Just contains a huge index buffer used to draw the final curves. */
GPUBatch *proc_hairs[MAX_THICKRES];
@@ -61,32 +60,29 @@ typedef struct CurvesEvalFinalCache {
/* Output of the subdivision stage: vertex buffers sized to subdiv level. This is only attributes
* on point domain. */
GPUVertBuf *attributes_buf[GPU_MAX_ATTR];
- GPUTexture *attributes_tex[GPU_MAX_ATTR];
} CurvesEvalFinalCache;
/* Curves procedural display: Evaluation is done on the GPU. */
typedef struct CurvesEvalCache {
/* Input control point positions combined with parameter data. */
GPUVertBuf *proc_point_buf;
- GPUTexture *point_tex;
+
+ /* Editmode data (such as selection flags) used by overlay_edit_curve_point.glsl */
+ GPUVertBuf *data_edit_points;
/** Info of control points strands (segment count and base index) */
GPUVertBuf *proc_strand_buf;
- GPUTexture *strand_tex;
/* Curve length data. */
GPUVertBuf *proc_length_buf;
- GPUTexture *length_tex;
GPUVertBuf *proc_strand_seg_buf;
- GPUTexture *strand_seg_tex;
CurvesEvalFinalCache final[MAX_HAIR_SUBDIV];
/* For point attributes, which need subdivision, these buffers contain the input data.
* For curve domain attributes, which do not need subdivision, these are the final data. */
GPUVertBuf *proc_attributes_buf[GPU_MAX_ATTR];
- GPUTexture *proc_attributes_tex[GPU_MAX_ATTR];
int strands_len;
int elems_len;
diff --git a/source/blender/draw/intern/draw_debug.cc b/source/blender/draw/intern/draw_debug.cc
index b0662a42ea0..55de779d85c 100644
--- a/source/blender/draw/intern/draw_debug.cc
+++ b/source/blender/draw/intern/draw_debug.cc
@@ -21,7 +21,7 @@
#include <iomanip>
-#ifdef DEBUG
+#if defined(DEBUG) || defined(WITH_DRAW_DEBUG)
# define DRAW_DEBUG
#else
/* Uncomment to forcibly enable debug draw in release mode. */
@@ -85,6 +85,9 @@ void DebugDraw::init()
gpu_draw_buf_.command.instance_first_array = 0;
gpu_draw_buf_used = false;
+ print_col_ = 0;
+ print_row_ = 0;
+
modelmat_reset();
}
@@ -316,8 +319,8 @@ template<> void DebugDraw::print_value<uint4>(const uint4 &value)
/* -------------------------------------------------------------------- */
/** \name Internals
*
- * IMPORTANT: All of these are copied from the shader libs (common_debug_draw_lib.glsl &
- * common_debug_print_lib.glsl). They need to be kept in sync to write the same data.
+ * IMPORTANT: All of these are copied from the shader libraries (`common_debug_draw_lib.glsl` &
+ * `common_debug_print_lib.glsl`). They need to be kept in sync to write the same data.
* \{ */
void DebugDraw::draw_line(float3 v1, float3 v2, uint color)
@@ -553,6 +556,9 @@ void DebugDraw::display_prints()
GPUShader *shader = DRW_shader_debug_print_display_get();
GPU_batch_set_shader(batch, shader);
int slot = GPU_shader_get_builtin_ssbo(shader, GPU_STORAGE_BUFFER_DEBUG_PRINT);
+ float f_viewport[4];
+ GPU_viewport_size_get_f(f_viewport);
+ GPU_shader_uniform_2fv(shader, "viewport_size", f_viewport);
if (gpu_print_buf_used) {
GPU_debug_group_begin("GPU");
diff --git a/source/blender/draw/intern/draw_defines.h b/source/blender/draw/intern/draw_defines.h
index 3df7e47cffb..57035717bd7 100644
--- a/source/blender/draw/intern/draw_defines.h
+++ b/source/blender/draw/intern/draw_defines.h
@@ -12,6 +12,10 @@
#pragma once
#define DRW_VIEW_UBO_SLOT 0
+#define DRW_VIEW_CULLING_UBO_SLOT 1
+#define DRW_CLIPPING_UBO_SLOT 2
+
+#define DRW_LAYER_ATTR_UBO_SLOT 3
#define DRW_RESOURCE_ID_SLOT 11
#define DRW_OBJ_MAT_SLOT 10
diff --git a/source/blender/draw/intern/draw_fluid.c b/source/blender/draw/intern/draw_fluid.c
index c34025ebe52..a14f6142cfb 100644
--- a/source/blender/draw/intern/draw_fluid.c
+++ b/source/blender/draw/intern/draw_fluid.c
@@ -208,7 +208,7 @@ static GPUTexture *create_volume_texture(const int dim[3],
}
else {
/* We need to resize the input. */
- int channels = (ELEM(texture_format, GPU_R8, GPU_R16F, GPU_R32F)) ? 1 : 4;
+ int channels = ELEM(texture_format, GPU_R8, GPU_R16F, GPU_R32F) ? 1 : 4;
float *rescaled_data = rescale_3d(dim, final_dim, channels, data);
if (rescaled_data) {
GPU_texture_update_sub(tex, GPU_DATA_FLOAT, rescaled_data, 0, 0, 0, UNPACK3(final_dim));
diff --git a/source/blender/draw/intern/draw_hair.cc b/source/blender/draw/intern/draw_hair.cc
index ceee1c7cb48..c5261f26f76 100644
--- a/source/blender/draw/intern/draw_hair.cc
+++ b/source/blender/draw/intern/draw_hair.cc
@@ -105,9 +105,9 @@ static void drw_hair_particle_cache_shgrp_attach_resources(DRWShadingGroup *shgr
ParticleHairCache *cache,
const int subdiv)
{
- DRW_shgroup_uniform_texture(shgrp, "hairPointBuffer", cache->point_tex);
- DRW_shgroup_uniform_texture(shgrp, "hairStrandBuffer", cache->strand_tex);
- DRW_shgroup_uniform_texture(shgrp, "hairStrandSegBuffer", cache->strand_seg_tex);
+ DRW_shgroup_buffer_texture(shgrp, "hairPointBuffer", cache->proc_point_buf);
+ DRW_shgroup_buffer_texture(shgrp, "hairStrandBuffer", cache->proc_strand_buf);
+ DRW_shgroup_buffer_texture(shgrp, "hairStrandSegBuffer", cache->proc_strand_seg_buf);
DRW_shgroup_uniform_int(shgrp, "hairStrandsRes", &cache->final[subdiv].strands_res, 1);
}
@@ -202,8 +202,8 @@ GPUVertBuf *DRW_hair_pos_buffer_get(Object *object, ParticleSystem *psys, Modifi
}
void DRW_hair_duplimat_get(Object *object,
- ParticleSystem *UNUSED(psys),
- ModifierData *UNUSED(md),
+ ParticleSystem * /*psys*/,
+ ModifierData * /*md*/,
float (*dupli_mat)[4])
{
Object *dupli_parent = DRW_object_get_dupli_parent(object);
@@ -216,12 +216,12 @@ void DRW_hair_duplimat_get(Object *object,
if (collection != nullptr) {
sub_v3_v3(dupli_mat[3], collection->instance_offset);
}
- mul_m4_m4m4(dupli_mat, dupli_parent->obmat, dupli_mat);
+ mul_m4_m4m4(dupli_mat, dupli_parent->object_to_world, dupli_mat);
}
else {
- copy_m4_m4(dupli_mat, dupli_object->ob->obmat);
+ copy_m4_m4(dupli_mat, dupli_object->ob->object_to_world);
invert_m4(dupli_mat);
- mul_m4_m4m4(dupli_mat, object->obmat, dupli_mat);
+ mul_m4_m4m4(dupli_mat, object->object_to_world, dupli_mat);
}
}
else {
@@ -280,9 +280,9 @@ DRWShadingGroup *DRW_shgroup_hair_create_sub(Object *object,
float hair_rad_tip = part->rad_tip * part->rad_scale * 0.5f;
bool hair_close_tip = (part->shape_flag & PART_SHAPE_CLOSE_TIP) != 0;
- DRW_shgroup_uniform_texture(shgrp, "hairPointBuffer", hair_cache->final[subdiv].proc_tex);
- if (hair_cache->length_tex) {
- DRW_shgroup_uniform_texture(shgrp, "l", hair_cache->length_tex);
+ DRW_shgroup_buffer_texture(shgrp, "hairPointBuffer", hair_cache->final[subdiv].proc_buf);
+ if (hair_cache->proc_length_buf) {
+ DRW_shgroup_buffer_texture(shgrp, "l", hair_cache->proc_length_buf);
}
DRW_shgroup_uniform_block(shgrp, "drw_curves", *g_dummy_curves_info);
@@ -293,6 +293,11 @@ DRWShadingGroup *DRW_shgroup_hair_create_sub(Object *object,
DRW_shgroup_uniform_float_copy(shgrp, "hairRadRoot", hair_rad_root);
DRW_shgroup_uniform_float_copy(shgrp, "hairRadTip", hair_rad_tip);
DRW_shgroup_uniform_bool_copy(shgrp, "hairCloseTip", hair_close_tip);
+ if (gpu_material) {
+ /* NOTE: This needs to happen before the draw-call to allow correct attribute extraction
+ * (see T101896). */
+ DRW_shgroup_add_material_resources(shgrp, gpu_material);
+ }
/* TODO(fclem): Until we have a better way to cull the hair and render with orco, bypass
* culling test. */
GPUBatch *geom = hair_cache->final[subdiv].proc_hairs[thickness_res - 1];
diff --git a/source/blender/draw/intern/draw_hair_private.h b/source/blender/draw/intern/draw_hair_private.h
index c7e9e1e22de..a3019ab5aa5 100644
--- a/source/blender/draw/intern/draw_hair_private.h
+++ b/source/blender/draw/intern/draw_hair_private.h
@@ -29,7 +29,6 @@ struct ParticleSystem;
typedef struct ParticleHairFinalCache {
/* Output of the subdivision stage: vertex buff sized to subdiv level. */
GPUVertBuf *proc_buf;
- GPUTexture *proc_tex;
/* Just contains a huge index buffer used to draw the final hair. */
GPUBatch *proc_hairs[MAX_THICKRES];
@@ -44,18 +43,14 @@ typedef struct ParticleHairCache {
/* Hair Procedural display: Interpolation is done on the GPU. */
GPUVertBuf *proc_point_buf; /* Input control points */
- GPUTexture *point_tex;
/** Infos of control points strands (segment count and base index) */
GPUVertBuf *proc_strand_buf;
- GPUTexture *strand_tex;
/* Hair Length */
GPUVertBuf *proc_length_buf;
- GPUTexture *length_tex;
GPUVertBuf *proc_strand_seg_buf;
- GPUTexture *strand_seg_tex;
GPUVertBuf *proc_uv_buf[MAX_MTFACE];
GPUTexture *uv_tex[MAX_MTFACE];
diff --git a/source/blender/draw/intern/draw_instance_data.c b/source/blender/draw/intern/draw_instance_data.c
index ac2aea4524d..b44c1364af9 100644
--- a/source/blender/draw/intern/draw_instance_data.c
+++ b/source/blender/draw/intern/draw_instance_data.c
@@ -410,12 +410,12 @@ typedef struct DRWSparseUniformBuf {
BLI_bitmap *chunk_used;
int num_chunks;
- unsigned int item_size, chunk_size, chunk_bytes;
+ uint item_size, chunk_size, chunk_bytes;
} DRWSparseUniformBuf;
static void drw_sparse_uniform_buffer_init(DRWSparseUniformBuf *buffer,
- unsigned int item_size,
- unsigned int chunk_size)
+ uint item_size,
+ uint chunk_size)
{
buffer->chunk_buffers = NULL;
buffer->chunk_used = NULL;
@@ -426,7 +426,7 @@ static void drw_sparse_uniform_buffer_init(DRWSparseUniformBuf *buffer,
buffer->chunk_bytes = item_size * chunk_size;
}
-DRWSparseUniformBuf *DRW_sparse_uniform_buffer_new(unsigned int item_size, unsigned int chunk_size)
+DRWSparseUniformBuf *DRW_sparse_uniform_buffer_new(uint item_size, uint chunk_size)
{
DRWSparseUniformBuf *buf = MEM_mallocN(sizeof(DRWSparseUniformBuf), __func__);
drw_sparse_uniform_buffer_init(buf, item_size, chunk_size);
@@ -585,87 +585,18 @@ static DRWUniformAttrBuf *drw_uniform_attrs_pool_ensure(GHash *table,
return (DRWUniformAttrBuf *)*pval;
}
-/* This function mirrors lookup_property in cycles/blender/blender_object.cpp */
-static bool drw_uniform_property_lookup(ID *id, const char *name, float r_data[4])
-{
- PointerRNA ptr, id_ptr;
- PropertyRNA *prop;
-
- if (!id) {
- return false;
- }
-
- RNA_id_pointer_create(id, &id_ptr);
-
- if (!RNA_path_resolve(&id_ptr, name, &ptr, &prop)) {
- return false;
- }
-
- if (prop == NULL) {
- return false;
- }
-
- PropertyType type = RNA_property_type(prop);
- int arraylen = RNA_property_array_length(&ptr, prop);
-
- if (arraylen == 0) {
- float value;
-
- if (type == PROP_FLOAT) {
- value = RNA_property_float_get(&ptr, prop);
- }
- else if (type == PROP_INT) {
- value = RNA_property_int_get(&ptr, prop);
- }
- else {
- return false;
- }
-
- copy_v4_fl4(r_data, value, value, value, 1);
- return true;
- }
-
- if (type == PROP_FLOAT && arraylen <= 4) {
- copy_v4_fl4(r_data, 0, 0, 0, 1);
- RNA_property_float_get_array(&ptr, prop, r_data);
- return true;
- }
-
- return false;
-}
-
-/* This function mirrors lookup_instance_property in cycles/blender/blender_object.cpp */
static void drw_uniform_attribute_lookup(GPUUniformAttr *attr,
Object *ob,
Object *dupli_parent,
DupliObject *dupli_source,
float r_data[4])
{
- copy_v4_fl(r_data, 0);
-
/* If requesting instance data, check the parent particle system and object. */
if (attr->use_dupli) {
- if (dupli_source && dupli_source->particle_system) {
- ParticleSettings *settings = dupli_source->particle_system->part;
- if (drw_uniform_property_lookup((ID *)settings, attr->name_id_prop, r_data) ||
- drw_uniform_property_lookup((ID *)settings, attr->name, r_data)) {
- return;
- }
- }
- if (drw_uniform_property_lookup((ID *)dupli_parent, attr->name_id_prop, r_data) ||
- drw_uniform_property_lookup((ID *)dupli_parent, attr->name, r_data)) {
- return;
- }
+ BKE_object_dupli_find_rgba_attribute(ob, dupli_source, dupli_parent, attr->name, r_data);
}
-
- /* Check the object and mesh. */
- if (ob) {
- if (drw_uniform_property_lookup((ID *)ob, attr->name_id_prop, r_data) ||
- drw_uniform_property_lookup((ID *)ob, attr->name, r_data) ||
- drw_uniform_property_lookup((ID *)ob->data, attr->name_id_prop, r_data) ||
- drw_uniform_property_lookup((ID *)ob->data, attr->name, r_data)) {
- return;
- }
+ else {
+ BKE_object_dupli_find_rgba_attribute(ob, NULL, NULL, attr->name, r_data);
}
}
@@ -691,6 +622,62 @@ void drw_uniform_attrs_pool_update(GHash *table,
}
}
+GPUUniformBuf *drw_ensure_layer_attribute_buffer(void)
+{
+ DRWData *data = DST.vmempool;
+
+ if (data->vlattrs_ubo_ready && data->vlattrs_ubo != NULL) {
+ return data->vlattrs_ubo;
+ }
+
+ /* Allocate the buffer data. */
+ const int buf_size = DRW_RESOURCE_CHUNK_LEN;
+
+ if (data->vlattrs_buf == NULL) {
+ data->vlattrs_buf = MEM_calloc_arrayN(
+ buf_size, sizeof(LayerAttribute), "View Layer Attr Data");
+ }
+
+ /* Look up attributes.
+ *
+ * Mirrors code in draw_resource.cc and cycles/blender/shader.cpp.
+ */
+ LayerAttribute *buffer = data->vlattrs_buf;
+ int count = 0;
+
+ LISTBASE_FOREACH (GPULayerAttr *, attr, &data->vlattrs_name_list) {
+ float value[4];
+
+ if (BKE_view_layer_find_rgba_attribute(
+ DST.draw_ctx.scene, DST.draw_ctx.view_layer, attr->name, value)) {
+ LayerAttribute *item = &buffer[count++];
+
+ memcpy(item->data, value, sizeof(item->data));
+ item->hash_code = attr->hash_code;
+
+ /* Check if the buffer is full just in case. */
+ if (count >= buf_size) {
+ break;
+ }
+ }
+ }
+
+ buffer[0].buffer_length = count;
+
+ /* Update or create the UBO object. */
+ if (data->vlattrs_ubo != NULL) {
+ GPU_uniformbuf_update(data->vlattrs_ubo, buffer);
+ }
+ else {
+ data->vlattrs_ubo = GPU_uniformbuf_create_ex(
+ sizeof(*buffer) * buf_size, buffer, "View Layer Attributes");
+ }
+
+ data->vlattrs_ubo_ready = true;
+
+ return data->vlattrs_ubo;
+}
+
DRWSparseUniformBuf *DRW_uniform_attrs_pool_find_ubo(GHash *table,
const struct GPUUniformAttrList *key)
{
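The buffer filled by `drw_ensure_layer_attribute_buffer` stores the entry count in element 0 (`buffer_length`) and identifies entries by `hash_code`. A consumer that knows an attribute's hash can therefore resolve it with a linear scan; the following is a hypothetical C++ illustration of that lookup, assuming `buffer_length` is an integer field, and is not the shader code that actually reads the UBO.

/* Sketch: find a layer attribute value by hash in the buffer written above. */
static const LayerAttribute *layer_attribute_find(const LayerAttribute *buffer,
                                                  uint32_t hash_code)
{
  const int count = buffer[0].buffer_length;
  for (int i = 0; i < count; i++) {
    if (buffer[i].hash_code == hash_code) {
      return &buffer[i];
    }
  }
  return nullptr;
}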
diff --git a/source/blender/draw/intern/draw_instance_data.h b/source/blender/draw/intern/draw_instance_data.h
index 9053544d98a..17978938ced 100644
--- a/source/blender/draw/intern/draw_instance_data.h
+++ b/source/blender/draw/intern/draw_instance_data.h
@@ -16,6 +16,10 @@
#define DRW_BUFFER_VERTS_CHUNK 128
+#ifdef __cplusplus
+extern "C" {
+#endif
+
struct GHash;
struct GPUUniformAttrList;
@@ -107,3 +111,7 @@ void DRW_uniform_attrs_pool_flush_all(struct GHash *table);
void DRW_uniform_attrs_pool_clear_all(struct GHash *table);
struct DRWSparseUniformBuf *DRW_uniform_attrs_pool_find_ubo(struct GHash *table,
const struct GPUUniformAttrList *key);
+
+#ifdef __cplusplus
+}
+#endif
diff --git a/source/blender/draw/intern/draw_manager.c b/source/blender/draw/intern/draw_manager.c
index 8212b38e74d..4fcfec833eb 100644
--- a/source/blender/draw/intern/draw_manager.c
+++ b/source/blender/draw/intern/draw_manager.c
@@ -38,6 +38,7 @@
#include "BKE_pointcloud.h"
#include "BKE_screen.h"
#include "BKE_subdiv_modifier.h"
+#include "BKE_viewer_path.h"
#include "BKE_volume.h"
#include "DNA_camera_types.h"
@@ -297,11 +298,6 @@ const float *DRW_viewport_invert_size_get(void)
return DST.inv_size;
}
-const float *DRW_viewport_screenvecs_get(void)
-{
- return &DST.screenvecs[0][0];
-}
-
const float *DRW_viewport_pixelsize_get(void)
{
return &DST.pixsize;
@@ -382,6 +378,8 @@ DRWData *DRW_viewport_data_create(void)
drw_data->views = BLI_memblock_create(sizeof(DRWView));
drw_data->images = BLI_memblock_create(sizeof(GPUTexture *));
drw_data->obattrs_ubo_pool = DRW_uniform_attrs_pool_new();
+ drw_data->vlattrs_name_cache = BLI_ghash_new(
+ BLI_ghashutil_inthash_p_simple, BLI_ghashutil_intcmp, "View Layer Attribute names");
{
uint chunk_len = sizeof(DRWObjectMatrix) * DRW_RESOURCE_CHUNK_LEN;
drw_data->obmats = BLI_memblock_create_ex(sizeof(DRWObjectMatrix), chunk_len);
@@ -417,9 +415,24 @@ static void draw_texture_release(DRWData *drw_data)
}
}
+static void draw_prune_vlattrs(DRWData *drw_data)
+{
+ drw_data->vlattrs_ubo_ready = false;
+
+ /* Forget known attributes after they are unused for a few frames. */
+ LISTBASE_FOREACH_MUTABLE (GPULayerAttr *, attr, &drw_data->vlattrs_name_list) {
+ if (++attr->users > 10) {
+ BLI_ghash_remove(
+ drw_data->vlattrs_name_cache, POINTER_FROM_UINT(attr->hash_code), NULL, NULL);
+ BLI_freelinkN(&drw_data->vlattrs_name_list, attr);
+ }
+ }
+}
+
static void drw_viewport_data_reset(DRWData *drw_data)
{
draw_texture_release(drw_data);
+ draw_prune_vlattrs(drw_data);
BLI_memblock_clear(drw_data->commands, NULL);
BLI_memblock_clear(drw_data->commands_small, NULL);
@@ -455,6 +468,12 @@ void DRW_viewport_data_free(DRWData *drw_data)
BLI_memblock_destroy(drw_data->passes, NULL);
BLI_memblock_destroy(drw_data->images, NULL);
DRW_uniform_attrs_pool_free(drw_data->obattrs_ubo_pool);
+ BLI_ghash_free(drw_data->vlattrs_name_cache, NULL, NULL);
+ BLI_freelistN(&drw_data->vlattrs_name_list);
+ if (drw_data->vlattrs_ubo) {
+ GPU_uniformbuf_free(drw_data->vlattrs_ubo);
+ MEM_freeN(drw_data->vlattrs_buf);
+ }
DRW_instance_data_list_free(drw_data->idatalist);
DRW_texture_pool_free(drw_data->texture_pool);
for (int i = 0; i < 2; i++) {
@@ -560,12 +579,8 @@ static void drw_manager_init(DRWManager *dst, GPUViewport *viewport, const int s
draw_unit_state_create();
if (rv3d != NULL) {
- normalize_v3_v3(dst->screenvecs[0], rv3d->viewinv[0]);
- normalize_v3_v3(dst->screenvecs[1], rv3d->viewinv[1]);
-
dst->pixsize = rv3d->pixsize;
dst->view_default = DRW_view_create(rv3d->viewmat, rv3d->winmat, NULL, NULL, NULL);
- DRW_view_camtexco_set(dst->view_default, rv3d->viewcamtexcofac);
if (dst->draw_ctx.sh_cfg == GPU_SHADER_CFG_CLIPPED) {
int plane_len = (RV3D_LOCK_FLAGS(rv3d) & RV3D_BOXCLIP) ? 4 : 6;
@@ -594,9 +609,6 @@ static void drw_manager_init(DRWManager *dst, GPUViewport *viewport, const int s
dst->view_previous = NULL;
}
else {
- zero_v3(dst->screenvecs[0]);
- zero_v3(dst->screenvecs[1]);
-
dst->pixsize = 1.0f;
dst->view_default = NULL;
dst->view_active = NULL;
@@ -609,7 +621,12 @@ static void drw_manager_init(DRWManager *dst, GPUViewport *viewport, const int s
}
if (G_draw.view_ubo == NULL) {
- G_draw.view_ubo = GPU_uniformbuf_create_ex(sizeof(ViewInfos), NULL, "G_draw.view_ubo");
+ G_draw.view_ubo = GPU_uniformbuf_create_ex(sizeof(ViewMatrices), NULL, "G_draw.view_ubo");
+ }
+
+ if (G_draw.clipping_ubo == NULL) {
+ G_draw.clipping_ubo = GPU_uniformbuf_create_ex(
+ sizeof(float4) * 6, NULL, "G_draw.clipping_ubo");
}
if (dst->draw_list == NULL) {
@@ -817,6 +834,7 @@ static bool id_type_can_have_drawdata(const short id_type)
/* has DrawData */
case ID_OB:
case ID_WO:
+ case ID_SCE:
return true;
/* no DrawData */
@@ -969,10 +987,13 @@ void DRW_cache_free_old_batches(Main *bmain)
/* TODO(fclem): This is not optimal since it iter over all dupli instances.
* In this case only the source object should be tagged. */
- DEG_OBJECT_ITER_FOR_RENDER_ENGINE_BEGIN (depsgraph, ob) {
+ DEGObjectIterSettings deg_iter_settings = {0};
+ deg_iter_settings.depsgraph = depsgraph;
+ deg_iter_settings.flags = DEG_OBJECT_ITER_FOR_RENDER_ENGINE_FLAGS;
+ DEG_OBJECT_ITER_BEGIN (&deg_iter_settings, ob) {
DRW_batch_cache_free_old(ob, ctime);
}
- DEG_OBJECT_ITER_FOR_RENDER_ENGINE_END;
+ DEG_OBJECT_ITER_END;
}
}
}
@@ -1324,6 +1345,7 @@ void DRW_notify_view_update(const DRWUpdateContext *update_ctx)
/* Reset before using it. */
drw_state_prepare_clean_for_draw(&DST);
+ BKE_view_layer_synced_ensure(scene, view_layer);
DST.draw_ctx = (DRWContextState){
.region = region,
.rv3d = rv3d,
@@ -1377,6 +1399,7 @@ static void drw_notify_view_update_offscreen(struct Depsgraph *depsgraph,
/* Reset before using it. */
drw_state_prepare_clean_for_draw(&DST);
+ BKE_view_layer_synced_ensure(scene, view_layer);
DST.draw_ctx = (DRWContextState){
.region = region,
.rv3d = rv3d,
@@ -1502,7 +1525,7 @@ void DRW_draw_callbacks_post_scene(void)
DRW_draw_region_info();
- /* annotations - temporary drawing buffer (screenspace) */
+ /* Annotations - temporary drawing buffer (screen-space). */
/* XXX: Or should we use a proper draw/overlay engine for this case? */
if (((v3d->flag2 & V3D_HIDE_OVERLAYS) == 0) && (do_annotations)) {
GPU_depth_test(GPU_DEPTH_NONE);
@@ -1629,6 +1652,7 @@ void DRW_draw_render_loop_ex(struct Depsgraph *depsgraph,
ViewLayer *view_layer = DEG_get_evaluated_view_layer(depsgraph);
RegionView3D *rv3d = region->regiondata;
+ BKE_view_layer_synced_ensure(scene, view_layer);
DST.draw_ctx.evil_C = evil_C;
DST.draw_ctx = (DRWContextState){
.region = region,
@@ -1666,6 +1690,7 @@ void DRW_draw_render_loop_ex(struct Depsgraph *depsgraph,
DRW_globals_update();
drw_debug_init();
+ DRW_pointcloud_init();
DRW_curves_init(DST.vmempool);
DRW_volume_init(DST.vmempool);
DRW_smoke_init(DST.vmempool);
@@ -1686,7 +1711,13 @@ void DRW_draw_render_loop_ex(struct Depsgraph *depsgraph,
if (do_populate_loop) {
DST.dupli_origin = NULL;
DST.dupli_origin_data = NULL;
- DEG_OBJECT_ITER_FOR_RENDER_ENGINE_BEGIN (depsgraph, ob) {
+ DEGObjectIterSettings deg_iter_settings = {0};
+ deg_iter_settings.depsgraph = depsgraph;
+ deg_iter_settings.flags = DEG_OBJECT_ITER_FOR_RENDER_ENGINE_FLAGS;
+ if (v3d->flag2 & V3D_SHOW_VIEWER) {
+ deg_iter_settings.viewer_path = &v3d->viewer_path;
+ }
+ DEG_OBJECT_ITER_BEGIN (&deg_iter_settings, ob) {
if ((object_type_exclude_viewport & (1 << ob->type)) != 0) {
continue;
}
@@ -1698,7 +1729,7 @@ void DRW_draw_render_loop_ex(struct Depsgraph *depsgraph,
drw_duplidata_load(ob);
drw_engines_cache_populate(ob);
}
- DEG_OBJECT_ITER_FOR_RENDER_ENGINE_END;
+ DEG_OBJECT_ITER_END;
}
drw_duplidata_free();
@@ -1814,7 +1845,7 @@ void DRW_draw_render_loop_offscreen(struct Depsgraph *depsgraph,
GPU_matrix_identity_set();
GPU_matrix_identity_projection_set();
const bool do_overlays = (v3d->flag2 & V3D_HIDE_OVERLAYS) == 0 ||
- (ELEM(v3d->shading.type, OB_WIRE, OB_SOLID)) ||
+ ELEM(v3d->shading.type, OB_WIRE, OB_SOLID) ||
(ELEM(v3d->shading.type, OB_MATERIAL) &&
(v3d->shading.flag & V3D_SHADING_SCENE_WORLD) == 0) ||
(ELEM(v3d->shading.type, OB_RENDER) &&
@@ -1838,14 +1869,17 @@ bool DRW_render_check_grease_pencil(Depsgraph *depsgraph)
return false;
}
- DEG_OBJECT_ITER_FOR_RENDER_ENGINE_BEGIN (depsgraph, ob) {
+ DEGObjectIterSettings deg_iter_settings = {0};
+ deg_iter_settings.depsgraph = depsgraph;
+ deg_iter_settings.flags = DEG_OBJECT_ITER_FOR_RENDER_ENGINE_FLAGS;
+ DEG_OBJECT_ITER_BEGIN (&deg_iter_settings, ob) {
if (ob->type == OB_GPENCIL) {
if (DRW_object_visibility_in_active_context(ob) & OB_VISIBLE_SELF) {
return true;
}
}
}
- DEG_OBJECT_ITER_FOR_RENDER_ENGINE_END;
+ DEG_OBJECT_ITER_END;
return false;
}
@@ -1926,20 +1960,6 @@ void DRW_render_gpencil(struct RenderEngine *engine, struct Depsgraph *depsgraph
DST.buffer_finish_called = false;
}
-/* Callback function for RE_engine_update_render_passes to ensure all
- * render passes are registered. */
-static void draw_render_result_ensure_pass_cb(void *user_data,
- struct Scene *UNUSED(scene),
- struct ViewLayer *view_layer,
- const char *name,
- int channels,
- const char *chanid,
- eNodeSocketDatatype UNUSED(type))
-{
- RenderEngine *engine = user_data;
- RE_engine_add_pass(engine, name, channels, chanid, view_layer->name);
-}
-
void DRW_render_to_image(RenderEngine *engine, struct Depsgraph *depsgraph)
{
Scene *scene = DEG_get_evaluated_scene(depsgraph);
@@ -1990,10 +2010,6 @@ void DRW_render_to_image(RenderEngine *engine, struct Depsgraph *depsgraph)
/* set default viewport */
GPU_viewport(0, 0, size[0], size[1]);
- /* Update the render passes. This needs to be done before acquiring the render result. */
- RE_engine_update_render_passes(
- engine, scene, view_layer, draw_render_result_ensure_pass_cb, engine);
-
/* Init render result. */
RenderResult *render_result = RE_engine_begin_result(engine,
0,
@@ -2038,6 +2054,7 @@ void DRW_render_object_iter(
void (*callback)(void *vedata, Object *ob, RenderEngine *engine, struct Depsgraph *depsgraph))
{
const DRWContextState *draw_ctx = DRW_context_state_get();
+ DRW_pointcloud_init();
DRW_curves_init(DST.vmempool);
DRW_volume_init(DST.vmempool);
DRW_smoke_init(DST.vmempool);
@@ -2048,7 +2065,10 @@ void DRW_render_object_iter(
0;
DST.dupli_origin = NULL;
DST.dupli_origin_data = NULL;
- DEG_OBJECT_ITER_FOR_RENDER_ENGINE_BEGIN (depsgraph, ob) {
+ DEGObjectIterSettings deg_iter_settings = {0};
+ deg_iter_settings.depsgraph = depsgraph;
+ deg_iter_settings.flags = DEG_OBJECT_ITER_FOR_RENDER_ENGINE_FLAGS;
+ DEG_OBJECT_ITER_BEGIN (&deg_iter_settings, ob) {
if ((object_type_exclude_viewport & (1 << ob->type)) == 0) {
DST.dupli_parent = data_.dupli_parent;
DST.dupli_source = data_.dupli_object_current;
@@ -2064,7 +2084,7 @@ void DRW_render_object_iter(
}
}
}
- DEG_OBJECT_ITER_FOR_RENDER_ENGINE_END;
+ DEG_OBJECT_ITER_END;
drw_duplidata_free();
drw_task_graph_deinit();
@@ -2095,6 +2115,7 @@ void DRW_custom_pipeline(DrawEngineType *draw_engine_type,
drw_manager_init(&DST, NULL, NULL);
+ DRW_pointcloud_init();
DRW_curves_init(DST.vmempool);
DRW_volume_init(DST.vmempool);
DRW_smoke_init(DST.vmempool);
@@ -2130,6 +2151,7 @@ void DRW_cache_restart(void)
DST.buffer_finish_called = false;
+ DRW_pointcloud_init();
DRW_curves_init(DST.vmempool);
DRW_volume_init(DST.vmempool);
DRW_smoke_init(DST.vmempool);
@@ -2143,6 +2165,7 @@ void DRW_draw_render_loop_2d_ex(struct Depsgraph *depsgraph,
Scene *scene = DEG_get_evaluated_scene(depsgraph);
ViewLayer *view_layer = DEG_get_evaluated_view_layer(depsgraph);
+ BKE_view_layer_synced_ensure(scene, view_layer);
DST.draw_ctx.evil_C = evil_C;
DST.draw_ctx = (DRWContextState){
.region = region,
@@ -2191,10 +2214,13 @@ void DRW_draw_render_loop_2d_ex(struct Depsgraph *depsgraph,
/* Only iterate over objects when overlay uses object data. */
if (do_populate_loop) {
- DEG_OBJECT_ITER_FOR_RENDER_ENGINE_BEGIN (depsgraph, ob) {
+ DEGObjectIterSettings deg_iter_settings = {0};
+ deg_iter_settings.depsgraph = depsgraph;
+ deg_iter_settings.flags = DEG_OBJECT_ITER_FOR_RENDER_ENGINE_FLAGS;
+ DEG_OBJECT_ITER_BEGIN (&deg_iter_settings, ob) {
drw_engines_cache_populate(ob);
}
- DEG_OBJECT_ITER_FOR_RENDER_ENGINE_END;
+ DEG_OBJECT_ITER_END;
}
drw_engines_cache_finish();
@@ -2349,6 +2375,8 @@ void DRW_draw_select_loop(struct Depsgraph *depsgraph,
Scene *scene = DEG_get_evaluated_scene(depsgraph);
RenderEngineType *engine_type = ED_view3d_engine_type(scene, v3d->shading.type);
ViewLayer *view_layer = DEG_get_evaluated_view_layer(depsgraph);
+
+ BKE_view_layer_synced_ensure(scene, view_layer);
Object *obact = BKE_view_layer_active_object_get(view_layer);
Object *obedit = use_obedit_skip ? NULL : OBEDIT_FROM_OBACT(obact);
#ifndef USE_GPU_SELECT
@@ -2449,6 +2477,7 @@ void DRW_draw_select_loop(struct Depsgraph *depsgraph,
/* Init engines */
drw_engines_init();
+ DRW_pointcloud_init();
DRW_curves_init(DST.vmempool);
DRW_volume_init(DST.vmempool);
DRW_smoke_init(DST.vmempool);
@@ -2458,7 +2487,7 @@ void DRW_draw_select_loop(struct Depsgraph *depsgraph,
drw_engines_world_update(scene);
if (use_obedit) {
- FOREACH_OBJECT_IN_MODE_BEGIN (view_layer, v3d, object_type, object_mode, ob_iter) {
+ FOREACH_OBJECT_IN_MODE_BEGIN (scene, view_layer, v3d, object_type, object_mode, ob_iter) {
drw_engines_cache_populate(ob_iter);
}
FOREACH_OBJECT_IN_MODE_END;
@@ -2473,7 +2502,13 @@ void DRW_draw_select_loop(struct Depsgraph *depsgraph,
bool filter_exclude = false;
DST.dupli_origin = NULL;
DST.dupli_origin_data = NULL;
- DEG_OBJECT_ITER_FOR_RENDER_ENGINE_BEGIN (depsgraph, ob) {
+ DEGObjectIterSettings deg_iter_settings = {0};
+ deg_iter_settings.depsgraph = depsgraph;
+ deg_iter_settings.flags = DEG_OBJECT_ITER_FOR_RENDER_ENGINE_FLAGS;
+ if (v3d->flag2 & V3D_SHOW_VIEWER) {
+ deg_iter_settings.viewer_path = &v3d->viewer_path;
+ }
+ DEG_OBJECT_ITER_BEGIN (&deg_iter_settings, ob) {
if (!BKE_object_is_visible_in_viewport(v3d, ob)) {
continue;
}
@@ -2509,7 +2544,7 @@ void DRW_draw_select_loop(struct Depsgraph *depsgraph,
drw_engines_cache_populate(ob);
}
}
- DEG_OBJECT_ITER_FOR_RENDER_ENGINE_END;
+ DEG_OBJECT_ITER_END;
}
drw_duplidata_free();
@@ -2561,13 +2596,13 @@ void DRW_draw_select_loop(struct Depsgraph *depsgraph,
/**
* object mode select-loop, see: ED_view3d_draw_depth_loop (legacy drawing).
*/
-static void drw_draw_depth_loop_impl(struct Depsgraph *depsgraph,
- ARegion *region,
- View3D *v3d,
- GPUViewport *viewport,
- const bool use_gpencil,
- const bool use_basic,
- const bool use_overlay)
+void DRW_draw_depth_loop(struct Depsgraph *depsgraph,
+ ARegion *region,
+ View3D *v3d,
+ GPUViewport *viewport,
+ const bool use_gpencil,
+ const bool use_basic,
+ const bool use_overlay)
{
Scene *scene = DEG_get_evaluated_scene(depsgraph);
RenderEngineType *engine_type = ED_view3d_engine_type(scene, v3d->shading.type);
@@ -2580,6 +2615,7 @@ static void drw_draw_depth_loop_impl(struct Depsgraph *depsgraph,
DST.options.is_depth = true;
/* Instead of 'DRW_context_state_init(C, &DST.draw_ctx)', assign from args */
+ BKE_view_layer_synced_ensure(scene, view_layer);
DST.draw_ctx = (DRWContextState){
.region = region,
.rv3d = rv3d,
@@ -2623,6 +2659,7 @@ static void drw_draw_depth_loop_impl(struct Depsgraph *depsgraph,
/* Init engines */
drw_engines_init();
+ DRW_pointcloud_init();
DRW_curves_init(DST.vmempool);
DRW_volume_init(DST.vmempool);
DRW_smoke_init(DST.vmempool);
@@ -2634,7 +2671,13 @@ static void drw_draw_depth_loop_impl(struct Depsgraph *depsgraph,
const int object_type_exclude_viewport = v3d->object_type_exclude_viewport;
DST.dupli_origin = NULL;
DST.dupli_origin_data = NULL;
- DEG_OBJECT_ITER_FOR_RENDER_ENGINE_BEGIN (DST.draw_ctx.depsgraph, ob) {
+ DEGObjectIterSettings deg_iter_settings = {0};
+ deg_iter_settings.depsgraph = DST.draw_ctx.depsgraph;
+ deg_iter_settings.flags = DEG_OBJECT_ITER_FOR_RENDER_ENGINE_FLAGS;
+ if (v3d->flag2 & V3D_SHOW_VIEWER) {
+ deg_iter_settings.viewer_path = &v3d->viewer_path;
+ }
+ DEG_OBJECT_ITER_BEGIN (&deg_iter_settings, ob) {
if ((object_type_exclude_viewport & (1 << ob->type)) != 0) {
continue;
}
@@ -2646,7 +2689,7 @@ static void drw_draw_depth_loop_impl(struct Depsgraph *depsgraph,
drw_duplidata_load(ob);
drw_engines_cache_populate(ob);
}
- DEG_OBJECT_ITER_FOR_RENDER_ENGINE_END;
+ DEG_OBJECT_ITER_END;
drw_duplidata_free();
drw_engines_cache_finish();
@@ -2676,23 +2719,6 @@ static void drw_draw_depth_loop_impl(struct Depsgraph *depsgraph,
drw_manager_exit(&DST);
}
-void DRW_draw_depth_loop(struct Depsgraph *depsgraph,
- ARegion *region,
- View3D *v3d,
- GPUViewport *viewport)
-{
- drw_draw_depth_loop_impl(
- depsgraph, region, v3d, viewport, false, true, DRW_state_draw_support());
-}
-
-void DRW_draw_depth_loop_gpencil(struct Depsgraph *depsgraph,
- ARegion *region,
- View3D *v3d,
- GPUViewport *viewport)
-{
- drw_draw_depth_loop_impl(depsgraph, region, v3d, viewport, true, false, false);
-}
-
void DRW_draw_select_id(Depsgraph *depsgraph, ARegion *region, View3D *v3d, const rcti *rect)
{
SELECTID_Context *sel_ctx = DRW_select_engine_context_get();
@@ -2713,6 +2739,7 @@ void DRW_draw_select_id(Depsgraph *depsgraph, ARegion *region, View3D *v3d, cons
drw_state_prepare_clean_for_draw(&DST);
/* Instead of 'DRW_context_state_init(C, &DST.draw_ctx)', assign from args */
+ BKE_view_layer_synced_ensure(scene, view_layer);
DST.draw_ctx = (DRWContextState){
.region = region,
.rv3d = region->regiondata,
@@ -2774,7 +2801,7 @@ void DRW_draw_depth_object(
GPU_matrix_projection_set(rv3d->winmat);
GPU_matrix_set(rv3d->viewmat);
- GPU_matrix_mul(object->obmat);
+ GPU_matrix_mul(object->object_to_world);
/* Setup frame-buffer. */
GPUTexture *depth_tx = GPU_viewport_depth_texture(viewport);
@@ -2794,11 +2821,11 @@ void DRW_draw_depth_object(
const bool use_clipping_planes = RV3D_CLIPPING_ENABLED(v3d, rv3d);
if (use_clipping_planes) {
GPU_clip_distances(6);
- ED_view3d_clipping_local(rv3d, object->obmat);
+ ED_view3d_clipping_local(rv3d, object->object_to_world);
for (int i = 0; i < 6; i++) {
copy_v4_v4(planes.world[i], rv3d->clip_local[i]);
}
- copy_m4_m4(planes.ModelMatrix, object->obmat);
+ copy_m4_m4(planes.ModelMatrix, object->object_to_world);
}
drw_batch_cache_validate(object);
@@ -3047,6 +3074,7 @@ void DRW_engines_free(void)
GPU_FRAMEBUFFER_FREE_SAFE(g_select_buffer.framebuffer_depth_only);
DRW_shaders_free();
+ DRW_pointcloud_free();
DRW_curves_free();
DRW_volume_free();
DRW_shape_cache_free();
@@ -3058,6 +3086,7 @@ void DRW_engines_free(void)
DRW_UBO_FREE_SAFE(G_draw.block_ubo);
DRW_UBO_FREE_SAFE(G_draw.view_ubo);
+ DRW_UBO_FREE_SAFE(G_draw.clipping_ubo);
DRW_TEXTURE_FREE_SAFE(G_draw.ramp);
DRW_TEXTURE_FREE_SAFE(G_draw.weight_ramp);
@@ -3074,6 +3103,8 @@ void DRW_render_context_enable(Render *render)
WM_init_opengl();
}
+ GPU_render_begin();
+
if (GPU_use_main_context_workaround()) {
GPU_context_main_lock();
DRW_opengl_context_enable();
@@ -3097,6 +3128,8 @@ void DRW_render_context_enable(Render *render)
void DRW_render_context_disable(Render *render)
{
+ GPU_render_end();
+
if (GPU_use_main_context_workaround()) {
DRW_opengl_context_disable();
GPU_context_main_unlock();
@@ -3131,7 +3164,7 @@ void DRW_opengl_context_create(void)
DST.gl_context = WM_opengl_context_create();
WM_opengl_context_activate(DST.gl_context);
/* Be sure to create gpu_context too. */
- DST.gpu_context = GPU_context_create(NULL);
+ DST.gpu_context = GPU_context_create(0, DST.gl_context);
/* So we activate the window's one afterwards. */
wm_window_reset_drawable();
}
diff --git a/source/blender/draw/intern/draw_manager.cc b/source/blender/draw/intern/draw_manager.cc
index 169d86b2ea1..c644cadd18c 100644
--- a/source/blender/draw/intern/draw_manager.cc
+++ b/source/blender/draw/intern/draw_manager.cc
@@ -35,6 +35,7 @@ void Manager::begin_sync()
}
acquired_textures.clear();
+ layer_attributes.clear();
#ifdef DEBUG
/* Detect uninitialized data. */
@@ -52,14 +53,46 @@ void Manager::begin_sync()
resource_handle(float4x4::identity());
}
+void Manager::sync_layer_attributes()
+{
+ /* Sort the attribute IDs - the shaders use binary search. */
+ Vector<uint32_t> id_list;
+
+ id_list.reserve(layer_attributes.size());
+
+ for (uint32_t id : layer_attributes.keys()) {
+ id_list.append(id);
+ }
+
+ std::sort(id_list.begin(), id_list.end());
+
+ /* Look up the attributes. */
+ int count = 0, size = layer_attributes_buf.end() - layer_attributes_buf.begin();
+
+ for (uint32_t id : id_list) {
+ if (layer_attributes_buf[count].sync(
+ DST.draw_ctx.scene, DST.draw_ctx.view_layer, layer_attributes.lookup(id))) {
+ /* Check if the buffer is full. */
+ if (++count == size) {
+ break;
+ }
+ }
+ }
+
+ layer_attributes_buf[0].buffer_length = count;
+}
+
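Manager::sync_layer_attributes() above sorts the attribute hash IDs so that lookups into the UBO can binary-search them. A simplified host-side analogue of that lookup, assuming a sorted std::vector<uint32_t> of hashes mirroring layer_attributes_buf:

    #include <algorithm>
    #include <cstdint>
    #include <vector>

    /* Returns the buffer index of `hash`, or -1 if it is absent.
     * Mirrors the search a shader would run over the sorted ID list. */
    static int layer_attr_find(const std::vector<uint32_t> &sorted_ids, uint32_t hash)
    {
      auto it = std::lower_bound(sorted_ids.begin(), sorted_ids.end(), hash);
      if (it != sorted_ids.end() && *it == hash) {
        return int(it - sorted_ids.begin());
      }
      return -1;
    }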
void Manager::end_sync()
{
GPU_debug_group_begin("Manager.end_sync");
+ sync_layer_attributes();
+
matrix_buf.push_update();
bounds_buf.push_update();
infos_buf.push_update();
attributes_buf.push_update();
+ layer_attributes_buf.push_update();
attributes_buf_legacy.push_update();
/* Useful for debugging the following resource finalize. But will trigger the drawing of the GPU
@@ -100,6 +133,7 @@ void Manager::resource_bind()
GPU_storagebuf_bind(matrix_buf, DRW_OBJ_MAT_SLOT);
GPU_storagebuf_bind(infos_buf, DRW_OBJ_INFOS_SLOT);
GPU_storagebuf_bind(attributes_buf, DRW_OBJ_ATTR_SLOT);
+ GPU_uniformbuf_bind(layer_attributes_buf, DRW_LAYER_ATTR_UBO_SLOT);
/* 2 is the hardcoded location of the uniform attr UBO. */
/* TODO(@fclem): Remove this workaround. */
GPU_uniformbuf_bind(attributes_buf_legacy, 2);
diff --git a/source/blender/draw/intern/draw_manager.h b/source/blender/draw/intern/draw_manager.h
index 4f71e665390..52129049269 100644
--- a/source/blender/draw/intern/draw_manager.h
+++ b/source/blender/draw/intern/draw_manager.h
@@ -335,6 +335,7 @@ typedef enum {
DRW_UNIFORM_BLOCK_OBMATS,
DRW_UNIFORM_BLOCK_OBINFOS,
DRW_UNIFORM_BLOCK_OBATTRS,
+ DRW_UNIFORM_BLOCK_VLATTRS,
DRW_UNIFORM_RESOURCE_CHUNK,
DRW_UNIFORM_RESOURCE_ID,
/** Legacy / Fallback */
@@ -441,7 +442,12 @@ struct DRWView {
/** Parent view if this is a sub view. NULL otherwise. */
struct DRWView *parent;
- ViewInfos storage;
+ ViewMatrices storage;
+
+ float4 clip_planes[6];
+
+ float4x4 persmat;
+ float4x4 persinv;
/** Number of active clip planes. */
int clip_planes_len;
/** Does culling result needs to be updated. */
@@ -522,6 +528,11 @@ typedef struct DRWData {
struct GPUUniformBuf **matrices_ubo;
struct GPUUniformBuf **obinfos_ubo;
struct GHash *obattrs_ubo_pool;
+ struct GHash *vlattrs_name_cache;
+ struct ListBase vlattrs_name_list;
+ struct LayerAttribute *vlattrs_buf;
+ struct GPUUniformBuf *vlattrs_ubo;
+ bool vlattrs_ubo_ready;
uint ubo_len;
/** Per draw-call volume object data. */
void *volume_grids_ubos; /* VolumeUniformBufPool */
@@ -593,7 +604,6 @@ typedef struct DRWManager {
struct GPUFrameBuffer *default_framebuffer;
float size[2];
float inv_size[2];
- float screenvecs[2][3];
float pixsize;
struct {
@@ -620,7 +630,7 @@ typedef struct DRWManager {
uint primary_view_num;
/** TODO(@fclem): Remove this. Only here to support
* shaders without common_view_lib.glsl */
- ViewInfos view_storage_cpy;
+ ViewMatrices view_storage_cpy;
#ifdef USE_GPU_SELECT
uint select_id;
@@ -687,6 +697,8 @@ void drw_uniform_attrs_pool_update(struct GHash *table,
struct Object *dupli_parent,
struct DupliObject *dupli_source);
+GPUUniformBuf *drw_ensure_layer_attribute_buffer(void);
+
double *drw_engine_data_cache_time_get(GPUViewport *viewport);
void *drw_engine_data_engine_data_create(GPUViewport *viewport, void *engine_type);
void *drw_engine_data_engine_data_get(GPUViewport *viewport, void *engine_handle);
@@ -694,6 +706,16 @@ bool drw_engine_data_engines_data_validate(GPUViewport *viewport, void **engine_
void drw_engine_data_cache_release(GPUViewport *viewport);
void drw_engine_data_free(GPUViewport *viewport);
+struct DRW_Attributes;
+struct DRW_MeshCDMask;
+struct GPUMaterial;
+void DRW_mesh_get_attributes(struct Object *object,
+ struct Mesh *me,
+ struct GPUMaterial **gpumat_array,
+ int gpumat_array_len,
+ struct DRW_Attributes *r_attrs,
+ struct DRW_MeshCDMask *r_cd_needed);
+
void DRW_manager_begin_sync(void);
void DRW_manager_end_sync(void);
diff --git a/source/blender/draw/intern/draw_manager.hh b/source/blender/draw/intern/draw_manager.hh
index fbd3d28d3f4..0a865179cee 100644
--- a/source/blender/draw/intern/draw_manager.hh
+++ b/source/blender/draw/intern/draw_manager.hh
@@ -44,6 +44,7 @@ class Manager {
using ObjectBoundsBuf = StorageArrayBuffer<ObjectBounds, 128>;
using ObjectInfosBuf = StorageArrayBuffer<ObjectInfos, 128>;
using ObjectAttributeBuf = StorageArrayBuffer<ObjectAttribute, 128>;
+ using LayerAttributeBuf = UniformArrayBuffer<LayerAttribute, 512>;
/**
* TODO(@fclem): Remove once we get rid of old EEVEE code-base.
* `DRW_RESOURCE_CHUNK_LEN = 512`.
@@ -87,6 +88,16 @@ class Manager {
ObjectAttributeLegacyBuf attributes_buf_legacy;
/**
+ * Table of all View Layer attributes required by shaders, used to populate the buffer below.
+ */
+ Map<uint32_t, GPULayerAttr> layer_attributes;
+
+ /**
+ * Buffer of layer attribute values, indexed and sorted by the hash.
+ */
+ LayerAttributeBuf layer_attributes_buf;
+
+ /**
* List of textures coming from Image data-blocks.
* They need to be reference-counted in order to avoid being freed in another thread.
*/
@@ -131,6 +142,11 @@ class Manager {
Span<GPUMaterial *> materials);
/**
+ * Collect necessary View Layer attributes.
+ */
+ void register_layer_attributes(GPUMaterial *material);
+
+ /**
* Submit a pass for drawing. All resource reference will be dereferenced and commands will be
* sent to GPU.
*/
@@ -169,6 +185,9 @@ class Manager {
void debug_bind();
void resource_bind();
+
+ private:
+ void sync_layer_attributes();
};
inline ResourceHandle Manager::resource_handle(const ObjectRef ref)
@@ -229,6 +248,19 @@ inline void Manager::extract_object_attributes(ResourceHandle handle,
}
}
+inline void Manager::register_layer_attributes(GPUMaterial *material)
+{
+ const ListBase *attr_list = GPU_material_layer_attributes(material);
+
+ if (attr_list != nullptr) {
+ LISTBASE_FOREACH (const GPULayerAttr *, attr, attr_list) {
+ /** Since layer attributes are global to the whole render pass,
+ * this only collects a table of their names. */
+ layer_attributes.add(attr->hash_code, *attr);
+ }
+ }
+}
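register_layer_attributes() only records which layer attributes the shaders reference; the values themselves are resolved later in sync_layer_attributes(). A usage sketch, assuming `manager` and a `materials` span are provided by the engine's sync step (both names hypothetical):

    for (GPUMaterial *material : materials) {
      /* Keyed by hash_code in the Map, so duplicates across materials are merged. */
      manager.register_layer_attributes(material);
    }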
+
} // namespace blender::draw
/* TODO(@fclem): This is for testing. The manager should be passed to the engine through the
diff --git a/source/blender/draw/intern/draw_manager_data.c b/source/blender/draw/intern/draw_manager_data.cc
index c75049508f9..29b1493ec5e 100644
--- a/source/blender/draw/intern/draw_manager_data.c
+++ b/source/blender/draw/intern/draw_manager_data.cc
@@ -5,7 +5,11 @@
* \ingroup draw
*/
+#include "DRW_pbvh.h"
+
+#include "draw_attributes.h"
#include "draw_manager.h"
+#include "draw_pbvh.h"
#include "BKE_curve.h"
#include "BKE_duplilist.h"
@@ -26,10 +30,11 @@
#include "DNA_meta_types.h"
#include "DNA_screen_types.h"
-#include "BLI_alloca.h"
+#include "BLI_array.hh"
#include "BLI_hash.h"
#include "BLI_link_utils.h"
#include "BLI_listbase.h"
+#include "BLI_math_bits.h"
#include "BLI_memblock.h"
#include "BLI_mempool.h"
@@ -37,7 +42,6 @@
# include "BLI_math_bits.h"
#endif
-#include "GPU_buffers.h"
#include "GPU_capabilities.h"
#include "GPU_material.h"
#include "GPU_uniform_buffer.h"
@@ -60,13 +64,13 @@
static void draw_call_sort(DRWCommand *array, DRWCommand *array_tmp, int array_len)
{
- /* Count unique batches. Tt's not really important if
+ /* Count unique batches. It's not really important if
* there is collisions. If there is a lot of different batches,
* the sorting benefit will be negligible.
* So at least sort fast! */
uchar idx[128] = {0};
/* Shift by 6 positions knowing each GPUBatch is > 64 bytes */
-#define KEY(a) ((((size_t)((a).draw.batch)) >> 6) % ARRAY_SIZE(idx))
+#define KEY(a) ((size_t((a).draw.batch) >> 6) % ARRAY_SIZE(idx))
BLI_assert(array_len <= ARRAY_SIZE(idx));
for (int i = 0; i < array_len; i++) {
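The KEY macro change above is only a C++ cast-style cleanup; the bucketing idea is unchanged. A standalone sketch of that key, assuming the same 128-entry counting array as in the function:

    #include <cstddef>
    #include <cstdint>

    /* GPUBatch allocations are larger than 64 bytes, so shifting the pointer
     * right by 6 bits still separates distinct batches well enough; modulo 128
     * maps the result into the counting array used for the sort. Collisions
     * only make the sort slightly less effective, never incorrect. */
    static std::size_t batch_sort_key(const void *batch)
    {
      return std::size_t(reinterpret_cast<std::uintptr_t>(batch) >> 6) % 128;
    }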
@@ -98,9 +102,9 @@ void drw_resource_buffer_finish(DRWData *vmempool)
/* TODO: find a better system. Currently a lot of obinfos UBOs are going to be unused
* if not rendering with Eevee. */
- if (vmempool->matrices_ubo == NULL) {
- vmempool->matrices_ubo = MEM_callocN(list_size, __func__);
- vmempool->obinfos_ubo = MEM_callocN(list_size, __func__);
+ if (vmempool->matrices_ubo == nullptr) {
+ vmempool->matrices_ubo = static_cast<GPUUniformBuf **>(MEM_callocN(list_size, __func__));
+ vmempool->obinfos_ubo = static_cast<GPUUniformBuf **>(MEM_callocN(list_size, __func__));
vmempool->ubo_len = ubo_len;
}
@@ -111,8 +115,10 @@ void drw_resource_buffer_finish(DRWData *vmempool)
}
if (ubo_len != vmempool->ubo_len) {
- vmempool->matrices_ubo = MEM_recallocN(vmempool->matrices_ubo, list_size);
- vmempool->obinfos_ubo = MEM_recallocN(vmempool->obinfos_ubo, list_size);
+ vmempool->matrices_ubo = static_cast<GPUUniformBuf **>(
+ MEM_recallocN(vmempool->matrices_ubo, list_size));
+ vmempool->obinfos_ubo = static_cast<GPUUniformBuf **>(
+ MEM_recallocN(vmempool->obinfos_ubo, list_size));
vmempool->ubo_len = ubo_len;
}
@@ -120,7 +126,7 @@ void drw_resource_buffer_finish(DRWData *vmempool)
for (int i = 0; i < ubo_len; i++) {
void *data_obmat = BLI_memblock_elem_get(vmempool->obmats, i, 0);
void *data_infos = BLI_memblock_elem_get(vmempool->obinfos, i, 0);
- if (vmempool->matrices_ubo[i] == NULL) {
+ if (vmempool->matrices_ubo[i] == nullptr) {
vmempool->matrices_ubo[i] = GPU_uniformbuf_create(sizeof(DRWObjectMatrix) *
DRW_RESOURCE_CHUNK_LEN);
vmempool->obinfos_ubo[i] = GPU_uniformbuf_create(sizeof(DRWObjectInfos) *
@@ -133,11 +139,12 @@ void drw_resource_buffer_finish(DRWData *vmempool)
DRW_uniform_attrs_pool_flush_all(vmempool->obattrs_ubo_pool);
/* Aligned alloc to avoid unaligned memcpy. */
- DRWCommandChunk *chunk_tmp = MEM_mallocN_aligned(sizeof(DRWCommandChunk), 16, "tmp call chunk");
+ DRWCommandChunk *chunk_tmp = static_cast<DRWCommandChunk *>(
+ MEM_mallocN_aligned(sizeof(DRWCommandChunk), 16, __func__));
DRWCommandChunk *chunk;
BLI_memblock_iter iter;
BLI_memblock_iternew(vmempool->commands, &iter);
- while ((chunk = BLI_memblock_iterstep(&iter))) {
+ while ((chunk = static_cast<DRWCommandChunk *>(BLI_memblock_iterstep(&iter)))) {
bool sortable = true;
/* We can only sort chunks that contain #DRWCommandDraw only. */
for (int i = 0; i < ARRAY_SIZE(chunk->command_type) && sortable; i++) {
@@ -175,7 +182,7 @@ static void drw_shgroup_uniform_create_ex(DRWShadingGroup *shgroup,
DRWUniformChunk *unichunk = shgroup->uniforms;
/* Happens on first uniform or if chunk is full. */
if (!unichunk || unichunk->uniform_used == unichunk->uniform_len) {
- unichunk = BLI_memblock_alloc(DST.vmempool->uniforms);
+ unichunk = static_cast<DRWUniformChunk *>(BLI_memblock_alloc(DST.vmempool->uniforms));
unichunk->uniform_len = ARRAY_SIZE(shgroup->uniforms->uniforms);
unichunk->uniform_used = 0;
BLI_LINKS_PREPEND(shgroup->uniforms, unichunk);
@@ -239,7 +246,8 @@ static void drw_shgroup_uniform(DRWShadingGroup *shgroup,
DRW_UNIFORM_TEXTURE,
DRW_UNIFORM_TEXTURE_REF));
int location = GPU_shader_get_uniform(shgroup->shader, name);
- drw_shgroup_uniform_create_ex(shgroup, location, type, value, 0, length, arraysize);
+ drw_shgroup_uniform_create_ex(
+ shgroup, location, type, value, GPU_SAMPLER_DEFAULT, length, arraysize);
}
void DRW_shgroup_uniform_texture_ex(DRWShadingGroup *shgroup,
@@ -247,7 +255,7 @@ void DRW_shgroup_uniform_texture_ex(DRWShadingGroup *shgroup,
const GPUTexture *tex,
eGPUSamplerState sampler_state)
{
- BLI_assert(tex != NULL);
+ BLI_assert(tex != nullptr);
int loc = GPU_shader_get_texture_binding(shgroup->shader, name);
drw_shgroup_uniform_create_ex(shgroup, loc, DRW_UNIFORM_TEXTURE, tex, sampler_state, 0, 1);
}
@@ -262,7 +270,7 @@ void DRW_shgroup_uniform_texture_ref_ex(DRWShadingGroup *shgroup,
GPUTexture **tex,
eGPUSamplerState sampler_state)
{
- BLI_assert(tex != NULL);
+ BLI_assert(tex != nullptr);
int loc = GPU_shader_get_texture_binding(shgroup->shader, name);
drw_shgroup_uniform_create_ex(shgroup, loc, DRW_UNIFORM_TEXTURE_REF, tex, sampler_state, 0, 1);
}
@@ -274,23 +282,24 @@ void DRW_shgroup_uniform_texture_ref(DRWShadingGroup *shgroup, const char *name,
void DRW_shgroup_uniform_image(DRWShadingGroup *shgroup, const char *name, const GPUTexture *tex)
{
- BLI_assert(tex != NULL);
+ BLI_assert(tex != nullptr);
int loc = GPU_shader_get_texture_binding(shgroup->shader, name);
- drw_shgroup_uniform_create_ex(shgroup, loc, DRW_UNIFORM_IMAGE, tex, 0, 0, 1);
+ drw_shgroup_uniform_create_ex(shgroup, loc, DRW_UNIFORM_IMAGE, tex, GPU_SAMPLER_DEFAULT, 0, 1);
}
void DRW_shgroup_uniform_image_ref(DRWShadingGroup *shgroup, const char *name, GPUTexture **tex)
{
- BLI_assert(tex != NULL);
+ BLI_assert(tex != nullptr);
int loc = GPU_shader_get_texture_binding(shgroup->shader, name);
- drw_shgroup_uniform_create_ex(shgroup, loc, DRW_UNIFORM_IMAGE_REF, tex, 0, 0, 1);
+ drw_shgroup_uniform_create_ex(
+ shgroup, loc, DRW_UNIFORM_IMAGE_REF, tex, GPU_SAMPLER_DEFAULT, 0, 1);
}
void DRW_shgroup_uniform_block_ex(DRWShadingGroup *shgroup,
const char *name,
const GPUUniformBuf *ubo DRW_DEBUG_FILE_LINE_ARGS)
{
- BLI_assert(ubo != NULL);
+ BLI_assert(ubo != nullptr);
int loc = GPU_shader_get_uniform_block_binding(shgroup->shader, name);
if (loc == -1) {
#ifdef DRW_UNUSED_RESOURCE_TRACKING
@@ -304,14 +313,14 @@ void DRW_shgroup_uniform_block_ex(DRWShadingGroup *shgroup,
#endif
return;
}
- drw_shgroup_uniform_create_ex(shgroup, loc, DRW_UNIFORM_BLOCK, ubo, 0, 0, 1);
+ drw_shgroup_uniform_create_ex(shgroup, loc, DRW_UNIFORM_BLOCK, ubo, GPU_SAMPLER_DEFAULT, 0, 1);
}
void DRW_shgroup_uniform_block_ref_ex(DRWShadingGroup *shgroup,
const char *name,
GPUUniformBuf **ubo DRW_DEBUG_FILE_LINE_ARGS)
{
- BLI_assert(ubo != NULL);
+ BLI_assert(ubo != nullptr);
int loc = GPU_shader_get_uniform_block_binding(shgroup->shader, name);
if (loc == -1) {
#ifdef DRW_UNUSED_RESOURCE_TRACKING
@@ -325,14 +334,15 @@ void DRW_shgroup_uniform_block_ref_ex(DRWShadingGroup *shgroup,
#endif
return;
}
- drw_shgroup_uniform_create_ex(shgroup, loc, DRW_UNIFORM_BLOCK_REF, ubo, 0, 0, 1);
+ drw_shgroup_uniform_create_ex(
+ shgroup, loc, DRW_UNIFORM_BLOCK_REF, ubo, GPU_SAMPLER_DEFAULT, 0, 1);
}
void DRW_shgroup_storage_block_ex(DRWShadingGroup *shgroup,
const char *name,
const GPUStorageBuf *ssbo DRW_DEBUG_FILE_LINE_ARGS)
{
- BLI_assert(ssbo != NULL);
+ BLI_assert(ssbo != nullptr);
/* TODO(@fclem): Fix naming inconsistency. */
int loc = GPU_shader_get_ssbo(shgroup->shader, name);
if (loc == -1) {
@@ -347,14 +357,15 @@ void DRW_shgroup_storage_block_ex(DRWShadingGroup *shgroup,
#endif
return;
}
- drw_shgroup_uniform_create_ex(shgroup, loc, DRW_UNIFORM_STORAGE_BLOCK, ssbo, 0, 0, 1);
+ drw_shgroup_uniform_create_ex(
+ shgroup, loc, DRW_UNIFORM_STORAGE_BLOCK, ssbo, GPU_SAMPLER_DEFAULT, 0, 1);
}
void DRW_shgroup_storage_block_ref_ex(DRWShadingGroup *shgroup,
const char *name,
GPUStorageBuf **ssbo DRW_DEBUG_FILE_LINE_ARGS)
{
- BLI_assert(ssbo != NULL);
+ BLI_assert(ssbo != nullptr);
/* TODO(@fclem): Fix naming inconsistency. */
int loc = GPU_shader_get_ssbo(shgroup->shader, name);
if (loc == -1) {
@@ -369,7 +380,8 @@ void DRW_shgroup_storage_block_ref_ex(DRWShadingGroup *shgroup,
#endif
return;
}
- drw_shgroup_uniform_create_ex(shgroup, loc, DRW_UNIFORM_STORAGE_BLOCK_REF, ssbo, 0, 0, 1);
+ drw_shgroup_uniform_create_ex(
+ shgroup, loc, DRW_UNIFORM_STORAGE_BLOCK_REF, ssbo, GPU_SAMPLER_DEFAULT, 0, 1);
}
void DRW_shgroup_uniform_bool(DRWShadingGroup *shgroup,
@@ -518,7 +530,8 @@ void DRW_shgroup_uniform_mat4_copy(DRWShadingGroup *shgroup,
* and array-size used to determine the number of elements
* copied in draw_update_uniforms. */
for (int i = 0; i < 4; i++) {
- drw_shgroup_uniform_create_ex(shgroup, location, DRW_UNIFORM_FLOAT_COPY, &value[i], 0, 4, 4);
+ drw_shgroup_uniform_create_ex(
+ shgroup, location, DRW_UNIFORM_FLOAT_COPY, &value[i], GPU_SAMPLER_DEFAULT, 4, 4);
}
}
@@ -538,8 +551,13 @@ void DRW_shgroup_vertex_buffer_ex(DRWShadingGroup *shgroup,
#endif
return;
}
- drw_shgroup_uniform_create_ex(
- shgroup, location, DRW_UNIFORM_VERTEX_BUFFER_AS_STORAGE, vertex_buffer, 0, 0, 1);
+ drw_shgroup_uniform_create_ex(shgroup,
+ location,
+ DRW_UNIFORM_VERTEX_BUFFER_AS_STORAGE,
+ vertex_buffer,
+ GPU_SAMPLER_DEFAULT,
+ 0,
+ 1);
}
void DRW_shgroup_vertex_buffer_ref_ex(DRWShadingGroup *shgroup,
@@ -558,32 +576,47 @@ void DRW_shgroup_vertex_buffer_ref_ex(DRWShadingGroup *shgroup,
#endif
return;
}
- drw_shgroup_uniform_create_ex(
- shgroup, location, DRW_UNIFORM_VERTEX_BUFFER_AS_STORAGE_REF, vertex_buffer, 0, 0, 1);
+ drw_shgroup_uniform_create_ex(shgroup,
+ location,
+ DRW_UNIFORM_VERTEX_BUFFER_AS_STORAGE_REF,
+ vertex_buffer,
+ GPU_SAMPLER_DEFAULT,
+ 0,
+ 1);
}
void DRW_shgroup_buffer_texture(DRWShadingGroup *shgroup,
const char *name,
GPUVertBuf *vertex_buffer)
{
- int location = GPU_shader_get_ssbo(shgroup->shader, name);
+ int location = GPU_shader_get_texture_binding(shgroup->shader, name);
if (location == -1) {
return;
}
- drw_shgroup_uniform_create_ex(
- shgroup, location, DRW_UNIFORM_VERTEX_BUFFER_AS_TEXTURE, vertex_buffer, 0, 0, 1);
+ drw_shgroup_uniform_create_ex(shgroup,
+ location,
+ DRW_UNIFORM_VERTEX_BUFFER_AS_TEXTURE,
+ vertex_buffer,
+ GPU_SAMPLER_DEFAULT,
+ 0,
+ 1);
}
void DRW_shgroup_buffer_texture_ref(DRWShadingGroup *shgroup,
const char *name,
GPUVertBuf **vertex_buffer)
{
- int location = GPU_shader_get_ssbo(shgroup->shader, name);
+ int location = GPU_shader_get_texture_binding(shgroup->shader, name);
if (location == -1) {
return;
}
- drw_shgroup_uniform_create_ex(
- shgroup, location, DRW_UNIFORM_VERTEX_BUFFER_AS_TEXTURE_REF, vertex_buffer, 0, 0, 1);
+ drw_shgroup_uniform_create_ex(shgroup,
+ location,
+ DRW_UNIFORM_VERTEX_BUFFER_AS_TEXTURE_REF,
+ vertex_buffer,
+ GPU_SAMPLER_DEFAULT,
+ 0,
+ 1);
}
/** \} */
@@ -593,11 +626,11 @@ void DRW_shgroup_buffer_texture_ref(DRWShadingGroup *shgroup,
static void drw_call_calc_orco(Object *ob, float (*r_orcofacs)[4])
{
- ID *ob_data = (ob) ? ob->data : NULL;
+ ID *ob_data = (ob) ? static_cast<ID *>(ob->data) : nullptr;
float loc[3], size[3];
- float *texcoloc = NULL;
- float *texcosize = NULL;
- if (ob_data != NULL) {
+ float *texcoloc = nullptr;
+ float *texcosize = nullptr;
+ if (ob_data != nullptr) {
switch (GS(ob_data->name)) {
case ID_VO: {
BoundBox *bbox = BKE_volume_boundbox_get(ob);
@@ -608,7 +641,7 @@ static void drw_call_calc_orco(Object *ob, float (*r_orcofacs)[4])
break;
}
case ID_ME:
- BKE_mesh_texspace_get_reference((Mesh *)ob_data, NULL, &texcoloc, &texcosize);
+ BKE_mesh_texspace_get_reference((Mesh *)ob_data, nullptr, &texcoloc, &texcosize);
break;
case ID_CU_LEGACY: {
Curve *cu = (Curve *)ob_data;
@@ -628,7 +661,7 @@ static void drw_call_calc_orco(Object *ob, float (*r_orcofacs)[4])
}
}
- if ((texcoloc != NULL) && (texcosize != NULL)) {
+ if ((texcoloc != nullptr) && (texcosize != nullptr)) {
mul_v3_v3fl(r_orcofacs[1], texcosize, 2.0f);
invert_v3(r_orcofacs[1]);
sub_v3_v3v3(r_orcofacs[0], texcoloc, texcosize);
@@ -645,7 +678,7 @@ BLI_INLINE void drw_call_matrix_init(DRWObjectMatrix *ob_mats, Object *ob, float
{
copy_m4_m4(ob_mats->model, obmat);
if (ob) {
- copy_m4_m4(ob_mats->modelinverse, ob->imat);
+ copy_m4_m4(ob_mats->modelinverse, ob->world_to_object);
}
else {
/* WATCH: Can be costly. */
@@ -666,7 +699,7 @@ static void drw_call_obinfos_init(DRWObjectInfos *ob_infos, Object *ob)
/* TODO(fclem): this is rather costly to do at runtime. Maybe we can
* put it in ob->runtime and make depsgraph ensure it is up to date. */
BLI_hash_int_2d(BLI_hash_string(ob->id.name + 2), 0);
- ob_infos->ob_random = random * (1.0f / (float)0xFFFFFFFF);
+ ob_infos->ob_random = random * (1.0f / float(0xFFFFFFFF));
/* Object State. */
ob_infos->ob_flag = 1.0f; /* Required to have a correct sign */
ob_infos->ob_flag += (ob->base_flag & BASE_SELECTED) ? (1 << 1) : 0;
@@ -687,12 +720,12 @@ static void drw_call_obinfos_init(DRWObjectInfos *ob_infos, Object *ob)
static void drw_call_culling_init(DRWCullingState *cull, Object *ob)
{
const BoundBox *bbox;
- if (ob != NULL && (bbox = BKE_object_boundbox_get(ob))) {
+ if (ob != nullptr && (bbox = BKE_object_boundbox_get(ob))) {
float corner[3];
/* Get BoundSphere center and radius from the BoundBox. */
mid_v3_v3v3(cull->bsphere.center, bbox->vec[0], bbox->vec[6]);
- mul_v3_m4v3(corner, ob->obmat, bbox->vec[0]);
- mul_m4_v3(ob->obmat, cull->bsphere.center);
+ mul_v3_m4v3(corner, ob->object_to_world, bbox->vec[0]);
+ mul_m4_v3(ob->object_to_world, cull->bsphere.center);
cull->bsphere.radius = len_v3v3(cull->bsphere.center, corner);
/* Bypass test for very large objects (see T67319). */
@@ -705,16 +738,19 @@ static void drw_call_culling_init(DRWCullingState *cull, Object *ob)
cull->bsphere.radius = -1.0f;
}
/* Reset user data */
- cull->user_data = NULL;
+ cull->user_data = nullptr;
}
static DRWResourceHandle drw_resource_handle_new(float (*obmat)[4], Object *ob)
{
- DRWCullingState *culling = BLI_memblock_alloc(DST.vmempool->cullstates);
- DRWObjectMatrix *ob_mats = BLI_memblock_alloc(DST.vmempool->obmats);
+ DRWCullingState *culling = static_cast<DRWCullingState *>(
+ BLI_memblock_alloc(DST.vmempool->cullstates));
+ DRWObjectMatrix *ob_mats = static_cast<DRWObjectMatrix *>(
+ BLI_memblock_alloc(DST.vmempool->obmats));
/* FIXME Meh, not always needed but can be accessed after creation.
* Also it needs to have the same resource handle. */
- DRWObjectInfos *ob_infos = BLI_memblock_alloc(DST.vmempool->obinfos);
+ DRWObjectInfos *ob_infos = static_cast<DRWObjectInfos *>(
+ BLI_memblock_alloc(DST.vmempool->obinfos));
UNUSED_VARS(ob_infos);
DRWResourceHandle handle = DST.resource_handle;
@@ -731,7 +767,7 @@ static DRWResourceHandle drw_resource_handle_new(float (*obmat)[4], Object *ob)
return handle;
}
-uint32_t DRW_object_resource_id_get(Object *UNUSED(ob))
+uint32_t DRW_object_resource_id_get(Object * /*ob*/)
{
DRWResourceHandle handle = DST.ob_handle;
if (handle == 0) {
@@ -745,13 +781,13 @@ static DRWResourceHandle drw_resource_handle(DRWShadingGroup *shgroup,
float (*obmat)[4],
Object *ob)
{
- if (ob == NULL) {
- if (obmat == NULL) {
+ if (ob == nullptr) {
+ if (obmat == nullptr) {
DRWResourceHandle handle = 0;
return handle;
}
- return drw_resource_handle_new(obmat, NULL);
+ return drw_resource_handle_new(obmat, nullptr);
}
if (DST.ob_handle == 0) {
@@ -762,8 +798,8 @@ static DRWResourceHandle drw_resource_handle(DRWShadingGroup *shgroup,
if (shgroup->objectinfo) {
if (!DST.ob_state_obinfo_init) {
DST.ob_state_obinfo_init = true;
- DRWObjectInfos *ob_infos = DRW_memblock_elem_from_handle(DST.vmempool->obinfos,
- &DST.ob_handle);
+ DRWObjectInfos *ob_infos = static_cast<DRWObjectInfos *>(
+ DRW_memblock_elem_from_handle(DST.vmempool->obinfos, &DST.ob_handle));
drw_call_obinfos_init(ob_infos, ob);
}
@@ -783,20 +819,21 @@ static DRWResourceHandle drw_resource_handle(DRWShadingGroup *shgroup,
static void command_type_set(uint64_t *command_type_bits, int index, eDRWCommandType type)
{
- command_type_bits[index / 16] |= ((uint64_t)type) << ((index % 16) * 4);
+ command_type_bits[index / 16] |= uint64_t(type) << ((index % 16) * 4);
}
eDRWCommandType command_type_get(const uint64_t *command_type_bits, int index)
{
- return ((command_type_bits[index / 16] >> ((index % 16) * 4)) & 0xF);
+ return eDRWCommandType((command_type_bits[index / 16] >> ((index % 16) * 4)) & 0xF);
}
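command_type_set()/command_type_get() pack one 4-bit command type per entry, sixteen per uint64_t word; the change above only adds the explicit enum cast on read. A self-contained sketch of the same packing scheme, using plain integers in place of eDRWCommandType:

    #include <cstdint>

    /* 4 bits per entry, 16 entries per 64-bit word. */
    static void packed4_set(uint64_t *words, int index, uint32_t value)
    {
      words[index / 16] |= uint64_t(value & 0xF) << ((index % 16) * 4);
    }

    static uint32_t packed4_get(const uint64_t *words, int index)
    {
      return uint32_t((words[index / 16] >> ((index % 16) * 4)) & 0xF);
    }

    /* Usage: uint64_t words[2] = {0}; packed4_set(words, 17, 3);
     * packed4_get(words, 17) == 3. */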
static void *drw_command_create(DRWShadingGroup *shgroup, eDRWCommandType type)
{
DRWCommandChunk *chunk = shgroup->cmd.last;
- if (chunk == NULL) {
- DRWCommandSmallChunk *smallchunk = BLI_memblock_alloc(DST.vmempool->commands_small);
+ if (chunk == nullptr) {
+ DRWCommandSmallChunk *smallchunk = static_cast<DRWCommandSmallChunk *>(
+ BLI_memblock_alloc(DST.vmempool->commands_small));
smallchunk->command_len = ARRAY_SIZE(smallchunk->commands);
smallchunk->command_used = 0;
smallchunk->command_type[0] = 0x0lu;
@@ -804,7 +841,7 @@ static void *drw_command_create(DRWShadingGroup *shgroup, eDRWCommandType type)
BLI_LINKS_APPEND(&shgroup->cmd, chunk);
}
else if (chunk->command_used == chunk->command_len) {
- chunk = BLI_memblock_alloc(DST.vmempool->commands);
+ chunk = static_cast<DRWCommandChunk *>(BLI_memblock_alloc(DST.vmempool->commands));
chunk->command_len = ARRAY_SIZE(chunk->commands);
chunk->command_used = 0;
memset(chunk->command_type, 0x0, sizeof(chunk->command_type));
@@ -818,7 +855,7 @@ static void *drw_command_create(DRWShadingGroup *shgroup, eDRWCommandType type)
static void drw_command_draw(DRWShadingGroup *shgroup, GPUBatch *batch, DRWResourceHandle handle)
{
- DRWCommandDraw *cmd = drw_command_create(shgroup, DRW_CMD_DRAW);
+ DRWCommandDraw *cmd = static_cast<DRWCommandDraw *>(drw_command_create(shgroup, DRW_CMD_DRAW));
cmd->batch = batch;
cmd->handle = handle;
}
@@ -826,7 +863,8 @@ static void drw_command_draw(DRWShadingGroup *shgroup, GPUBatch *batch, DRWResou
static void drw_command_draw_range(
DRWShadingGroup *shgroup, GPUBatch *batch, DRWResourceHandle handle, uint start, uint count)
{
- DRWCommandDrawRange *cmd = drw_command_create(shgroup, DRW_CMD_DRAW_RANGE);
+ DRWCommandDrawRange *cmd = static_cast<DRWCommandDrawRange *>(
+ drw_command_create(shgroup, DRW_CMD_DRAW_RANGE));
cmd->batch = batch;
cmd->handle = handle;
cmd->vert_first = start;
@@ -836,7 +874,8 @@ static void drw_command_draw_range(
static void drw_command_draw_instance(
DRWShadingGroup *shgroup, GPUBatch *batch, DRWResourceHandle handle, uint count, bool use_attr)
{
- DRWCommandDrawInstance *cmd = drw_command_create(shgroup, DRW_CMD_DRAW_INSTANCE);
+ DRWCommandDrawInstance *cmd = static_cast<DRWCommandDrawInstance *>(
+ drw_command_create(shgroup, DRW_CMD_DRAW_INSTANCE));
cmd->batch = batch;
cmd->handle = handle;
cmd->inst_count = count;
@@ -846,7 +885,8 @@ static void drw_command_draw_instance(
static void drw_command_draw_intance_range(
DRWShadingGroup *shgroup, GPUBatch *batch, DRWResourceHandle handle, uint start, uint count)
{
- DRWCommandDrawInstanceRange *cmd = drw_command_create(shgroup, DRW_CMD_DRAW_INSTANCE_RANGE);
+ DRWCommandDrawInstanceRange *cmd = static_cast<DRWCommandDrawInstanceRange *>(
+ drw_command_create(shgroup, DRW_CMD_DRAW_INSTANCE_RANGE));
cmd->batch = batch;
cmd->handle = handle;
cmd->inst_first = start;
@@ -858,7 +898,8 @@ static void drw_command_compute(DRWShadingGroup *shgroup,
int groups_y_len,
int groups_z_len)
{
- DRWCommandCompute *cmd = drw_command_create(shgroup, DRW_CMD_COMPUTE);
+ DRWCommandCompute *cmd = static_cast<DRWCommandCompute *>(
+ drw_command_create(shgroup, DRW_CMD_COMPUTE));
cmd->groups_x_len = groups_x_len;
cmd->groups_y_len = groups_y_len;
cmd->groups_z_len = groups_z_len;
@@ -866,19 +907,22 @@ static void drw_command_compute(DRWShadingGroup *shgroup,
static void drw_command_compute_ref(DRWShadingGroup *shgroup, int groups_ref[3])
{
- DRWCommandComputeRef *cmd = drw_command_create(shgroup, DRW_CMD_COMPUTE_REF);
+ DRWCommandComputeRef *cmd = static_cast<DRWCommandComputeRef *>(
+ drw_command_create(shgroup, DRW_CMD_COMPUTE_REF));
cmd->groups_ref = groups_ref;
}
static void drw_command_compute_indirect(DRWShadingGroup *shgroup, GPUStorageBuf *indirect_buf)
{
- DRWCommandComputeIndirect *cmd = drw_command_create(shgroup, DRW_CMD_COMPUTE_INDIRECT);
+ DRWCommandComputeIndirect *cmd = static_cast<DRWCommandComputeIndirect *>(
+ drw_command_create(shgroup, DRW_CMD_COMPUTE_INDIRECT));
cmd->indirect_buf = indirect_buf;
}
static void drw_command_barrier(DRWShadingGroup *shgroup, eGPUBarrier type)
{
- DRWCommandBarrier *cmd = drw_command_create(shgroup, DRW_CMD_BARRIER);
+ DRWCommandBarrier *cmd = static_cast<DRWCommandBarrier *>(
+ drw_command_create(shgroup, DRW_CMD_BARRIER));
cmd->type = type;
}
@@ -887,7 +931,8 @@ static void drw_command_draw_procedural(DRWShadingGroup *shgroup,
DRWResourceHandle handle,
uint vert_count)
{
- DRWCommandDrawProcedural *cmd = drw_command_create(shgroup, DRW_CMD_DRAW_PROCEDURAL);
+ DRWCommandDrawProcedural *cmd = static_cast<DRWCommandDrawProcedural *>(
+ drw_command_create(shgroup, DRW_CMD_DRAW_PROCEDURAL));
cmd->batch = batch;
cmd->handle = handle;
cmd->vert_count = vert_count;
@@ -898,7 +943,8 @@ static void drw_command_draw_indirect(DRWShadingGroup *shgroup,
DRWResourceHandle handle,
GPUStorageBuf *indirect_buf)
{
- DRWCommandDrawIndirect *cmd = drw_command_create(shgroup, DRW_CMD_DRAW_INDIRECT);
+ DRWCommandDrawIndirect *cmd = static_cast<DRWCommandDrawIndirect *>(
+ drw_command_create(shgroup, DRW_CMD_DRAW_INDIRECT));
cmd->batch = batch;
cmd->handle = handle;
cmd->indirect_buf = indirect_buf;
@@ -907,8 +953,9 @@ static void drw_command_draw_indirect(DRWShadingGroup *shgroup,
static void drw_command_set_select_id(DRWShadingGroup *shgroup, GPUVertBuf *buf, uint select_id)
{
/* Only one can be valid. */
- BLI_assert(buf == NULL || select_id == -1);
- DRWCommandSetSelectID *cmd = drw_command_create(shgroup, DRW_CMD_SELECTID);
+ BLI_assert(buf == nullptr || select_id == -1);
+ DRWCommandSetSelectID *cmd = static_cast<DRWCommandSetSelectID *>(
+ drw_command_create(shgroup, DRW_CMD_SELECTID));
cmd->select_buf = buf;
cmd->select_id = select_id;
}
@@ -921,7 +968,8 @@ static void drw_command_set_stencil_mask(DRWShadingGroup *shgroup,
BLI_assert(write_mask <= 0xFF);
BLI_assert(reference <= 0xFF);
BLI_assert(compare_mask <= 0xFF);
- DRWCommandSetStencil *cmd = drw_command_create(shgroup, DRW_CMD_STENCIL);
+ DRWCommandSetStencil *cmd = static_cast<DRWCommandSetStencil *>(
+ drw_command_create(shgroup, DRW_CMD_STENCIL));
cmd->write_mask = write_mask;
cmd->comp_mask = compare_mask;
cmd->ref = reference;
@@ -936,7 +984,8 @@ static void drw_command_clear(DRWShadingGroup *shgroup,
float depth,
uchar stencil)
{
- DRWCommandClear *cmd = drw_command_create(shgroup, DRW_CMD_CLEAR);
+ DRWCommandClear *cmd = static_cast<DRWCommandClear *>(
+ drw_command_create(shgroup, DRW_CMD_CLEAR));
cmd->clear_channels = channels;
cmd->r = r;
cmd->g = g;
@@ -951,7 +1000,8 @@ static void drw_command_set_mutable_state(DRWShadingGroup *shgroup,
DRWState disable)
{
/* TODO: Restrict what state can be changed. */
- DRWCommandSetMutableState *cmd = drw_command_create(shgroup, DRW_CMD_DRWSTATE);
+ DRWCommandSetMutableState *cmd = static_cast<DRWCommandSetMutableState *>(
+ drw_command_create(shgroup, DRW_CMD_DRWSTATE));
cmd->enable = enable;
cmd->disable = disable;
}
@@ -959,21 +1009,21 @@ static void drw_command_set_mutable_state(DRWShadingGroup *shgroup,
void DRW_shgroup_call_ex(DRWShadingGroup *shgroup,
Object *ob,
float (*obmat)[4],
- struct GPUBatch *geom,
+ GPUBatch *geom,
bool bypass_culling,
void *user_data)
{
- BLI_assert(geom != NULL);
+ BLI_assert(geom != nullptr);
if (G.f & G_FLAG_PICKSEL) {
- drw_command_set_select_id(shgroup, NULL, DST.select_id);
+ drw_command_set_select_id(shgroup, nullptr, DST.select_id);
}
- DRWResourceHandle handle = drw_resource_handle(shgroup, ob ? ob->obmat : obmat, ob);
+ DRWResourceHandle handle = drw_resource_handle(shgroup, ob ? ob->object_to_world : obmat, ob);
drw_command_draw(shgroup, geom, handle);
/* Culling data. */
if (user_data || bypass_culling) {
- DRWCullingState *culling = DRW_memblock_elem_from_handle(DST.vmempool->cullstates,
- &DST.ob_handle);
+ DRWCullingState *culling = static_cast<DRWCullingState *>(
+ DRW_memblock_elem_from_handle(DST.vmempool->cullstates, &DST.ob_handle));
if (user_data) {
culling->user_data = user_data;
@@ -986,24 +1036,24 @@ void DRW_shgroup_call_ex(DRWShadingGroup *shgroup,
}
void DRW_shgroup_call_range(
- DRWShadingGroup *shgroup, struct Object *ob, GPUBatch *geom, uint v_sta, uint v_num)
+ DRWShadingGroup *shgroup, Object *ob, GPUBatch *geom, uint v_sta, uint v_num)
{
- BLI_assert(geom != NULL);
+ BLI_assert(geom != nullptr);
if (G.f & G_FLAG_PICKSEL) {
- drw_command_set_select_id(shgroup, NULL, DST.select_id);
+ drw_command_set_select_id(shgroup, nullptr, DST.select_id);
}
- DRWResourceHandle handle = drw_resource_handle(shgroup, ob ? ob->obmat : NULL, ob);
+ DRWResourceHandle handle = drw_resource_handle(shgroup, ob ? ob->object_to_world : nullptr, ob);
drw_command_draw_range(shgroup, geom, handle, v_sta, v_num);
}
void DRW_shgroup_call_instance_range(
- DRWShadingGroup *shgroup, Object *ob, struct GPUBatch *geom, uint i_sta, uint i_num)
+ DRWShadingGroup *shgroup, Object *ob, GPUBatch *geom, uint i_sta, uint i_num)
{
- BLI_assert(geom != NULL);
+ BLI_assert(geom != nullptr);
if (G.f & G_FLAG_PICKSEL) {
- drw_command_set_select_id(shgroup, NULL, DST.select_id);
+ drw_command_set_select_id(shgroup, nullptr, DST.select_id);
}
- DRWResourceHandle handle = drw_resource_handle(shgroup, ob ? ob->obmat : NULL, ob);
+ DRWResourceHandle handle = drw_resource_handle(shgroup, ob ? ob->object_to_world : nullptr, ob);
drw_command_draw_intance_range(shgroup, geom, handle, i_sta, i_num);
}
@@ -1045,29 +1095,29 @@ static void drw_shgroup_call_procedural_add_ex(DRWShadingGroup *shgroup,
uint vert_count)
{
BLI_assert(vert_count > 0);
- BLI_assert(geom != NULL);
+ BLI_assert(geom != nullptr);
if (G.f & G_FLAG_PICKSEL) {
- drw_command_set_select_id(shgroup, NULL, DST.select_id);
+ drw_command_set_select_id(shgroup, nullptr, DST.select_id);
}
- DRWResourceHandle handle = drw_resource_handle(shgroup, ob ? ob->obmat : NULL, ob);
+ DRWResourceHandle handle = drw_resource_handle(shgroup, ob ? ob->object_to_world : nullptr, ob);
drw_command_draw_procedural(shgroup, geom, handle, vert_count);
}
void DRW_shgroup_call_procedural_points(DRWShadingGroup *shgroup, Object *ob, uint point_count)
{
- struct GPUBatch *geom = drw_cache_procedural_points_get();
+ GPUBatch *geom = drw_cache_procedural_points_get();
drw_shgroup_call_procedural_add_ex(shgroup, geom, ob, point_count);
}
void DRW_shgroup_call_procedural_lines(DRWShadingGroup *shgroup, Object *ob, uint line_count)
{
- struct GPUBatch *geom = drw_cache_procedural_lines_get();
+ GPUBatch *geom = drw_cache_procedural_lines_get();
drw_shgroup_call_procedural_add_ex(shgroup, geom, ob, line_count * 2);
}
void DRW_shgroup_call_procedural_triangles(DRWShadingGroup *shgroup, Object *ob, uint tri_count)
{
- struct GPUBatch *geom = drw_cache_procedural_triangles_get();
+ GPUBatch *geom = drw_cache_procedural_triangles_get();
drw_shgroup_call_procedural_add_ex(shgroup, geom, ob, tri_count * 3);
}
@@ -1076,7 +1126,7 @@ void DRW_shgroup_call_procedural_indirect(DRWShadingGroup *shgroup,
Object *ob,
GPUStorageBuf *indirect_buf)
{
- struct GPUBatch *geom = NULL;
+ GPUBatch *geom = nullptr;
switch (primitive_type) {
case GPU_PRIM_POINTS:
geom = drw_cache_procedural_points_get();
@@ -1097,43 +1147,40 @@ void DRW_shgroup_call_procedural_indirect(DRWShadingGroup *shgroup,
break;
}
if (G.f & G_FLAG_PICKSEL) {
- drw_command_set_select_id(shgroup, NULL, DST.select_id);
+ drw_command_set_select_id(shgroup, nullptr, DST.select_id);
}
- DRWResourceHandle handle = drw_resource_handle(shgroup, ob ? ob->obmat : NULL, ob);
+ DRWResourceHandle handle = drw_resource_handle(shgroup, ob ? ob->object_to_world : nullptr, ob);
drw_command_draw_indirect(shgroup, geom, handle, indirect_buf);
}
-void DRW_shgroup_call_instances(DRWShadingGroup *shgroup,
- Object *ob,
- struct GPUBatch *geom,
- uint count)
+void DRW_shgroup_call_instances(DRWShadingGroup *shgroup, Object *ob, GPUBatch *geom, uint count)
{
- BLI_assert(geom != NULL);
+ BLI_assert(geom != nullptr);
if (G.f & G_FLAG_PICKSEL) {
- drw_command_set_select_id(shgroup, NULL, DST.select_id);
+ drw_command_set_select_id(shgroup, nullptr, DST.select_id);
}
- DRWResourceHandle handle = drw_resource_handle(shgroup, ob ? ob->obmat : NULL, ob);
+ DRWResourceHandle handle = drw_resource_handle(shgroup, ob ? ob->object_to_world : nullptr, ob);
drw_command_draw_instance(shgroup, geom, handle, count, false);
}
void DRW_shgroup_call_instances_with_attrs(DRWShadingGroup *shgroup,
Object *ob,
- struct GPUBatch *geom,
- struct GPUBatch *inst_attributes)
+ GPUBatch *geom,
+ GPUBatch *inst_attributes)
{
- BLI_assert(geom != NULL);
- BLI_assert(inst_attributes != NULL);
+ BLI_assert(geom != nullptr);
+ BLI_assert(inst_attributes != nullptr);
if (G.f & G_FLAG_PICKSEL) {
- drw_command_set_select_id(shgroup, NULL, DST.select_id);
+ drw_command_set_select_id(shgroup, nullptr, DST.select_id);
}
- DRWResourceHandle handle = drw_resource_handle(shgroup, ob ? ob->obmat : NULL, ob);
+ DRWResourceHandle handle = drw_resource_handle(shgroup, ob ? ob->object_to_world : nullptr, ob);
GPUBatch *batch = DRW_temp_batch_instance_request(
- DST.vmempool->idatalist, NULL, inst_attributes, geom);
+ DST.vmempool->idatalist, nullptr, inst_attributes, geom);
drw_command_draw_instance(shgroup, batch, handle, 0, true);
}
#define SCULPT_DEBUG_BUFFERS (G.debug_value == 889)
-typedef struct DRWSculptCallbackData {
+struct DRWSculptCallbackData {
Object *ob;
DRWShadingGroup **shading_groups;
int num_shading_groups;
@@ -1144,7 +1191,9 @@ typedef struct DRWSculptCallbackData {
bool fast_mode; /* Set by draw manager. Do not init. */
int debug_node_nr;
-} DRWSculptCallbackData;
+ PBVHAttrReq *attrs;
+ int attrs_num;
+};
#define SCULPT_DEBUG_COLOR(id) (sculpt_debug_colors[id % 9])
static float sculpt_debug_colors[9][4] = {
@@ -1159,29 +1208,35 @@ static float sculpt_debug_colors[9][4] = {
{0.7f, 0.2f, 1.0f, 1.0f},
};
-static void sculpt_draw_cb(DRWSculptCallbackData *scd, GPU_PBVH_Buffers *buffers)
+static void sculpt_draw_cb(DRWSculptCallbackData *scd,
+ PBVHBatches *batches,
+ PBVH_GPU_Args *pbvh_draw_args)
{
- if (!buffers) {
+ if (!batches) {
return;
}
- /* Meh... use_mask is a bit misleading here. */
- if (scd->use_mask && !GPU_pbvh_buffers_has_overlays(buffers)) {
- return;
+ int primcount;
+ GPUBatch *geom;
+
+ if (!scd->use_wire) {
+ geom = DRW_pbvh_tris_get(batches, scd->attrs, scd->attrs_num, pbvh_draw_args, &primcount);
+ }
+ else {
+ geom = DRW_pbvh_lines_get(batches, scd->attrs, scd->attrs_num, pbvh_draw_args, &primcount);
}
- GPUBatch *geom = GPU_pbvh_buffers_batch_get(buffers, scd->fast_mode, scd->use_wire);
short index = 0;
if (scd->use_mats) {
- index = GPU_pbvh_buffers_material_index_get(buffers);
+ index = drw_pbvh_material_index_get(batches);
if (index >= scd->num_shading_groups) {
index = 0;
}
}
DRWShadingGroup *shgrp = scd->shading_groups[index];
- if (geom != NULL && shgrp != NULL) {
+ if (geom != nullptr && shgrp != nullptr) {
if (SCULPT_DEBUG_BUFFERS) {
/* Color each buffer in a different color. Only works in solid/X-ray mode. */
shgrp = DRW_shgroup_create_sub(shgrp);
@@ -1228,7 +1283,7 @@ static void drw_sculpt_get_frustum_planes(Object *ob, float planes[6][4])
* 4x4 matrix is done by multiplying with the transpose inverse.
* The inverse cancels out here since we transform by inverse(obmat). */
float tmat[4][4];
- transpose_m4_m4(tmat, ob->obmat);
+ transpose_m4_m4(tmat, ob->object_to_world);
for (int i = 0; i < 6; i++) {
mul_m4_v4(tmat, planes[i]);
}
@@ -1237,7 +1292,7 @@ static void drw_sculpt_get_frustum_planes(Object *ob, float planes[6][4])
static void drw_sculpt_generate_calls(DRWSculptCallbackData *scd)
{
/* PBVH should always exist for non-empty meshes, created by depsgraph eval. */
- PBVH *pbvh = (scd->ob->sculpt) ? scd->ob->sculpt->pbvh : NULL;
+ PBVH *pbvh = (scd->ob->sculpt) ? scd->ob->sculpt->pbvh : nullptr;
if (!pbvh) {
return;
}
@@ -1246,8 +1301,8 @@ static void drw_sculpt_generate_calls(DRWSculptCallbackData *scd)
RegionView3D *rv3d = drwctx->rv3d;
const bool navigating = rv3d && (rv3d->rflag & RV3D_NAVIGATING);
- Paint *p = NULL;
- if (drwctx->evil_C != NULL) {
+ Paint *p = nullptr;
+ if (drwctx->evil_C != nullptr) {
p = BKE_paint_get_active_from_context(drwctx->evil_C);
}
@@ -1291,20 +1346,22 @@ static void drw_sculpt_generate_calls(DRWSculptCallbackData *scd)
update_only_visible = true;
}
- Mesh *mesh = scd->ob->data;
- BKE_pbvh_update_normals(pbvh, mesh->runtime.subdiv_ccg);
+ Mesh *mesh = static_cast<Mesh *>(scd->ob->data);
+ BKE_pbvh_update_normals(pbvh, mesh->runtime->subdiv_ccg);
BKE_pbvh_draw_cb(pbvh,
update_only_visible,
&update_frustum,
&draw_frustum,
- (void (*)(void *, GPU_PBVH_Buffers *))sculpt_draw_cb,
+ (void (*)(void *, PBVHBatches *, PBVH_GPU_Args *))sculpt_draw_cb,
scd,
- scd->use_mats);
+ scd->use_mats,
+ scd->attrs,
+ scd->attrs_num);
if (SCULPT_DEBUG_BUFFERS) {
int debug_node_nr = 0;
- DRW_debug_modelmat(scd->ob->obmat);
+ DRW_debug_modelmat(scd->ob->object_to_world);
BKE_pbvh_draw_debug_cb(
pbvh,
(void (*)(PBVHNode * n, void *d, const float min[3], const float max[3], PBVHNodeFlags f))
@@ -1313,50 +1370,161 @@ static void drw_sculpt_generate_calls(DRWSculptCallbackData *scd)
}
}
-void DRW_shgroup_call_sculpt(DRWShadingGroup *shgroup, Object *ob, bool use_wire, bool use_mask)
+void DRW_shgroup_call_sculpt(DRWShadingGroup *shgroup,
+ Object *ob,
+ bool use_wire,
+ bool use_mask,
+ bool use_fset,
+ bool use_color,
+ bool use_uv)
{
- DRWSculptCallbackData scd = {
- .ob = ob,
- .shading_groups = &shgroup,
- .num_shading_groups = 1,
- .use_wire = use_wire,
- .use_mats = false,
- .use_mask = use_mask,
- };
+ DRWSculptCallbackData scd{};
+ scd.ob = ob;
+ scd.shading_groups = &shgroup;
+ scd.num_shading_groups = 1;
+ scd.use_wire = use_wire;
+ scd.use_mats = false;
+ scd.use_mask = use_mask;
+
+ PBVHAttrReq attrs[16];
+ int attrs_num = 0;
+
+ memset(attrs, 0, sizeof(attrs));
+
+ /* NOTE: these are NOT #eCustomDataType, they are extended values, ASAN may warn about this. */
+ attrs[attrs_num++].type = (eCustomDataType)CD_PBVH_CO_TYPE;
+ attrs[attrs_num++].type = (eCustomDataType)CD_PBVH_NO_TYPE;
+
+ if (use_mask) {
+ attrs[attrs_num++].type = (eCustomDataType)CD_PBVH_MASK_TYPE;
+ }
+
+ if (use_fset) {
+ attrs[attrs_num++].type = (eCustomDataType)CD_PBVH_FSET_TYPE;
+ }
+
+ Mesh *me = BKE_object_get_original_mesh(ob);
+
+ if (use_color) {
+ CustomDataLayer *layer = BKE_id_attributes_active_color_get(&me->id);
+
+ if (layer) {
+ eAttrDomain domain = BKE_id_attribute_domain(&me->id, layer);
+
+ attrs[attrs_num].type = eCustomDataType(layer->type);
+ attrs[attrs_num].domain = domain;
+
+ BLI_strncpy(attrs[attrs_num].name, layer->name, sizeof(attrs[attrs_num].name));
+ attrs_num++;
+ }
+ }
+
+ if (use_uv) {
+ int layer_i = CustomData_get_active_layer_index(&me->ldata, CD_MLOOPUV);
+ if (layer_i != -1) {
+ CustomDataLayer *layer = me->ldata.layers + layer_i;
+
+ attrs[attrs_num].type = CD_MLOOPUV;
+ attrs[attrs_num].domain = ATTR_DOMAIN_CORNER;
+ BLI_strncpy(attrs[attrs_num].name, layer->name, sizeof(attrs[attrs_num].name));
+
+ attrs_num++;
+ }
+ }
+
+ scd.attrs = attrs;
+ scd.attrs_num = attrs_num;
+
drw_sculpt_generate_calls(&scd);
}
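With the extra flags, a caller now states up front which PBVH attributes it needs instead of relying on precomputed overlay buffers. A usage sketch, assuming `shgrp` and `ob` were already set up by the calling engine:

    /* Solid sculpt drawing: request mask, face sets and the active color layer,
     * but no wireframe geometry and no UVs. */
    DRW_shgroup_call_sculpt(shgrp,
                            ob,
                            /*use_wire=*/false,
                            /*use_mask=*/true,
                            /*use_fset=*/true,
                            /*use_color=*/true,
                            /*use_uv=*/false);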
void DRW_shgroup_call_sculpt_with_materials(DRWShadingGroup **shgroups,
+ GPUMaterial **gpumats,
int num_shgroups,
Object *ob)
{
- DRWSculptCallbackData scd = {
- .ob = ob,
- .shading_groups = shgroups,
- .num_shading_groups = num_shgroups,
- .use_wire = false,
- .use_mats = true,
- .use_mask = false,
- };
+ DRW_Attributes draw_attrs;
+ DRW_MeshCDMask cd_needed;
+
+ if (gpumats) {
+ DRW_mesh_get_attributes(ob, (Mesh *)ob->data, gpumats, num_shgroups, &draw_attrs, &cd_needed);
+ }
+ else {
+ memset(&draw_attrs, 0, sizeof(draw_attrs));
+ memset(&cd_needed, 0, sizeof(cd_needed));
+ }
+
+ int attrs_num = 2 + draw_attrs.num_requests;
+
+ /* UV maps are not in attribute requests. */
+ attrs_num += count_bits_i(cd_needed.uv);
+
+ blender::Array<PBVHAttrReq, 16> attrs(attrs_num, PBVHAttrReq{});
+
+ int attrs_i = 0;
+
+ /* NOTE: these are NOT #eCustomDataType, they are extended values, ASAN may warn about this. */
+ attrs[attrs_i++].type = (eCustomDataType)CD_PBVH_CO_TYPE;
+ attrs[attrs_i++].type = (eCustomDataType)CD_PBVH_NO_TYPE;
+
+ for (int i = 0; i < draw_attrs.num_requests; i++) {
+ DRW_AttributeRequest *req = draw_attrs.requests + i;
+
+ attrs[attrs_i].type = req->cd_type;
+ attrs[attrs_i].domain = req->domain;
+ BLI_strncpy(attrs[attrs_i].name, req->attribute_name, sizeof(PBVHAttrReq::name));
+ attrs_i++;
+ }
+
+ /* UV maps are not in attribute requests. */
+ Mesh *me = (Mesh *)ob->data;
+
+ for (uint i = 0; i < 32; i++) {
+ if (cd_needed.uv & (1 << i)) {
+ int layer_i = CustomData_get_layer_index_n(&me->ldata, CD_MLOOPUV, i);
+ CustomDataLayer *layer = layer_i != -1 ? me->ldata.layers + layer_i : nullptr;
+
+ if (layer) {
+ attrs[attrs_i].type = CD_MLOOPUV;
+ attrs[attrs_i].domain = ATTR_DOMAIN_CORNER;
+ BLI_strncpy(attrs[attrs_i].name, layer->name, sizeof(PBVHAttrReq::name));
+ attrs_i++;
+ }
+ }
+ }
+
+ attrs_num = attrs_i;
+
+ DRWSculptCallbackData scd{};
+ scd.ob = ob;
+ scd.shading_groups = shgroups;
+ scd.num_shading_groups = num_shgroups;
+ scd.use_wire = false;
+ scd.use_mats = true;
+ scd.use_mask = false;
+ scd.attrs = attrs.data();
+ scd.attrs_num = attrs_num;
+
drw_sculpt_generate_calls(&scd);
}
static GPUVertFormat inst_select_format = {0};
DRWCallBuffer *DRW_shgroup_call_buffer(DRWShadingGroup *shgroup,
- struct GPUVertFormat *format,
+ GPUVertFormat *format,
GPUPrimType prim_type)
{
BLI_assert(ELEM(prim_type, GPU_PRIM_POINTS, GPU_PRIM_LINES, GPU_PRIM_TRI_FAN));
- BLI_assert(format != NULL);
+ BLI_assert(format != nullptr);
- DRWCallBuffer *callbuf = BLI_memblock_alloc(DST.vmempool->callbuffers);
+ DRWCallBuffer *callbuf = static_cast<DRWCallBuffer *>(
+ BLI_memblock_alloc(DST.vmempool->callbuffers));
callbuf->buf = DRW_temp_buffer_request(DST.vmempool->idatalist, format, &callbuf->count);
- callbuf->buf_select = NULL;
+ callbuf->buf_select = nullptr;
callbuf->count = 0;
if (G.f & G_FLAG_PICKSEL) {
- /* Not actually used for rendering but alloced in one chunk. */
+ /* Not actually used for rendering but allocated in one chunk. */
if (inst_select_format.attr_len == 0) {
GPU_vertformat_attr_add(&inst_select_format, "selectId", GPU_COMP_I32, 1, GPU_FETCH_INT);
}
@@ -1365,7 +1533,7 @@ DRWCallBuffer *DRW_shgroup_call_buffer(DRWShadingGroup *shgroup,
drw_command_set_select_id(shgroup, callbuf->buf_select, -1);
}
- DRWResourceHandle handle = drw_resource_handle(shgroup, NULL, NULL);
+ DRWResourceHandle handle = drw_resource_handle(shgroup, nullptr, nullptr);
GPUBatch *batch = DRW_temp_batch_request(DST.vmempool->idatalist, callbuf->buf, prim_type);
drw_command_draw(shgroup, batch, handle);
@@ -1373,19 +1541,20 @@ DRWCallBuffer *DRW_shgroup_call_buffer(DRWShadingGroup *shgroup,
}
DRWCallBuffer *DRW_shgroup_call_buffer_instance(DRWShadingGroup *shgroup,
- struct GPUVertFormat *format,
+ GPUVertFormat *format,
GPUBatch *geom)
{
- BLI_assert(geom != NULL);
- BLI_assert(format != NULL);
+ BLI_assert(geom != nullptr);
+ BLI_assert(format != nullptr);
- DRWCallBuffer *callbuf = BLI_memblock_alloc(DST.vmempool->callbuffers);
+ DRWCallBuffer *callbuf = static_cast<DRWCallBuffer *>(
+ BLI_memblock_alloc(DST.vmempool->callbuffers));
callbuf->buf = DRW_temp_buffer_request(DST.vmempool->idatalist, format, &callbuf->count);
- callbuf->buf_select = NULL;
+ callbuf->buf_select = nullptr;
callbuf->count = 0;
if (G.f & G_FLAG_PICKSEL) {
- /* Not actually used for rendering but alloced in one chunk. */
+ /* Not actually used for rendering but allocated in one chunk. */
if (inst_select_format.attr_len == 0) {
GPU_vertformat_attr_add(&inst_select_format, "selectId", GPU_COMP_I32, 1, GPU_FETCH_INT);
}
@@ -1394,9 +1563,9 @@ DRWCallBuffer *DRW_shgroup_call_buffer_instance(DRWShadingGroup *shgroup,
drw_command_set_select_id(shgroup, callbuf->buf_select, -1);
}
- DRWResourceHandle handle = drw_resource_handle(shgroup, NULL, NULL);
+ DRWResourceHandle handle = drw_resource_handle(shgroup, nullptr, nullptr);
GPUBatch *batch = DRW_temp_batch_instance_request(
- DST.vmempool->idatalist, callbuf->buf, NULL, geom);
+ DST.vmempool->idatalist, callbuf->buf, nullptr, geom);
drw_command_draw(shgroup, batch, handle);
return callbuf;
@@ -1457,9 +1626,10 @@ void DRW_buffer_add_entry_array(DRWCallBuffer *callbuf, const void *attr[], uint
static void drw_shgroup_init(DRWShadingGroup *shgroup, GPUShader *shader)
{
- shgroup->uniforms = NULL;
- shgroup->uniform_attrs = NULL;
+ shgroup->uniforms = nullptr;
+ shgroup->uniform_attrs = nullptr;
+ int clipping_ubo_location = GPU_shader_get_builtin_block(shader, GPU_UNIFORM_BLOCK_DRW_CLIPPING);
int view_ubo_location = GPU_shader_get_builtin_block(shader, GPU_UNIFORM_BLOCK_VIEW);
int model_ubo_location = GPU_shader_get_builtin_block(shader, GPU_UNIFORM_BLOCK_MODEL);
int info_ubo_location = GPU_shader_get_builtin_block(shader, GPU_UNIFORM_BLOCK_INFO);
@@ -1480,22 +1650,22 @@ static void drw_shgroup_init(DRWShadingGroup *shgroup, GPUShader *shader)
if (chunkid_location != -1) {
drw_shgroup_uniform_create_ex(
- shgroup, chunkid_location, DRW_UNIFORM_RESOURCE_CHUNK, NULL, 0, 0, 1);
+ shgroup, chunkid_location, DRW_UNIFORM_RESOURCE_CHUNK, nullptr, GPU_SAMPLER_DEFAULT, 0, 1);
}
if (resourceid_location != -1) {
drw_shgroup_uniform_create_ex(
- shgroup, resourceid_location, DRW_UNIFORM_RESOURCE_ID, NULL, 0, 0, 1);
+ shgroup, resourceid_location, DRW_UNIFORM_RESOURCE_ID, nullptr, GPU_SAMPLER_DEFAULT, 0, 1);
}
if (baseinst_location != -1) {
drw_shgroup_uniform_create_ex(
- shgroup, baseinst_location, DRW_UNIFORM_BASE_INSTANCE, NULL, 0, 0, 1);
+ shgroup, baseinst_location, DRW_UNIFORM_BASE_INSTANCE, nullptr, GPU_SAMPLER_DEFAULT, 0, 1);
}
if (model_ubo_location != -1) {
drw_shgroup_uniform_create_ex(
- shgroup, model_ubo_location, DRW_UNIFORM_BLOCK_OBMATS, NULL, 0, 0, 1);
+ shgroup, model_ubo_location, DRW_UNIFORM_BLOCK_OBMATS, nullptr, GPU_SAMPLER_DEFAULT, 0, 1);
}
else {
/* NOTE: This is only here to support old hardware fallback where uniform buffer is still
@@ -1503,17 +1673,23 @@ static void drw_shgroup_init(DRWShadingGroup *shgroup, GPUShader *shader)
int model = GPU_shader_get_builtin_uniform(shader, GPU_UNIFORM_MODEL);
int modelinverse = GPU_shader_get_builtin_uniform(shader, GPU_UNIFORM_MODEL_INV);
if (model != -1) {
- drw_shgroup_uniform_create_ex(shgroup, model, DRW_UNIFORM_MODEL_MATRIX, NULL, 0, 0, 1);
+ drw_shgroup_uniform_create_ex(
+ shgroup, model, DRW_UNIFORM_MODEL_MATRIX, nullptr, GPU_SAMPLER_DEFAULT, 0, 1);
}
if (modelinverse != -1) {
- drw_shgroup_uniform_create_ex(
- shgroup, modelinverse, DRW_UNIFORM_MODEL_MATRIX_INVERSE, NULL, 0, 0, 1);
+ drw_shgroup_uniform_create_ex(shgroup,
+ modelinverse,
+ DRW_UNIFORM_MODEL_MATRIX_INVERSE,
+ nullptr,
+ GPU_SAMPLER_DEFAULT,
+ 0,
+ 1);
}
}
if (info_ubo_location != -1) {
drw_shgroup_uniform_create_ex(
- shgroup, info_ubo_location, DRW_UNIFORM_BLOCK_OBINFOS, NULL, 0, 0, 1);
+ shgroup, info_ubo_location, DRW_UNIFORM_BLOCK_OBINFOS, nullptr, GPU_SAMPLER_DEFAULT, 0, 1);
/* Abusing this loc to tell shgroup we need the obinfos. */
shgroup->objectinfo = 1;
@@ -1524,27 +1700,46 @@ static void drw_shgroup_init(DRWShadingGroup *shgroup, GPUShader *shader)
if (view_ubo_location != -1) {
drw_shgroup_uniform_create_ex(
- shgroup, view_ubo_location, DRW_UNIFORM_BLOCK, G_draw.view_ubo, 0, 0, 1);
+ shgroup, view_ubo_location, DRW_UNIFORM_BLOCK, G_draw.view_ubo, GPU_SAMPLER_DEFAULT, 0, 1);
+ }
+
+ if (clipping_ubo_location != -1) {
+ drw_shgroup_uniform_create_ex(shgroup,
+ clipping_ubo_location,
+ DRW_UNIFORM_BLOCK,
+ G_draw.clipping_ubo,
+ GPU_SAMPLER_DEFAULT,
+ 0,
+ 1);
}
#ifdef DEBUG
- int debug_print_location = GPU_shader_get_builtin_ssbo(shader, GPU_STORAGE_BUFFER_DEBUG_PRINT);
- if (debug_print_location != -1) {
- GPUStorageBuf *buf = drw_debug_gpu_print_buf_get();
- drw_shgroup_uniform_create_ex(
- shgroup, debug_print_location, DRW_UNIFORM_STORAGE_BLOCK, buf, 0, 0, 1);
+ /* TODO(Metal): Support Shader debug print.
+ * This is not currently supported by Metal Backend. */
+ if (GPU_backend_get_type() != GPU_BACKEND_METAL) {
+ int debug_print_location = GPU_shader_get_builtin_ssbo(shader, GPU_STORAGE_BUFFER_DEBUG_PRINT);
+ if (debug_print_location != -1) {
+ GPUStorageBuf *buf = drw_debug_gpu_print_buf_get();
+ drw_shgroup_uniform_create_ex(shgroup,
+ debug_print_location,
+ DRW_UNIFORM_STORAGE_BLOCK,
+ buf,
+ GPU_SAMPLER_DEFAULT,
+ 0,
+ 1);
# ifndef DISABLE_DEBUG_SHADER_PRINT_BARRIER
- /* Add a barrier to allow multiple shaders writing to the same buffer. */
- DRW_shgroup_barrier(shgroup, GPU_BARRIER_SHADER_STORAGE);
+ /* Add a barrier to allow multiple shaders writing to the same buffer. */
+ DRW_shgroup_barrier(shgroup, GPU_BARRIER_SHADER_STORAGE);
# endif
- }
+ }
- int debug_draw_location = GPU_shader_get_builtin_ssbo(shader, GPU_STORAGE_BUFFER_DEBUG_VERTS);
- if (debug_draw_location != -1) {
- GPUStorageBuf *buf = drw_debug_gpu_draw_buf_get();
- drw_shgroup_uniform_create_ex(
- shgroup, debug_draw_location, DRW_UNIFORM_STORAGE_BLOCK, buf, 0, 0, 1);
- /* NOTE(fclem): No barrier as ordering is not important. */
+ int debug_draw_location = GPU_shader_get_builtin_ssbo(shader, GPU_STORAGE_BUFFER_DEBUG_VERTS);
+ if (debug_draw_location != -1) {
+ GPUStorageBuf *buf = drw_debug_gpu_draw_buf_get();
+ drw_shgroup_uniform_create_ex(
+ shgroup, debug_draw_location, DRW_UNIFORM_STORAGE_BLOCK, buf, GPU_SAMPLER_DEFAULT, 0, 1);
+ /* NOTE(fclem): No barrier as ordering is not important. */
+ }
}
#endif
@@ -1562,15 +1757,16 @@ static void drw_shgroup_init(DRWShadingGroup *shgroup, GPUShader *shader)
BLI_assert(GPU_shader_get_builtin_uniform(shader, GPU_UNIFORM_MVP) == -1);
}
-static DRWShadingGroup *drw_shgroup_create_ex(struct GPUShader *shader, DRWPass *pass)
+static DRWShadingGroup *drw_shgroup_create_ex(GPUShader *shader, DRWPass *pass)
{
- DRWShadingGroup *shgroup = BLI_memblock_alloc(DST.vmempool->shgroups);
+ DRWShadingGroup *shgroup = static_cast<DRWShadingGroup *>(
+ BLI_memblock_alloc(DST.vmempool->shgroups));
BLI_LINKS_APPEND(&pass->shgroups, shgroup);
shgroup->shader = shader;
- shgroup->cmd.first = NULL;
- shgroup->cmd.last = NULL;
+ shgroup->cmd.first = nullptr;
+ shgroup->cmd.last = nullptr;
shgroup->pass_handle = pass->handle;
return shgroup;
@@ -1580,14 +1776,14 @@ static DRWShadingGroup *drw_shgroup_material_create_ex(GPUPass *gpupass, DRWPass
{
if (!gpupass) {
/* Shader compilation error */
- return NULL;
+ return nullptr;
}
GPUShader *sh = GPU_pass_shader_get(gpupass);
if (!sh) {
/* Shader not yet compiled */
- return NULL;
+ return nullptr;
}
DRWShadingGroup *grp = drw_shgroup_create_ex(sh, pass);
@@ -1601,12 +1797,12 @@ static void drw_shgroup_material_texture(DRWShadingGroup *grp,
{
DRW_shgroup_uniform_texture_ex(grp, name, gputex, state);
- GPUTexture **gputex_ref = BLI_memblock_alloc(DST.vmempool->images);
+ GPUTexture **gputex_ref = static_cast<GPUTexture **>(BLI_memblock_alloc(DST.vmempool->images));
*gputex_ref = gputex;
GPU_texture_ref(gputex);
}
-void DRW_shgroup_add_material_resources(DRWShadingGroup *grp, struct GPUMaterial *material)
+void DRW_shgroup_add_material_resources(DRWShadingGroup *grp, GPUMaterial *material)
{
ListBase textures = GPU_material_textures(material);
@@ -1615,41 +1811,57 @@ void DRW_shgroup_add_material_resources(DRWShadingGroup *grp, struct GPUMaterial
if (tex->ima) {
/* Image */
GPUTexture *gputex;
- ImageUser *iuser = tex->iuser_available ? &tex->iuser : NULL;
+ ImageUser *iuser = tex->iuser_available ? &tex->iuser : nullptr;
if (tex->tiled_mapping_name[0]) {
- gputex = BKE_image_get_gpu_tiles(tex->ima, iuser, NULL);
- drw_shgroup_material_texture(grp, gputex, tex->sampler_name, tex->sampler_state);
- gputex = BKE_image_get_gpu_tilemap(tex->ima, iuser, NULL);
- drw_shgroup_material_texture(grp, gputex, tex->tiled_mapping_name, tex->sampler_state);
+ gputex = BKE_image_get_gpu_tiles(tex->ima, iuser, nullptr);
+ drw_shgroup_material_texture(
+ grp, gputex, tex->sampler_name, eGPUSamplerState(tex->sampler_state));
+ gputex = BKE_image_get_gpu_tilemap(tex->ima, iuser, nullptr);
+ drw_shgroup_material_texture(
+ grp, gputex, tex->tiled_mapping_name, eGPUSamplerState(tex->sampler_state));
}
else {
- gputex = BKE_image_get_gpu_texture(tex->ima, iuser, NULL);
- drw_shgroup_material_texture(grp, gputex, tex->sampler_name, tex->sampler_state);
+ gputex = BKE_image_get_gpu_texture(tex->ima, iuser, nullptr);
+ drw_shgroup_material_texture(
+ grp, gputex, tex->sampler_name, eGPUSamplerState(tex->sampler_state));
}
}
else if (tex->colorband) {
/* Color Ramp */
DRW_shgroup_uniform_texture(grp, tex->sampler_name, *tex->colorband);
}
+ else if (tex->sky) {
+ /* Sky */
+ DRW_shgroup_uniform_texture_ex(
+ grp, tex->sampler_name, *tex->sky, eGPUSamplerState(tex->sampler_state));
+ }
}
GPUUniformBuf *ubo = GPU_material_uniform_buffer_get(material);
- if (ubo != NULL) {
+ if (ubo != nullptr) {
DRW_shgroup_uniform_block(grp, GPU_UBO_BLOCK_NAME, ubo);
}
const GPUUniformAttrList *uattrs = GPU_material_uniform_attributes(material);
- if (uattrs != NULL) {
+ if (uattrs != nullptr) {
int loc = GPU_shader_get_uniform_block_binding(grp->shader, GPU_ATTRIBUTE_UBO_BLOCK_NAME);
- drw_shgroup_uniform_create_ex(grp, loc, DRW_UNIFORM_BLOCK_OBATTRS, uattrs, 0, 0, 1);
+ drw_shgroup_uniform_create_ex(
+ grp, loc, DRW_UNIFORM_BLOCK_OBATTRS, uattrs, GPU_SAMPLER_DEFAULT, 0, 1);
grp->uniform_attrs = uattrs;
}
+
+ if (GPU_material_layer_attributes(material) != nullptr) {
+ int loc = GPU_shader_get_uniform_block_binding(grp->shader,
+ GPU_LAYER_ATTRIBUTE_UBO_BLOCK_NAME);
+ drw_shgroup_uniform_create_ex(
+ grp, loc, DRW_UNIFORM_BLOCK_VLATTRS, nullptr, GPU_SAMPLER_DEFAULT, 0, 1);
+ }
}
GPUVertFormat *DRW_shgroup_instance_format_array(const DRWInstanceAttrFormat attrs[],
int arraysize)
{
- GPUVertFormat *format = MEM_callocN(sizeof(GPUVertFormat), "GPUVertFormat");
+ GPUVertFormat *format = MEM_cnew<GPUVertFormat>(__func__);
for (int i = 0; i < arraysize; i++) {
GPU_vertformat_attr_add(format,
@@ -1661,7 +1873,7 @@ GPUVertFormat *DRW_shgroup_instance_format_array(const DRWInstanceAttrFormat att
return format;
}
-DRWShadingGroup *DRW_shgroup_material_create(struct GPUMaterial *material, DRWPass *pass)
+DRWShadingGroup *DRW_shgroup_material_create(GPUMaterial *material, DRWPass *pass)
{
GPUPass *gpupass = GPU_material_get_pass(material);
DRWShadingGroup *shgroup = drw_shgroup_material_create_ex(gpupass, pass);
@@ -1673,32 +1885,33 @@ DRWShadingGroup *DRW_shgroup_material_create(struct GPUMaterial *material, DRWPa
return shgroup;
}
-DRWShadingGroup *DRW_shgroup_create(struct GPUShader *shader, DRWPass *pass)
+DRWShadingGroup *DRW_shgroup_create(GPUShader *shader, DRWPass *pass)
{
DRWShadingGroup *shgroup = drw_shgroup_create_ex(shader, pass);
drw_shgroup_init(shgroup, shader);
return shgroup;
}
-DRWShadingGroup *DRW_shgroup_transform_feedback_create(struct GPUShader *shader,
+DRWShadingGroup *DRW_shgroup_transform_feedback_create(GPUShader *shader,
DRWPass *pass,
GPUVertBuf *tf_target)
{
- BLI_assert(tf_target != NULL);
+ BLI_assert(tf_target != nullptr);
DRWShadingGroup *shgroup = drw_shgroup_create_ex(shader, pass);
drw_shgroup_init(shgroup, shader);
- drw_shgroup_uniform_create_ex(shgroup, 0, DRW_UNIFORM_TFEEDBACK_TARGET, tf_target, 0, 0, 1);
+ drw_shgroup_uniform_create_ex(
+ shgroup, 0, DRW_UNIFORM_TFEEDBACK_TARGET, tf_target, GPU_SAMPLER_DEFAULT, 0, 1);
return shgroup;
}
void DRW_shgroup_state_enable(DRWShadingGroup *shgroup, DRWState state)
{
- drw_command_set_mutable_state(shgroup, state, 0x0);
+ drw_command_set_mutable_state(shgroup, state, DRW_STATE_NO_DRAW);
}
void DRW_shgroup_state_disable(DRWShadingGroup *shgroup, DRWState state)
{
- drw_command_set_mutable_state(shgroup, 0x0, state);
+ drw_command_set_mutable_state(shgroup, DRW_STATE_NO_DRAW, state);
}
void DRW_shgroup_stencil_set(DRWShadingGroup *shgroup,
@@ -1741,15 +1954,16 @@ bool DRW_shgroup_is_empty(DRWShadingGroup *shgroup)
DRWShadingGroup *DRW_shgroup_create_sub(DRWShadingGroup *shgroup)
{
- DRWShadingGroup *shgroup_new = BLI_memblock_alloc(DST.vmempool->shgroups);
+ DRWShadingGroup *shgroup_new = static_cast<DRWShadingGroup *>(
+ BLI_memblock_alloc(DST.vmempool->shgroups));
*shgroup_new = *shgroup;
drw_shgroup_init(shgroup_new, shgroup_new->shader);
- shgroup_new->cmd.first = NULL;
- shgroup_new->cmd.last = NULL;
+ shgroup_new->cmd.first = nullptr;
+ shgroup_new->cmd.last = nullptr;
- DRWPass *parent_pass = DRW_memblock_elem_from_handle(DST.vmempool->passes,
- &shgroup->pass_handle);
+ DRWPass *parent_pass = static_cast<DRWPass *>(
+ DRW_memblock_elem_from_handle(DST.vmempool->passes, &shgroup->pass_handle));
BLI_LINKS_INSERT_AFTER(&parent_pass->shgroups, shgroup, shgroup_new);
@@ -1943,64 +2157,20 @@ static void draw_frustum_bound_sphere_calc(const BoundBox *bbox,
}
}
-static void draw_view_matrix_state_update(ViewInfos *storage,
+static void draw_view_matrix_state_update(DRWView *view,
const float viewmat[4][4],
const float winmat[4][4])
{
- copy_m4_m4(storage->viewmat, viewmat);
- invert_m4_m4(storage->viewinv, storage->viewmat);
-
- copy_m4_m4(storage->winmat, winmat);
- invert_m4_m4(storage->wininv, storage->winmat);
-
- mul_m4_m4m4(storage->persmat, winmat, viewmat);
- invert_m4_m4(storage->persinv, storage->persmat);
-
- const bool is_persp = (winmat[3][3] == 0.0f);
-
- /* Near clip distance. */
- storage->viewvecs[0][3] = (is_persp) ? -winmat[3][2] / (winmat[2][2] - 1.0f) :
- -(winmat[3][2] + 1.0f) / winmat[2][2];
-
- /* Far clip distance. */
- storage->viewvecs[1][3] = (is_persp) ? -winmat[3][2] / (winmat[2][2] + 1.0f) :
- -(winmat[3][2] - 1.0f) / winmat[2][2];
+ ViewMatrices *storage = &view->storage;
- /* view vectors for the corners of the view frustum.
- * Can be used to recreate the world space position easily */
- float view_vecs[4][3] = {
- {-1.0f, -1.0f, -1.0f},
- {1.0f, -1.0f, -1.0f},
- {-1.0f, 1.0f, -1.0f},
- {-1.0f, -1.0f, 1.0f},
- };
+ copy_m4_m4(storage->viewmat.values, viewmat);
+ invert_m4_m4(storage->viewinv.values, storage->viewmat.values);
- /* convert the view vectors to view space */
- for (int i = 0; i < 4; i++) {
- mul_project_m4_v3(storage->wininv, view_vecs[i]);
- /* normalized trick see:
- * http://www.derschmale.com/2014/01/26/reconstructing-positions-from-the-depth-buffer */
- if (is_persp) {
- /* Divide XY by Z. */
- mul_v2_fl(view_vecs[i], 1.0f / view_vecs[i][2]);
- }
- }
+ copy_m4_m4(storage->winmat.values, winmat);
+ invert_m4_m4(storage->wininv.values, storage->winmat.values);
- /**
- * If ortho : view_vecs[0] is the near-bottom-left corner of the frustum and
- * view_vecs[1] is the vector going from the near-bottom-left corner to
- * the far-top-right corner.
- * If Persp : view_vecs[0].xy and view_vecs[1].xy are respectively the bottom-left corner
- * when Z = 1, and top-left corner if Z = 1.
- * view_vecs[0].z the near clip distance and view_vecs[1].z is the (signed)
- * distance from the near plane to the far clip plane.
- */
- copy_v3_v3(storage->viewvecs[0], view_vecs[0]);
-
- /* we need to store the differences */
- storage->viewvecs[1][0] = view_vecs[1][0] - view_vecs[0][0];
- storage->viewvecs[1][1] = view_vecs[2][1] - view_vecs[0][1];
- storage->viewvecs[1][2] = view_vecs[3][2] - view_vecs[0][2];
+ mul_m4_m4m4(view->persmat.values, winmat, viewmat);
+ invert_m4_m4(view->persinv.values, view->persmat.values);
}
DRWView *DRW_view_create(const float viewmat[4][4],
@@ -2009,7 +2179,7 @@ DRWView *DRW_view_create(const float viewmat[4][4],
const float (*culling_winmat)[4],
DRWCallVisibilityFn *visibility_fn)
{
- DRWView *view = BLI_memblock_alloc(DST.vmempool->views);
+ DRWView *view = static_cast<DRWView *>(BLI_memblock_alloc(DST.vmempool->views));
if (DST.primary_view_num < MAX_CULLED_VIEWS) {
view->culling_mask = 1u << DST.primary_view_num++;
@@ -2020,16 +2190,7 @@ DRWView *DRW_view_create(const float viewmat[4][4],
}
view->clip_planes_len = 0;
view->visibility_fn = visibility_fn;
- view->parent = NULL;
-
- copy_v4_fl4(view->storage.viewcamtexcofac, 1.0f, 1.0f, 0.0f, 0.0f);
-
- if (DST.draw_ctx.evil_C && DST.draw_ctx.region) {
- int region_origin[2] = {DST.draw_ctx.region->winrct.xmin, DST.draw_ctx.region->winrct.ymin};
- struct wmWindow *win = CTX_wm_window(DST.draw_ctx.evil_C);
- wm_cursor_position_get(win, &view->storage.mouse_pixel[0], &view->storage.mouse_pixel[1]);
- sub_v2_v2v2_int(view->storage.mouse_pixel, view->storage.mouse_pixel, region_origin);
- }
+ view->parent = nullptr;
DRW_view_update(view, viewmat, winmat, culling_viewmat, culling_winmat);
@@ -2042,11 +2203,11 @@ DRWView *DRW_view_create_sub(const DRWView *parent_view,
{
/* Search original parent. */
const DRWView *ori_view = parent_view;
- while (ori_view->parent != NULL) {
+ while (ori_view->parent != nullptr) {
ori_view = ori_view->parent;
}
- DRWView *view = BLI_memblock_alloc(DST.vmempool->views);
+ DRWView *view = static_cast<DRWView *>(BLI_memblock_alloc(DST.vmempool->views));
/* Perform copy. */
*view = *ori_view;
@@ -2063,12 +2224,12 @@ DRWView *DRW_view_create_sub(const DRWView *parent_view,
void DRW_view_update_sub(DRWView *view, const float viewmat[4][4], const float winmat[4][4])
{
- BLI_assert(view->parent != NULL);
+ BLI_assert(view->parent != nullptr);
view->is_dirty = true;
view->is_inverted = (is_negative_m4(viewmat) == is_negative_m4(winmat));
- draw_view_matrix_state_update(&view->storage, viewmat, winmat);
+ draw_view_matrix_state_update(view, viewmat, winmat);
}
void DRW_view_update(DRWView *view,
@@ -2080,12 +2241,12 @@ void DRW_view_update(DRWView *view,
/* DO NOT UPDATE THE DEFAULT VIEW.
* Create sub-views instead, or a copy. */
BLI_assert(view != DST.view_default);
- BLI_assert(view->parent == NULL);
+ BLI_assert(view->parent == nullptr);
view->is_dirty = true;
view->is_inverted = (is_negative_m4(viewmat) == is_negative_m4(winmat));
- draw_view_matrix_state_update(&view->storage, viewmat, winmat);
+ draw_view_matrix_state_update(view, viewmat, winmat);
/* Prepare frustum culling. */
@@ -2113,7 +2274,7 @@ void DRW_view_update(DRWView *view,
invert_m4_m4(wininv, winmat);
}
else {
- copy_m4_m4(wininv, view->storage.wininv);
+ copy_m4_m4(wininv, view->storage.wininv.values);
}
float viewinv[4][4];
@@ -2122,22 +2283,14 @@ void DRW_view_update(DRWView *view,
invert_m4_m4(viewinv, viewmat);
}
else {
- copy_m4_m4(viewinv, view->storage.viewinv);
+ copy_m4_m4(viewinv, view->storage.viewinv.values);
}
draw_frustum_boundbox_calc(viewinv, winmat, &view->frustum_corners);
- draw_frustum_culling_planes_calc(view->storage.persmat, view->frustum_planes);
+ draw_frustum_culling_planes_calc(view->persmat.values, view->frustum_planes);
draw_frustum_bound_sphere_calc(
&view->frustum_corners, viewinv, winmat, wininv, &view->frustum_bsphere);
- /* TODO(fclem): Deduplicate. */
- for (int i = 0; i < 8; i++) {
- copy_v3_v3(view->storage.frustum_corners[i], view->frustum_corners.vec[i]);
- }
- for (int i = 0; i < 6; i++) {
- copy_v4_v4(view->storage.frustum_planes[i], view->frustum_planes[i]);
- }
-
#ifdef DRW_DEBUG_CULLING
if (G.debug_value != 0) {
DRW_debug_sphere(
@@ -2154,14 +2307,14 @@ const DRWView *DRW_view_default_get(void)
void DRW_view_reset(void)
{
- DST.view_default = NULL;
- DST.view_active = NULL;
- DST.view_previous = NULL;
+ DST.view_default = nullptr;
+ DST.view_active = nullptr;
+ DST.view_previous = nullptr;
}
void DRW_view_default_set(const DRWView *view)
{
- BLI_assert(DST.view_default == NULL);
+ BLI_assert(DST.view_default == nullptr);
DST.view_default = (DRWView *)view;
}
@@ -2170,20 +2323,10 @@ void DRW_view_clip_planes_set(DRWView *view, float (*planes)[4], int plane_len)
BLI_assert(plane_len <= MAX_CLIP_PLANES);
view->clip_planes_len = plane_len;
if (plane_len > 0) {
- memcpy(view->storage.clip_planes, planes, sizeof(float[4]) * plane_len);
+ memcpy(view->clip_planes, planes, sizeof(float[4]) * plane_len);
}
}
-void DRW_view_camtexco_set(DRWView *view, float texco[4])
-{
- copy_v4_v4(view->storage.viewcamtexcofac, texco);
-}
-
-void DRW_view_camtexco_get(const DRWView *view, float r_texco[4])
-{
- copy_v4_v4(r_texco, view->storage.viewcamtexcofac);
-}
-
void DRW_view_frustum_corners_get(const DRWView *view, BoundBox *corners)
{
memcpy(corners, &view->frustum_corners, sizeof(view->frustum_corners));
@@ -2203,7 +2346,7 @@ bool DRW_view_is_persp_get(const DRWView *view)
float DRW_view_near_distance_get(const DRWView *view)
{
view = (view) ? view : DST.view_default;
- const float(*projmat)[4] = view->storage.winmat;
+ const float4x4 &projmat = view->storage.winmat;
if (DRW_view_is_persp_get(view)) {
return -projmat[3][2] / (projmat[2][2] - 1.0f);
@@ -2215,7 +2358,7 @@ float DRW_view_near_distance_get(const DRWView *view)
float DRW_view_far_distance_get(const DRWView *view)
{
view = (view) ? view : DST.view_default;
- const float(*projmat)[4] = view->storage.winmat;
+ const float4x4 &projmat = view->storage.winmat;
if (DRW_view_is_persp_get(view)) {
return -projmat[3][2] / (projmat[2][2] + 1.0f);
@@ -2227,22 +2370,21 @@ float DRW_view_far_distance_get(const DRWView *view)
void DRW_view_viewmat_get(const DRWView *view, float mat[4][4], bool inverse)
{
view = (view) ? view : DST.view_default;
- const ViewInfos *storage = &view->storage;
- copy_m4_m4(mat, (inverse) ? storage->viewinv : storage->viewmat);
+ const ViewMatrices *storage = &view->storage;
+ copy_m4_m4(mat, (inverse) ? storage->viewinv.values : storage->viewmat.values);
}
void DRW_view_winmat_get(const DRWView *view, float mat[4][4], bool inverse)
{
view = (view) ? view : DST.view_default;
- const ViewInfos *storage = &view->storage;
- copy_m4_m4(mat, (inverse) ? storage->wininv : storage->winmat);
+ const ViewMatrices *storage = &view->storage;
+ copy_m4_m4(mat, (inverse) ? storage->wininv.values : storage->winmat.values);
}
void DRW_view_persmat_get(const DRWView *view, float mat[4][4], bool inverse)
{
view = (view) ? view : DST.view_default;
- const ViewInfos *storage = &view->storage;
- copy_m4_m4(mat, (inverse) ? storage->persinv : storage->persmat);
+ copy_m4_m4(mat, (inverse) ? view->persinv.values : view->persmat.values);
}
/** \} */
@@ -2253,19 +2395,19 @@ void DRW_view_persmat_get(const DRWView *view, float mat[4][4], bool inverse)
DRWPass *DRW_pass_create(const char *name, DRWState state)
{
- DRWPass *pass = BLI_memblock_alloc(DST.vmempool->passes);
+ DRWPass *pass = static_cast<DRWPass *>(BLI_memblock_alloc(DST.vmempool->passes));
pass->state = state | DRW_STATE_PROGRAM_POINT_SIZE;
if (G.debug & G_DEBUG_GPU) {
BLI_strncpy(pass->name, name, MAX_PASS_NAME);
}
- pass->shgroups.first = NULL;
- pass->shgroups.last = NULL;
+ pass->shgroups.first = nullptr;
+ pass->shgroups.last = nullptr;
pass->handle = DST.pass_handle;
DRW_handle_increment(&DST.pass_handle);
- pass->original = NULL;
- pass->next = NULL;
+ pass->original = nullptr;
+ pass->next = nullptr;
return pass;
}
@@ -2281,7 +2423,7 @@ DRWPass *DRW_pass_create_instance(const char *name, DRWPass *original, DRWState
void DRW_pass_link(DRWPass *first, DRWPass *second)
{
BLI_assert(first != second);
- BLI_assert(first->next == NULL);
+ BLI_assert(first->next == nullptr);
first->next = second;
}
@@ -2340,7 +2482,7 @@ static int pass_shgroup_dist_sort(const void *a, const void *b)
void DRW_pass_sort_shgroup_z(DRWPass *pass)
{
- const float(*viewinv)[4] = DST.view_active->storage.viewinv;
+ const float4x4 &viewinv = DST.view_active->storage.viewinv;
if (!(pass->shgroups.first && pass->shgroups.first->next)) {
/* Nothing to sort */
@@ -2366,7 +2508,8 @@ void DRW_pass_sort_shgroup_z(DRWPass *pass)
* (see T76730 & D7729). */
// BLI_assert(handle != 0);
- DRWObjectMatrix *obmats = DRW_memblock_elem_from_handle(DST.vmempool->obmats, &handle);
+ DRWObjectMatrix *obmats = static_cast<DRWObjectMatrix *>(
+ DRW_memblock_elem_from_handle(DST.vmempool->obmats, &handle));
/* Compute distance to camera. */
float tmp[3];
diff --git a/source/blender/draw/intern/draw_manager_exec.c b/source/blender/draw/intern/draw_manager_exec.c
index 0e39cc1d3b9..8b1b35b5f03 100644
--- a/source/blender/draw/intern/draw_manager_exec.c
+++ b/source/blender/draw/intern/draw_manager_exec.c
@@ -44,6 +44,7 @@ typedef struct DRWCommandsState {
int obmats_loc;
int obinfos_loc;
int obattrs_loc;
+ int vlattrs_loc;
int baseinst_loc;
int chunkid_loc;
int resourceid_loc;
@@ -682,6 +683,10 @@ static void draw_update_uniforms(DRWShadingGroup *shgroup,
uni->uniform_attrs);
DRW_sparse_uniform_buffer_bind(state->obattrs_ubo, 0, uni->location);
break;
+ case DRW_UNIFORM_BLOCK_VLATTRS:
+ state->vlattrs_loc = uni->location;
+ GPU_uniformbuf_bind(drw_ensure_layer_attribute_buffer(), uni->location);
+ break;
case DRW_UNIFORM_RESOURCE_CHUNK:
state->chunkid_loc = uni->location;
GPU_shader_uniform_int(shgroup->shader, uni->location, 0);
@@ -960,6 +965,9 @@ static void draw_call_batching_finish(DRWShadingGroup *shgroup, DRWCommandsState
if (state->obattrs_loc != -1) {
DRW_sparse_uniform_buffer_unbind(state->obattrs_ubo, state->resource_chunk);
}
+ if (state->vlattrs_loc != -1) {
+ GPU_uniformbuf_unbind(DST.vmempool->vlattrs_ubo);
+ }
}
static void draw_shgroup(DRWShadingGroup *shgroup, DRWState pass_state)
@@ -970,6 +978,7 @@ static void draw_shgroup(DRWShadingGroup *shgroup, DRWState pass_state)
.obmats_loc = -1,
.obinfos_loc = -1,
.obattrs_loc = -1,
+ .vlattrs_loc = -1,
.baseinst_loc = -1,
.chunkid_loc = -1,
.resourceid_loc = -1,
@@ -1146,15 +1155,11 @@ static void draw_shgroup(DRWShadingGroup *shgroup, DRWState pass_state)
}
}
-static void drw_update_view(const float viewport_size[2])
+static void drw_update_view(void)
{
- ViewInfos *storage = &DST.view_active->storage;
- copy_v2_v2(storage->viewport_size, viewport_size);
- copy_v2_v2(storage->viewport_size_inverse, viewport_size);
- invert_v2(storage->viewport_size_inverse);
-
/* TODO(fclem): update a big UBO and only bind ranges here. */
GPU_uniformbuf_update(G_draw.view_ubo, &DST.view_active->storage);
+ GPU_uniformbuf_update(G_draw.clipping_ubo, &DST.view_active->clip_planes);
/* TODO: get rid of this. */
DST.view_storage_cpy = DST.view_active->storage;
@@ -1180,11 +1185,8 @@ static void drw_draw_pass_ex(DRWPass *pass,
BLI_assert(DST.buffer_finish_called &&
"DRW_render_instance_buffer_finish had not been called before drawing");
- float viewport[4];
- GPU_viewport_size_get_f(viewport);
- if (DST.view_previous != DST.view_active || DST.view_active->is_dirty ||
- !equals_v2v2(DST.view_active->storage.viewport_size, &viewport[2])) {
- drw_update_view(&viewport[2]);
+ if (DST.view_previous != DST.view_active || DST.view_active->is_dirty) {
+ drw_update_view();
DST.view_active->is_dirty = false;
DST.view_previous = DST.view_active;
}
diff --git a/source/blender/draw/intern/draw_manager_profiling.c b/source/blender/draw/intern/draw_manager_profiling.c
index d14f5c7f125..92cb3e008b9 100644
--- a/source/blender/draw/intern/draw_manager_profiling.c
+++ b/source/blender/draw/intern/draw_manager_profiling.c
@@ -225,15 +225,15 @@ void DRW_stats_draw(const rcti *rect)
/* ------------------------------------------ */
/* Label row */
char col_label[32];
- sprintf(col_label, "Engine");
+ BLI_snprintf(col_label, sizeof(col_label), "Engine");
draw_stat_5row(rect, u++, v, col_label, sizeof(col_label));
- sprintf(col_label, "Init");
+ BLI_snprintf(col_label, sizeof(col_label), "Init");
draw_stat_5row(rect, u++, v, col_label, sizeof(col_label));
- sprintf(col_label, "Background");
+ BLI_snprintf(col_label, sizeof(col_label), "Background");
draw_stat_5row(rect, u++, v, col_label, sizeof(col_label));
- sprintf(col_label, "Render");
+ BLI_snprintf(col_label, sizeof(col_label), "Render");
draw_stat_5row(rect, u++, v, col_label, sizeof(col_label));
- sprintf(col_label, "Total (w/o cache)");
+ BLI_snprintf(col_label, sizeof(col_label), "Total (w/o cache)");
draw_stat_5row(rect, u++, v, col_label, sizeof(col_label));
v++;
@@ -245,42 +245,45 @@ void DRW_stats_draw(const rcti *rect)
draw_stat_5row(rect, u++, v, engine->idname, sizeof(engine->idname));
init_tot_time += data->init_time;
- sprintf(time_to_txt, "%.2fms", data->init_time);
+ BLI_snprintf(time_to_txt, sizeof(time_to_txt), "%.2fms", data->init_time);
draw_stat_5row(rect, u++, v, time_to_txt, sizeof(time_to_txt));
background_tot_time += data->background_time;
- sprintf(time_to_txt, "%.2fms", data->background_time);
+ BLI_snprintf(time_to_txt, sizeof(time_to_txt), "%.2fms", data->background_time);
draw_stat_5row(rect, u++, v, time_to_txt, sizeof(time_to_txt));
render_tot_time += data->render_time;
- sprintf(time_to_txt, "%.2fms", data->render_time);
+ BLI_snprintf(time_to_txt, sizeof(time_to_txt), "%.2fms", data->render_time);
draw_stat_5row(rect, u++, v, time_to_txt, sizeof(time_to_txt));
tot_time += data->init_time + data->background_time + data->render_time;
- sprintf(time_to_txt, "%.2fms", data->init_time + data->background_time + data->render_time);
+ BLI_snprintf(time_to_txt,
+ sizeof(time_to_txt),
+ "%.2fms",
+ data->init_time + data->background_time + data->render_time);
draw_stat_5row(rect, u++, v, time_to_txt, sizeof(time_to_txt));
v++;
}
/* Totals row */
u = 0;
- sprintf(col_label, "Sub Total");
+ BLI_snprintf(col_label, sizeof(col_label), "Sub Total");
draw_stat_5row(rect, u++, v, col_label, sizeof(col_label));
- sprintf(time_to_txt, "%.2fms", init_tot_time);
+ BLI_snprintf(time_to_txt, sizeof(time_to_txt), "%.2fms", init_tot_time);
draw_stat_5row(rect, u++, v, time_to_txt, sizeof(time_to_txt));
- sprintf(time_to_txt, "%.2fms", background_tot_time);
+ BLI_snprintf(time_to_txt, sizeof(time_to_txt), "%.2fms", background_tot_time);
draw_stat_5row(rect, u++, v, time_to_txt, sizeof(time_to_txt));
- sprintf(time_to_txt, "%.2fms", render_tot_time);
+ BLI_snprintf(time_to_txt, sizeof(time_to_txt), "%.2fms", render_tot_time);
draw_stat_5row(rect, u++, v, time_to_txt, sizeof(time_to_txt));
- sprintf(time_to_txt, "%.2fms", tot_time);
+ BLI_snprintf(time_to_txt, sizeof(time_to_txt), "%.2fms", tot_time);
draw_stat_5row(rect, u++, v, time_to_txt, sizeof(time_to_txt));
v += 2;
u = 0;
double *cache_time = DRW_view_data_cache_time_get(DST.view_data_active);
- sprintf(col_label, "Cache Time");
+ BLI_snprintf(col_label, sizeof(col_label), "Cache Time");
draw_stat_5row(rect, u++, v, col_label, sizeof(col_label));
- sprintf(time_to_txt, "%.2fms", *cache_time);
+ BLI_snprintf(time_to_txt, sizeof(time_to_txt), "%.2fms", *cache_time);
draw_stat_5row(rect, u++, v, time_to_txt, sizeof(time_to_txt));
v += 2;
@@ -292,17 +295,18 @@ void DRW_stats_draw(const rcti *rect)
uint tex_mem = GPU_texture_memory_usage_get();
uint vbo_mem = GPU_vertbuf_get_memory_usage();
- sprintf(stat_string, "GPU Memory");
+ BLI_snprintf(stat_string, sizeof(stat_string), "GPU Memory");
draw_stat(rect, 0, v, stat_string, sizeof(stat_string));
- sprintf(stat_string, "%.2fMB", (double)(tex_mem + vbo_mem) / 1000000.0);
+ BLI_snprintf(
+ stat_string, sizeof(stat_string), "%.2fMB", (double)(tex_mem + vbo_mem) / 1000000.0);
draw_stat_5row(rect, 1, v++, stat_string, sizeof(stat_string));
- sprintf(stat_string, "Textures");
+ BLI_snprintf(stat_string, sizeof(stat_string), "Textures");
draw_stat(rect, 1, v, stat_string, sizeof(stat_string));
- sprintf(stat_string, "%.2fMB", (double)tex_mem / 1000000.0);
+ BLI_snprintf(stat_string, sizeof(stat_string), "%.2fMB", (double)tex_mem / 1000000.0);
draw_stat_5row(rect, 1, v++, stat_string, sizeof(stat_string));
- sprintf(stat_string, "Meshes");
+ BLI_snprintf(stat_string, sizeof(stat_string), "Meshes");
draw_stat(rect, 1, v, stat_string, sizeof(stat_string));
- sprintf(stat_string, "%.2fMB", (double)vbo_mem / 1000000.0);
+ BLI_snprintf(stat_string, sizeof(stat_string), "%.2fMB", (double)vbo_mem / 1000000.0);
draw_stat_5row(rect, 1, v++, stat_string, sizeof(stat_string));
v += 1;
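
The hunks above replace unbounded `sprintf` calls with `BLI_snprintf`, which takes the destination size and so cannot overflow the stack buffers used for the overlay text. A standalone C++ sketch of the same bounded-formatting pattern using the standard `snprintf` (illustrative only, not part of the patch):

#include <cstdio>

int main()
{
  char time_to_txt[16];
  const double render_time_ms = 3.14159;

  /* The destination size bounds the write and the result is always
   * NUL-terminated, unlike the plain sprintf() it replaces. */
  std::snprintf(time_to_txt, sizeof(time_to_txt), "%.2fms", render_time_ms);
  std::puts(time_to_txt); /* "3.14ms" */
  return 0;
}
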
diff --git a/source/blender/draw/intern/draw_manager_shader.c b/source/blender/draw/intern/draw_manager_shader.c
index 1ada99093c6..85701a10f4b 100644
--- a/source/blender/draw/intern/draw_manager_shader.c
+++ b/source/blender/draw/intern/draw_manager_shader.c
@@ -51,6 +51,7 @@ extern char datatoc_common_fullscreen_vert_glsl[];
* \{ */
typedef struct DRWShaderCompiler {
+ /** Default compilation queue. */
ListBase queue; /* GPUMaterial */
SpinLock list_lock;
@@ -63,8 +64,8 @@ static void drw_deferred_shader_compilation_exec(
void *custom_data,
/* Cannot be const, this function implements wm_jobs_start_callback.
* NOLINTNEXTLINE: readability-non-const-parameter. */
- short *stop,
- short *UNUSED(do_update),
+ bool *stop,
+ bool *UNUSED(do_update),
float *UNUSED(progress))
{
GPU_render_begin();
@@ -109,6 +110,7 @@ static void drw_deferred_shader_compilation_exec(
MEM_freeN(link);
}
else {
+ /* No more materials to optimize, or shaders to compile. */
break;
}
@@ -216,7 +218,7 @@ static void drw_deferred_shader_add(GPUMaterial *mat, bool deferred)
}
else {
comp->gl_context = WM_opengl_context_create();
- comp->gpu_context = GPU_context_create(NULL);
+ comp->gpu_context = GPU_context_create(NULL, comp->gl_context);
GPU_context_active_set(NULL);
WM_opengl_context_activate(DST.gl_context);
@@ -235,6 +237,42 @@ static void drw_deferred_shader_add(GPUMaterial *mat, bool deferred)
WM_jobs_start(wm, wm_job);
}
+static void drw_register_shader_vlattrs(GPUMaterial *mat)
+{
+ const ListBase *attrs = GPU_material_layer_attributes(mat);
+
+ if (!attrs) {
+ return;
+ }
+
+ GHash *hash = DST.vmempool->vlattrs_name_cache;
+ ListBase *list = &DST.vmempool->vlattrs_name_list;
+
+ LISTBASE_FOREACH (GPULayerAttr *, attr, attrs) {
+ GPULayerAttr **p_val;
+
+ /* Add to the table and list if newly seen. */
+ if (!BLI_ghash_ensure_p(hash, POINTER_FROM_UINT(attr->hash_code), (void ***)&p_val)) {
+ DST.vmempool->vlattrs_ubo_ready = false;
+
+ GPULayerAttr *new_link = *p_val = MEM_dupallocN(attr);
+
+ /* Insert into the list ensuring sorted order. */
+ GPULayerAttr *link = list->first;
+
+ while (link && link->hash_code <= attr->hash_code) {
+ link = link->next;
+ }
+
+ new_link->prev = new_link->next = NULL;
+ BLI_insertlinkbefore(list, link, new_link);
+ }
+
+ /* Reset the unused frames counter. */
+ (*p_val)->users = 0;
+ }
+}
+
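
The helper above keeps `vlattrs_name_list` ordered by `hash_code` by scanning for the first larger element and inserting in front of it with `BLI_insertlinkbefore()`. A standalone C++ sketch of the same sorted-insert pattern, using `std::list` in place of `ListBase` (illustrative only, not part of the patch):

#include <cstdint>
#include <list>

struct LayerAttr {
  uint32_t hash_code;
};

/* Insert `attr` so the list stays sorted by hash_code. */
static void insert_sorted(std::list<LayerAttr> &attrs, const LayerAttr &attr)
{
  auto it = attrs.begin();
  while (it != attrs.end() && it->hash_code <= attr.hash_code) {
    ++it;
  }
  attrs.insert(it, attr);
}

int main()
{
  std::list<LayerAttr> attrs;
  insert_sorted(attrs, {30});
  insert_sorted(attrs, {10});
  insert_sorted(attrs, {20});
  /* attrs now holds hash codes 10, 20, 30. */
  return 0;
}
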
void DRW_deferred_shader_remove(GPUMaterial *mat)
{
LISTBASE_FOREACH (wmWindowManager *, wm, &G_MAIN->wm) {
@@ -243,6 +281,8 @@ void DRW_deferred_shader_remove(GPUMaterial *mat)
wm, wm, WM_JOB_TYPE_SHADER_COMPILATION);
if (comp != NULL) {
BLI_spin_lock(&comp->list_lock);
+
+ /* Search for compilation job in queue. */
LinkData *link = (LinkData *)BLI_findptr(&comp->queue, mat, offsetof(LinkData, data));
if (link) {
BLI_remlink(&comp->queue, link);
@@ -297,18 +337,6 @@ GPUShader *DRW_shader_create_with_lib_ex(const char *vert,
return sh;
}
-GPUShader *DRW_shader_create_compute_with_shaderlib(const char *comp,
- const DRWShaderLibrary *lib,
- const char *defines,
- const char *name)
-{
- char *comp_with_lib = DRW_shader_library_create_shader_string(lib, comp);
- GPUShader *sh = GPU_shader_create_compute(comp_with_lib, NULL, defines, name);
- MEM_SAFE_FREE(comp_with_lib);
-
- return sh;
-}
-
GPUShader *DRW_shader_create_with_shaderlib_ex(const char *vert,
const char *geom,
const char *frag,
@@ -390,6 +418,9 @@ GPUMaterial *DRW_shader_from_world(World *wo,
false,
callback,
thunk);
+
+ drw_register_shader_vlattrs(mat);
+
if (DRW_state_is_image_render()) {
/* Do not defer if doing render. */
deferred = false;
@@ -419,6 +450,8 @@ GPUMaterial *DRW_shader_from_material(Material *ma,
callback,
thunk);
+ drw_register_shader_vlattrs(mat);
+
if (DRW_state_is_image_render()) {
/* Do not defer if doing render. */
deferred = false;
diff --git a/source/blender/draw/intern/draw_manager_text.c b/source/blender/draw/intern/draw_manager_text.cc
index 203276e63ef..1244c46e166 100644
--- a/source/blender/draw/intern/draw_manager_text.c
+++ b/source/blender/draw/intern/draw_manager_text.cc
@@ -15,6 +15,7 @@
#include "BKE_editmesh.h"
#include "BKE_editmesh_cache.h"
#include "BKE_global.h"
+#include "BKE_mesh.h"
#include "BKE_unit.h"
#include "DNA_mesh_types.h"
@@ -38,7 +39,7 @@
#include "draw_manager_text.h"
#include "intern/bmesh_polygon.h"
-typedef struct ViewCachedString {
+struct ViewCachedString {
float vec[3];
union {
uchar ub[4];
@@ -51,20 +52,20 @@ typedef struct ViewCachedString {
/* str is allocated past the end */
char str[0];
-} ViewCachedString;
+};
-typedef struct DRWTextStore {
+struct DRWTextStore {
BLI_memiter *cache_strings;
-} DRWTextStore;
+};
DRWTextStore *DRW_text_cache_create(void)
{
- DRWTextStore *dt = MEM_callocN(sizeof(*dt), __func__);
+ DRWTextStore *dt = MEM_cnew<DRWTextStore>(__func__);
dt->cache_strings = BLI_memiter_create(1 << 14); /* 16kb */
return dt;
}
-void DRW_text_cache_destroy(struct DRWTextStore *dt)
+void DRW_text_cache_destroy(DRWTextStore *dt)
{
BLI_memiter_destroy(dt->cache_strings);
MEM_freeN(dt);
@@ -90,7 +91,8 @@ void DRW_text_cache_add(DRWTextStore *dt,
alloc_len = str_len + 1;
}
- vos = BLI_memiter_alloc(dt->cache_strings, sizeof(ViewCachedString) + alloc_len);
+ vos = static_cast<ViewCachedString *>(
+ BLI_memiter_alloc(dt->cache_strings, sizeof(ViewCachedString) + alloc_len));
copy_v3_v3(vos->vec, co);
copy_v4_v4_uchar(vos->col.ub, col);
@@ -125,10 +127,10 @@ static void drw_text_cache_draw_ex(DRWTextStore *dt, ARegion *region)
const uiStyle *style = UI_style_get();
- BLF_size(font_id, style->widget.points * U.pixelsize, U.dpi);
+ BLF_size(font_id, style->widget.points * U.dpi_fac);
BLI_memiter_iter_init(dt->cache_strings, &it);
- while ((vos = BLI_memiter_iter_step(&it))) {
+ while ((vos = static_cast<ViewCachedString *>(BLI_memiter_iter_step(&it)))) {
if (vos->sco[0] != IS_CLIPPED) {
if (col_pack_prev != vos->col.pack) {
BLF_color4ubv(font_id, vos->col.ub);
@@ -136,7 +138,7 @@ static void drw_text_cache_draw_ex(DRWTextStore *dt, ARegion *region)
}
BLF_position(
- font_id, (float)(vos->sco[0] + vos->xoffs), (float)(vos->sco[1] + vos->yoffs), 2.0f);
+ font_id, float(vos->sco[0] + vos->xoffs), float(vos->sco[1] + vos->yoffs), 2.0f);
BLF_draw(font_id,
(vos->flag & DRW_TEXT_CACHE_STRING_PTR) ? *((const char **)vos->str) : vos->str,
vos->str_len);
@@ -147,16 +149,16 @@ static void drw_text_cache_draw_ex(DRWTextStore *dt, ARegion *region)
GPU_matrix_projection_set(original_proj);
}
-void DRW_text_cache_draw(DRWTextStore *dt, ARegion *region, struct View3D *v3d)
+void DRW_text_cache_draw(DRWTextStore *dt, ARegion *region, View3D *v3d)
{
ViewCachedString *vos;
if (v3d) {
- RegionView3D *rv3d = region->regiondata;
+ RegionView3D *rv3d = static_cast<RegionView3D *>(region->regiondata);
int tot = 0;
/* project first and test */
BLI_memiter_handle it;
BLI_memiter_iter_init(dt->cache_strings, &it);
- while ((vos = BLI_memiter_iter_step(&it))) {
+ while ((vos = static_cast<ViewCachedString *>(BLI_memiter_iter_step(&it)))) {
if (ED_view3d_project_short_ex(
region,
(vos->flag & DRW_TEXT_CACHE_GLOBALSPACE) ? rv3d->persmat : rv3d->persmatob,
@@ -192,10 +194,10 @@ void DRW_text_cache_draw(DRWTextStore *dt, ARegion *region, struct View3D *v3d)
BLI_memiter_iter_init(dt->cache_strings, &it);
View2D *v2d = &region->v2d;
float viewmat[4][4];
- rctf region_space = {0.0f, region->winx, 0.0f, region->winy};
+ rctf region_space = {0.0f, float(region->winx), 0.0f, float(region->winy)};
BLI_rctf_transform_calc_m4_pivot_min(&v2d->cur, &region_space, viewmat);
- while ((vos = BLI_memiter_iter_step(&it))) {
+ while ((vos = static_cast<ViewCachedString *>(BLI_memiter_iter_step(&it)))) {
float p[3];
copy_v3_v3(p, vos->vec);
mul_m4_v3(viewmat, p);
@@ -216,9 +218,9 @@ void DRW_text_edit_mesh_measure_stats(ARegion *region,
/* Do not use ascii when using non-default unit system, some unit chars are utf8 (micro, square,
* etc.). See bug T36090.
*/
- struct DRWTextStore *dt = DRW_text_cache_ensure();
+ DRWTextStore *dt = DRW_text_cache_ensure();
const short txt_flag = DRW_TEXT_CACHE_GLOBALSPACE;
- Mesh *me = ob->data;
+ Mesh *me = static_cast<Mesh *>(ob->data);
BMEditMesh *em = me->edit_mesh;
float v1[3], v2[3], v3[3], vmid[3], fvec[3];
char numstr[32]; /* Stores the measurement display text here */
@@ -232,8 +234,9 @@ void DRW_text_edit_mesh_measure_stats(ARegion *region,
float clip_planes[4][4];
/* allow for displaying shape keys and deform mods */
BMIter iter;
- const float(*vert_coords)[3] = (me->runtime.edit_data ? me->runtime.edit_data->vertexCos : NULL);
- const bool use_coords = (vert_coords != NULL);
+ const float(*vert_coords)[3] = (me->runtime->edit_data ? me->runtime->edit_data->vertexCos :
+ nullptr);
+ const bool use_coords = (vert_coords != nullptr);
/* when 2 or more edge-info options are enabled, space apart */
short edge_tex_count = 0;
@@ -246,7 +249,7 @@ void DRW_text_edit_mesh_measure_stats(ARegion *region,
if ((v3d->overlay.edit_flag & V3D_OVERLAY_EDIT_INDICES) && (em->selectmode & SCE_SELECT_EDGE)) {
edge_tex_count += 1;
}
- const short edge_tex_sep = (short)((edge_tex_count - 1) * 5.0f * U.dpi_fac);
+ const short edge_tex_sep = short((edge_tex_count - 1) * 5.0f * U.dpi_fac);
/* Make the precision of the display value proportionate to the grid-size. */
@@ -302,11 +305,11 @@ void DRW_text_edit_mesh_measure_stats(ARegion *region,
if (clip_segment_v3_plane_n(v1, v2, clip_planes, 4, v1_clip, v2_clip)) {
mid_v3_v3v3(vmid, v1_clip, v2_clip);
- mul_m4_v3(ob->obmat, vmid);
+ mul_m4_v3(ob->object_to_world, vmid);
if (do_global) {
- mul_mat3_m4_v3(ob->obmat, v1);
- mul_mat3_m4_v3(ob->obmat, v2);
+ mul_mat3_m4_v3(ob->object_to_world, v1);
+ mul_mat3_m4_v3(ob->object_to_world, v2);
}
if (unit->system) {
@@ -334,11 +337,11 @@ void DRW_text_edit_mesh_measure_stats(ARegion *region,
UI_GetThemeColor3ubv(TH_DRAWEXTRA_EDGEANG, col);
- const float(*poly_normals)[3] = NULL;
+ const float(*poly_normals)[3] = nullptr;
if (use_coords) {
BM_mesh_elem_index_ensure(em->bm, BM_VERT | BM_FACE);
- BKE_editmesh_cache_ensure_poly_normals(em, me->runtime.edit_data);
- poly_normals = me->runtime.edit_data->polyNos;
+ BKE_editmesh_cache_ensure_poly_normals(em, me->runtime->edit_data);
+ poly_normals = me->runtime->edit_data->polyNos;
}
BM_ITER_MESH (eed, &iter, em->bm, BM_EDGES_OF_MESH) {
@@ -370,7 +373,7 @@ void DRW_text_edit_mesh_measure_stats(ARegion *region,
float angle;
mid_v3_v3v3(vmid, v1_clip, v2_clip);
- mul_m4_v3(ob->obmat, vmid);
+ mul_m4_v3(ob->object_to_world, vmid);
if (use_coords) {
copy_v3_v3(no_a, poly_normals[BM_elem_index_get(l_a->f)]);
@@ -382,8 +385,8 @@ void DRW_text_edit_mesh_measure_stats(ARegion *region,
}
if (do_global) {
- mul_mat3_m4_v3(ob->imat, no_a);
- mul_mat3_m4_v3(ob->imat, no_b);
+ mul_mat3_m4_v3(ob->world_to_object, no_a);
+ mul_mat3_m4_v3(ob->world_to_object, no_b);
normalize_v3(no_a);
normalize_v3(no_b);
}
@@ -410,7 +413,7 @@ void DRW_text_edit_mesh_measure_stats(ARegion *region,
UI_GetThemeColor3ubv(TH_DRAWEXTRA_FACEAREA, col);
int i, n;
- BMFace *f = NULL;
+ BMFace *f = nullptr;
/* Alternative to using `poly_to_tri_count(i, BM_elem_index_get(f->l_first))`
* without having to add an extra loop. */
int looptri_index = 0;
@@ -440,22 +443,22 @@ void DRW_text_edit_mesh_measure_stats(ARegion *region,
n += 3;
if (do_global) {
- mul_mat3_m4_v3(ob->obmat, v1);
- mul_mat3_m4_v3(ob->obmat, v2);
- mul_mat3_m4_v3(ob->obmat, v3);
+ mul_mat3_m4_v3(ob->object_to_world, v1);
+ mul_mat3_m4_v3(ob->object_to_world, v2);
+ mul_mat3_m4_v3(ob->object_to_world, v3);
}
area += area_tri_v3(v1, v2, v3);
}
- mul_v3_fl(vmid, 1.0f / (float)n);
- mul_m4_v3(ob->obmat, vmid);
+ mul_v3_fl(vmid, 1.0f / float(n));
+ mul_m4_v3(ob->object_to_world, vmid);
if (unit->system) {
numstr_len = BKE_unit_value_as_string(
numstr,
sizeof(numstr),
- (double)(area * unit->scale_length * unit->scale_length),
+ double(area * unit->scale_length * unit->scale_length),
3,
B_UNIT_AREA,
unit,
@@ -519,9 +522,9 @@ void DRW_text_edit_mesh_measure_stats(ARegion *region,
copy_v3_v3(v2_local, v2);
if (do_global) {
- mul_mat3_m4_v3(ob->obmat, v1);
- mul_mat3_m4_v3(ob->obmat, v2);
- mul_mat3_m4_v3(ob->obmat, v3);
+ mul_mat3_m4_v3(ob->object_to_world, v1);
+ mul_mat3_m4_v3(ob->object_to_world, v2);
+ mul_mat3_m4_v3(ob->object_to_world, v3);
}
float angle = angle_v3v3v3(v1, v2, v3);
@@ -532,7 +535,7 @@ void DRW_text_edit_mesh_measure_stats(ARegion *region,
(is_rad) ? angle : RAD2DEGF(angle),
(is_rad) ? "r" : "°");
interp_v3_v3v3(fvec, vmid, v2_local, 0.8f);
- mul_m4_v3(ob->obmat, fvec);
+ mul_m4_v3(ob->object_to_world, fvec);
DRW_text_cache_add(dt, fvec, numstr, numstr_len, 0, 0, txt_flag, col);
}
}
@@ -563,7 +566,7 @@ void DRW_text_edit_mesh_measure_stats(ARegion *region,
copy_v3_v3(v1, v->co);
}
- mul_m4_v3(ob->obmat, v1);
+ mul_m4_v3(ob->object_to_world, v1);
numstr_len = BLI_snprintf_rlen(numstr, sizeof(numstr), "%d", i);
DRW_text_cache_add(dt, v1, numstr, numstr_len, 0, 0, txt_flag, col);
@@ -592,7 +595,7 @@ void DRW_text_edit_mesh_measure_stats(ARegion *region,
if (clip_segment_v3_plane_n(v1, v2, clip_planes, 4, v1_clip, v2_clip)) {
mid_v3_v3v3(vmid, v1_clip, v2_clip);
- mul_m4_v3(ob->obmat, vmid);
+ mul_m4_v3(ob->object_to_world, vmid);
numstr_len = BLI_snprintf_rlen(numstr, sizeof(numstr), "%d", i);
DRW_text_cache_add(
@@ -626,7 +629,7 @@ void DRW_text_edit_mesh_measure_stats(ARegion *region,
BM_face_calc_center_median(f, v1);
}
- mul_m4_v3(ob->obmat, v1);
+ mul_m4_v3(ob->object_to_world, v1);
numstr_len = BLI_snprintf_rlen(numstr, sizeof(numstr), "%d", i);
DRW_text_cache_add(dt, v1, numstr, numstr_len, 0, 0, txt_flag, col);
diff --git a/source/blender/draw/intern/draw_pass.hh b/source/blender/draw/intern/draw_pass.hh
index e1a0a6652ac..2c1fd16928e 100644
--- a/source/blender/draw/intern/draw_pass.hh
+++ b/source/blender/draw/intern/draw_pass.hh
@@ -14,8 +14,7 @@
* #Pass. Use many #PassSub along with a main #Pass to reduce the overhead and allow groupings of
* commands. \note The draw call order inside a batch of multiple draws with the exact same state is
* not guaranteed and is not even deterministic. Use a #PassSimple or #PassSortable if ordering is
- * needed. \note As of now, it is also quite limited in the type of draw command it can record
- * (no custom vertex count, no custom first vertex).
+ * needed. Custom vertex count and custom first vertex will effectively disable batching.
*
* `PassSimple`:
* Does not have the overhead of #PassMain but does not have the culling and batching optimization.
@@ -160,8 +159,10 @@ class PassBase {
*
* IMPORTANT: This does not set the stencil mask/reference values. Add a call to state_stencil()
* to ensure correct behavior of stencil aware draws.
+ *
+ * TODO(fclem): clip_plane_count should be part of shader state.
*/
- void state_set(DRWState state);
+ void state_set(DRWState state, int clip_plane_count = 0);
/**
* Clear the current frame-buffer.
@@ -174,9 +175,15 @@ class PassBase {
/**
* Reminders:
- * - (compare_mask & reference) is what is tested against (compare_mask & stencil_value)
+ * - `compare_mask & reference` is what is tested against `compare_mask & stencil_value`
* stencil_value being the value stored in the stencil buffer.
- * - (write-mask & reference) is what gets written if the test condition is fulfilled.
+ * - `write_mask & reference` is what gets written if the test condition is fulfilled.
+ *
+ * This will modify the stencil state until another call to this function.
+ * If not specified before any draw-call, these states will be undefined.
+ *
+ * For more information see:
+ * https://registry.khronos.org/vulkan/specs/1.3-extensions/man/html/VkStencilOpState.html
*/
void state_stencil(uint8_t write_mask, uint8_t reference, uint8_t compare_mask);
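
A standalone C++ sketch of the masked stencil comparison and write described in the comment above, shown for an equality compare function and a replace operation (illustrative only, not part of the patch or of the GPU implementation):

#include <cstdint>

/* Stencil test for an equality compare: only bits enabled in compare_mask
 * participate in the comparison. */
static bool stencil_test_equal(uint8_t reference, uint8_t compare_mask, uint8_t stencil_value)
{
  return (compare_mask & reference) == (compare_mask & stencil_value);
}

/* Stencil write for a replace operation: only bits enabled in write_mask
 * are overwritten with the reference value. */
static uint8_t stencil_write_replace(uint8_t write_mask, uint8_t reference, uint8_t stencil_value)
{
  return uint8_t((stencil_value & ~write_mask) | (reference & write_mask));
}

int main()
{
  const uint8_t stencil_value = 0x0F;
  const bool pass = stencil_test_equal(0x0D, 0x0C, stencil_value); /* 0x0C == 0x0C -> passes */
  const uint8_t written = pass ? stencil_write_replace(0xF0, 0xAD, stencil_value) : stencil_value;
  (void)written; /* 0xAF: upper bits replaced, lower bits preserved. */
  return 0;
}
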
@@ -186,6 +193,12 @@ class PassBase {
void shader_set(GPUShader *shader);
/**
+ * Bind a framebuffer. This is equivalent to a deferred GPU_framebuffer_bind() call.
+ * \note Changes the global GPU state (outside of DRW).
+ */
+ void framebuffer_set(GPUFrameBuffer *framebuffer);
+
+ /**
* Bind a material shader along with its associated resources. Any following bind() or
* push_constant() call will use its interface.
* IMPORTANT: Assumes material is compiled and can be used (no compilation error).
@@ -403,7 +416,7 @@ class PassSortable : public PassMain {
{
int64_t index = sub_passes_.append_and_get_index(
PassBase(name, draw_commands_buf_, sub_passes_, shader_));
- headers_.append({Type::SubPass, static_cast<uint>(index)});
+ headers_.append({Type::SubPass, uint(index)});
sorting_values_.append(sorting_value);
return sub_passes_[index];
}
@@ -442,7 +455,7 @@ namespace detail {
template<class T> inline command::Undetermined &PassBase<T>::create_command(command::Type type)
{
int64_t index = commands_.append_and_get_index({});
- headers_.append({type, static_cast<uint>(index)});
+ headers_.append({type, uint(index)});
return commands_[index];
}
@@ -452,7 +465,7 @@ inline void PassBase<T>::clear(eGPUFrameBufferBits planes,
float depth,
uint8_t stencil)
{
- create_command(command::Type::Clear).clear = {(uint8_t)planes, stencil, depth, color};
+ create_command(command::Type::Clear).clear = {uint8_t(planes), stencil, depth, color};
}
template<class T> inline GPUBatch *PassBase<T>::procedural_batch_get(GPUPrimType primitive)
@@ -477,7 +490,7 @@ template<class T> inline PassBase<T> &PassBase<T>::sub(const char *name)
{
int64_t index = sub_passes_.append_and_get_index(
PassBase(name, draw_commands_buf_, sub_passes_, shader_));
- headers_.append({command::Type::SubPass, static_cast<uint>(index)});
+ headers_.append({command::Type::SubPass, uint(index)});
return sub_passes_[index];
}
@@ -720,15 +733,19 @@ template<class T> inline void PassBase<T>::barrier(eGPUBarrier type)
/** \name State Implementation
* \{ */
-template<class T> inline void PassBase<T>::state_set(DRWState state)
+template<class T> inline void PassBase<T>::state_set(DRWState state, int clip_plane_count)
{
- create_command(Type::StateSet).state_set = {state};
+ /** \note This is for compatibility with the old clip plane API. */
+ if (clip_plane_count > 0) {
+ state |= DRW_STATE_CLIP_PLANES;
+ }
+ create_command(Type::StateSet).state_set = {state, clip_plane_count};
}
template<class T>
inline void PassBase<T>::state_stencil(uint8_t write_mask, uint8_t reference, uint8_t compare_mask)
{
- create_command(Type::StencilSet).stencil_set = {write_mask, reference, compare_mask};
+ create_command(Type::StencilSet).stencil_set = {write_mask, compare_mask, reference};
}
template<class T> inline void PassBase<T>::shader_set(GPUShader *shader)
@@ -737,6 +754,11 @@ template<class T> inline void PassBase<T>::shader_set(GPUShader *shader)
create_command(Type::ShaderBind).shader_bind = {shader};
}
+template<class T> inline void PassBase<T>::framebuffer_set(GPUFrameBuffer *framebuffer)
+{
+ create_command(Type::FramebufferBind).framebuffer_bind = {framebuffer};
+}
+
template<class T> inline void PassBase<T>::material_set(Manager &manager, GPUMaterial *material)
{
GPUPass *gpupass = GPU_material_get_pass(material);
diff --git a/source/blender/draw/intern/draw_pbvh.cc b/source/blender/draw/intern/draw_pbvh.cc
new file mode 100644
index 00000000000..6c504e63511
--- /dev/null
+++ b/source/blender/draw/intern/draw_pbvh.cc
@@ -0,0 +1,1301 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later
+ * Copyright 2005 Blender Foundation. All rights reserved. */
+
+/** \file
+ * \ingroup gpu
+ *
+ * PBVH drawing.
+ * Embeds GPU meshes inside of PBVH nodes, used by mesh sculpt mode.
+ */
+
+#include <algorithm>
+#include <climits>
+#include <cstddef>
+#include <cstdlib>
+#include <cstring>
+#include <string>
+#include <vector>
+
+#include "MEM_guardedalloc.h"
+
+#include "BLI_bitmap.h"
+#include "BLI_ghash.h"
+#include "BLI_index_range.hh"
+#include "BLI_map.hh"
+#include "BLI_math_color.h"
+#include "BLI_math_vec_types.hh"
+#include "BLI_utildefines.h"
+#include "BLI_vector.hh"
+
+#include "DNA_mesh_types.h"
+#include "DNA_meshdata_types.h"
+
+#include "BKE_DerivedMesh.h"
+#include "BKE_attribute.h"
+#include "BKE_ccg.h"
+#include "BKE_customdata.h"
+#include "BKE_mesh.h"
+#include "BKE_paint.h"
+#include "BKE_pbvh.h"
+#include "BKE_subdiv_ccg.h"
+
+#include "GPU_batch.h"
+
+#include "DRW_engine.h"
+#include "DRW_pbvh.h"
+
+#include "bmesh.h"
+#include "draw_pbvh.h"
+#include "gpu_private.h"
+
+#define MAX_PBVH_BATCH_KEY 512
+#define MAX_PBVH_VBOS 16
+
+using blender::char3;
+using blender::float2;
+using blender::float3;
+using blender::float4;
+using blender::IndexRange;
+using blender::Map;
+using blender::short3;
+using blender::uchar3;
+using blender::ushort3;
+using blender::ushort4;
+using blender::Vector;
+
+using string = std::string;
+
+struct PBVHVbo {
+ uint64_t type;
+ eAttrDomain domain;
+ string name;
+ GPUVertBuf *vert_buf = nullptr;
+ string key;
+
+ PBVHVbo(eAttrDomain _domain, uint64_t _type, string _name)
+ : type(_type), domain(_domain), name(_name)
+ {
+ }
+
+ void clear_data()
+ {
+ GPU_vertbuf_clear(vert_buf);
+ }
+
+ string build_key()
+ {
+ char buf[512];
+
+ BLI_snprintf(buf, sizeof(buf), "%d:%d:%s", int(type), int(domain), name.c_str());
+
+ key = string(buf);
+ return key;
+ }
+};
+
+struct PBVHBatch {
+ Vector<int> vbos;
+ string key;
+ GPUBatch *tris = nullptr, *lines = nullptr;
+ int tris_count = 0, lines_count = 0;
+
+ void sort_vbos(Vector<PBVHVbo> &master_vbos)
+ {
+ struct cmp {
+ Vector<PBVHVbo> &master_vbos;
+
+ cmp(Vector<PBVHVbo> &_master_vbos) : master_vbos(_master_vbos)
+ {
+ }
+
+ bool operator()(const int &a, const int &b)
+ {
+ return master_vbos[a].key < master_vbos[b].key;
+ }
+ };
+
+ std::sort(vbos.begin(), vbos.end(), cmp(master_vbos));
+ }
+
+ string build_key(Vector<PBVHVbo> &master_vbos)
+ {
+ key = "";
+
+ sort_vbos(master_vbos);
+
+ for (int vbo_i : vbos) {
+ key += master_vbos[vbo_i].key + ":";
+ }
+
+ return key;
+ }
+};
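
`PBVHBatch::build_key()` above sorts the referenced VBO keys before concatenating them, so the same set of attributes always maps to the same batch key regardless of request order. A standalone C++ sketch of that keying scheme (illustrative only, not part of the patch; the key strings are hypothetical):

#include <algorithm>
#include <cstdio>
#include <string>
#include <vector>

/* Concatenate the per-VBO keys in sorted order. */
static std::string build_batch_key(std::vector<std::string> vbo_keys)
{
  std::sort(vbo_keys.begin(), vbo_keys.end());
  std::string key;
  for (const std::string &vbo_key : vbo_keys) {
    key += vbo_key + ":";
  }
  return key;
}

int main()
{
  /* Hypothetical keys in the "type:domain:name" form used by PBVHVbo::build_key(). */
  const std::string key = build_batch_key({"47:0:", "8:2:UVMap", "34:0:"});
  printf("%s\n", key.c_str()); /* "34:0::47:0::8:2:UVMap:" */
  return 0;
}
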
+
+static CustomData *get_cdata(eAttrDomain domain, PBVH_GPU_Args *args)
+{
+ switch (domain) {
+ case ATTR_DOMAIN_POINT:
+ return args->vdata;
+ case ATTR_DOMAIN_CORNER:
+ return args->ldata;
+ case ATTR_DOMAIN_FACE:
+ return args->pdata;
+ default:
+ return nullptr;
+ }
+}
+
+struct PBVHBatches {
+ Vector<PBVHVbo> vbos;
+ Map<string, PBVHBatch> batches;
+ GPUIndexBuf *tri_index = nullptr;
+ GPUIndexBuf *lines_index = nullptr;
+ int faces_count = 0; /* Used by PBVH_BMESH and PBVH_GRIDS */
+ int tris_count = 0, lines_count = 0;
+ bool needs_tri_index = false;
+
+ int material_index = 0;
+
+ int count_faces(PBVH_GPU_Args *args)
+ {
+ int count = 0;
+
+ switch (args->pbvh_type) {
+ case PBVH_FACES: {
+ for (int i = 0; i < args->totprim; i++) {
+ int face_index = args->mlooptri[args->prim_indices[i]].poly;
+
+ if (args->hide_poly && args->hide_poly[face_index]) {
+ continue;
+ }
+
+ count++;
+ }
+ break;
+ }
+ case PBVH_GRIDS: {
+ count = BKE_pbvh_count_grid_quads((BLI_bitmap **)args->grid_hidden,
+ args->grid_indices,
+ args->totprim,
+ args->ccg_key.grid_size);
+
+ break;
+ }
+ case PBVH_BMESH: {
+ GSET_FOREACH_BEGIN (BMFace *, f, args->bm_faces) {
+ if (!BM_elem_flag_test(f, BM_ELEM_HIDDEN)) {
+ count++;
+ }
+ }
+ GSET_FOREACH_END();
+ }
+ }
+
+ return count;
+ }
+
+ PBVHBatches(PBVH_GPU_Args *args)
+ {
+ faces_count = count_faces(args);
+
+ if (args->pbvh_type == PBVH_BMESH) {
+ tris_count = faces_count;
+ }
+ }
+
+ ~PBVHBatches()
+ {
+ for (PBVHBatch &batch : batches.values()) {
+ GPU_BATCH_DISCARD_SAFE(batch.tris);
+ GPU_BATCH_DISCARD_SAFE(batch.lines);
+ }
+
+ for (PBVHVbo &vbo : vbos) {
+ GPU_vertbuf_discard(vbo.vert_buf);
+ }
+
+ GPU_INDEXBUF_DISCARD_SAFE(tri_index);
+ GPU_INDEXBUF_DISCARD_SAFE(lines_index);
+ }
+
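+  /* Compute the batch key for a set of attribute requests. Temporary #PBVHVbo and #PBVHBatch
+   * instances are used only to derive the key; the cached VBOs are not touched. */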
+ string build_key(PBVHAttrReq *attrs, int attrs_num)
+ {
+ string key;
+ PBVHBatch batch;
+ Vector<PBVHVbo> vbos;
+
+ for (int i : IndexRange(attrs_num)) {
+ PBVHAttrReq *attr = attrs + i;
+
+ PBVHVbo vbo(attr->domain, attr->type, string(attr->name));
+ vbo.build_key();
+
+ vbos.append(vbo);
+ batch.vbos.append(i);
+ }
+
+ batch.build_key(vbos);
+ return batch.key;
+ }
+
+ bool has_vbo(eAttrDomain domain, int type, string name)
+ {
+ for (PBVHVbo &vbo : vbos) {
+ if (vbo.domain == domain && vbo.type == type && vbo.name == name) {
+ return true;
+ }
+ }
+
+ return false;
+ }
+
+ int get_vbo_index(PBVHVbo *vbo)
+ {
+ for (int i : IndexRange(vbos.size())) {
+ if (vbo == &vbos[i]) {
+ return i;
+ }
+ }
+
+ return -1;
+ }
+
+ PBVHVbo *get_vbo(eAttrDomain domain, int type, string name)
+ {
+ for (PBVHVbo &vbo : vbos) {
+ if (vbo.domain == domain && vbo.type == type && vbo.name == name) {
+ return &vbo;
+ }
+ }
+
+ return nullptr;
+ }
+
+ bool has_batch(PBVHAttrReq *attrs, int attrs_num)
+ {
+ return batches.contains(build_key(attrs, attrs_num));
+ }
+
+ PBVHBatch &ensure_batch(PBVHAttrReq *attrs, int attrs_num, PBVH_GPU_Args *args)
+ {
+ if (!has_batch(attrs, attrs_num)) {
+ create_batch(attrs, attrs_num, args);
+ }
+
+ return batches.lookup(build_key(attrs, attrs_num));
+ }
+
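+  /* Fill a normal VBO for #PBVH_FACES, using the flat polygon normal or the smooth per-vertex
+   * normal depending on the polygon's #ME_SMOOTH flag. */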
+ void fill_vbo_normal_faces(
+ PBVHVbo & /*vbo*/,
+ PBVH_GPU_Args *args,
+ std::function<void(std::function<void(int, int, int, const MLoopTri *)> callback)>
+ foreach_faces,
+ GPUVertBufRaw *access)
+ {
+ float fno[3];
+ short no[3];
+ int last_poly = -1;
+ bool smooth = false;
+
+ foreach_faces([&](int /*buffer_i*/, int /*tri_i*/, int vertex_i, const MLoopTri *tri) {
+ const MPoly *mp = args->mpoly + tri->poly;
+
+      if (tri->poly != last_poly) {
+        last_poly = tri->poly;
+
+        smooth = (mp->flag & ME_SMOOTH) != 0;
+        if (!smooth) {
+          /* Flat shading: compute the polygon normal once and reuse it for every corner. */
+          BKE_mesh_calc_poly_normal(mp, args->mloop + mp->loopstart, args->mvert, fno);
+          normal_float_to_short_v3(no, fno);
+        }
+      }
+
+      if (smooth) {
+        /* Smooth shading: use the per-vertex normal. */
+        normal_float_to_short_v3(no, args->vert_normals[vertex_i]);
+      }
+
+ *static_cast<short3 *>(GPU_vertbuf_raw_step(access)) = no;
+ });
+ }
+
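+  /* Fill a VBO for #PBVH_GRIDS. The `foreach_grids` callable abstracts over the two vertex
+   * layouts: one vertex per grid point (indexed) or four vertices per grid quad. */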
+ void fill_vbo_grids_intern(
+ PBVHVbo &vbo,
+ PBVH_GPU_Args *args,
+ std::function<
+ void(std::function<void(int x, int y, int grid_index, CCGElem *elems[4], int i)> func)>
+ foreach_grids)
+ {
+ uint vert_per_grid = square_i(args->ccg_key.grid_size - 1) * 4;
+ uint vert_count = args->totprim * vert_per_grid;
+
+ int existing_num = GPU_vertbuf_get_vertex_len(vbo.vert_buf);
+ void *existing_data = GPU_vertbuf_get_data(vbo.vert_buf);
+
+ if (existing_data == nullptr || existing_num != vert_count) {
+ /* Allocate buffer if not allocated yet or size changed. */
+ GPU_vertbuf_data_alloc(vbo.vert_buf, vert_count);
+ }
+
+ GPUVertBufRaw access;
+ GPU_vertbuf_attr_get_raw_data(vbo.vert_buf, 0, &access);
+
+ switch (vbo.type) {
+ case CD_PROP_COLOR:
+ case CD_PROP_BYTE_COLOR: {
+ /* TODO: Implement color support for multires similar to the mesh cache
+ * extractor code. For now just upload white.
+ */
+ const ushort4 white(USHRT_MAX, USHRT_MAX, USHRT_MAX, USHRT_MAX);
+
+ foreach_grids(
+ [&](int /*x*/, int /*y*/, int /*grid_index*/, CCGElem * /*elems*/[4], int /*i*/) {
+ *static_cast<ushort4 *>(GPU_vertbuf_raw_step(&access)) = white;
+ });
+ break;
+ }
+ case CD_PBVH_CO_TYPE:
+ foreach_grids([&](int /*x*/, int /*y*/, int /*grid_index*/, CCGElem *elems[4], int i) {
+ float *co = CCG_elem_co(&args->ccg_key, elems[i]);
+
+ *static_cast<float3 *>(GPU_vertbuf_raw_step(&access)) = co;
+ });
+ break;
+
+ case CD_PBVH_NO_TYPE:
+ foreach_grids([&](int /*x*/, int /*y*/, int grid_index, CCGElem *elems[4], int /*i*/) {
+ float3 no(0.0f, 0.0f, 0.0f);
+
+ const bool smooth = args->grid_flag_mats[grid_index].flag & ME_SMOOTH;
+
+ if (smooth) {
+ no = CCG_elem_no(&args->ccg_key, elems[0]);
+ }
+ else {
+ for (int j = 0; j < 4; j++) {
+ no += CCG_elem_no(&args->ccg_key, elems[j]);
+ }
+ }
+
+ normalize_v3(no);
+ short sno[3];
+
+ normal_float_to_short_v3(sno, no);
+
+ *static_cast<short3 *>(GPU_vertbuf_raw_step(&access)) = sno;
+ });
+ break;
+
+ case CD_PBVH_MASK_TYPE:
+ if (args->ccg_key.has_mask) {
+ foreach_grids([&](int /*x*/, int /*y*/, int /*grid_index*/, CCGElem *elems[4], int i) {
+ float *mask = CCG_elem_mask(&args->ccg_key, elems[i]);
+
+ *static_cast<uchar *>(GPU_vertbuf_raw_step(&access)) = uchar(*mask * 255.0f);
+ });
+ }
+ else {
+ foreach_grids(
+ [&](int /*x*/, int /*y*/, int /*grid_index*/, CCGElem * /*elems*/[4], int /*i*/) {
+ *static_cast<uchar *>(GPU_vertbuf_raw_step(&access)) = 0;
+ });
+ }
+ break;
+
+ case CD_PBVH_FSET_TYPE: {
+ int *face_sets = args->face_sets;
+
+ if (!face_sets) {
+ uchar white[3] = {UCHAR_MAX, UCHAR_MAX, UCHAR_MAX};
+
+ foreach_grids(
+ [&](int /*x*/, int /*y*/, int /*grid_index*/, CCGElem * /*elems*/[4], int /*i*/) {
+ *static_cast<uchar3 *>(GPU_vertbuf_raw_step(&access)) = white;
+ });
+ }
+ else {
+ foreach_grids(
+ [&](int /*x*/, int /*y*/, int grid_index, CCGElem * /*elems*/[4], int /*i*/) {
+ uchar face_set_color[4] = {UCHAR_MAX, UCHAR_MAX, UCHAR_MAX, UCHAR_MAX};
+
+ if (face_sets) {
+ const int face_index = BKE_subdiv_ccg_grid_to_face_index(args->subdiv_ccg,
+ grid_index);
+ const int fset = face_sets[face_index];
+
+                  /* Skip the overlay color for the default Face Set so it renders white. */
+ if (fset != args->face_sets_color_default) {
+ BKE_paint_face_set_overlay_color_get(
+ fset, args->face_sets_color_seed, face_set_color);
+ }
+ }
+
+ *static_cast<uchar3 *>(GPU_vertbuf_raw_step(&access)) = face_set_color;
+ });
+ }
+ break;
+ }
+ }
+ }
+
+ void fill_vbo_grids(PBVHVbo &vbo, PBVH_GPU_Args *args)
+ {
+ int gridsize = args->ccg_key.grid_size;
+
+ uint totgrid = args->totprim;
+
+ auto foreach_solid =
+ [&](std::function<void(int x, int y, int grid_index, CCGElem *elems[4], int i)> func) {
+ for (int i = 0; i < totgrid; i++) {
+ const int grid_index = args->grid_indices[i];
+
+ CCGElem *grid = args->grids[grid_index];
+
+ for (int y = 0; y < gridsize - 1; y++) {
+ for (int x = 0; x < gridsize - 1; x++) {
+ CCGElem *elems[4] = {
+ CCG_grid_elem(&args->ccg_key, grid, x, y),
+ CCG_grid_elem(&args->ccg_key, grid, x + 1, y),
+ CCG_grid_elem(&args->ccg_key, grid, x + 1, y + 1),
+ CCG_grid_elem(&args->ccg_key, grid, x, y + 1),
+ };
+
+ func(x, y, grid_index, elems, 0);
+ func(x + 1, y, grid_index, elems, 1);
+ func(x + 1, y + 1, grid_index, elems, 2);
+ func(x, y + 1, grid_index, elems, 3);
+ }
+ }
+ }
+ };
+
+ auto foreach_indexed =
+ [&](std::function<void(int x, int y, int grid_index, CCGElem *elems[4], int i)> func) {
+ for (int i = 0; i < totgrid; i++) {
+ const int grid_index = args->grid_indices[i];
+
+ CCGElem *grid = args->grids[grid_index];
+
+ for (int y = 0; y < gridsize; y++) {
+ for (int x = 0; x < gridsize; x++) {
+ CCGElem *elems[4] = {
+ CCG_grid_elem(&args->ccg_key, grid, x, y),
+ CCG_grid_elem(&args->ccg_key, grid, x + 1, y),
+ CCG_grid_elem(&args->ccg_key, grid, x + 1, y + 1),
+ CCG_grid_elem(&args->ccg_key, grid, x, y + 1),
+ };
+
+ func(x, y, grid_index, elems, 0);
+ }
+ }
+ }
+ };
+
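+    /* `needs_tri_index` is set by create_index_grids(): the indexed layout stores one vertex
+     * per grid point, the non-indexed layout stores four vertices per quad. */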
+ if (needs_tri_index) {
+ fill_vbo_grids_intern(vbo, args, foreach_indexed);
+ }
+ else {
+ fill_vbo_grids_intern(vbo, args, foreach_solid);
+ }
+ }
+
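+  /* Fill a VBO for #PBVH_FACES by visiting the three corners of every visible triangle. */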
+ void fill_vbo_faces(PBVHVbo &vbo, PBVH_GPU_Args *args)
+ {
+ auto foreach_faces =
+ [&](std::function<void(int buffer_i, int tri_i, int vertex_i, const MLoopTri *tri)> func) {
+ int buffer_i = 0;
+ const MLoop *mloop = args->mloop;
+
+ for (int i : IndexRange(args->totprim)) {
+ int face_index = args->mlooptri[args->prim_indices[i]].poly;
+
+ if (args->hide_poly && args->hide_poly[face_index]) {
+ continue;
+ }
+
+ const MLoopTri *tri = args->mlooptri + args->prim_indices[i];
+
+ for (int j : IndexRange(3)) {
+ func(buffer_i, j, mloop[tri->tri[j]].v, tri);
+ buffer_i++;
+ }
+ }
+ };
+
+ int totvert = 0;
+ foreach_faces([&totvert](int, int, int, const MLoopTri *) { totvert++; });
+
+ int existing_num = GPU_vertbuf_get_vertex_len(vbo.vert_buf);
+ void *existing_data = GPU_vertbuf_get_data(vbo.vert_buf);
+
+ if (existing_data == nullptr || existing_num != totvert) {
+ /* Allocate buffer if not allocated yet or size changed. */
+ GPU_vertbuf_data_alloc(vbo.vert_buf, totvert);
+ }
+
+ GPUVertBufRaw access;
+ GPU_vertbuf_attr_get_raw_data(vbo.vert_buf, 0, &access);
+
+ switch (vbo.type) {
+ case CD_PBVH_CO_TYPE:
+ foreach_faces(
+ [&](int /*buffer_i*/, int /*tri_i*/, int vertex_i, const MLoopTri * /*tri*/) {
+ *static_cast<float3 *>(GPU_vertbuf_raw_step(&access)) = args->mvert[vertex_i].co;
+ });
+ break;
+ case CD_PBVH_NO_TYPE:
+ fill_vbo_normal_faces(vbo, args, foreach_faces, &access);
+ break;
+ case CD_PBVH_MASK_TYPE: {
+ float *mask = static_cast<float *>(CustomData_get_layer(args->vdata, CD_PAINT_MASK));
+
+ if (mask) {
+ foreach_faces(
+ [&](int /*buffer_i*/, int /*tri_i*/, int vertex_i, const MLoopTri * /*tri*/) {
+ *static_cast<uchar *>(GPU_vertbuf_raw_step(&access)) = uchar(mask[vertex_i] *
+ 255.0f);
+ });
+ }
+ else {
+ foreach_faces(
+ [&](int /*buffer_i*/, int /*tri_i*/, int /*vertex_i*/, const MLoopTri * /*tri*/) {
+ *static_cast<uchar *>(GPU_vertbuf_raw_step(&access)) = 0;
+ });
+ }
+ break;
+ }
+ case CD_PBVH_FSET_TYPE: {
+ int *face_sets = static_cast<int *>(
+ CustomData_get_layer_named(args->pdata, CD_PROP_INT32, ".sculpt_face_set"));
+
+ if (face_sets) {
+ int last_poly = -1;
+ uchar fset_color[4] = {UCHAR_MAX, UCHAR_MAX, UCHAR_MAX, UCHAR_MAX};
+
+ foreach_faces(
+ [&](int /*buffer_i*/, int /*tri_i*/, int /*vertex_i*/, const MLoopTri *tri) {
+ if (last_poly != tri->poly) {
+ last_poly = tri->poly;
+
+ const int fset = face_sets[tri->poly];
+
+ if (fset != args->face_sets_color_default) {
+ BKE_paint_face_set_overlay_color_get(
+ fset, args->face_sets_color_seed, fset_color);
+ }
+ else {
+                  /* Default Face Set: render it white. */
+ fset_color[0] = fset_color[1] = fset_color[2] = UCHAR_MAX;
+ }
+ }
+
+ *static_cast<uchar3 *>(GPU_vertbuf_raw_step(&access)) = fset_color;
+ });
+ }
+ else {
+ uchar fset_color[4] = {255, 255, 255, 255};
+
+ foreach_faces(
+ [&](int /*buffer_i*/, int /*tri_i*/, int /*vertex_i*/, const MLoopTri * /*tri*/) {
+ *static_cast<uchar3 *>(GPU_vertbuf_raw_step(&access)) = fset_color;
+ });
+ }
+
+ break;
+ }
+ case CD_MLOOPUV: {
+ MLoopUV *mloopuv = static_cast<MLoopUV *>(
+ CustomData_get_layer_named(args->ldata, CD_MLOOPUV, vbo.name.c_str()));
+
+ foreach_faces([&](int /*buffer_i*/, int tri_i, int /*vertex_i*/, const MLoopTri *tri) {
+ *static_cast<float2 *>(GPU_vertbuf_raw_step(&access)) = mloopuv[tri->tri[tri_i]].uv;
+ });
+ break;
+ }
+ case CD_PROP_COLOR:
+ if (vbo.domain == ATTR_DOMAIN_POINT) {
+ MPropCol *mpropcol = static_cast<MPropCol *>(
+ CustomData_get_layer_named(args->vdata, CD_PROP_COLOR, vbo.name.c_str()));
+
+ foreach_faces(
+ [&](int /*buffer_i*/, int /*tri_i*/, int vertex_i, const MLoopTri * /*tri*/) {
+ ushort color[4];
+ MPropCol *col = mpropcol + vertex_i;
+
+ color[0] = unit_float_to_ushort_clamp(col->color[0]);
+ color[1] = unit_float_to_ushort_clamp(col->color[1]);
+ color[2] = unit_float_to_ushort_clamp(col->color[2]);
+ color[3] = unit_float_to_ushort_clamp(col->color[3]);
+
+ *static_cast<ushort4 *>(GPU_vertbuf_raw_step(&access)) = color;
+ });
+ }
+ else if (vbo.domain == ATTR_DOMAIN_CORNER) {
+ MPropCol *mpropcol = static_cast<MPropCol *>(
+ CustomData_get_layer_named(args->ldata, CD_PROP_COLOR, vbo.name.c_str()));
+
+ foreach_faces([&](int /*buffer_i*/, int tri_i, int /*vertex_i*/, const MLoopTri *tri) {
+ ushort color[4];
+ MPropCol *col = mpropcol + tri->tri[tri_i];
+
+ color[0] = unit_float_to_ushort_clamp(col->color[0]);
+ color[1] = unit_float_to_ushort_clamp(col->color[1]);
+ color[2] = unit_float_to_ushort_clamp(col->color[2]);
+ color[3] = unit_float_to_ushort_clamp(col->color[3]);
+
+ *static_cast<ushort4 *>(GPU_vertbuf_raw_step(&access)) = color;
+ });
+ }
+ break;
+ case CD_PROP_BYTE_COLOR:
+ if (vbo.domain == ATTR_DOMAIN_POINT) {
+ MLoopCol *mbytecol = static_cast<MLoopCol *>(
+ CustomData_get_layer_named(args->vdata, CD_PROP_BYTE_COLOR, vbo.name.c_str()));
+
+ foreach_faces(
+ [&](int /*buffer_i*/, int /*tri_i*/, int vertex_i, const MLoopTri * /*tri*/) {
+ ushort color[4];
+ MLoopCol *col = mbytecol + vertex_i;
+
+ color[0] = unit_float_to_ushort_clamp(BLI_color_from_srgb_table[col->r]);
+ color[1] = unit_float_to_ushort_clamp(BLI_color_from_srgb_table[col->g]);
+ color[2] = unit_float_to_ushort_clamp(BLI_color_from_srgb_table[col->b]);
+ color[3] = col->a * 257;
+
+ *static_cast<ushort4 *>(GPU_vertbuf_raw_step(&access)) = color;
+ });
+ }
+ else if (vbo.domain == ATTR_DOMAIN_CORNER) {
+ MLoopCol *mbytecol = static_cast<MLoopCol *>(
+ CustomData_get_layer_named(args->ldata, CD_PROP_BYTE_COLOR, vbo.name.c_str()));
+
+ foreach_faces([&](int /*buffer_i*/, int tri_i, int /*vertex_i*/, const MLoopTri *tri) {
+ ushort color[4];
+ MLoopCol *col = mbytecol + tri->tri[tri_i];
+
+ color[0] = unit_float_to_ushort_clamp(BLI_color_from_srgb_table[col->r]);
+ color[1] = unit_float_to_ushort_clamp(BLI_color_from_srgb_table[col->g]);
+ color[2] = unit_float_to_ushort_clamp(BLI_color_from_srgb_table[col->b]);
+ color[3] = col->a * 257;
+
+ *static_cast<ushort4 *>(GPU_vertbuf_raw_step(&access)) = color;
+ });
+ }
+ break;
+ }
+ }
+
+ void gpu_flush()
+ {
+ for (PBVHVbo &vbo : vbos) {
+ if (vbo.vert_buf && GPU_vertbuf_get_data(vbo.vert_buf)) {
+ GPU_vertbuf_use(vbo.vert_buf);
+ }
+ }
+ }
+
+ void update(PBVH_GPU_Args *args)
+ {
+ check_index_buffers(args);
+
+ for (PBVHVbo &vbo : vbos) {
+ fill_vbo(vbo, args);
+ }
+ }
+
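+  /* Fill a VBO for #PBVH_BMESH. Faces are assumed to be triangles: the three loops of every
+   * visible face are written in order, without an index buffer. */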
+ void fill_vbo_bmesh(PBVHVbo &vbo, PBVH_GPU_Args *args)
+ {
+ auto foreach_bmesh = [&](std::function<void(BMLoop * l)> callback) {
+ GSET_FOREACH_BEGIN (BMFace *, f, args->bm_faces) {
+ if (BM_elem_flag_test(f, BM_ELEM_HIDDEN)) {
+ continue;
+ }
+
+ BMLoop *l = f->l_first;
+ callback(l->prev);
+ callback(l);
+ callback(l->next);
+ }
+ GSET_FOREACH_END();
+ };
+
+ faces_count = tris_count = count_faces(args);
+
+ int existing_num = GPU_vertbuf_get_vertex_len(vbo.vert_buf);
+ void *existing_data = GPU_vertbuf_get_data(vbo.vert_buf);
+
+ int vert_count = tris_count * 3;
+
+ if (existing_data == nullptr || existing_num != vert_count) {
+ /* Allocate buffer if not allocated yet or size changed. */
+ GPU_vertbuf_data_alloc(vbo.vert_buf, vert_count);
+ }
+
+ GPUVertBufRaw access;
+ GPU_vertbuf_attr_get_raw_data(vbo.vert_buf, 0, &access);
+
+#if 0 /* Enable to fuzz GPU data (to check for over-allocation). */
+ existing_data = GPU_vertbuf_get_data(vbo.vert_buf);
+ uchar *c = static_cast<uchar *>(existing_data);
+ for (int i : IndexRange(vert_count * access.stride)) {
+ *c++ = i & 255;
+ }
+#endif
+
+ switch (vbo.type) {
+ case CD_PROP_COLOR:
+ case CD_PROP_BYTE_COLOR: {
+ ushort4 white = {USHRT_MAX, USHRT_MAX, USHRT_MAX, USHRT_MAX};
+
+ foreach_bmesh([&](BMLoop * /*l*/) {
+ *static_cast<ushort4 *>(GPU_vertbuf_raw_step(&access)) = white;
+ });
+ break;
+ }
+ case CD_PBVH_CO_TYPE:
+ foreach_bmesh(
+ [&](BMLoop *l) { *static_cast<float3 *>(GPU_vertbuf_raw_step(&access)) = l->v->co; });
+ break;
+
+ case CD_PBVH_NO_TYPE:
+ foreach_bmesh([&](BMLoop *l) {
+ short no[3];
+ bool smooth = BM_elem_flag_test(l->f, BM_ELEM_SMOOTH);
+
+ normal_float_to_short_v3(no, smooth ? l->v->no : l->f->no);
+ *static_cast<short3 *>(GPU_vertbuf_raw_step(&access)) = no;
+ });
+ break;
+
+ case CD_PBVH_MASK_TYPE: {
+ int cd_mask = args->cd_mask_layer;
+
+ if (cd_mask == -1) {
+          /* The mask VBO stores one byte per vertex (see create_vbo), so write a `uchar`
+           * to match the vertex format. */
+          foreach_bmesh(
+              [&](BMLoop * /*l*/) { *static_cast<uchar *>(GPU_vertbuf_raw_step(&access)) = 0; });
+ }
+ else {
+ foreach_bmesh([&](BMLoop *l) {
+ float mask = BM_ELEM_CD_GET_FLOAT(l->v, cd_mask);
+
+ *static_cast<uchar *>(GPU_vertbuf_raw_step(&access)) = uchar(mask * 255.0f);
+ });
+ }
+ break;
+ }
+ case CD_PBVH_FSET_TYPE: {
+ uchar3 white(UCHAR_MAX, UCHAR_MAX, UCHAR_MAX);
+
+ foreach_bmesh([&](BMLoop * /*l*/) {
+ *static_cast<uchar3 *>(GPU_vertbuf_raw_step(&access)) = white;
+ });
+ }
+ }
+ }
+
+ void fill_vbo(PBVHVbo &vbo, PBVH_GPU_Args *args)
+ {
+ switch (args->pbvh_type) {
+ case PBVH_FACES:
+ fill_vbo_faces(vbo, args);
+ break;
+ case PBVH_GRIDS:
+ fill_vbo_grids(vbo, args);
+ break;
+ case PBVH_BMESH:
+ fill_vbo_bmesh(vbo, args);
+ break;
+ }
+ }
+
+ void create_vbo(eAttrDomain domain, const uint32_t type, string name, PBVH_GPU_Args *args)
+ {
+ PBVHVbo vbo(domain, type, name);
+ GPUVertFormat format;
+
+ bool need_aliases = !ELEM(
+ type, CD_PBVH_CO_TYPE, CD_PBVH_NO_TYPE, CD_PBVH_FSET_TYPE, CD_PBVH_MASK_TYPE);
+
+ GPU_vertformat_clear(&format);
+
+ switch (type) {
+ case CD_PBVH_CO_TYPE:
+ GPU_vertformat_attr_add(&format, "pos", GPU_COMP_F32, 3, GPU_FETCH_FLOAT);
+ break;
+ case CD_PROP_FLOAT3:
+ GPU_vertformat_attr_add(&format, "a", GPU_COMP_F32, 3, GPU_FETCH_FLOAT);
+ need_aliases = true;
+ break;
+ case CD_PBVH_NO_TYPE:
+ GPU_vertformat_attr_add(&format, "nor", GPU_COMP_I16, 3, GPU_FETCH_INT_TO_FLOAT_UNIT);
+ break;
+ case CD_PROP_FLOAT2:
+ GPU_vertformat_attr_add(&format, "a", GPU_COMP_F32, 2, GPU_FETCH_FLOAT);
+ need_aliases = true;
+ break;
+ case CD_MLOOPUV:
+ GPU_vertformat_attr_add(&format, "uvs", GPU_COMP_F32, 2, GPU_FETCH_FLOAT);
+ need_aliases = true;
+ break;
+ case CD_PBVH_FSET_TYPE:
+ GPU_vertformat_attr_add(&format, "fset", GPU_COMP_U8, 3, GPU_FETCH_INT_TO_FLOAT_UNIT);
+ break;
+ case CD_PBVH_MASK_TYPE:
+ GPU_vertformat_attr_add(&format, "msk", GPU_COMP_U8, 1, GPU_FETCH_INT_TO_FLOAT_UNIT);
+ break;
+ case CD_PROP_FLOAT:
+ GPU_vertformat_attr_add(&format, "f", GPU_COMP_F32, 1, GPU_FETCH_FLOAT);
+ need_aliases = true;
+ break;
+ case CD_PROP_COLOR:
+ case CD_PROP_BYTE_COLOR: {
+ GPU_vertformat_attr_add(&format, "c", GPU_COMP_U16, 4, GPU_FETCH_INT_TO_FLOAT_UNIT);
+ need_aliases = true;
+ break;
+ }
+ default:
+ BLI_assert(0);
+ printf("%s: error\n", __func__);
+
+ break;
+ }
+
+ if (need_aliases) {
+ CustomData *cdata = get_cdata(domain, args);
+ int layer_i = cdata ? CustomData_get_named_layer_index(cdata, type, name.c_str()) : -1;
+ CustomDataLayer *layer = layer_i != -1 ? cdata->layers + layer_i : nullptr;
+
+ if (layer) {
+ bool is_render, is_active;
+ const char *prefix = "a";
+
+ if (ELEM(type, CD_PROP_COLOR, CD_PROP_BYTE_COLOR)) {
+ Mesh query_mesh;
+
+          /* Check if we have args->me; if not, use get_cdata to build a temporary mesh we can
+           * query for color attributes. */
+ if (args->me) {
+ memcpy(static_cast<void *>(&query_mesh),
+ static_cast<const void *>(args->me),
+ sizeof(Mesh));
+ }
+ else {
+ BKE_id_attribute_copy_domains_temp(ID_ME,
+ get_cdata(ATTR_DOMAIN_POINT, args),
+ nullptr,
+ get_cdata(ATTR_DOMAIN_CORNER, args),
+ nullptr,
+ nullptr,
+ &query_mesh.id);
+ }
+
+ prefix = "c";
+
+ CustomDataLayer *render = BKE_id_attributes_render_color_get(&query_mesh.id);
+ CustomDataLayer *active = BKE_id_attributes_active_color_get(&query_mesh.id);
+
+ is_render = render && layer && STREQ(render->name, layer->name);
+ is_active = active && layer && STREQ(active->name, layer->name);
+ }
+ else {
+ switch (type) {
+ case CD_MLOOPUV:
+ prefix = "u";
+ break;
+ default:
+ break;
+ }
+
+ const char *active_name = CustomData_get_active_layer_name(cdata, type);
+ const char *render_name = CustomData_get_render_layer_name(cdata, type);
+
+ is_active = active_name && STREQ(layer->name, active_name);
+ is_render = render_name && STREQ(layer->name, render_name);
+ }
+
+ DRW_cdlayer_attr_aliases_add(&format, prefix, cdata, layer, is_render, is_active);
+ }
+ else {
+ printf("%s: error looking up attribute %s\n", __func__, name.c_str());
+ }
+ }
+
+ vbo.vert_buf = GPU_vertbuf_create_with_format_ex(&format, GPU_USAGE_STATIC);
+ vbo.build_key();
+ fill_vbo(vbo, args);
+
+ vbos.append(vbo);
+ }
+
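+  /* For #PBVH_BMESH the face count can change between updates. When it does, clear the VBO data
+   * and discard the index buffers so that everything is rebuilt by the next update. */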
+ void update_pre(PBVH_GPU_Args *args)
+ {
+ if (args->pbvh_type == PBVH_BMESH) {
+ int count = count_faces(args);
+
+ if (faces_count != count) {
+ for (PBVHVbo &vbo : vbos) {
+ vbo.clear_data();
+ }
+
+ GPU_INDEXBUF_DISCARD_SAFE(tri_index);
+ GPU_INDEXBUF_DISCARD_SAFE(lines_index);
+
+ tri_index = lines_index = nullptr;
+ faces_count = tris_count = count;
+ }
+ }
+ }
+
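+  /* Build the wire-frame index buffer for #PBVH_FACES. Triangles are drawn without an index
+   * buffer, so only real mesh edges (not edges added by triangulation) get line indices. */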
+ void create_index_faces(PBVH_GPU_Args *args)
+ {
+ int *mat_index = static_cast<int *>(
+ CustomData_get_layer_named(args->pdata, CD_PROP_INT32, "material_index"));
+
+ if (mat_index && args->totprim) {
+ int poly_index = args->mlooptri[args->prim_indices[0]].poly;
+ material_index = mat_index[poly_index];
+ }
+
+    /* Calculate the number of edges. */
+ int edge_count = 0;
+ for (int i = 0; i < args->totprim; i++) {
+ const MLoopTri *lt = args->mlooptri + args->prim_indices[i];
+
+ if (args->hide_poly && args->hide_poly[lt->poly]) {
+ continue;
+ }
+
+ int r_edges[3];
+ BKE_mesh_looptri_get_real_edges(args->me, lt, r_edges);
+
+ if (r_edges[0] != -1) {
+ edge_count++;
+ }
+ if (r_edges[1] != -1) {
+ edge_count++;
+ }
+ if (r_edges[2] != -1) {
+ edge_count++;
+ }
+ }
+
+ GPUIndexBufBuilder elb_lines;
+ GPU_indexbuf_init(&elb_lines, GPU_PRIM_LINES, edge_count * 2, INT_MAX);
+
+ int vertex_i = 0;
+ for (int i = 0; i < args->totprim; i++) {
+ const MLoopTri *lt = args->mlooptri + args->prim_indices[i];
+
+ if (args->hide_poly && args->hide_poly[lt->poly]) {
+ continue;
+ }
+
+ int r_edges[3];
+ BKE_mesh_looptri_get_real_edges(args->me, lt, r_edges);
+
+ if (r_edges[0] != -1) {
+ GPU_indexbuf_add_line_verts(&elb_lines, vertex_i, vertex_i + 1);
+ }
+ if (r_edges[1] != -1) {
+ GPU_indexbuf_add_line_verts(&elb_lines, vertex_i + 1, vertex_i + 2);
+ }
+ if (r_edges[2] != -1) {
+ GPU_indexbuf_add_line_verts(&elb_lines, vertex_i + 2, vertex_i);
+ }
+
+ vertex_i += 3;
+ }
+
+ lines_index = GPU_indexbuf_build(&elb_lines);
+ }
+
+ void create_index_bmesh(PBVH_GPU_Args *args)
+ {
+ GPUIndexBufBuilder elb_lines;
+ GPU_indexbuf_init(&elb_lines, GPU_PRIM_LINES, tris_count * 3 * 2, INT_MAX);
+
+ int v_index = 0;
+ lines_count = 0;
+
+ GSET_FOREACH_BEGIN (BMFace *, f, args->bm_faces) {
+ if (BM_elem_flag_test(f, BM_ELEM_HIDDEN)) {
+ continue;
+ }
+
+ GPU_indexbuf_add_line_verts(&elb_lines, v_index, v_index + 1);
+ GPU_indexbuf_add_line_verts(&elb_lines, v_index + 1, v_index + 2);
+ GPU_indexbuf_add_line_verts(&elb_lines, v_index + 2, v_index);
+
+ lines_count += 3;
+ v_index += 3;
+ }
+ GSET_FOREACH_END();
+
+ lines_index = GPU_indexbuf_build(&elb_lines);
+ }
+
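+  /* Build triangle and line index buffers for #PBVH_GRIDS, and decide whether the VBOs can use
+   * the shared-vertex (indexed) grid layout or need four vertices per quad. */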
+ void create_index_grids(PBVH_GPU_Args *args)
+ {
+ int *mat_index = static_cast<int *>(
+ CustomData_get_layer_named(args->pdata, CD_PROP_INT32, "material_index"));
+
+ if (mat_index && args->totprim) {
+ int poly_index = BKE_subdiv_ccg_grid_to_face_index(args->subdiv_ccg, args->grid_indices[0]);
+ material_index = mat_index[poly_index];
+ }
+
+ needs_tri_index = true;
+ int gridsize = args->ccg_key.grid_size;
+ int totgrid = args->totprim;
+
+ for (int i : IndexRange(args->totprim)) {
+ int grid_index = args->grid_indices[i];
+ bool smooth = args->grid_flag_mats[grid_index].flag & ME_SMOOTH;
+ BLI_bitmap *gh = args->grid_hidden[grid_index];
+
+ for (int y = 0; y < gridsize - 1; y++) {
+ for (int x = 0; x < gridsize - 1; x++) {
+ if (gh && paint_is_grid_face_hidden(gh, gridsize, x, y)) {
+            /* Treat grids with hidden faces as smooth: they do not disable the indexed layout. */
+ smooth = true;
+ goto outer_loop_break;
+ }
+ }
+ }
+
+ outer_loop_break:
+
+ if (!smooth) {
+ needs_tri_index = false;
+ break;
+ }
+ }
+
+ GPUIndexBufBuilder elb, elb_lines;
+
+ CCGKey *key = &args->ccg_key;
+
+ uint visible_quad_len = BKE_pbvh_count_grid_quads(
+ (BLI_bitmap **)args->grid_hidden, args->grid_indices, totgrid, key->grid_size);
+
+ GPU_indexbuf_init(&elb, GPU_PRIM_TRIS, 2 * visible_quad_len, INT_MAX);
+ GPU_indexbuf_init(
+ &elb_lines, GPU_PRIM_LINES, 2 * totgrid * gridsize * (gridsize - 1), INT_MAX);
+
+ if (needs_tri_index) {
+ uint offset = 0;
+ const uint grid_vert_len = gridsize * gridsize;
+ for (int i = 0; i < totgrid; i++, offset += grid_vert_len) {
+ uint v0, v1, v2, v3;
+ bool grid_visible = false;
+
+ BLI_bitmap *gh = args->grid_hidden[args->grid_indices[i]];
+
+ for (int j = 0; j < gridsize - 1; j++) {
+ for (int k = 0; k < gridsize - 1; k++) {
+ /* Skip hidden grid face */
+ if (gh && paint_is_grid_face_hidden(gh, gridsize, k, j)) {
+ continue;
+ }
+ /* Indices in a Clockwise QUAD disposition. */
+ v0 = offset + j * gridsize + k;
+ v1 = v0 + 1;
+ v2 = v1 + gridsize;
+ v3 = v2 - 1;
+
+ GPU_indexbuf_add_tri_verts(&elb, v0, v2, v1);
+ GPU_indexbuf_add_tri_verts(&elb, v0, v3, v2);
+
+ GPU_indexbuf_add_line_verts(&elb_lines, v0, v1);
+ GPU_indexbuf_add_line_verts(&elb_lines, v0, v3);
+
+ if (j + 2 == gridsize) {
+ GPU_indexbuf_add_line_verts(&elb_lines, v2, v3);
+ }
+ grid_visible = true;
+ }
+
+ if (grid_visible) {
+ GPU_indexbuf_add_line_verts(&elb_lines, v1, v2);
+ }
+ }
+ }
+ }
+ else {
+ uint offset = 0;
+ const uint grid_vert_len = square_uint(gridsize - 1) * 4;
+ for (int i = 0; i < totgrid; i++, offset += grid_vert_len) {
+ bool grid_visible = false;
+ BLI_bitmap *gh = args->grid_hidden[args->grid_indices[i]];
+
+ uint v0, v1, v2, v3;
+ for (int j = 0; j < gridsize - 1; j++) {
+ for (int k = 0; k < gridsize - 1; k++) {
+ /* Skip hidden grid face */
+ if (gh && paint_is_grid_face_hidden(gh, gridsize, k, j)) {
+ continue;
+ }
+ /* VBO data are in a Clockwise QUAD disposition. */
+ v0 = offset + (j * (gridsize - 1) + k) * 4;
+ v1 = v0 + 1;
+ v2 = v0 + 2;
+ v3 = v0 + 3;
+
+ GPU_indexbuf_add_tri_verts(&elb, v0, v2, v1);
+ GPU_indexbuf_add_tri_verts(&elb, v0, v3, v2);
+
+ GPU_indexbuf_add_line_verts(&elb_lines, v0, v1);
+ GPU_indexbuf_add_line_verts(&elb_lines, v0, v3);
+
+ if (j + 2 == gridsize) {
+ GPU_indexbuf_add_line_verts(&elb_lines, v2, v3);
+ }
+ grid_visible = true;
+ }
+
+ if (grid_visible) {
+ GPU_indexbuf_add_line_verts(&elb_lines, v1, v2);
+ }
+ }
+ }
+ }
+
+ tri_index = GPU_indexbuf_build(&elb);
+ lines_index = GPU_indexbuf_build(&elb_lines);
+ }
+
+ void create_index(PBVH_GPU_Args *args)
+ {
+ switch (args->pbvh_type) {
+ case PBVH_FACES:
+ create_index_faces(args);
+ break;
+ case PBVH_BMESH:
+ create_index_bmesh(args);
+ break;
+ case PBVH_GRIDS:
+ create_index_grids(args);
+ break;
+ }
+
+ for (PBVHBatch &batch : batches.values()) {
+ if (tri_index) {
+ GPU_batch_elembuf_set(batch.tris, tri_index, false);
+ }
+ else {
+ /* Still flag the batch as dirty even if we're using the default index layout. */
+ batch.tris->flag |= GPU_BATCH_DIRTY;
+ }
+
+ if (lines_index) {
+ GPU_batch_elembuf_set(batch.lines, lines_index, false);
+ }
+ }
+ }
+
+ void check_index_buffers(PBVH_GPU_Args *args)
+ {
+ if (!lines_index) {
+ create_index(args);
+ }
+ }
+
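+  /* Create the tri/line batches for a set of requested attributes, creating any VBOs that do
+   * not exist yet and attaching the shared index buffers. */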
+ void create_batch(PBVHAttrReq *attrs, int attrs_num, PBVH_GPU_Args *args)
+ {
+ check_index_buffers(args);
+
+ PBVHBatch batch;
+
+ batch.tris = GPU_batch_create(GPU_PRIM_TRIS,
+ nullptr,
+                                  /* Can be nullptr if buffer is empty. */
+ tri_index);
+ batch.tris_count = tris_count;
+
+ if (lines_index) {
+ batch.lines = GPU_batch_create(GPU_PRIM_LINES, nullptr, lines_index);
+ batch.lines_count = lines_count;
+ }
+
+ for (int i : IndexRange(attrs_num)) {
+ PBVHAttrReq *attr = attrs + i;
+
+ if (!has_vbo(attr->domain, int(attr->type), attr->name)) {
+ create_vbo(attr->domain, uint32_t(attr->type), attr->name, args);
+ }
+
+ PBVHVbo *vbo = get_vbo(attr->domain, uint32_t(attr->type), attr->name);
+ int vbo_i = get_vbo_index(vbo);
+
+ batch.vbos.append(vbo_i);
+ GPU_batch_vertbuf_add_ex(batch.tris, vbo->vert_buf, false);
+
+ if (batch.lines) {
+ GPU_batch_vertbuf_add_ex(batch.lines, vbo->vert_buf, false);
+ }
+ }
+
+ batch.build_key(vbos);
+ batches.add(batch.key, batch);
+ }
+};
+
+void DRW_pbvh_node_update(PBVHBatches *batches, PBVH_GPU_Args *args)
+{
+ batches->update(args);
+}
+
+void DRW_pbvh_node_gpu_flush(PBVHBatches *batches)
+{
+ batches->gpu_flush();
+}
+
+PBVHBatches *DRW_pbvh_node_create(PBVH_GPU_Args *args)
+{
+ PBVHBatches *batches = new PBVHBatches(args);
+ return batches;
+}
+
+void DRW_pbvh_node_free(PBVHBatches *batches)
+{
+ delete batches;
+}
+
+GPUBatch *DRW_pbvh_tris_get(PBVHBatches *batches,
+ PBVHAttrReq *attrs,
+ int attrs_num,
+ PBVH_GPU_Args *args,
+ int *r_prim_count)
+{
+ PBVHBatch &batch = batches->ensure_batch(attrs, attrs_num, args);
+
+ *r_prim_count = batch.tris_count;
+
+ return batch.tris;
+}
+
+GPUBatch *DRW_pbvh_lines_get(PBVHBatches *batches,
+ PBVHAttrReq *attrs,
+ int attrs_num,
+ PBVH_GPU_Args *args,
+ int *r_prim_count)
+{
+ PBVHBatch &batch = batches->ensure_batch(attrs, attrs_num, args);
+
+ *r_prim_count = batch.lines_count;
+
+ return batch.lines;
+}
+
+void DRW_pbvh_update_pre(struct PBVHBatches *batches, struct PBVH_GPU_Args *args)
+{
+ batches->update_pre(args);
+}
+
+int drw_pbvh_material_index_get(struct PBVHBatches *batches)
+{
+ return batches->material_index;
+}
diff --git a/source/blender/draw/intern/draw_pbvh.h b/source/blender/draw/intern/draw_pbvh.h
new file mode 100644
index 00000000000..9e3401d864f
--- /dev/null
+++ b/source/blender/draw/intern/draw_pbvh.h
@@ -0,0 +1,24 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+
+#pragma once
+
+#include "DNA_customdata_types.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+struct PBVHBatches;
+
+enum {
+ CD_PBVH_CO_TYPE = CD_NUMTYPES,
+ CD_PBVH_NO_TYPE = CD_NUMTYPES + 1,
+ CD_PBVH_FSET_TYPE = CD_NUMTYPES + 2,
+ CD_PBVH_MASK_TYPE = CD_NUMTYPES + 3
+};
+
+int drw_pbvh_material_index_get(struct PBVHBatches *batches);
+
+#ifdef __cplusplus
+}
+#endif
diff --git a/source/blender/draw/intern/draw_pointcloud.cc b/source/blender/draw/intern/draw_pointcloud.cc
new file mode 100644
index 00000000000..582dc690cee
--- /dev/null
+++ b/source/blender/draw/intern/draw_pointcloud.cc
@@ -0,0 +1,103 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later
+ * Copyright 2017 Blender Foundation. All rights reserved. */
+
+/** \file
+ * \ingroup draw
+ *
+ * \brief Contains procedural GPU point cloud drawing methods.
+ */
+
+#include "BLI_string_utils.h"
+#include "BLI_utildefines.h"
+
+#include "DNA_customdata_types.h"
+#include "DNA_pointcloud_types.h"
+
+#include "BKE_curves.hh"
+#include "BKE_geometry_set.hh"
+
+#include "GPU_batch.h"
+#include "GPU_capabilities.h"
+#include "GPU_compute.h"
+#include "GPU_material.h"
+#include "GPU_shader.h"
+#include "GPU_texture.h"
+#include "GPU_vertex_buffer.h"
+
+#include "DRW_gpu_wrapper.hh"
+#include "DRW_render.h"
+
+#include "draw_attributes.h"
+#include "draw_cache_impl.h"
+#include "draw_common.h"
+#include "draw_manager.h"
+#include "draw_pointcloud_private.hh"
+
+static GPUVertBuf *g_dummy_vbo = nullptr;
+
+void DRW_pointcloud_init()
+{
+ if (g_dummy_vbo == nullptr) {
+    /* Initialize vertex format. */
+ GPUVertFormat format = {0};
+ uint dummy_id = GPU_vertformat_attr_add(&format, "dummy", GPU_COMP_F32, 4, GPU_FETCH_FLOAT);
+
+ g_dummy_vbo = GPU_vertbuf_create_with_format_ex(
+ &format, GPU_USAGE_STATIC | GPU_USAGE_FLAG_BUFFER_TEXTURE_ONLY);
+
+ const float vert[4] = {0.0f, 0.0f, 0.0f, 0.0f};
+ GPU_vertbuf_data_alloc(g_dummy_vbo, 1);
+ GPU_vertbuf_attr_fill(g_dummy_vbo, dummy_id, vert);
+ }
+}
+
+DRWShadingGroup *DRW_shgroup_pointcloud_create_sub(Object *object,
+ DRWShadingGroup *shgrp_parent,
+ GPUMaterial *gpu_material)
+{
+ PointCloud &pointcloud = *static_cast<PointCloud *>(object->data);
+
+ DRWShadingGroup *shgrp = DRW_shgroup_create_sub(shgrp_parent);
+
+  /* Fix an issue with certain drivers not drawing anything if there is no texture bound to
+   * "ac", "au", "u" or "c". */
+ DRW_shgroup_buffer_texture(shgrp, "u", g_dummy_vbo);
+ DRW_shgroup_buffer_texture(shgrp, "au", g_dummy_vbo);
+ DRW_shgroup_buffer_texture(shgrp, "c", g_dummy_vbo);
+ DRW_shgroup_buffer_texture(shgrp, "ac", g_dummy_vbo);
+
+ GPUVertBuf *pos_rad_buf = pointcloud_position_and_radius_get(&pointcloud);
+ DRW_shgroup_buffer_texture(shgrp, "ptcloud_pos_rad_tx", pos_rad_buf);
+
+ if (gpu_material != nullptr) {
+
+ // const DRW_Attributes &attrs = cache->attr_used;
+ // for (int i = 0; i < attrs.num_requests; i++) {
+ // const DRW_AttributeRequest &request = attrs.requests[i];
+
+ // char sampler_name[32];
+ // /* \note reusing curve attribute function. */
+ // drw_curves_get_attribute_sampler_name(request.attribute_name, sampler_name);
+
+ // GPUTexture *attribute_buf = DRW_pointcloud_evaluated_attribute(&pointcloud);
+ // if (!cache->attributes_buf[i]) {
+ // continue;
+ // }
+ // DRW_shgroup_buffer_texture_ref(shgrp, sampler_name, attribute_buf);
+ // }
+
+    /* Only a single material is supported for now. */
+ GPUBatch **geom = pointcloud_surface_shaded_get(&pointcloud, &gpu_material, 1);
+ DRW_shgroup_call(shgrp, geom[0], object);
+ }
+ else {
+ GPUBatch *geom = pointcloud_surface_get(&pointcloud);
+ DRW_shgroup_call(shgrp, geom, object);
+ }
+ return shgrp;
+}
+
+void DRW_pointcloud_free()
+{
+ GPU_VERTBUF_DISCARD_SAFE(g_dummy_vbo);
+}
diff --git a/source/blender/draw/intern/draw_pointcloud_private.hh b/source/blender/draw/intern/draw_pointcloud_private.hh
new file mode 100644
index 00000000000..9422d7fbc99
--- /dev/null
+++ b/source/blender/draw/intern/draw_pointcloud_private.hh
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later
+ * Copyright 2021 Blender Foundation. All rights reserved. */
+
+/** \file
+ * \ingroup draw
+ */
+
+#pragma once
+
+struct PointCloud;
+struct GPUBatch;
+struct GPUVertBuf;
+struct GPUMaterial;
+
+GPUVertBuf *pointcloud_position_and_radius_get(PointCloud *pointcloud);
+GPUBatch **pointcloud_surface_shaded_get(PointCloud *pointcloud,
+ GPUMaterial **gpu_materials,
+ int mat_len);
+GPUBatch *pointcloud_surface_get(PointCloud *pointcloud);
diff --git a/source/blender/draw/intern/draw_resource.cc b/source/blender/draw/intern/draw_resource.cc
index 689df4edb31..c1f83c3a5ae 100644
--- a/source/blender/draw/intern/draw_resource.cc
+++ b/source/blender/draw/intern/draw_resource.cc
@@ -19,58 +19,6 @@
* \{ */
/**
- * Extract object attribute from RNA property.
- * Returns true if the attribute was correctly extracted.
- * This function mirrors lookup_property in cycles/blender/blender_object.cpp
- */
-bool ObjectAttribute::id_property_lookup(ID *id, const char *name)
-{
- PointerRNA ptr, id_ptr;
- PropertyRNA *prop;
-
- if (id == nullptr) {
- return false;
- }
-
- RNA_id_pointer_create(id, &id_ptr);
-
- if (!RNA_path_resolve(&id_ptr, name, &ptr, &prop)) {
- return false;
- }
-
- if (prop == nullptr) {
- return false;
- }
-
- PropertyType type = RNA_property_type(prop);
- int array_len = RNA_property_array_length(&ptr, prop);
-
- if (array_len == 0) {
- float value;
-
- if (type == PROP_FLOAT) {
- value = RNA_property_float_get(&ptr, prop);
- }
- else if (type == PROP_INT) {
- value = RNA_property_int_get(&ptr, prop);
- }
- else {
- return false;
- }
-
- *reinterpret_cast<float4 *>(&data_x) = float4(value, value, value, 1.0f);
- return true;
- }
-
- if (type == PROP_FLOAT && array_len <= 4) {
- *reinterpret_cast<float4 *>(&data_x) = float4(0.0f, 0.0f, 0.0f, 1.0f);
- RNA_property_float_get_array(&ptr, prop, &data_x);
- return true;
- }
- return false;
-}
-
-/**
* Go through all possible source of the given object uniform attribute.
* Returns true if the attribute was correctly filled.
* This function mirrors lookup_instance_property in cycles/blender/blender_object.cpp
@@ -81,29 +29,25 @@ bool ObjectAttribute::sync(const blender::draw::ObjectRef &ref, const GPUUniform
/* If requesting instance data, check the parent particle system and object. */
if (attr.use_dupli) {
- if ((ref.dupli_object != nullptr) && (ref.dupli_object->particle_system != nullptr)) {
- ParticleSettings *settings = ref.dupli_object->particle_system->part;
- if (this->id_property_lookup((ID *)settings, attr.name_id_prop) ||
- this->id_property_lookup((ID *)settings, attr.name)) {
- return true;
- }
- }
- if (this->id_property_lookup((ID *)ref.dupli_parent, attr.name_id_prop) ||
- this->id_property_lookup((ID *)ref.dupli_parent, attr.name)) {
- return true;
- }
+ return BKE_object_dupli_find_rgba_attribute(
+ ref.object, ref.dupli_object, ref.dupli_parent, attr.name, &data_x);
}
-
- /* Check the object and mesh. */
- if (ref.object != nullptr) {
- if (this->id_property_lookup((ID *)ref.object, attr.name_id_prop) ||
- this->id_property_lookup((ID *)ref.object, attr.name) ||
- this->id_property_lookup((ID *)ref.object->data, attr.name_id_prop) ||
- this->id_property_lookup((ID *)ref.object->data, attr.name)) {
- return true;
- }
+ else {
+ return BKE_object_dupli_find_rgba_attribute(ref.object, nullptr, nullptr, attr.name, &data_x);
}
- return false;
+}
+
+/** \} */
+
+/* -------------------------------------------------------------------- */
+/** \name LayerAttributes
+ * \{ */
+
+bool LayerAttribute::sync(Scene *scene, ViewLayer *layer, const GPULayerAttr &attr)
+{
+ hash_code = attr.hash_code;
+
+ return BKE_view_layer_find_rgba_attribute(scene, layer, attr.name, &data.x);
}
/** \} */
diff --git a/source/blender/draw/intern/draw_resource.hh b/source/blender/draw/intern/draw_resource.hh
index 2df38e32ed2..b8a0dbb8fa9 100644
--- a/source/blender/draw/intern/draw_resource.hh
+++ b/source/blender/draw/intern/draw_resource.hh
@@ -31,8 +31,8 @@
inline void ObjectMatrices::sync(const Object &object)
{
- model = object.obmat;
- model_inverse = object.imat;
+ model = object.object_to_world;
+ model_inverse = object.world_to_object;
}
inline void ObjectMatrices::sync(const float4x4 &model_matrix)
diff --git a/source/blender/draw/intern/draw_shader.cc b/source/blender/draw/intern/draw_shader.cc
index 960348b4a94..dc08fa76335 100644
--- a/source/blender/draw/intern/draw_shader.cc
+++ b/source/blender/draw/intern/draw_shader.cc
@@ -32,13 +32,12 @@ static struct {
/** \name Hair refinement
* \{ */
-static GPUShader *hair_refine_shader_compute_create(ParticleRefineShader UNUSED(refinement))
+static GPUShader *hair_refine_shader_compute_create(ParticleRefineShader /*refinement*/)
{
return GPU_shader_create_from_info_name("draw_hair_refine_compute");
}
-static GPUShader *hair_refine_shader_transform_feedback_create(
- ParticleRefineShader UNUSED(refinement))
+static GPUShader *hair_refine_shader_transform_feedback_create(ParticleRefineShader /*refinement*/)
{
GPUShader *sh = nullptr;
@@ -58,7 +57,7 @@ static GPUShader *hair_refine_shader_transform_feedback_create(
}
static GPUShader *hair_refine_shader_transform_feedback_workaround_create(
- ParticleRefineShader UNUSED(refinement))
+ ParticleRefineShader /*refinement*/)
{
return GPU_shader_create_from_info_name("draw_hair_refine_transform_feedback_workaround");
}
diff --git a/source/blender/draw/intern/draw_shader_shared.h b/source/blender/draw/intern/draw_shader_shared.h
index bedbedcf438..75a7e28fa75 100644
--- a/source/blender/draw/intern/draw_shader_shared.h
+++ b/source/blender/draw/intern/draw_shader_shared.h
@@ -7,13 +7,15 @@
# include "GPU_shader_shared_utils.h"
# include "draw_defines.h"
-typedef struct ViewInfos ViewInfos;
+typedef struct ViewCullingData ViewCullingData;
+typedef struct ViewMatrices ViewMatrices;
typedef struct ObjectMatrices ObjectMatrices;
typedef struct ObjectInfos ObjectInfos;
typedef struct ObjectBounds ObjectBounds;
typedef struct VolumeInfos VolumeInfos;
typedef struct CurvesInfos CurvesInfos;
typedef struct ObjectAttribute ObjectAttribute;
+typedef struct LayerAttribute LayerAttribute;
typedef struct DrawCommand DrawCommand;
typedef struct DispatchCommand DispatchCommand;
typedef struct DRWDebugPrintBuffer DRWDebugPrintBuffer;
@@ -23,8 +25,10 @@ typedef struct DRWDebugDrawBuffer DRWDebugDrawBuffer;
# ifdef __cplusplus
/* C++ only forward declarations. */
struct Object;
+struct ViewLayer;
struct ID;
struct GPUUniformAttr;
+struct GPULayerAttr;
namespace blender::draw {
@@ -50,51 +54,30 @@ typedef enum eObjectInfoFlag eObjectInfoFlag;
* This should be kept in sync with `GPU_ATTR_MAX` */
#define DRW_ATTRIBUTE_PER_CURVES_MAX 15
-struct ViewInfos {
- /* View matrices */
- float4x4 persmat;
- float4x4 persinv;
+struct ViewCullingData {
+ /** \note vec3 array padded to vec4. */
+ /** Frustum corners. */
+ float4 corners[8];
+ float4 planes[6];
+ float4 bound_sphere;
+};
+BLI_STATIC_ASSERT_ALIGN(ViewCullingData, 16)
+
+struct ViewMatrices {
float4x4 viewmat;
float4x4 viewinv;
float4x4 winmat;
float4x4 wininv;
-
- float4 clip_planes[6];
- float4 viewvecs[2];
- /* Should not be here. Not view dependent (only main view). */
- float4 viewcamtexcofac;
-
- float2 viewport_size;
- float2 viewport_size_inverse;
-
- /** Frustum culling data. */
- /** \note vec3 array padded to vec4. */
- float4 frustum_corners[8];
- float4 frustum_planes[6];
- float4 frustum_bound_sphere;
-
- /** For debugging purpose */
- /* Mouse pixel. */
- int2 mouse_pixel;
-
- /** True if facing needs to be inverted. */
- bool1 is_inverted;
- int _pad0;
};
-BLI_STATIC_ASSERT_ALIGN(ViewInfos, 16)
+BLI_STATIC_ASSERT_ALIGN(ViewMatrices, 16)
/* Do not override old definitions if the shader uses this header but not shader info. */
#ifdef USE_GPU_SHADER_CREATE_INFO
/* TODO(@fclem): Mass rename. */
-# define ViewProjectionMatrix drw_view.persmat
-# define ViewProjectionMatrixInverse drw_view.persinv
# define ViewMatrix drw_view.viewmat
# define ViewMatrixInverse drw_view.viewinv
# define ProjectionMatrix drw_view.winmat
# define ProjectionMatrixInverse drw_view.wininv
-# define clipPlanes drw_view.clip_planes
-# define ViewVecs drw_view.viewvecs
-# define CameraTexCoFactors drw_view.viewcamtexcofac
#endif
/** \} */
@@ -205,7 +188,6 @@ struct ObjectAttribute {
#if !defined(GPU_SHADER) && defined(__cplusplus)
bool sync(const blender::draw::ObjectRef &ref, const GPUUniformAttr &attr);
- bool id_property_lookup(ID *id, const char *name);
#endif
};
#pragma pack(pop)
@@ -213,6 +195,20 @@ struct ObjectAttribute {
* C++ compiler gives us the same size. */
BLI_STATIC_ASSERT_ALIGN(ObjectAttribute, 20)
+#pragma pack(push, 4)
+struct LayerAttribute {
+ float4 data;
+ uint hash_code;
+ uint buffer_length; /* Only in the first record. */
+ uint _pad1, _pad2;
+
+#if !defined(GPU_SHADER) && defined(__cplusplus)
+ bool sync(Scene *scene, ViewLayer *layer, const GPULayerAttr &attr);
+#endif
+};
+#pragma pack(pop)
+BLI_STATIC_ASSERT_ALIGN(LayerAttribute, 32)
+
/** \} */
/* -------------------------------------------------------------------- */
diff --git a/source/blender/draw/intern/draw_view.cc b/source/blender/draw/intern/draw_view.cc
index cb0e1370c28..cf86558f80f 100644
--- a/source/blender/draw/intern/draw_view.cc
+++ b/source/blender/draw/intern/draw_view.cc
@@ -21,17 +21,11 @@ void View::sync(const float4x4 &view_mat, const float4x4 &win_mat)
data_.viewinv = view_mat.inverted();
data_.winmat = win_mat;
data_.wininv = win_mat.inverted();
- data_.persmat = data_.winmat * data_.viewmat;
- data_.persinv = data_.persmat.inverted();
- /* Should not be used anymore. */
- data_.viewcamtexcofac = float4(1.0f, 1.0f, 0.0f, 0.0f);
- data_.is_inverted = (is_negative_m4(view_mat.ptr()) == is_negative_m4(win_mat.ptr()));
+ is_inverted_ = (is_negative_m4(view_mat.ptr()) == is_negative_m4(win_mat.ptr()));
- update_view_vectors();
-
- BoundBox &bound_box = *reinterpret_cast<BoundBox *>(&data_.frustum_corners);
- BoundSphere &bound_sphere = *reinterpret_cast<BoundSphere *>(&data_.frustum_bound_sphere);
+ BoundBox &bound_box = *reinterpret_cast<BoundBox *>(&culling_.corners);
+ BoundSphere &bound_sphere = *reinterpret_cast<BoundSphere *>(&culling_.bound_sphere);
frustum_boundbox_calc(bound_box);
frustum_culling_planes_calc();
frustum_culling_sphere_calc(bound_box, bound_sphere);
@@ -83,17 +77,18 @@ void View::frustum_boundbox_calc(BoundBox &bbox)
void View::frustum_culling_planes_calc()
{
- planes_from_projmat(data_.persmat.ptr(),
- data_.frustum_planes[0],
- data_.frustum_planes[5],
- data_.frustum_planes[1],
- data_.frustum_planes[3],
- data_.frustum_planes[4],
- data_.frustum_planes[2]);
+ float4x4 persmat = data_.winmat * data_.viewmat;
+ planes_from_projmat(persmat.ptr(),
+ culling_.planes[0],
+ culling_.planes[5],
+ culling_.planes[1],
+ culling_.planes[3],
+ culling_.planes[4],
+ culling_.planes[2]);
/* Normalize. */
for (int p = 0; p < 6; p++) {
- data_.frustum_planes[p].w /= normalize_v3(data_.frustum_planes[p]);
+ culling_.planes[p].w /= normalize_v3(culling_.planes[p]);
}
}
@@ -208,97 +203,30 @@ void View::frustum_culling_sphere_calc(const BoundBox &bbox, BoundSphere &bspher
}
}
-void View::set_clip_planes(Span<float4> planes)
-{
- BLI_assert(planes.size() <= ARRAY_SIZE(data_.clip_planes));
- int i = 0;
- for (const auto &plane : planes) {
- data_.clip_planes[i++] = plane;
- }
-}
-
-void View::update_viewport_size()
-{
- float4 viewport;
- GPU_viewport_size_get_f(viewport);
- float2 viewport_size = float2(viewport.z, viewport.w);
- if (assign_if_different(data_.viewport_size, viewport_size)) {
- dirty_ = true;
- }
-}
-
-void View::update_view_vectors()
-{
- bool is_persp = data_.winmat[3][3] == 0.0f;
-
- /* Near clip distance. */
- data_.viewvecs[0][3] = (is_persp) ? -data_.winmat[3][2] / (data_.winmat[2][2] - 1.0f) :
- -(data_.winmat[3][2] + 1.0f) / data_.winmat[2][2];
-
- /* Far clip distance. */
- data_.viewvecs[1][3] = (is_persp) ? -data_.winmat[3][2] / (data_.winmat[2][2] + 1.0f) :
- -(data_.winmat[3][2] - 1.0f) / data_.winmat[2][2];
-
- /* View vectors for the corners of the view frustum.
- * Can be used to recreate the world space position easily */
- float3 view_vecs[4] = {
- {-1.0f, -1.0f, -1.0f},
- {1.0f, -1.0f, -1.0f},
- {-1.0f, 1.0f, -1.0f},
- {-1.0f, -1.0f, 1.0f},
- };
-
- /* Convert the view vectors to view space */
- for (int i = 0; i < 4; i++) {
- mul_project_m4_v3(data_.wininv.ptr(), view_vecs[i]);
- /* Normalized trick see:
- * http://www.derschmale.com/2014/01/26/reconstructing-positions-from-the-depth-buffer */
- if (is_persp) {
- view_vecs[i].x /= view_vecs[i].z;
- view_vecs[i].y /= view_vecs[i].z;
- }
- }
-
- /**
- * - If orthographic:
- * `view_vecs[0]` is the near-bottom-left corner of the frustum and
- * `view_vecs[1]` is the vector going from the near-bottom-left corner to
- * the far-top-right corner.
- * - If perspective:
- * `view_vecs[0].xy` and `view_vecs[1].xy` are respectively the bottom-left corner
- * when `Z = 1`, and top-left corner if `Z = 1`.
- * `view_vecs[0].z` the near clip distance and `view_vecs[1].z` is the (signed)
- * distance from the near plane to the far clip plane.
- */
- copy_v3_v3(data_.viewvecs[0], view_vecs[0]);
-
- /* we need to store the differences */
- data_.viewvecs[1][0] = view_vecs[1][0] - view_vecs[0][0];
- data_.viewvecs[1][1] = view_vecs[2][1] - view_vecs[0][1];
- data_.viewvecs[1][2] = view_vecs[3][2] - view_vecs[0][2];
-}
-
void View::bind()
{
- update_viewport_size();
-
if (dirty_) {
dirty_ = false;
data_.push_update();
+ culling_.push_update();
}
GPU_uniformbuf_bind(data_, DRW_VIEW_UBO_SLOT);
+ GPU_uniformbuf_bind(culling_, DRW_VIEW_CULLING_UBO_SLOT);
}
void View::compute_visibility(ObjectBoundsBuf &bounds, uint resource_len, bool debug_freeze)
{
if (debug_freeze && frozen_ == false) {
- data_freeze_ = static_cast<ViewInfos>(data_);
+ data_freeze_ = static_cast<ViewMatrices>(data_);
data_freeze_.push_update();
+ culling_freeze_ = static_cast<ViewCullingData>(culling_);
+ culling_freeze_.push_update();
}
#ifdef DEBUG
if (debug_freeze) {
- drw_debug_matrix_as_bbox(data_freeze_.persinv, float4(0, 1, 0, 1));
+ float4x4 persmat = data_freeze_.winmat * data_freeze_.viewmat;
+ drw_debug_matrix_as_bbox(persmat.inverted(), float4(0, 1, 0, 1));
}
#endif
frozen_ = debug_freeze;
diff --git a/source/blender/draw/intern/draw_view.hh b/source/blender/draw/intern/draw_view.hh
index 27e7a7a0028..94fb62508bb 100644
--- a/source/blender/draw/intern/draw_view.hh
+++ b/source/blender/draw/intern/draw_view.hh
@@ -25,14 +25,17 @@ class View {
friend Manager;
private:
- UniformBuffer<ViewInfos> data_;
+ UniformBuffer<ViewMatrices> data_;
+ UniformBuffer<ViewCullingData> culling_;
/** Frozen version of data_ used for debugging culling. */
- UniformBuffer<ViewInfos> data_freeze_;
+ UniformBuffer<ViewMatrices> data_freeze_;
+ UniformBuffer<ViewCullingData> culling_freeze_;
/** Result of the visibility computation. 1 bit per resource ID. */
VisibilityBuf visibility_buf_;
const char *debug_name_;
+ bool is_inverted_ = false;
bool do_visibility_ = true;
bool dirty_ = true;
bool frozen_ = false;
@@ -48,8 +51,6 @@ class View {
this->sync(view_mat, win_mat);
}
- void set_clip_planes(Span<float4> planes);
-
void sync(const float4x4 &view_mat, const float4x4 &win_mat);
bool is_persp() const
@@ -59,7 +60,7 @@ class View {
bool is_inverted() const
{
- return data_.is_inverted;
+ return is_inverted_;
}
float far_clip() const
@@ -78,12 +79,31 @@ class View {
return -(data_.winmat[3][2] + 1.0f) / data_.winmat[2][2];
}
+ const float4x4 &viewmat() const
+ {
+ return data_.viewmat;
+ }
+
+ const float4x4 &viewinv() const
+ {
+ return data_.viewinv;
+ }
+
+ const float4x4 &winmat() const
+ {
+ return data_.winmat;
+ }
+
+ const float4x4 &wininv() const
+ {
+ return data_.wininv;
+ }
+
private:
/** Called from draw manager. */
void bind();
void compute_visibility(ObjectBoundsBuf &bounds, uint resource_len, bool debug_freeze);
- void update_view_vectors();
void update_viewport_size();
void frustum_boundbox_calc(BoundBox &bbox);
diff --git a/source/blender/draw/intern/draw_volume.cc b/source/blender/draw/intern/draw_volume.cc
index 8f4383a98d8..5c1ce7c3111 100644
--- a/source/blender/draw/intern/draw_volume.cc
+++ b/source/blender/draw/intern/draw_volume.cc
@@ -127,7 +127,7 @@ static DRWShadingGroup *drw_volume_object_grids_init(Object *ob,
grp = DRW_shgroup_create_sub(grp);
- volume_infos.density_scale = BKE_volume_density_scale(volume, ob->obmat);
+ volume_infos.density_scale = BKE_volume_density_scale(volume, ob->object_to_world);
volume_infos.color_mul = float4(1.0f);
volume_infos.temperature_mul = 1.0f;
volume_infos.temperature_bias = 0.0f;
@@ -184,7 +184,7 @@ static DRWShadingGroup *drw_volume_object_mesh_init(Scene *scene,
/* Smoke Simulation */
if ((md = BKE_modifiers_findby_type(ob, eModifierType_Fluid)) &&
- (BKE_modifier_is_enabled(scene, md, eModifierMode_Realtime)) &&
+ BKE_modifier_is_enabled(scene, md, eModifierMode_Realtime) &&
((FluidModifierData *)md)->domain != nullptr) {
FluidModifierData *fmd = (FluidModifierData *)md;
FluidDomainSettings *fds = fmd->domain;
diff --git a/source/blender/draw/intern/mesh_extractors/extract_mesh.hh b/source/blender/draw/intern/mesh_extractors/extract_mesh.hh
index 10b94291e35..c6230e2695e 100644
--- a/source/blender/draw/intern/mesh_extractors/extract_mesh.hh
+++ b/source/blender/draw/intern/mesh_extractors/extract_mesh.hh
@@ -16,6 +16,8 @@
#include "BKE_customdata.h"
#include "BKE_editmesh.h"
+#include "BKE_editmesh_cache.h"
+#include "BKE_mesh.h"
#include "draw_cache_extract.hh"
@@ -78,14 +80,17 @@ struct MeshRenderData {
BMEdge *eed_act;
BMFace *efa_act;
BMFace *efa_act_uv;
- /* Data created on-demand (usually not for #BMesh based data). */
- MLoopTri *mlooptri;
+ /* The triangulation of #Mesh polygons, owned by the mesh. */
+ const MLoopTri *mlooptri;
const int *material_indices;
const float (*vert_normals)[3];
const float (*poly_normals)[3];
const bool *hide_vert;
const bool *hide_edge;
const bool *hide_poly;
+ const bool *select_vert;
+ const bool *select_edge;
+ const bool *select_poly;
float (*loop_normals)[3];
int *lverts, *ledges;
@@ -113,7 +118,7 @@ BLI_INLINE const Mesh *editmesh_final_or_this(const Object *object, const Mesh *
BLI_INLINE const CustomData *mesh_cd_ldata_get_from_mesh(const Mesh *me)
{
- switch ((eMeshWrapperType)me->runtime.wrapper_type) {
+ switch (me->runtime->wrapper_type) {
case ME_WRAPPER_TYPE_SUBD:
case ME_WRAPPER_TYPE_MDATA:
return &me->ldata;
@@ -129,7 +134,7 @@ BLI_INLINE const CustomData *mesh_cd_ldata_get_from_mesh(const Mesh *me)
BLI_INLINE const CustomData *mesh_cd_pdata_get_from_mesh(const Mesh *me)
{
- switch ((eMeshWrapperType)me->runtime.wrapper_type) {
+ switch (me->runtime->wrapper_type) {
case ME_WRAPPER_TYPE_SUBD:
case ME_WRAPPER_TYPE_MDATA:
return &me->pdata;
@@ -145,7 +150,7 @@ BLI_INLINE const CustomData *mesh_cd_pdata_get_from_mesh(const Mesh *me)
BLI_INLINE const CustomData *mesh_cd_edata_get_from_mesh(const Mesh *me)
{
- switch ((eMeshWrapperType)me->runtime.wrapper_type) {
+ switch (me->runtime->wrapper_type) {
case ME_WRAPPER_TYPE_SUBD:
case ME_WRAPPER_TYPE_MDATA:
return &me->edata;
@@ -161,7 +166,7 @@ BLI_INLINE const CustomData *mesh_cd_edata_get_from_mesh(const Mesh *me)
BLI_INLINE const CustomData *mesh_cd_vdata_get_from_mesh(const Mesh *me)
{
- switch ((eMeshWrapperType)me->runtime.wrapper_type) {
+ switch (me->runtime->wrapper_type) {
case ME_WRAPPER_TYPE_SUBD:
case ME_WRAPPER_TYPE_MDATA:
return &me->vdata;
@@ -437,3 +442,4 @@ extern const MeshExtract extract_edge_idx;
extern const MeshExtract extract_vert_idx;
extern const MeshExtract extract_fdot_idx;
extern const MeshExtract extract_attr[GPU_MAX_ATTR];
+extern const MeshExtract extract_attr_viewer;
diff --git a/source/blender/draw/intern/mesh_extractors/extract_mesh_ibo_edituv.cc b/source/blender/draw/intern/mesh_extractors/extract_mesh_ibo_edituv.cc
index 2f2e59c8c3b..e40503a9707 100644
--- a/source/blender/draw/intern/mesh_extractors/extract_mesh_ibo_edituv.cc
+++ b/source/blender/draw/intern/mesh_extractors/extract_mesh_ibo_edituv.cc
@@ -22,8 +22,8 @@ struct MeshExtract_EditUvElem_Data {
};
static void extract_edituv_tris_init(const MeshRenderData *mr,
- MeshBatchCache *UNUSED(cache),
- void *UNUSED(ibo),
+ MeshBatchCache * /*cache*/,
+ void * /*ibo*/,
void *tls_data)
{
MeshExtract_EditUvElem_Data *data = static_cast<MeshExtract_EditUvElem_Data *>(tls_data);
@@ -39,9 +39,9 @@ BLI_INLINE void edituv_tri_add(
}
}
-static void extract_edituv_tris_iter_looptri_bm(const MeshRenderData *UNUSED(mr),
+static void extract_edituv_tris_iter_looptri_bm(const MeshRenderData * /*mr*/,
BMLoop **elt,
- const int UNUSED(elt_index),
+ const int /*elt_index*/,
void *_data)
{
MeshExtract_EditUvElem_Data *data = static_cast<MeshExtract_EditUvElem_Data *>(_data);
@@ -55,7 +55,7 @@ static void extract_edituv_tris_iter_looptri_bm(const MeshRenderData *UNUSED(mr)
static void extract_edituv_tris_iter_looptri_mesh(const MeshRenderData *mr,
const MLoopTri *mlt,
- const int UNUSED(elt_index),
+ const int /*elt_index*/,
void *_data)
{
MeshExtract_EditUvElem_Data *data = static_cast<MeshExtract_EditUvElem_Data *>(_data);
@@ -66,8 +66,8 @@ static void extract_edituv_tris_iter_looptri_mesh(const MeshRenderData *mr,
edituv_tri_add(data, mp_hidden, mp_select, mlt->tri[0], mlt->tri[1], mlt->tri[2]);
}
-static void extract_edituv_tris_finish(const MeshRenderData *UNUSED(mr),
- MeshBatchCache *UNUSED(cache),
+static void extract_edituv_tris_finish(const MeshRenderData * /*mr*/,
+ MeshBatchCache * /*cache*/,
void *buf,
void *_data)
{
@@ -78,8 +78,8 @@ static void extract_edituv_tris_finish(const MeshRenderData *UNUSED(mr),
static void extract_edituv_tris_init_subdiv(const DRWSubdivCache *subdiv_cache,
const MeshRenderData *mr,
- MeshBatchCache *UNUSED(cache),
- void *UNUSED(buf),
+ MeshBatchCache * /*cache*/,
+ void * /*buf*/,
void *tls_data)
{
MeshExtract_EditUvElem_Data *data = static_cast<MeshExtract_EditUvElem_Data *>(tls_data);
@@ -90,8 +90,8 @@ static void extract_edituv_tris_init_subdiv(const DRWSubdivCache *subdiv_cache,
data->sync_selection = (mr->toolsettings->uv_flag & UV_SYNC_SELECTION) != 0;
}
-static void extract_edituv_tris_iter_subdiv_bm(const DRWSubdivCache *UNUSED(subdiv_cache),
- const MeshRenderData *UNUSED(mr),
+static void extract_edituv_tris_iter_subdiv_bm(const DRWSubdivCache * /*subdiv_cache*/,
+ const MeshRenderData * /*mr*/,
void *_data,
uint subdiv_quad_index,
const BMFace *coarse_quad)
@@ -114,7 +114,7 @@ static void extract_edituv_tris_iter_subdiv_bm(const DRWSubdivCache *UNUSED(subd
loop_idx + 3);
}
-static void extract_edituv_tris_iter_subdiv_mesh(const DRWSubdivCache *UNUSED(subdiv_cache),
+static void extract_edituv_tris_iter_subdiv_mesh(const DRWSubdivCache * /*subdiv_cache*/,
const MeshRenderData *mr,
void *_data,
uint subdiv_quad_index,
@@ -131,9 +131,9 @@ static void extract_edituv_tris_iter_subdiv_mesh(const DRWSubdivCache *UNUSED(su
edituv_tri_add(data, mp_hidden, mp_select, loop_idx, loop_idx + 2, loop_idx + 3);
}
-static void extract_edituv_tris_finish_subdiv(const struct DRWSubdivCache *UNUSED(subdiv_cache),
- const MeshRenderData *UNUSED(mr),
- MeshBatchCache *UNUSED(cache),
+static void extract_edituv_tris_finish_subdiv(const struct DRWSubdivCache * /*subdiv_cache*/,
+ const MeshRenderData * /*mr*/,
+ MeshBatchCache * /*cache*/,
void *buf,
void *_data)
{
@@ -167,8 +167,8 @@ constexpr MeshExtract create_extractor_edituv_tris()
* \{ */
static void extract_edituv_lines_init(const MeshRenderData *mr,
- MeshBatchCache *UNUSED(cache),
- void *UNUSED(ibo),
+ MeshBatchCache * /*cache*/,
+ void * /*ibo*/,
void *tls_data)
{
MeshExtract_EditUvElem_Data *data = static_cast<MeshExtract_EditUvElem_Data *>(tls_data);
@@ -184,9 +184,9 @@ BLI_INLINE void edituv_edge_add(
}
}
-static void extract_edituv_lines_iter_poly_bm(const MeshRenderData *UNUSED(mr),
+static void extract_edituv_lines_iter_poly_bm(const MeshRenderData * /*mr*/,
const BMFace *f,
- const int UNUSED(f_index),
+ const int /*f_index*/,
void *_data)
{
MeshExtract_EditUvElem_Data *data = static_cast<MeshExtract_EditUvElem_Data *>(_data);
@@ -220,7 +220,7 @@ static void extract_edituv_lines_iter_poly_mesh(const MeshRenderData *mr,
}
else {
mp_hidden = (mr->hide_poly) ? mr->hide_poly[mp_index] : false;
- mp_select = (mp->flag & ME_FACE_SEL) != 0;
+ mp_select = mr->select_poly && mr->select_poly[mp_index];
}
for (int ml_index = mp->loopstart; ml_index < ml_index_end; ml_index += 1) {
@@ -234,8 +234,8 @@ static void extract_edituv_lines_iter_poly_mesh(const MeshRenderData *mr,
}
}
-static void extract_edituv_lines_finish(const MeshRenderData *UNUSED(mr),
- MeshBatchCache *UNUSED(cache),
+static void extract_edituv_lines_finish(const MeshRenderData * /*mr*/,
+ MeshBatchCache * /*cache*/,
void *buf,
void *_data)
{
@@ -246,8 +246,8 @@ static void extract_edituv_lines_finish(const MeshRenderData *UNUSED(mr),
static void extract_edituv_lines_init_subdiv(const DRWSubdivCache *subdiv_cache,
const MeshRenderData *mr,
- MeshBatchCache *UNUSED(cache),
- void *UNUSED(buf),
+ MeshBatchCache * /*cache*/,
+ void * /*buf*/,
void *tls_data)
{
MeshExtract_EditUvElem_Data *data = static_cast<MeshExtract_EditUvElem_Data *>(tls_data);
@@ -291,16 +291,16 @@ static void extract_edituv_lines_iter_subdiv_mesh(const DRWSubdivCache *subdiv_c
{
MeshExtract_EditUvElem_Data *data = static_cast<MeshExtract_EditUvElem_Data *>(_data);
int *subdiv_loop_edge_index = (int *)GPU_vertbuf_get_data(subdiv_cache->edges_orig_index);
-
+ const int coarse_poly_index = coarse_poly - mr->mpoly;
bool mp_hidden, mp_select;
if (mr->bm) {
- const BMFace *efa = bm_original_face_get(mr, coarse_poly - mr->mpoly);
+ const BMFace *efa = bm_original_face_get(mr, coarse_poly_index);
mp_hidden = (efa) ? BM_elem_flag_test_bool(efa, BM_ELEM_HIDDEN) : true;
mp_select = (efa) ? BM_elem_flag_test_bool(efa, BM_ELEM_SELECT) : false;
}
else {
- mp_hidden = (mr->hide_poly) ? mr->hide_poly[coarse_poly - mr->mpoly] : false;
- mp_select = (coarse_poly->flag & ME_FACE_SEL) != 0;
+ mp_hidden = (mr->hide_poly) ? mr->hide_poly[coarse_poly_index] : false;
+ mp_select = mr->select_poly && mr->select_poly[coarse_poly_index];
}
uint start_loop_idx = subdiv_quad_index * 4;
@@ -318,9 +318,9 @@ static void extract_edituv_lines_iter_subdiv_mesh(const DRWSubdivCache *subdiv_c
}
}
-static void extract_edituv_lines_finish_subdiv(const struct DRWSubdivCache *UNUSED(subdiv_cache),
- const MeshRenderData *UNUSED(mr),
- MeshBatchCache *UNUSED(cache),
+static void extract_edituv_lines_finish_subdiv(const struct DRWSubdivCache * /*subdiv_cache*/,
+ const MeshRenderData * /*mr*/,
+ MeshBatchCache * /*cache*/,
void *buf,
void *_data)
{
@@ -354,8 +354,8 @@ constexpr MeshExtract create_extractor_edituv_lines()
* \{ */
static void extract_edituv_points_init(const MeshRenderData *mr,
- MeshBatchCache *UNUSED(cache),
- void *UNUSED(ibo),
+ MeshBatchCache * /*cache*/,
+ void * /*ibo*/,
void *tls_data)
{
MeshExtract_EditUvElem_Data *data = static_cast<MeshExtract_EditUvElem_Data *>(tls_data);
@@ -373,9 +373,9 @@ BLI_INLINE void edituv_point_add(MeshExtract_EditUvElem_Data *data,
}
}
-static void extract_edituv_points_iter_poly_bm(const MeshRenderData *UNUSED(mr),
+static void extract_edituv_points_iter_poly_bm(const MeshRenderData * /*mr*/,
const BMFace *f,
- const int UNUSED(f_index),
+ const int /*f_index*/,
void *_data)
{
MeshExtract_EditUvElem_Data *data = static_cast<MeshExtract_EditUvElem_Data *>(_data);
@@ -410,8 +410,8 @@ static void extract_edituv_points_iter_poly_mesh(const MeshRenderData *mr,
}
}
-static void extract_edituv_points_finish(const MeshRenderData *UNUSED(mr),
- MeshBatchCache *UNUSED(cache),
+static void extract_edituv_points_finish(const MeshRenderData * /*mr*/,
+ MeshBatchCache * /*cache*/,
void *buf,
void *_data)
{
@@ -422,8 +422,8 @@ static void extract_edituv_points_finish(const MeshRenderData *UNUSED(mr),
static void extract_edituv_points_init_subdiv(const DRWSubdivCache *subdiv_cache,
const MeshRenderData *mr,
- MeshBatchCache *UNUSED(cache),
- void *UNUSED(buf),
+ MeshBatchCache * /*cache*/,
+ void * /*buf*/,
void *tls_data)
{
MeshExtract_EditUvElem_Data *data = static_cast<MeshExtract_EditUvElem_Data *>(tls_data);
@@ -433,7 +433,7 @@ static void extract_edituv_points_init_subdiv(const DRWSubdivCache *subdiv_cache
}
static void extract_edituv_points_iter_subdiv_bm(const DRWSubdivCache *subdiv_cache,
- const MeshRenderData *UNUSED(mr),
+ const MeshRenderData * /*mr*/,
void *_data,
uint subdiv_quad_index,
const BMFace *coarse_quad)
@@ -475,9 +475,9 @@ static void extract_edituv_points_iter_subdiv_mesh(const DRWSubdivCache *subdiv_
}
}
-static void extract_edituv_points_finish_subdiv(const struct DRWSubdivCache *UNUSED(subdiv_cache),
- const MeshRenderData *UNUSED(mr),
- MeshBatchCache *UNUSED(cache),
+static void extract_edituv_points_finish_subdiv(const struct DRWSubdivCache * /*subdiv_cache*/,
+ const MeshRenderData * /*mr*/,
+ MeshBatchCache * /*cache*/,
void *buf,
void *_data)
{
@@ -511,8 +511,8 @@ constexpr MeshExtract create_extractor_edituv_points()
* \{ */
static void extract_edituv_fdots_init(const MeshRenderData *mr,
- MeshBatchCache *UNUSED(cache),
- void *UNUSED(ibo),
+ MeshBatchCache * /*cache*/,
+ void * /*ibo*/,
void *tls_data)
{
MeshExtract_EditUvElem_Data *data = static_cast<MeshExtract_EditUvElem_Data *>(tls_data);
@@ -533,7 +533,7 @@ BLI_INLINE void edituv_facedot_add(MeshExtract_EditUvElem_Data *data,
}
}
-static void extract_edituv_fdots_iter_poly_bm(const MeshRenderData *UNUSED(mr),
+static void extract_edituv_fdots_iter_poly_bm(const MeshRenderData * /*mr*/,
const BMFace *f,
const int f_index,
void *_data)
@@ -557,7 +557,7 @@ static void extract_edituv_fdots_iter_poly_mesh(const MeshRenderData *mr,
const bool mp_select = (efa) ? BM_elem_flag_test_bool(efa, BM_ELEM_SELECT) : false;
if (mr->use_subsurf_fdots) {
- const BLI_bitmap *facedot_tags = mr->me->runtime.subsurf_face_dot_tags;
+ const BLI_bitmap *facedot_tags = mr->me->runtime->subsurf_face_dot_tags;
const MLoop *mloop = mr->mloop;
const int ml_index_end = mp->loopstart + mp->totloop;
@@ -575,8 +575,8 @@ static void extract_edituv_fdots_iter_poly_mesh(const MeshRenderData *mr,
}
}
-static void extract_edituv_fdots_finish(const MeshRenderData *UNUSED(mr),
- MeshBatchCache *UNUSED(cache),
+static void extract_edituv_fdots_finish(const MeshRenderData * /*mr*/,
+ MeshBatchCache * /*cache*/,
void *buf,
void *_data)
{
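
The edit-UV hunks above replace the old `ME_FACE_SEL` flag test with a lookup into `mr->select_poly`. As a rough sketch (not the actual MeshRenderData setup), a boolean face-selection array like that can be fetched from the mesh's `.select_poly` attribute, assuming it is stored as a `CD_PROP_BOOL` layer on the polygon custom data:

#include "BKE_customdata.h"
#include "DNA_customdata_types.h"
#include "DNA_mesh_types.h"

/* Returns null when the layer does not exist, e.g. when no face was ever selected. */
static const bool *lookup_select_poly(const Mesh *me)
{
  return static_cast<const bool *>(
      CustomData_get_layer_named(&me->pdata, CD_PROP_BOOL, ".select_poly"));
}

Callers then guard with `select_poly && select_poly[poly_index]`, which is exactly the pattern used in the replaced lines.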
diff --git a/source/blender/draw/intern/mesh_extractors/extract_mesh_ibo_fdots.cc b/source/blender/draw/intern/mesh_extractors/extract_mesh_ibo_fdots.cc
index 8dc00617039..1b552b01d6b 100644
--- a/source/blender/draw/intern/mesh_extractors/extract_mesh_ibo_fdots.cc
+++ b/source/blender/draw/intern/mesh_extractors/extract_mesh_ibo_fdots.cc
@@ -15,15 +15,15 @@ namespace blender::draw {
* \{ */
static void extract_fdots_init(const MeshRenderData *mr,
- MeshBatchCache *UNUSED(cache),
- void *UNUSED(buf),
+ MeshBatchCache * /*cache*/,
+ void * /*buf*/,
void *tls_data)
{
GPUIndexBufBuilder *elb = static_cast<GPUIndexBufBuilder *>(tls_data);
GPU_indexbuf_init(elb, GPU_PRIM_POINTS, mr->poly_len, mr->poly_len);
}
-static void extract_fdots_iter_poly_bm(const MeshRenderData *UNUSED(mr),
+static void extract_fdots_iter_poly_bm(const MeshRenderData * /*mr*/,
const BMFace *f,
const int f_index,
void *_userdata)
@@ -46,7 +46,7 @@ static void extract_fdots_iter_poly_mesh(const MeshRenderData *mr,
GPUIndexBufBuilder *elb = static_cast<GPUIndexBufBuilder *>(_userdata);
if (mr->use_subsurf_fdots) {
- const BLI_bitmap *facedot_tags = mr->me->runtime.subsurf_face_dot_tags;
+ const BLI_bitmap *facedot_tags = mr->me->runtime->subsurf_face_dot_tags;
const MLoop *mloop = mr->mloop;
const int ml_index_end = mp->loopstart + mp->totloop;
@@ -69,8 +69,8 @@ static void extract_fdots_iter_poly_mesh(const MeshRenderData *mr,
}
}
-static void extract_fdots_finish(const MeshRenderData *UNUSED(mr),
- MeshBatchCache *UNUSED(cache),
+static void extract_fdots_finish(const MeshRenderData * /*mr*/,
+ MeshBatchCache * /*cache*/,
void *buf,
void *_userdata)
{
diff --git a/source/blender/draw/intern/mesh_extractors/extract_mesh_ibo_lines.cc b/source/blender/draw/intern/mesh_extractors/extract_mesh_ibo_lines.cc
index 9c564c2cdda..56e8baaca22 100644
--- a/source/blender/draw/intern/mesh_extractors/extract_mesh_ibo_lines.cc
+++ b/source/blender/draw/intern/mesh_extractors/extract_mesh_ibo_lines.cc
@@ -18,8 +18,8 @@ namespace blender::draw {
* \{ */
static void extract_lines_init(const MeshRenderData *mr,
- MeshBatchCache *UNUSED(cache),
- void *UNUSED(buf),
+ MeshBatchCache * /*cache*/,
+ void * /*buf*/,
void *tls_data)
{
GPUIndexBufBuilder *elb = static_cast<GPUIndexBufBuilder *>(tls_data);
@@ -28,9 +28,9 @@ static void extract_lines_init(const MeshRenderData *mr,
elb, GPU_PRIM_LINES, mr->edge_len + mr->edge_loose_len, mr->loop_len + mr->loop_loose_len);
}
-static void extract_lines_iter_poly_bm(const MeshRenderData *UNUSED(mr),
+static void extract_lines_iter_poly_bm(const MeshRenderData * /*mr*/,
const BMFace *f,
- const int UNUSED(f_index),
+ const int /*f_index*/,
void *data)
{
GPUIndexBufBuilder *elb = static_cast<GPUIndexBufBuilder *>(data);
@@ -52,7 +52,7 @@ static void extract_lines_iter_poly_bm(const MeshRenderData *UNUSED(mr),
static void extract_lines_iter_poly_mesh(const MeshRenderData *mr,
const MPoly *mp,
- const int UNUSED(mp_index),
+ const int /*mp_index*/,
void *data)
{
GPUIndexBufBuilder *elb = static_cast<GPUIndexBufBuilder *>(data);
@@ -127,8 +127,8 @@ static void extract_lines_task_reduce(void *_userdata_to, void *_userdata_from)
GPU_indexbuf_join(elb_to, elb_from);
}
-static void extract_lines_finish(const MeshRenderData *UNUSED(mr),
- MeshBatchCache *UNUSED(cache),
+static void extract_lines_finish(const MeshRenderData * /*mr*/,
+ MeshBatchCache * /*cache*/,
void *buf,
void *data)
{
@@ -138,10 +138,10 @@ static void extract_lines_finish(const MeshRenderData *UNUSED(mr),
}
static void extract_lines_init_subdiv(const DRWSubdivCache *subdiv_cache,
- const MeshRenderData *UNUSED(mr),
- MeshBatchCache *UNUSED(cache),
+ const MeshRenderData * /*mr*/,
+ MeshBatchCache * /*cache*/,
void *buffer,
- void *UNUSED(data))
+ void * /*data*/)
{
const DRWSubdivLooseGeom &loose_geom = subdiv_cache->loose_geom;
GPUIndexBuf *ibo = static_cast<GPUIndexBuf *>(buffer);
@@ -158,7 +158,7 @@ static void extract_lines_init_subdiv(const DRWSubdivCache *subdiv_cache,
static void extract_lines_loose_geom_subdiv(const DRWSubdivCache *subdiv_cache,
const MeshRenderData *mr,
void *buffer,
- void *UNUSED(data))
+ void * /*data*/)
{
const DRWSubdivLooseGeom &loose_geom = subdiv_cache->loose_geom;
if (loose_geom.edge_len == 0) {
@@ -231,8 +231,7 @@ static void extract_lines_loose_geom_subdiv(const DRWSubdivCache *subdiv_cache,
}
GPUIndexBuf *ibo = static_cast<GPUIndexBuf *>(buffer);
- draw_subdiv_build_lines_loose_buffer(
- subdiv_cache, ibo, flags, static_cast<uint>(loose_geom.edge_len));
+ draw_subdiv_build_lines_loose_buffer(subdiv_cache, ibo, flags, uint(loose_geom.edge_len));
GPU_vertbuf_discard(flags);
}
@@ -285,10 +284,10 @@ static void extract_lines_with_lines_loose_finish(const MeshRenderData *mr,
}
static void extract_lines_with_lines_loose_finish_subdiv(const struct DRWSubdivCache *subdiv_cache,
- const MeshRenderData *UNUSED(mr),
+ const MeshRenderData * /*mr*/,
MeshBatchCache *cache,
- void *UNUSED(buf),
- void *UNUSED(_data))
+ void * /*buf*/,
+ void * /*_data*/)
{
/* Multiply by 2 because these are edge indices. */
const int start = subdiv_cache->num_subdiv_loops * 2;
@@ -327,18 +326,18 @@ constexpr MeshExtract create_extractor_lines_with_lines_loose()
static void extract_lines_loose_only_init(const MeshRenderData *mr,
MeshBatchCache *cache,
void *buf,
- void *UNUSED(tls_data))
+ void * /*tls_data*/)
{
BLI_assert(buf == cache->final.buff.ibo.lines_loose);
UNUSED_VARS_NDEBUG(buf);
extract_lines_loose_subbuffer(mr, cache);
}
-static void extract_lines_loose_only_init_subdiv(const DRWSubdivCache *UNUSED(subdiv_cache),
+static void extract_lines_loose_only_init_subdiv(const DRWSubdivCache * /*subdiv_cache*/,
const MeshRenderData *mr,
MeshBatchCache *cache,
void *buffer,
- void *UNUSED(data))
+ void * /*data*/)
{
BLI_assert(buffer == cache->final.buff.ibo.lines_loose);
UNUSED_VARS_NDEBUG(buffer);
diff --git a/source/blender/draw/intern/mesh_extractors/extract_mesh_ibo_lines_adjacency.cc b/source/blender/draw/intern/mesh_extractors/extract_mesh_ibo_lines_adjacency.cc
index d6c246c51a9..3124a35608d 100644
--- a/source/blender/draw/intern/mesh_extractors/extract_mesh_ibo_lines_adjacency.cc
+++ b/source/blender/draw/intern/mesh_extractors/extract_mesh_ibo_lines_adjacency.cc
@@ -42,8 +42,8 @@ static void line_adjacency_data_init(MeshExtract_LineAdjacency_Data *data,
}
static void extract_lines_adjacency_init(const MeshRenderData *mr,
- MeshBatchCache *UNUSED(cache),
- void *UNUSED(buf),
+ MeshBatchCache * /*cache*/,
+ void * /*buf*/,
void *tls_data)
{
/* Similar to poly_to_tri_count().
@@ -71,7 +71,7 @@ BLI_INLINE void lines_adjacency_triangle(
if (!value_is_init || v_data == NO_EDGE) {
/* Save the winding order inside the sign bit, because the
 * edge-hash sorts the keys and we need to compare winding later. */
- int value = (int)l1 + 1; /* 0 cannot be signed so add one. */
+ int value = int(l1) + 1; /* 0 cannot be signed so add one. */
*pval = POINTER_FROM_INT((inv_indices) ? -value : value);
/* Store loop indices for remaining non-manifold edges. */
data->vert_to_loop[v2] = l2;
@@ -81,7 +81,7 @@ BLI_INLINE void lines_adjacency_triangle(
/* HACK Tag as not used. Prevent overhead of BLI_edgehash_remove. */
*pval = POINTER_FROM_INT(NO_EDGE);
bool inv_opposite = (v_data < 0);
- uint l_opposite = (uint)abs(v_data) - 1;
+ uint l_opposite = uint(abs(v_data)) - 1;
/* TODO: Make this part thread-safe. */
if (inv_opposite == inv_indices) {
/* Don't share edge if triangles have non matching winding. */
@@ -96,9 +96,9 @@ BLI_INLINE void lines_adjacency_triangle(
}
}
-static void extract_lines_adjacency_iter_looptri_bm(const MeshRenderData *UNUSED(mr),
+static void extract_lines_adjacency_iter_looptri_bm(const MeshRenderData * /*mr*/,
BMLoop **elt,
- const int UNUSED(elt_index),
+ const int /*elt_index*/,
void *_data)
{
MeshExtract_LineAdjacency_Data *data = static_cast<MeshExtract_LineAdjacency_Data *>(_data);
@@ -115,7 +115,7 @@ static void extract_lines_adjacency_iter_looptri_bm(const MeshRenderData *UNUSED
static void extract_lines_adjacency_iter_looptri_mesh(const MeshRenderData *mr,
const MLoopTri *mlt,
- const int UNUSED(elt_index),
+ const int /*elt_index*/,
void *_data)
{
MeshExtract_LineAdjacency_Data *data = static_cast<MeshExtract_LineAdjacency_Data *>(_data);
@@ -132,7 +132,7 @@ static void extract_lines_adjacency_iter_looptri_mesh(const MeshRenderData *mr,
data);
}
-static void extract_lines_adjacency_finish(const MeshRenderData *UNUSED(mr),
+static void extract_lines_adjacency_finish(const MeshRenderData * /*mr*/,
MeshBatchCache *cache,
void *buf,
void *_data)
@@ -146,7 +146,7 @@ static void extract_lines_adjacency_finish(const MeshRenderData *UNUSED(mr),
int v_data = POINTER_AS_INT(BLI_edgehashIterator_getValue(ehi));
if (v_data != NO_EDGE) {
BLI_edgehashIterator_getKey(ehi, &v2, &v3);
- l1 = (uint)abs(v_data) - 1;
+ l1 = uint(abs(v_data)) - 1;
if (v_data < 0) { /* `inv_opposite`. */
SWAP(uint, v2, v3);
}
@@ -166,9 +166,9 @@ static void extract_lines_adjacency_finish(const MeshRenderData *UNUSED(mr),
}
static void extract_lines_adjacency_init_subdiv(const DRWSubdivCache *subdiv_cache,
- const MeshRenderData *UNUSED(mr),
- MeshBatchCache *UNUSED(cache),
- void *UNUSED(buf),
+ const MeshRenderData * /*mr*/,
+ MeshBatchCache * /*cache*/,
+ void * /*buf*/,
void *_data)
{
MeshExtract_LineAdjacency_Data *data = static_cast<MeshExtract_LineAdjacency_Data *>(_data);
@@ -182,7 +182,7 @@ static void extract_lines_adjacency_init_subdiv(const DRWSubdivCache *subdiv_cac
}
static void extract_lines_adjacency_iter_subdiv(const DRWSubdivCache *subdiv_cache,
- const MeshRenderData *UNUSED(mr),
+ const MeshRenderData * /*mr*/,
void *_data,
uint subdiv_quad_index)
{
@@ -207,7 +207,7 @@ static void extract_lines_adjacency_iter_subdiv_bm(const DRWSubdivCache *subdiv_
const MeshRenderData *mr,
void *_data,
uint subdiv_quad_index,
- const BMFace *UNUSED(coarse_quad))
+ const BMFace * /*coarse_quad*/)
{
extract_lines_adjacency_iter_subdiv(subdiv_cache, mr, _data, subdiv_quad_index);
}
@@ -216,12 +216,12 @@ static void extract_lines_adjacency_iter_subdiv_mesh(const DRWSubdivCache *subdi
const MeshRenderData *mr,
void *_data,
uint subdiv_quad_index,
- const MPoly *UNUSED(coarse_quad))
+ const MPoly * /*coarse_quad*/)
{
extract_lines_adjacency_iter_subdiv(subdiv_cache, mr, _data, subdiv_quad_index);
}
-static void extract_lines_adjacency_finish_subdiv(const DRWSubdivCache *UNUSED(subdiv_cache),
+static void extract_lines_adjacency_finish_subdiv(const DRWSubdivCache * /*subdiv_cache*/,
const MeshRenderData *mr,
MeshBatchCache *cache,
void *buf,
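
The `lines_adjacency_triangle` hunk above packs a loop index and its winding into a single edge-hash value: the index is offset by one so zero stays representable, and the sign carries the winding. A small self-contained sketch of that encode/decode (illustrative names, not Blender API):

#include <cstdint>
#include <cstdlib>

/* Encode loop index `l1` plus a winding flag into one non-zero int. */
static int encode_winding(uint32_t l1, bool inverted)
{
  const int value = int(l1) + 1; /* 0 cannot carry a sign, so offset by one. */
  return inverted ? -value : value;
}

/* Decode the loop index and winding flag back out. */
static void decode_winding(int v_data, uint32_t *r_loop, bool *r_inverted)
{
  *r_inverted = (v_data < 0);
  *r_loop = uint32_t(std::abs(v_data)) - 1;
}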
diff --git a/source/blender/draw/intern/mesh_extractors/extract_mesh_ibo_lines_paint_mask.cc b/source/blender/draw/intern/mesh_extractors/extract_mesh_ibo_lines_paint_mask.cc
index 31e5c515129..572eb5b28f8 100644
--- a/source/blender/draw/intern/mesh_extractors/extract_mesh_ibo_lines_paint_mask.cc
+++ b/source/blender/draw/intern/mesh_extractors/extract_mesh_ibo_lines_paint_mask.cc
@@ -26,8 +26,8 @@ struct MeshExtract_LinePaintMask_Data {
};
static void extract_lines_paint_mask_init(const MeshRenderData *mr,
- MeshBatchCache *UNUSED(cache),
- void *UNUSED(ibo),
+ MeshBatchCache * /*cache*/,
+ void * /*ibo*/,
void *tls_data)
{
MeshExtract_LinePaintMask_Data *data = static_cast<MeshExtract_LinePaintMask_Data *>(tls_data);
@@ -37,7 +37,7 @@ static void extract_lines_paint_mask_init(const MeshRenderData *mr,
static void extract_lines_paint_mask_iter_poly_mesh(const MeshRenderData *mr,
const MPoly *mp,
- const int UNUSED(mp_index),
+ const int mp_index,
void *_data)
{
MeshExtract_LinePaintMask_Data *data = static_cast<MeshExtract_LinePaintMask_Data *>(_data);
@@ -52,7 +52,7 @@ static void extract_lines_paint_mask_iter_poly_mesh(const MeshRenderData *mr,
const int ml_index_last = mp->totloop + mp->loopstart - 1;
const int ml_index_other = (ml_index == ml_index_last) ? mp->loopstart : (ml_index + 1);
- if (mp->flag & ME_FACE_SEL) {
+ if (mr->select_poly && mr->select_poly[mp_index]) {
if (BLI_BITMAP_TEST_AND_SET_ATOMIC(data->select_map, e_index)) {
/* Hide the edge as it has more than 2 selected loops. */
GPU_indexbuf_set_line_restart(&data->elb, e_index);
@@ -75,8 +75,8 @@ static void extract_lines_paint_mask_iter_poly_mesh(const MeshRenderData *mr,
}
}
-static void extract_lines_paint_mask_finish(const MeshRenderData *UNUSED(mr),
- MeshBatchCache *UNUSED(cache),
+static void extract_lines_paint_mask_finish(const MeshRenderData * /*mr*/,
+ MeshBatchCache * /*cache*/,
void *buf,
void *_data)
{
@@ -88,8 +88,8 @@ static void extract_lines_paint_mask_finish(const MeshRenderData *UNUSED(mr),
static void extract_lines_paint_mask_init_subdiv(const DRWSubdivCache *subdiv_cache,
const MeshRenderData *mr,
- MeshBatchCache *UNUSED(cache),
- void *UNUSED(buf),
+ MeshBatchCache * /*cache*/,
+ void * /*buf*/,
void *tls_data)
{
MeshExtract_LinePaintMask_Data *data = static_cast<MeshExtract_LinePaintMask_Data *>(tls_data);
@@ -110,11 +110,13 @@ static void extract_lines_paint_mask_iter_subdiv_mesh(const DRWSubdivCache *subd
int *subdiv_loop_edge_index = (int *)GPU_vertbuf_get_data(subdiv_cache->edges_orig_index);
int *subdiv_loop_subdiv_edge_index = subdiv_cache->subdiv_loop_subdiv_edge_index;
+ const int coarse_quad_index = coarse_quad - mr->mpoly;
+
uint start_loop_idx = subdiv_quad_index * 4;
uint end_loop_idx = (subdiv_quad_index + 1) * 4;
for (uint loop_idx = start_loop_idx; loop_idx < end_loop_idx; loop_idx++) {
- const uint coarse_edge_index = (uint)subdiv_loop_edge_index[loop_idx];
- const uint subdiv_edge_index = (uint)subdiv_loop_subdiv_edge_index[loop_idx];
+ const uint coarse_edge_index = uint(subdiv_loop_edge_index[loop_idx]);
+ const uint subdiv_edge_index = uint(subdiv_loop_subdiv_edge_index[loop_idx]);
if (coarse_edge_index == -1u) {
GPU_indexbuf_set_line_restart(&data->elb, subdiv_edge_index);
@@ -124,7 +126,7 @@ static void extract_lines_paint_mask_iter_subdiv_mesh(const DRWSubdivCache *subd
((mr->e_origindex) && (mr->e_origindex[coarse_edge_index] == ORIGINDEX_NONE)))) {
const uint ml_index_other = (loop_idx == (end_loop_idx - 1)) ? start_loop_idx :
loop_idx + 1;
- if (coarse_quad->flag & ME_FACE_SEL) {
+ if (mr->select_poly && mr->select_poly[coarse_quad_index]) {
if (BLI_BITMAP_TEST_AND_SET_ATOMIC(data->select_map, coarse_edge_index)) {
/* Hide the edge as it has more than 2 selected loops. */
GPU_indexbuf_set_line_restart(&data->elb, subdiv_edge_index);
@@ -148,12 +150,11 @@ static void extract_lines_paint_mask_iter_subdiv_mesh(const DRWSubdivCache *subd
}
}
-static void extract_lines_paint_mask_finish_subdiv(
- const struct DRWSubdivCache *UNUSED(subdiv_cache),
- const MeshRenderData *mr,
- MeshBatchCache *cache,
- void *buf,
- void *_data)
+static void extract_lines_paint_mask_finish_subdiv(const struct DRWSubdivCache * /*subdiv_cache*/,
+ const MeshRenderData *mr,
+ MeshBatchCache *cache,
+ void *buf,
+ void *_data)
{
extract_lines_paint_mask_finish(mr, cache, buf, _data);
}
diff --git a/source/blender/draw/intern/mesh_extractors/extract_mesh_ibo_points.cc b/source/blender/draw/intern/mesh_extractors/extract_mesh_ibo_points.cc
index 48eeb86e5ee..e84f122d03e 100644
--- a/source/blender/draw/intern/mesh_extractors/extract_mesh_ibo_points.cc
+++ b/source/blender/draw/intern/mesh_extractors/extract_mesh_ibo_points.cc
@@ -19,8 +19,8 @@ namespace blender::draw {
* \{ */
static void extract_points_init(const MeshRenderData *mr,
- MeshBatchCache *UNUSED(cache),
- void *UNUSED(buf),
+ MeshBatchCache * /*cache*/,
+ void * /*buf*/,
void *tls_data)
{
GPUIndexBufBuilder *elb = static_cast<GPUIndexBufBuilder *>(tls_data);
@@ -53,9 +53,9 @@ BLI_INLINE void vert_set_mesh(GPUIndexBufBuilder *elb,
}
}
-static void extract_points_iter_poly_bm(const MeshRenderData *UNUSED(mr),
+static void extract_points_iter_poly_bm(const MeshRenderData * /*mr*/,
const BMFace *f,
- const int UNUSED(f_index),
+ const int /*f_index*/,
void *_userdata)
{
GPUIndexBufBuilder *elb = static_cast<GPUIndexBufBuilder *>(_userdata);
@@ -70,7 +70,7 @@ static void extract_points_iter_poly_bm(const MeshRenderData *UNUSED(mr),
static void extract_points_iter_poly_mesh(const MeshRenderData *mr,
const MPoly *mp,
- const int UNUSED(mp_index),
+ const int /*mp_index*/,
void *_userdata)
{
GPUIndexBufBuilder *elb = static_cast<GPUIndexBufBuilder *>(_userdata);
@@ -113,7 +113,7 @@ static void extract_points_iter_lvert_bm(const MeshRenderData *mr,
}
static void extract_points_iter_lvert_mesh(const MeshRenderData *mr,
- const MVert *UNUSED(mv),
+ const MVert * /*mv*/,
const int lvert_index,
void *_userdata)
{
@@ -129,8 +129,8 @@ static void extract_points_task_reduce(void *_userdata_to, void *_userdata_from)
GPU_indexbuf_join(elb_to, elb_from);
}
-static void extract_points_finish(const MeshRenderData *UNUSED(mr),
- MeshBatchCache *UNUSED(cache),
+static void extract_points_finish(const MeshRenderData * /*mr*/,
+ MeshBatchCache * /*cache*/,
void *buf,
void *_userdata)
{
@@ -141,8 +141,8 @@ static void extract_points_finish(const MeshRenderData *UNUSED(mr),
static void extract_points_init_subdiv(const DRWSubdivCache *subdiv_cache,
const MeshRenderData *mr,
- MeshBatchCache *UNUSED(cache),
- void *UNUSED(buffer),
+ MeshBatchCache * /*cache*/,
+ void * /*buffer*/,
void *data)
{
GPUIndexBufBuilder *elb = static_cast<GPUIndexBufBuilder *>(data);
@@ -194,7 +194,7 @@ static void extract_points_iter_subdiv_bm(const DRWSubdivCache *subdiv_cache,
const MeshRenderData *mr,
void *_data,
uint subdiv_quad_index,
- const BMFace *UNUSED(coarse_quad))
+ const BMFace * /*coarse_quad*/)
{
GPUIndexBufBuilder *elb = static_cast<GPUIndexBufBuilder *>(_data);
extract_points_iter_subdiv_common(elb, mr, subdiv_cache, subdiv_quad_index, true);
@@ -204,7 +204,7 @@ static void extract_points_iter_subdiv_mesh(const DRWSubdivCache *subdiv_cache,
const MeshRenderData *mr,
void *_data,
uint subdiv_quad_index,
- const MPoly *UNUSED(coarse_quad))
+ const MPoly * /*coarse_quad*/)
{
GPUIndexBufBuilder *elb = static_cast<GPUIndexBufBuilder *>(_data);
extract_points_iter_subdiv_common(elb, mr, subdiv_cache, subdiv_quad_index, false);
@@ -212,7 +212,7 @@ static void extract_points_iter_subdiv_mesh(const DRWSubdivCache *subdiv_cache,
static void extract_points_loose_geom_subdiv(const DRWSubdivCache *subdiv_cache,
const MeshRenderData *mr,
- void *UNUSED(buffer),
+ void * /*buffer*/,
void *data)
{
const DRWSubdivLooseGeom &loose_geom = subdiv_cache->loose_geom;
@@ -281,9 +281,9 @@ static void extract_points_loose_geom_subdiv(const DRWSubdivCache *subdiv_cache,
}
}
-static void extract_points_finish_subdiv(const DRWSubdivCache *UNUSED(subdiv_cache),
- const MeshRenderData *UNUSED(mr),
- MeshBatchCache *UNUSED(cache),
+static void extract_points_finish_subdiv(const DRWSubdivCache * /*subdiv_cache*/,
+ const MeshRenderData * /*mr*/,
+ MeshBatchCache * /*cache*/,
void *buf,
void *_userdata)
{
diff --git a/source/blender/draw/intern/mesh_extractors/extract_mesh_ibo_tris.cc b/source/blender/draw/intern/mesh_extractors/extract_mesh_ibo_tris.cc
index 2e3e6c7b6b1..da3560389e4 100644
--- a/source/blender/draw/intern/mesh_extractors/extract_mesh_ibo_tris.cc
+++ b/source/blender/draw/intern/mesh_extractors/extract_mesh_ibo_tris.cc
@@ -25,8 +25,8 @@ static void extract_tris_mat_task_reduce(void *_userdata_to, void *_userdata_fro
* \{ */
static void extract_tris_init(const MeshRenderData *mr,
- MeshBatchCache *UNUSED(cache),
- void *UNUSED(ibo),
+ MeshBatchCache * /*cache*/,
+ void * /*ibo*/,
void *tls_data)
{
GPUIndexBufBuilder *elb = static_cast<GPUIndexBufBuilder *>(tls_data);
@@ -110,10 +110,10 @@ static void extract_tris_finish(const MeshRenderData *mr,
}
static void extract_tris_init_subdiv(const DRWSubdivCache *subdiv_cache,
- const MeshRenderData *UNUSED(mr),
+ const MeshRenderData * /*mr*/,
MeshBatchCache *cache,
void *buffer,
- void *UNUSED(data))
+ void * /*data*/)
{
GPUIndexBuf *ibo = static_cast<GPUIndexBuf *>(buffer);
/* Initialize the index buffer; it was already allocated and will be filled on the device. */
@@ -157,15 +157,15 @@ constexpr MeshExtract create_extractor_tris()
* \{ */
static void extract_tris_single_mat_init(const MeshRenderData *mr,
- MeshBatchCache *UNUSED(cache),
- void *UNUSED(ibo),
+ MeshBatchCache * /*cache*/,
+ void * /*ibo*/,
void *tls_data)
{
GPUIndexBufBuilder *elb = static_cast<GPUIndexBufBuilder *>(tls_data);
GPU_indexbuf_init(elb, GPU_PRIM_TRIS, mr->tri_len, mr->loop_len);
}
-static void extract_tris_single_mat_iter_looptri_bm(const MeshRenderData *UNUSED(mr),
+static void extract_tris_single_mat_iter_looptri_bm(const MeshRenderData * /*mr*/,
BMLoop **elt,
const int elt_index,
void *_data)
diff --git a/source/blender/draw/intern/mesh_extractors/extract_mesh_vbo_attributes.cc b/source/blender/draw/intern/mesh_extractors/extract_mesh_vbo_attributes.cc
index 64ade020418..1817591b6a3 100644
--- a/source/blender/draw/intern/mesh_extractors/extract_mesh_vbo_attributes.cc
+++ b/source/blender/draw/intern/mesh_extractors/extract_mesh_vbo_attributes.cc
@@ -14,6 +14,8 @@
#include "BLI_string.h"
#include "BKE_attribute.h"
+#include "BKE_attribute.hh"
+#include "BKE_mesh.h"
#include "draw_attributes.h"
#include "draw_subdivision.h"
@@ -278,14 +280,14 @@ static void extract_attr_generic(const MeshRenderData *mr,
}
static void extract_attr_init(
- const MeshRenderData *mr, MeshBatchCache *cache, void *buf, void *UNUSED(tls_data), int index)
+ const MeshRenderData *mr, MeshBatchCache *cache, void *buf, void * /*tls_data*/, int index)
{
const DRW_Attributes *attrs_used = &cache->attr_used;
const DRW_AttributeRequest &request = attrs_used->requests[index];
GPUVertBuf *vbo = static_cast<GPUVertBuf *>(buf);
- init_vbo_for_attribute(*mr, vbo, request, false, static_cast<uint32_t>(mr->loop_len));
+ init_vbo_for_attribute(*mr, vbo, request, false, uint32_t(mr->loop_len));
/* TODO(@kevindietrich): float3 is used for scalar attributes as the implicit conversion done by
* OpenGL to vec4 for a scalar `s` will produce a `vec4(s, 0, 0, 1)`. However, following the
@@ -325,7 +327,7 @@ static void extract_attr_init_subdiv(const DRWSubdivCache *subdiv_cache,
const MeshRenderData *mr,
MeshBatchCache *cache,
void *buffer,
- void *UNUSED(tls_data),
+ void * /*tls_data*/,
int index)
{
const DRW_Attributes *attrs_used = &cache->attr_used;
@@ -340,7 +342,7 @@ static void extract_attr_init_subdiv(const DRWSubdivCache *subdiv_cache,
GPUVertFormat coarse_format = {0};
GPU_vertformat_attr_add(&coarse_format, "data", GPU_COMP_F32, dimensions, GPU_FETCH_FLOAT);
GPU_vertbuf_init_with_format_ex(src_data, &coarse_format, GPU_USAGE_STATIC);
- GPU_vertbuf_data_alloc(src_data, static_cast<uint32_t>(coarse_mesh->totloop));
+ GPU_vertbuf_data_alloc(src_data, uint32_t(coarse_mesh->totloop));
switch (request.cd_type) {
case CD_PROP_BOOL:
@@ -379,7 +381,7 @@ static void extract_attr_init_subdiv(const DRWSubdivCache *subdiv_cache,
draw_subdiv_interp_custom_data(subdiv_cache,
src_data,
dst_buffer,
- static_cast<int>(dimensions),
+ int(dimensions),
0,
ELEM(request.cd_type, CD_PROP_COLOR, CD_PROP_BYTE_COLOR));
@@ -432,6 +434,40 @@ constexpr MeshExtract create_extractor_attr(ExtractInitFn fn, ExtractInitSubdivF
return extractor;
}
+static void extract_mesh_attr_viewer_init(const MeshRenderData *mr,
+ MeshBatchCache * /*cache*/,
+ void *buf,
+ void * /*tls_data*/)
+{
+ GPUVertBuf *vbo = static_cast<GPUVertBuf *>(buf);
+ static GPUVertFormat format = {0};
+ if (format.attr_len == 0) {
+ GPU_vertformat_attr_add(&format, "attribute_value", GPU_COMP_F32, 4, GPU_FETCH_FLOAT);
+ }
+
+ GPU_vertbuf_init_with_format(vbo, &format);
+ GPU_vertbuf_data_alloc(vbo, mr->loop_len);
+ MutableSpan<ColorGeometry4f> attr{static_cast<ColorGeometry4f *>(GPU_vertbuf_get_data(vbo)),
+ mr->loop_len};
+
+ const StringRefNull attr_name = ".viewer";
+ const bke::AttributeAccessor attributes = mr->me->attributes();
+ attributes
+ .lookup_or_default<ColorGeometry4f>(attr_name, ATTR_DOMAIN_CORNER, {1.0f, 0.0f, 1.0f, 1.0f})
+ .materialize(attr);
+}
+
+constexpr MeshExtract create_extractor_attr_viewer()
+{
+ MeshExtract extractor = {nullptr};
+ extractor.init = extract_mesh_attr_viewer_init;
+ extractor.data_type = MR_DATA_NONE;
+ extractor.data_size = 0;
+ extractor.use_threading = false;
+ extractor.mesh_buffer_offset = offsetof(MeshBufferList, vbo.attr_viewer);
+ return extractor;
+}
+
/** \} */
} // namespace blender::draw
@@ -457,3 +493,5 @@ const MeshExtract extract_attr[GPU_MAX_ATTR] = {
CREATE_EXTRACTOR_ATTR(13),
CREATE_EXTRACTOR_ATTR(14),
};
+
+const MeshExtract extract_attr_viewer = blender::draw::create_extractor_attr_viewer();
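
The new `.viewer` extractor added above reads a corner-domain color attribute through the C++ attribute API. A minimal sketch of that read pattern on its own, assuming the standard `BKE_attribute.hh` / `BLI_span.hh` includes:

#include "BKE_attribute.hh"
#include "BLI_color.hh"
#include "BLI_span.hh"
#include "DNA_mesh_types.h"

using namespace blender;

/* Fill `r_colors` (one value per corner) from the ".viewer" attribute,
 * falling back to magenta where the attribute is missing. */
static void read_viewer_colors(const Mesh &mesh, MutableSpan<ColorGeometry4f> r_colors)
{
  const bke::AttributeAccessor attributes = mesh.attributes();
  attributes
      .lookup_or_default<ColorGeometry4f>(".viewer", ATTR_DOMAIN_CORNER, {1.0f, 0.0f, 1.0f, 1.0f})
      .materialize(r_colors);
}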
diff --git a/source/blender/draw/intern/mesh_extractors/extract_mesh_vbo_edge_fac.cc b/source/blender/draw/intern/mesh_extractors/extract_mesh_vbo_edge_fac.cc
index 50c37f6397c..de1f5181ac5 100644
--- a/source/blender/draw/intern/mesh_extractors/extract_mesh_vbo_edge_fac.cc
+++ b/source/blender/draw/intern/mesh_extractors/extract_mesh_vbo_edge_fac.cc
@@ -43,7 +43,7 @@ static float loop_edge_factor_get(const float f_no[3],
}
static void extract_edge_fac_init(const MeshRenderData *mr,
- MeshBatchCache *UNUSED(cache),
+ MeshBatchCache * /*cache*/,
void *buf,
void *tls_data)
{
@@ -66,7 +66,7 @@ static void extract_edge_fac_init(const MeshRenderData *mr,
* We could have a flag in the mesh instead or check the modifier stack. */
const MEdge *med = mr->medge;
for (int e_index = 0; e_index < mr->edge_len; e_index++, med++) {
- if ((med->flag & ME_EDGERENDER) == 0) {
+ if ((med->flag & ME_EDGEDRAW) == 0) {
data->use_edge_render = true;
break;
}
@@ -82,7 +82,7 @@ static void extract_edge_fac_init(const MeshRenderData *mr,
static void extract_edge_fac_iter_poly_bm(const MeshRenderData *mr,
const BMFace *f,
- const int UNUSED(f_index),
+ const int /*f_index*/,
void *_data)
{
MeshExtract_EdgeFac_Data *data = static_cast<MeshExtract_EdgeFac_Data *>(_data);
@@ -118,7 +118,7 @@ static void extract_edge_fac_iter_poly_mesh(const MeshRenderData *mr,
if (data->use_edge_render) {
const MEdge *med = &mr->medge[ml->e];
- data->vbo_data[ml_index] = (med->flag & ME_EDGERENDER) ? 255 : 0;
+ data->vbo_data[ml_index] = (med->flag & ME_EDGEDRAW) ? 255 : 0;
}
else {
@@ -146,7 +146,7 @@ static void extract_edge_fac_iter_poly_mesh(const MeshRenderData *mr,
}
static void extract_edge_fac_iter_ledge_bm(const MeshRenderData *mr,
- const BMEdge *UNUSED(eed),
+ const BMEdge * /*eed*/,
const int ledge_index,
void *_data)
{
@@ -156,7 +156,7 @@ static void extract_edge_fac_iter_ledge_bm(const MeshRenderData *mr,
}
static void extract_edge_fac_iter_ledge_mesh(const MeshRenderData *mr,
- const MEdge *UNUSED(med),
+ const MEdge * /*med*/,
const int ledge_index,
void *_data)
{
@@ -167,7 +167,7 @@ static void extract_edge_fac_iter_ledge_mesh(const MeshRenderData *mr,
}
static void extract_edge_fac_finish(const MeshRenderData *mr,
- MeshBatchCache *UNUSED(cache),
+ MeshBatchCache * /*cache*/,
void *buf,
void *_data)
{
@@ -217,10 +217,10 @@ static GPUVertFormat *get_subdiv_edge_fac_format()
}
static void extract_edge_fac_init_subdiv(const DRWSubdivCache *subdiv_cache,
- const MeshRenderData *UNUSED(mr),
+ const MeshRenderData * /*mr*/,
MeshBatchCache *cache,
void *buffer,
- void *UNUSED(data))
+ void * /*data*/)
{
const DRWSubdivLooseGeom &loose_geom = subdiv_cache->loose_geom;
GPUVertBuf *edge_idx = cache->final.buff.vbo.edge_idx;
@@ -252,9 +252,9 @@ static void extract_edge_fac_init_subdiv(const DRWSubdivCache *subdiv_cache,
}
static void extract_edge_fac_loose_geom_subdiv(const DRWSubdivCache *subdiv_cache,
- const MeshRenderData *UNUSED(mr),
+ const MeshRenderData * /*mr*/,
void *buffer,
- void *UNUSED(data))
+ void * /*data*/)
{
const DRWSubdivLooseGeom &loose_geom = subdiv_cache->loose_geom;
if (loose_geom.edge_len == 0) {
diff --git a/source/blender/draw/intern/mesh_extractors/extract_mesh_vbo_edit_data.cc b/source/blender/draw/intern/mesh_extractors/extract_mesh_vbo_edit_data.cc
index 27fd6546b8c..31dc2fdff6a 100644
--- a/source/blender/draw/intern/mesh_extractors/extract_mesh_vbo_edit_data.cc
+++ b/source/blender/draw/intern/mesh_extractors/extract_mesh_vbo_edit_data.cc
@@ -60,14 +60,14 @@ static void mesh_render_data_edge_flag(const MeshRenderData *mr,
if (mr->edge_crease_ofs != -1) {
float crease = BM_ELEM_CD_GET_FLOAT(eed, mr->edge_crease_ofs);
if (crease > 0) {
- eattr->crease = (uchar)ceil(crease * 15.0f);
+ eattr->crease = uchar(ceil(crease * 15.0f));
}
}
/* Use a byte for value range */
if (mr->bweight_ofs != -1) {
float bweight = BM_ELEM_CD_GET_FLOAT(eed, mr->bweight_ofs);
if (bweight > 0) {
- eattr->bweight = (uchar)(bweight * 255.0f);
+ eattr->bweight = uchar(bweight * 255.0f);
}
}
#ifdef WITH_FREESTYLE
@@ -95,7 +95,7 @@ static void mesh_render_data_vert_flag(const MeshRenderData *mr,
if (mr->vert_crease_ofs != -1) {
float crease = BM_ELEM_CD_GET_FLOAT(eve, mr->vert_crease_ofs);
if (crease > 0) {
- eattr->crease |= (uchar)ceil(crease * 15.0f) << 4;
+ eattr->crease |= uchar(ceil(crease * 15.0f)) << 4;
}
}
}
@@ -112,7 +112,7 @@ static GPUVertFormat *get_edit_data_format()
}
static void extract_edit_data_init(const MeshRenderData *mr,
- MeshBatchCache *UNUSED(cache),
+ MeshBatchCache * /*cache*/,
void *buf,
void *tls_data)
{
@@ -126,7 +126,7 @@ static void extract_edit_data_init(const MeshRenderData *mr,
static void extract_edit_data_iter_poly_bm(const MeshRenderData *mr,
const BMFace *f,
- const int UNUSED(f_index),
+ const int /*f_index*/,
void *_data)
{
EditLoopData *vbo_data = *(EditLoopData **)_data;
@@ -223,7 +223,7 @@ static void extract_edit_data_iter_lvert_bm(const MeshRenderData *mr,
}
static void extract_edit_data_iter_lvert_mesh(const MeshRenderData *mr,
- const MVert *UNUSED(mv),
+ const MVert * /*mv*/,
const int lvert_index,
void *_data)
{
@@ -240,8 +240,8 @@ static void extract_edit_data_iter_lvert_mesh(const MeshRenderData *mr,
}
static void extract_edit_data_init_subdiv(const DRWSubdivCache *subdiv_cache,
- const MeshRenderData *UNUSED(mr),
- MeshBatchCache *UNUSED(cache),
+ const MeshRenderData * /*mr*/,
+ MeshBatchCache * /*cache*/,
void *buf,
void *data)
{
@@ -300,14 +300,14 @@ static void extract_edit_data_iter_subdiv_mesh(const DRWSubdivCache *subdiv_cach
uint subdiv_quad_index,
const MPoly *coarse_quad)
{
- const int coarse_quad_index = static_cast<int>(coarse_quad - mr->mpoly);
+ const int coarse_quad_index = int(coarse_quad - mr->mpoly);
BMFace *coarse_quad_bm = bm_original_face_get(mr, coarse_quad_index);
extract_edit_data_iter_subdiv_bm(subdiv_cache, mr, _data, subdiv_quad_index, coarse_quad_bm);
}
static void extract_edit_data_loose_geom_subdiv(const DRWSubdivCache *subdiv_cache,
const MeshRenderData *mr,
- void *UNUSED(buffer),
+ void * /*buffer*/,
void *_data)
{
const DRWSubdivLooseGeom &loose_geom = subdiv_cache->loose_geom;
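
In the edit-data hunks above, the edge crease is quantized into the low nibble of `eattr->crease` and the vertex crease into the high nibble, each scaled to 0..15. A standalone sketch of that packing, assuming crease values in [0, 1]:

#include <cmath>
#include <cstdint>

/* Pack an edge crease (low nibble) and a vertex crease (high nibble)
 * into one byte, mirroring the quantization used above. */
static uint8_t pack_creases(float edge_crease, float vert_crease)
{
  const uint8_t edge_q = edge_crease > 0.0f ? uint8_t(std::ceil(edge_crease * 15.0f)) : 0;
  const uint8_t vert_q = vert_crease > 0.0f ? uint8_t(std::ceil(vert_crease * 15.0f)) : 0;
  return uint8_t((vert_q << 4) | (edge_q & 0x0F));
}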
diff --git a/source/blender/draw/intern/mesh_extractors/extract_mesh_vbo_edituv_data.cc b/source/blender/draw/intern/mesh_extractors/extract_mesh_vbo_edituv_data.cc
index 0b9043e3289..3ebf2daf1e9 100644
--- a/source/blender/draw/intern/mesh_extractors/extract_mesh_vbo_edituv_data.cc
+++ b/source/blender/draw/intern/mesh_extractors/extract_mesh_vbo_edituv_data.cc
@@ -43,7 +43,7 @@ static void extract_edituv_data_init_common(const MeshRenderData *mr,
}
static void extract_edituv_data_init(const MeshRenderData *mr,
- MeshBatchCache *UNUSED(cache),
+ MeshBatchCache * /*cache*/,
void *buf,
void *tls_data)
{
@@ -54,7 +54,7 @@ static void extract_edituv_data_init(const MeshRenderData *mr,
static void extract_edituv_data_iter_poly_bm(const MeshRenderData *mr,
const BMFace *f,
- const int UNUSED(f_index),
+ const int /*f_index*/,
void *_data)
{
BMLoop *l_iter, *l_first;
@@ -114,7 +114,7 @@ static void extract_edituv_data_iter_poly_mesh(const MeshRenderData *mr,
static void extract_edituv_data_init_subdiv(const DRWSubdivCache *subdiv_cache,
const MeshRenderData *mr,
- MeshBatchCache *UNUSED(cache),
+ MeshBatchCache * /*cache*/,
void *buf,
void *tls_data)
{
@@ -174,7 +174,7 @@ static void extract_edituv_data_iter_subdiv_mesh(const DRWSubdivCache *subdiv_ca
uint subdiv_quad_index,
const MPoly *coarse_quad)
{
- const int coarse_quad_index = static_cast<int>(coarse_quad - mr->mpoly);
+ const int coarse_quad_index = int(coarse_quad - mr->mpoly);
BMFace *coarse_quad_bm = bm_original_face_get(mr, coarse_quad_index);
extract_edituv_data_iter_subdiv_bm(subdiv_cache, mr, _data, subdiv_quad_index, coarse_quad_bm);
}
diff --git a/source/blender/draw/intern/mesh_extractors/extract_mesh_vbo_edituv_stretch_angle.cc b/source/blender/draw/intern/mesh_extractors/extract_mesh_vbo_edituv_stretch_angle.cc
index e4714aabf34..492756f30bb 100644
--- a/source/blender/draw/intern/mesh_extractors/extract_mesh_vbo_edituv_stretch_angle.cc
+++ b/source/blender/draw/intern/mesh_extractors/extract_mesh_vbo_edituv_stretch_angle.cc
@@ -52,7 +52,7 @@ static void compute_normalize_edge_vectors(float auv[2][2],
static short v2_to_short_angle(const float v[2])
{
- return atan2f(v[1], v[0]) * (float)M_1_PI * SHRT_MAX;
+ return atan2f(v[1], v[0]) * float(M_1_PI) * SHRT_MAX;
}
static void edituv_get_edituv_stretch_angle(float auv[2][2],
@@ -63,7 +63,7 @@ static void edituv_get_edituv_stretch_angle(float auv[2][2],
r_stretch->uv_angles[0] = v2_to_short_angle(auv[0]);
r_stretch->uv_angles[1] = v2_to_short_angle(auv[1]);
/* Compute 3D angle here. */
- r_stretch->angle = angle_normalized_v3v3(av[0], av[1]) * (float)M_1_PI * SHRT_MAX;
+ r_stretch->angle = angle_normalized_v3v3(av[0], av[1]) * float(M_1_PI) * SHRT_MAX;
#if 0 /* here for reference, this is done in shader now. */
float uvang = angle_normalized_v2v2(auv0, auv1);
@@ -74,7 +74,7 @@ static void edituv_get_edituv_stretch_angle(float auv[2][2],
}
static void extract_edituv_stretch_angle_init(const MeshRenderData *mr,
- MeshBatchCache *UNUSED(cache),
+ MeshBatchCache * /*cache*/,
void *buf,
void *tls_data)
{
@@ -104,7 +104,7 @@ static void extract_edituv_stretch_angle_init(const MeshRenderData *mr,
static void extract_edituv_stretch_angle_iter_poly_bm(const MeshRenderData *mr,
const BMFace *f,
- const int UNUSED(f_index),
+ const int /*f_index*/,
void *_data)
{
MeshExtract_StretchAngle_Data *data = static_cast<MeshExtract_StretchAngle_Data *>(_data);
@@ -157,7 +157,7 @@ static void extract_edituv_stretch_angle_iter_poly_bm(const MeshRenderData *mr,
static void extract_edituv_stretch_angle_iter_poly_mesh(const MeshRenderData *mr,
const MPoly *mp,
- const int UNUSED(mp_index),
+ const int /*mp_index*/,
void *_data)
{
MeshExtract_StretchAngle_Data *data = static_cast<MeshExtract_StretchAngle_Data *>(_data);
@@ -214,7 +214,7 @@ static void extract_edituv_stretch_angle_init_subdiv(const DRWSubdivCache *subdi
const MeshRenderData *mr,
MeshBatchCache *cache,
void *buffer,
- void *UNUSED(tls_data))
+ void * /*tls_data*/)
{
GPUVertBuf *refined_vbo = static_cast<GPUVertBuf *>(buffer);
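
`v2_to_short_angle` above maps a UV edge direction to a signed short: `atan2f` yields an angle in [-pi, pi], and multiplying by `1/pi * SHRT_MAX` spreads it over the short range. A sketch of the mapping and its inverse (the decode side is an assumption about how a consumer would turn the packed value back into radians):

#include <cmath>
#include <climits>

/* Encode a 2D direction as a signed short angle, as in v2_to_short_angle(). */
static short direction_to_short_angle(const float v[2])
{
  return short(atan2f(v[1], v[0]) * float(M_1_PI) * SHRT_MAX);
}

/* Inverse mapping: recover the angle in radians from the packed short. */
static float short_angle_to_radians(short packed)
{
  return float(packed) / float(SHRT_MAX) * float(M_PI);
}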
diff --git a/source/blender/draw/intern/mesh_extractors/extract_mesh_vbo_edituv_stretch_area.cc b/source/blender/draw/intern/mesh_extractors/extract_mesh_vbo_edituv_stretch_area.cc
index 9679c0523f8..7c96fbd6a99 100644
--- a/source/blender/draw/intern/mesh_extractors/extract_mesh_vbo_edituv_stretch_area.cc
+++ b/source/blender/draw/intern/mesh_extractors/extract_mesh_vbo_edituv_stretch_area.cc
@@ -20,9 +20,9 @@ namespace blender::draw {
* \{ */
static void extract_edituv_stretch_area_init(const MeshRenderData *mr,
- MeshBatchCache *UNUSED(cache),
+ MeshBatchCache * /*cache*/,
void *buf,
- void *UNUSED(tls_data))
+ void * /*tls_data*/)
{
GPUVertBuf *vbo = static_cast<GPUVertBuf *>(buf);
static GPUVertFormat format = {0};
@@ -90,7 +90,7 @@ static void compute_area_ratio(const MeshRenderData *mr,
static void extract_edituv_stretch_area_finish(const MeshRenderData *mr,
MeshBatchCache *cache,
void *buf,
- void *UNUSED(data))
+ void * /*data*/)
{
GPUVertBuf *vbo = static_cast<GPUVertBuf *>(buf);
float *area_ratio = static_cast<float *>(MEM_mallocN(sizeof(float) * mr->poly_len, __func__));
@@ -126,7 +126,7 @@ static void extract_edituv_stretch_area_init_subdiv(const DRWSubdivCache *subdiv
const MeshRenderData *mr,
MeshBatchCache *cache,
void *buffer,
- void *UNUSED(data))
+ void * /*data*/)
{
/* Initialize final buffer. */
diff --git a/source/blender/draw/intern/mesh_extractors/extract_mesh_vbo_fdots_edituv_data.cc b/source/blender/draw/intern/mesh_extractors/extract_mesh_vbo_fdots_edituv_data.cc
index 27d1975d67b..55ad2e67487 100644
--- a/source/blender/draw/intern/mesh_extractors/extract_mesh_vbo_fdots_edituv_data.cc
+++ b/source/blender/draw/intern/mesh_extractors/extract_mesh_vbo_fdots_edituv_data.cc
@@ -21,7 +21,7 @@ struct MeshExtract_EditUVFdotData_Data {
};
static void extract_fdots_edituv_data_init(const MeshRenderData *mr,
- MeshBatchCache *UNUSED(cache),
+ MeshBatchCache * /*cache*/,
void *buf,
void *tls_data)
{
@@ -41,7 +41,7 @@ static void extract_fdots_edituv_data_init(const MeshRenderData *mr,
static void extract_fdots_edituv_data_iter_poly_bm(const MeshRenderData *mr,
const BMFace *f,
- const int UNUSED(f_index),
+ const int /*f_index*/,
void *_data)
{
MeshExtract_EditUVFdotData_Data *data = static_cast<MeshExtract_EditUVFdotData_Data *>(_data);
@@ -51,7 +51,7 @@ static void extract_fdots_edituv_data_iter_poly_bm(const MeshRenderData *mr,
}
static void extract_fdots_edituv_data_iter_poly_mesh(const MeshRenderData *mr,
- const MPoly *UNUSED(mp),
+ const MPoly * /*mp*/,
const int mp_index,
void *_data)
{
diff --git a/source/blender/draw/intern/mesh_extractors/extract_mesh_vbo_fdots_nor.cc b/source/blender/draw/intern/mesh_extractors/extract_mesh_vbo_fdots_nor.cc
index c47cde63630..02e4b877562 100644
--- a/source/blender/draw/intern/mesh_extractors/extract_mesh_vbo_fdots_nor.cc
+++ b/source/blender/draw/intern/mesh_extractors/extract_mesh_vbo_fdots_nor.cc
@@ -19,9 +19,9 @@ namespace blender::draw {
#define NOR_AND_FLAG_HIDDEN -2
static void extract_fdots_nor_init(const MeshRenderData *mr,
- MeshBatchCache *UNUSED(cache),
+ MeshBatchCache * /*cache*/,
void *buf,
- void *UNUSED(tls_data))
+ void * /*tls_data*/)
{
GPUVertBuf *vbo = static_cast<GPUVertBuf *>(buf);
static GPUVertFormat format = {0};
@@ -34,9 +34,9 @@ static void extract_fdots_nor_init(const MeshRenderData *mr,
}
static void extract_fdots_nor_finish(const MeshRenderData *mr,
- MeshBatchCache *UNUSED(cache),
+ MeshBatchCache * /*cache*/,
void *buf,
- void *UNUSED(data))
+ void * /*data*/)
{
GPUVertBuf *vbo = static_cast<GPUVertBuf *>(buf);
static float invalid_normal[3] = {0.0f, 0.0f, 0.0f};
@@ -99,9 +99,9 @@ constexpr MeshExtract create_extractor_fdots_nor()
* \{ */
static void extract_fdots_nor_hq_init(const MeshRenderData *mr,
- MeshBatchCache *UNUSED(cache),
+ MeshBatchCache * /*cache*/,
void *buf,
- void *UNUSED(tls_data))
+ void * /*tls_data*/)
{
GPUVertBuf *vbo = static_cast<GPUVertBuf *>(buf);
static GPUVertFormat format = {0};
@@ -114,9 +114,9 @@ static void extract_fdots_nor_hq_init(const MeshRenderData *mr,
}
static void extract_fdots_nor_hq_finish(const MeshRenderData *mr,
- MeshBatchCache *UNUSED(cache),
+ MeshBatchCache * /*cache*/,
void *buf,
- void *UNUSED(data))
+ void * /*data*/)
{
GPUVertBuf *vbo = static_cast<GPUVertBuf *>(buf);
static float invalid_normal[3] = {0.0f, 0.0f, 0.0f};
diff --git a/source/blender/draw/intern/mesh_extractors/extract_mesh_vbo_fdots_pos.cc b/source/blender/draw/intern/mesh_extractors/extract_mesh_vbo_fdots_pos.cc
index c391cb6ca5a..d43eb6117df 100644
--- a/source/blender/draw/intern/mesh_extractors/extract_mesh_vbo_fdots_pos.cc
+++ b/source/blender/draw/intern/mesh_extractors/extract_mesh_vbo_fdots_pos.cc
@@ -36,7 +36,7 @@ static GPUVertFormat *get_fdots_nor_format_subdiv()
}
static void extract_fdots_pos_init(const MeshRenderData *mr,
- MeshBatchCache *UNUSED(cache),
+ MeshBatchCache * /*cache*/,
void *buf,
void *tls_data)
{
@@ -63,7 +63,7 @@ static void extract_fdots_pos_iter_poly_bm(const MeshRenderData *mr,
do {
add_v3_v3(co, bm_vert_co_get(mr, l_iter->v));
} while ((l_iter = l_iter->next) != l_first);
- mul_v3_fl(co, 1.0f / (float)f->len);
+ mul_v3_fl(co, 1.0f / float(f->len));
}
static void extract_fdots_pos_iter_poly_mesh(const MeshRenderData *mr,
@@ -77,7 +77,7 @@ static void extract_fdots_pos_iter_poly_mesh(const MeshRenderData *mr,
const MVert *mvert = mr->mvert;
const MLoop *mloop = mr->mloop;
- const BLI_bitmap *facedot_tags = mr->me->runtime.subsurf_face_dot_tags;
+ const BLI_bitmap *facedot_tags = mr->me->runtime->subsurf_face_dot_tags;
const int ml_index_end = mp->loopstart + mp->totloop;
for (int ml_index = mp->loopstart; ml_index < ml_index_end; ml_index += 1) {
@@ -95,15 +95,15 @@ static void extract_fdots_pos_iter_poly_mesh(const MeshRenderData *mr,
}
if (!mr->use_subsurf_fdots) {
- mul_v3_fl(co, 1.0f / (float)mp->totloop);
+ mul_v3_fl(co, 1.0f / float(mp->totloop));
}
}
static void extract_fdots_init_subdiv(const DRWSubdivCache *subdiv_cache,
- const MeshRenderData *UNUSED(mr),
+ const MeshRenderData * /*mr*/,
MeshBatchCache *cache,
void *buffer,
- void *UNUSED(data))
+ void * /*data*/)
{
/* We "extract" positions, normals, and indices at once. */
GPUVertBuf *fdots_pos_vbo = static_cast<GPUVertBuf *>(buffer);
diff --git a/source/blender/draw/intern/mesh_extractors/extract_mesh_vbo_fdots_uv.cc b/source/blender/draw/intern/mesh_extractors/extract_mesh_vbo_fdots_uv.cc
index b0403cf7c4c..802f000cb43 100644
--- a/source/blender/draw/intern/mesh_extractors/extract_mesh_vbo_fdots_uv.cc
+++ b/source/blender/draw/intern/mesh_extractors/extract_mesh_vbo_fdots_uv.cc
@@ -22,7 +22,7 @@ struct MeshExtract_FdotUV_Data {
};
static void extract_fdots_uv_init(const MeshRenderData *mr,
- MeshBatchCache *UNUSED(cache),
+ MeshBatchCache * /*cache*/,
void *buf,
void *tls_data)
{
@@ -53,16 +53,16 @@ static void extract_fdots_uv_init(const MeshRenderData *mr,
}
}
-static void extract_fdots_uv_iter_poly_bm(const MeshRenderData *UNUSED(mr),
+static void extract_fdots_uv_iter_poly_bm(const MeshRenderData * /*mr*/,
const BMFace *f,
- const int UNUSED(f_index),
+ const int /*f_index*/,
void *_data)
{
MeshExtract_FdotUV_Data *data = static_cast<MeshExtract_FdotUV_Data *>(_data);
BMLoop *l_iter, *l_first;
l_iter = l_first = BM_FACE_FIRST_LOOP(f);
do {
- float w = 1.0f / (float)f->len;
+ float w = 1.0f / float(f->len);
const MLoopUV *luv = (const MLoopUV *)BM_ELEM_CD_GET_VOID_P(l_iter, data->cd_ofs);
madd_v2_v2fl(data->vbo_data[BM_elem_index_get(f)], luv->uv, w);
} while ((l_iter = l_iter->next) != l_first);
@@ -74,7 +74,7 @@ static void extract_fdots_uv_iter_poly_mesh(const MeshRenderData *mr,
void *_data)
{
MeshExtract_FdotUV_Data *data = static_cast<MeshExtract_FdotUV_Data *>(_data);
- const BLI_bitmap *facedot_tags = mr->me->runtime.subsurf_face_dot_tags;
+ const BLI_bitmap *facedot_tags = mr->me->runtime->subsurf_face_dot_tags;
const MLoop *mloop = mr->mloop;
const int ml_index_end = mp->loopstart + mp->totloop;
@@ -86,7 +86,7 @@ static void extract_fdots_uv_iter_poly_mesh(const MeshRenderData *mr,
}
}
else {
- float w = 1.0f / (float)mp->totloop;
+ float w = 1.0f / float(mp->totloop);
madd_v2_v2fl(data->vbo_data[mp_index], data->uv_data[ml_index].uv, w);
}
}
diff --git a/source/blender/draw/intern/mesh_extractors/extract_mesh_vbo_lnor.cc b/source/blender/draw/intern/mesh_extractors/extract_mesh_vbo_lnor.cc
index 01d07fa5f83..ff0d502ea1e 100644
--- a/source/blender/draw/intern/mesh_extractors/extract_mesh_vbo_lnor.cc
+++ b/source/blender/draw/intern/mesh_extractors/extract_mesh_vbo_lnor.cc
@@ -16,7 +16,7 @@ namespace blender::draw {
* \{ */
static void extract_lnor_init(const MeshRenderData *mr,
- MeshBatchCache *UNUSED(cache),
+ MeshBatchCache * /*cache*/,
void *buf,
void *tls_data)
{
@@ -34,7 +34,7 @@ static void extract_lnor_init(const MeshRenderData *mr,
static void extract_lnor_iter_poly_bm(const MeshRenderData *mr,
const BMFace *f,
- const int UNUSED(f_index),
+ const int /*f_index*/,
void *data)
{
BMLoop *l_iter, *l_first;
@@ -86,7 +86,7 @@ static void extract_lnor_iter_poly_mesh(const MeshRenderData *mr,
(mr->edit_bmesh && (mr->v_origindex) && mr->v_origindex[ml->v] == ORIGINDEX_NONE)) {
lnor_data->w = -1;
}
- else if (mp->flag & ME_FACE_SEL) {
+ else if (mr->select_poly && mr->select_poly[mp_index]) {
lnor_data->w = 1;
}
else {
@@ -106,10 +106,10 @@ static GPUVertFormat *get_subdiv_lnor_format()
}
static void extract_lnor_init_subdiv(const DRWSubdivCache *subdiv_cache,
- const MeshRenderData *UNUSED(mr),
+ const MeshRenderData * /*mr*/,
MeshBatchCache *cache,
void *buffer,
- void *UNUSED(data))
+ void * /*data*/)
{
GPUVertBuf *vbo = static_cast<GPUVertBuf *>(buffer);
GPUVertBuf *pos_nor = cache->final.buff.vbo.pos_nor;
@@ -143,7 +143,7 @@ struct gpuHQNor {
};
static void extract_lnor_hq_init(const MeshRenderData *mr,
- MeshBatchCache *UNUSED(cache),
+ MeshBatchCache * /*cache*/,
void *buf,
void *tls_data)
{
@@ -161,7 +161,7 @@ static void extract_lnor_hq_init(const MeshRenderData *mr,
static void extract_lnor_hq_iter_poly_bm(const MeshRenderData *mr,
const BMFace *f,
- const int UNUSED(f_index),
+ const int /*f_index*/,
void *data)
{
BMLoop *l_iter, *l_first;
@@ -211,7 +211,7 @@ static void extract_lnor_hq_iter_poly_mesh(const MeshRenderData *mr,
(mr->edit_bmesh && (mr->v_origindex) && mr->v_origindex[ml->v] == ORIGINDEX_NONE)) {
lnor_data->w = -1;
}
- else if (mp->flag & ME_FACE_SEL) {
+ else if (mr->select_poly && mr->select_poly[mp_index]) {
lnor_data->w = 1;
}
else {
diff --git a/source/blender/draw/intern/mesh_extractors/extract_mesh_vbo_mesh_analysis.cc b/source/blender/draw/intern/mesh_extractors/extract_mesh_vbo_mesh_analysis.cc
index fe2a02b6b63..d0d97054448 100644
--- a/source/blender/draw/intern/mesh_extractors/extract_mesh_vbo_mesh_analysis.cc
+++ b/source/blender/draw/intern/mesh_extractors/extract_mesh_vbo_mesh_analysis.cc
@@ -23,9 +23,9 @@ namespace blender::draw {
* \{ */
static void extract_mesh_analysis_init(const MeshRenderData *mr,
- MeshBatchCache *UNUSED(cache),
+ MeshBatchCache * /*cache*/,
void *buf,
- void *UNUSED(tls_data))
+ void * /*tls_data*/)
{
GPUVertBuf *vbo = static_cast<GPUVertBuf *>(buf);
static GPUVertFormat format = {0};
@@ -67,8 +67,8 @@ BLI_INLINE float overhang_remap(float fac, float min, float max, float minmax_ir
static void statvis_calc_overhang(const MeshRenderData *mr, float *r_overhang)
{
const MeshStatVis *statvis = &mr->toolsettings->statvis;
- const float min = statvis->overhang_min / (float)M_PI;
- const float max = statvis->overhang_max / (float)M_PI;
+ const float min = statvis->overhang_min / float(M_PI);
+ const float max = statvis->overhang_max / float(M_PI);
const char axis = statvis->overhang_axis;
BMEditMesh *em = mr->edit_bmesh;
BMIter iter;
@@ -88,7 +88,7 @@ static void statvis_calc_overhang(const MeshRenderData *mr, float *r_overhang)
if (mr->extract_type == MR_EXTRACT_BMESH) {
int l_index = 0;
BM_ITER_MESH (f, &iter, bm, BM_FACES_OF_MESH) {
- float fac = angle_normalized_v3v3(bm_face_no_get(mr, f), dir) / (float)M_PI;
+ float fac = angle_normalized_v3v3(bm_face_no_get(mr, f), dir) / float(M_PI);
fac = overhang_remap(fac, min, max, minmax_irange);
for (int i = 0; i < f->len; i++, l_index++) {
r_overhang[l_index] = fac;
@@ -98,7 +98,7 @@ static void statvis_calc_overhang(const MeshRenderData *mr, float *r_overhang)
else {
const MPoly *mp = mr->mpoly;
for (int mp_index = 0, l_index = 0; mp_index < mr->poly_len; mp_index++, mp++) {
- float fac = angle_normalized_v3v3(mr->poly_normals[mp_index], dir) / (float)M_PI;
+ float fac = angle_normalized_v3v3(mr->poly_normals[mp_index], dir) / float(M_PI);
fac = overhang_remap(fac, min, max, minmax_irange);
for (int i = 0; i < mp->totloop; i++, l_index++) {
r_overhang[l_index] = fac;
@@ -265,7 +265,7 @@ struct BVHTree_OverlapData {
float epsilon;
};
-static bool bvh_overlap_cb(void *userdata, int index_a, int index_b, int UNUSED(thread))
+static bool bvh_overlap_cb(void *userdata, int index_a, int index_b, int /*thread*/)
{
struct BVHTree_OverlapData *data = static_cast<struct BVHTree_OverlapData *>(userdata);
@@ -367,7 +367,7 @@ static void statvis_calc_intersect(const MeshRenderData *mr, float *r_intersect)
}
}
-BLI_INLINE float distort_remap(float fac, float min, float UNUSED(max), float minmax_irange)
+BLI_INLINE float distort_remap(float fac, float min, float /*max*/, float minmax_irange)
{
if (fac >= min) {
fac = (fac - min) * minmax_irange;
@@ -474,7 +474,7 @@ static void statvis_calc_distort(const MeshRenderData *mr, float *r_distort)
}
}
-BLI_INLINE float sharp_remap(float fac, float min, float UNUSED(max), float minmax_irange)
+BLI_INLINE float sharp_remap(float fac, float min, float /*max*/, float minmax_irange)
{
/* important not '>=' */
if (fac > min) {
@@ -588,9 +588,9 @@ static void statvis_calc_sharp(const MeshRenderData *mr, float *r_sharp)
}
static void extract_analysis_iter_finish_mesh(const MeshRenderData *mr,
- MeshBatchCache *UNUSED(cache),
+ MeshBatchCache * /*cache*/,
void *buf,
- void *UNUSED(data))
+ void * /*data*/)
{
GPUVertBuf *vbo = static_cast<GPUVertBuf *>(buf);
BLI_assert(mr->edit_bmesh);
diff --git a/source/blender/draw/intern/mesh_extractors/extract_mesh_vbo_orco.cc b/source/blender/draw/intern/mesh_extractors/extract_mesh_vbo_orco.cc
index 4fcbdb1fc7c..915495204d4 100644
--- a/source/blender/draw/intern/mesh_extractors/extract_mesh_vbo_orco.cc
+++ b/source/blender/draw/intern/mesh_extractors/extract_mesh_vbo_orco.cc
@@ -19,7 +19,7 @@ struct MeshExtract_Orco_Data {
};
static void extract_orco_init(const MeshRenderData *mr,
- MeshBatchCache *UNUSED(cache),
+ MeshBatchCache * /*cache*/,
void *buf,
void *tls_data)
{
@@ -45,9 +45,9 @@ static void extract_orco_init(const MeshRenderData *mr,
BLI_assert(data->orco);
}
-static void extract_orco_iter_poly_bm(const MeshRenderData *UNUSED(mr),
+static void extract_orco_iter_poly_bm(const MeshRenderData * /*mr*/,
const BMFace *f,
- const int UNUSED(f_index),
+ const int /*f_index*/,
void *data)
{
MeshExtract_Orco_Data *orco_data = (MeshExtract_Orco_Data *)data;
@@ -63,7 +63,7 @@ static void extract_orco_iter_poly_bm(const MeshRenderData *UNUSED(mr),
static void extract_orco_iter_poly_mesh(const MeshRenderData *mr,
const MPoly *mp,
- const int UNUSED(mp_index),
+ const int /*mp_index*/,
void *data)
{
const MLoop *mloop = mr->mloop;
diff --git a/source/blender/draw/intern/mesh_extractors/extract_mesh_vbo_pos_nor.cc b/source/blender/draw/intern/mesh_extractors/extract_mesh_vbo_pos_nor.cc
index a822845c688..30b4f808487 100644
--- a/source/blender/draw/intern/mesh_extractors/extract_mesh_vbo_pos_nor.cc
+++ b/source/blender/draw/intern/mesh_extractors/extract_mesh_vbo_pos_nor.cc
@@ -28,7 +28,7 @@ struct MeshExtract_PosNor_Data {
};
static void extract_pos_nor_init(const MeshRenderData *mr,
- MeshBatchCache *UNUSED(cache),
+ MeshBatchCache * /*cache*/,
void *buf,
void *tls_data)
{
@@ -66,7 +66,7 @@ static void extract_pos_nor_init(const MeshRenderData *mr,
static void extract_pos_nor_iter_poly_bm(const MeshRenderData *mr,
const BMFace *f,
- const int UNUSED(f_index),
+ const int /*f_index*/,
void *_data)
{
MeshExtract_PosNor_Data *data = static_cast<MeshExtract_PosNor_Data *>(_data);
@@ -104,7 +104,7 @@ static void extract_pos_nor_iter_poly_mesh(const MeshRenderData *mr,
((mr->v_origindex) && (mr->v_origindex[ml->v] == ORIGINDEX_NONE))) {
vert->nor.w = -1;
}
- else if (mv->flag & SELECT) {
+ else if (mr->select_vert && mr->select_vert[ml->v]) {
vert->nor.w = 1;
}
else {
@@ -171,9 +171,9 @@ static void extract_pos_nor_iter_lvert_mesh(const MeshRenderData *mr,
vert->nor = data->normals[v_index].low;
}
-static void extract_pos_nor_finish(const MeshRenderData *UNUSED(mr),
- MeshBatchCache *UNUSED(cache),
- void *UNUSED(buf),
+static void extract_pos_nor_finish(const MeshRenderData * /*mr*/,
+ MeshBatchCache * /*cache*/,
+ void * /*buf*/,
void *_data)
{
MeshExtract_PosNor_Data *data = static_cast<MeshExtract_PosNor_Data *>(_data);
@@ -201,10 +201,10 @@ static GPUVertFormat *get_custom_normals_format()
}
static void extract_pos_nor_init_subdiv(const DRWSubdivCache *subdiv_cache,
- const MeshRenderData *UNUSED(mr),
+ const MeshRenderData * /*mr*/,
MeshBatchCache *cache,
void *buffer,
- void *UNUSED(data))
+ void * /*data*/)
{
GPUVertBuf *vbo = static_cast<GPUVertBuf *>(buffer);
const DRWSubdivLooseGeom &loose_geom = subdiv_cache->loose_geom;
@@ -282,9 +282,9 @@ static void extract_pos_nor_init_subdiv(const DRWSubdivCache *subdiv_cache,
}
static void extract_pos_nor_loose_geom_subdiv(const DRWSubdivCache *subdiv_cache,
- const MeshRenderData *UNUSED(mr),
+ const MeshRenderData * /*mr*/,
void *buffer,
- void *UNUSED(data))
+ void * /*data*/)
{
const DRWSubdivLooseGeom &loose_geom = subdiv_cache->loose_geom;
if (loose_geom.loop_len == 0) {
@@ -373,7 +373,7 @@ struct MeshExtract_PosNorHQ_Data {
};
static void extract_pos_nor_hq_init(const MeshRenderData *mr,
- MeshBatchCache *UNUSED(cache),
+ MeshBatchCache * /*cache*/,
void *buf,
void *tls_data)
{
@@ -411,7 +411,7 @@ static void extract_pos_nor_hq_init(const MeshRenderData *mr,
static void extract_pos_nor_hq_iter_poly_bm(const MeshRenderData *mr,
const BMFace *f,
- const int UNUSED(f_index),
+ const int /*f_index*/,
void *_data)
{
MeshExtract_PosNorHQ_Data *data = static_cast<MeshExtract_PosNorHQ_Data *>(_data);
@@ -429,7 +429,7 @@ static void extract_pos_nor_hq_iter_poly_bm(const MeshRenderData *mr,
static void extract_pos_nor_hq_iter_poly_mesh(const MeshRenderData *mr,
const MPoly *mp,
- const int UNUSED(mp_index),
+ const int /*mp_index*/,
void *_data)
{
MeshExtract_PosNorHQ_Data *data = static_cast<MeshExtract_PosNorHQ_Data *>(_data);
@@ -451,7 +451,7 @@ static void extract_pos_nor_hq_iter_poly_mesh(const MeshRenderData *mr,
((mr->v_origindex) && (mr->v_origindex[ml->v] == ORIGINDEX_NONE))) {
vert->nor[3] = -1;
}
- else if (mv->flag & SELECT) {
+ else if (mr->select_vert && mr->select_vert[ml->v]) {
vert->nor[3] = 1;
}
else {
@@ -523,9 +523,9 @@ static void extract_pos_nor_hq_iter_lvert_mesh(const MeshRenderData *mr,
vert->nor[3] = 0;
}
-static void extract_pos_nor_hq_finish(const MeshRenderData *UNUSED(mr),
- MeshBatchCache *UNUSED(cache),
- void *UNUSED(buf),
+static void extract_pos_nor_hq_finish(const MeshRenderData * /*mr*/,
+ MeshBatchCache * /*cache*/,
+ void * /*buf*/,
void *_data)
{
MeshExtract_PosNorHQ_Data *data = static_cast<MeshExtract_PosNorHQ_Data *>(_data);
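The selection test rewritten in this file no longer reads a flag bit from MVert; it consults an optional per-vertex array on MeshRenderData, presumably populated from a boolean ".select_vert" attribute elsewhere in the patch. A hedged C++ sketch of the guarded access; the helper name is illustrative only:

/* A null `select_vert` means there is no selection data, so nothing counts as selected. */
static bool vert_is_selected_sketch(const bool *select_vert, const int vert_index)
{
  return select_vert != nullptr && select_vert[vert_index];
}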
diff --git a/source/blender/draw/intern/mesh_extractors/extract_mesh_vbo_sculpt_data.cc b/source/blender/draw/intern/mesh_extractors/extract_mesh_vbo_sculpt_data.cc
index 6202fdd312d..34b8124f872 100644
--- a/source/blender/draw/intern/mesh_extractors/extract_mesh_vbo_sculpt_data.cc
+++ b/source/blender/draw/intern/mesh_extractors/extract_mesh_vbo_sculpt_data.cc
@@ -32,9 +32,9 @@ static GPUVertFormat *get_sculpt_data_format()
}
static void extract_sculpt_data_init(const MeshRenderData *mr,
- MeshBatchCache *UNUSED(cache),
+ MeshBatchCache * /*cache*/,
void *buf,
- void *UNUSED(tls_data))
+ void * /*tls_data*/)
{
GPUVertBuf *vbo = static_cast<GPUVertBuf *>(buf);
GPUVertFormat *format = get_sculpt_data_format();
@@ -44,7 +44,8 @@ static void extract_sculpt_data_init(const MeshRenderData *mr,
CustomData *cd_pdata = (mr->extract_type == MR_EXTRACT_BMESH) ? &mr->bm->pdata : &mr->me->pdata;
const float *cd_mask = (const float *)CustomData_get_layer(cd_vdata, CD_PAINT_MASK);
- const int *cd_face_set = (const int *)CustomData_get_layer(cd_pdata, CD_SCULPT_FACE_SETS);
+ const int *cd_face_set = (const int *)CustomData_get_layer_named(
+ cd_pdata, CD_PROP_INT32, ".sculpt_face_set");
GPU_vertbuf_init_with_format(vbo, format);
GPU_vertbuf_data_alloc(vbo, mr->loop_len);
@@ -59,7 +60,7 @@ static void extract_sculpt_data_init(const MeshRenderData *mr,
if (mr->extract_type == MR_EXTRACT_BMESH) {
int cd_mask_ofs = CustomData_get_offset(cd_vdata, CD_PAINT_MASK);
- int cd_face_set_ofs = CustomData_get_offset(cd_pdata, CD_SCULPT_FACE_SETS);
+ int cd_face_set_ofs = CustomData_get_offset_named(cd_pdata, CD_PROP_INT32, ".sculpt_face_set");
BMIter f_iter;
BMFace *efa;
BM_ITER_MESH (efa, &f_iter, mr->bm, BM_FACES_OF_MESH) {
@@ -114,9 +115,9 @@ static void extract_sculpt_data_init(const MeshRenderData *mr,
static void extract_sculpt_data_init_subdiv(const DRWSubdivCache *subdiv_cache,
const MeshRenderData *mr,
- MeshBatchCache *UNUSED(cache),
+ MeshBatchCache * /*cache*/,
void *buffer,
- void *UNUSED(data))
+ void * /*data*/)
{
GPUVertBuf *vbo = static_cast<GPUVertBuf *>(buffer);
@@ -171,7 +172,8 @@ static void extract_sculpt_data_init_subdiv(const DRWSubdivCache *subdiv_cache,
};
gpuFaceSet *face_sets = (gpuFaceSet *)GPU_vertbuf_get_data(face_set_vbo);
- const int *cd_face_set = (const int *)CustomData_get_layer(cd_pdata, CD_SCULPT_FACE_SETS);
+ const int *cd_face_set = (const int *)CustomData_get_layer_named(
+ cd_pdata, CD_PROP_INT32, ".sculpt_face_set");
GPUVertFormat *format = get_sculpt_data_format();
GPU_vertbuf_init_build_on_device(vbo, format, subdiv_cache->num_subdiv_loops);
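Face sets are now fetched as a named CD_PROP_INT32 layer instead of the dedicated CD_SCULPT_FACE_SETS type, so the lookup can return null when the mesh has no ".sculpt_face_set" attribute. A C++ sketch of the guarded access; the fallback value of 1 is an assumption, not taken from this diff, and `pdata` stands in for `mr->me->pdata`:

/* Mirrors the named-attribute lookup above. */
static int sculpt_face_set_sketch(const CustomData *pdata, const int poly_index)
{
  const int *cd_face_set = static_cast<const int *>(
      CustomData_get_layer_named(pdata, CD_PROP_INT32, ".sculpt_face_set"));
  if (cd_face_set == nullptr) {
    return 1; /* Assumed default face set when the attribute is missing. */
  }
  return cd_face_set[poly_index];
}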
diff --git a/source/blender/draw/intern/mesh_extractors/extract_mesh_vbo_select_idx.cc b/source/blender/draw/intern/mesh_extractors/extract_mesh_vbo_select_idx.cc
index 9e0d171c9e4..5c196a67d0b 100644
--- a/source/blender/draw/intern/mesh_extractors/extract_mesh_vbo_select_idx.cc
+++ b/source/blender/draw/intern/mesh_extractors/extract_mesh_vbo_select_idx.cc
@@ -14,7 +14,7 @@ namespace blender::draw {
/** \name Extract Selection Index
* \{ */
-static void extract_select_idx_init_impl(const MeshRenderData *UNUSED(mr),
+static void extract_select_idx_init_impl(const MeshRenderData * /*mr*/,
const int len,
void *buf,
void *tls_data)
@@ -30,7 +30,7 @@ static void extract_select_idx_init_impl(const MeshRenderData *UNUSED(mr),
}
static void extract_select_idx_init(const MeshRenderData *mr,
- MeshBatchCache *UNUSED(cache),
+ MeshBatchCache * /*cache*/,
void *buf,
void *tls_data)
{
@@ -42,7 +42,7 @@ static void extract_select_idx_init(const MeshRenderData *mr,
* index VBO's. We could upload the p/e/v_origindex as a buffer texture and sample it inside the
* shader to output original index. */
-static void extract_poly_idx_iter_poly_bm(const MeshRenderData *UNUSED(mr),
+static void extract_poly_idx_iter_poly_bm(const MeshRenderData * /*mr*/,
const BMFace *f,
const int f_index,
void *data)
@@ -55,9 +55,9 @@ static void extract_poly_idx_iter_poly_bm(const MeshRenderData *UNUSED(mr),
} while ((l_iter = l_iter->next) != l_first);
}
-static void extract_edge_idx_iter_poly_bm(const MeshRenderData *UNUSED(mr),
+static void extract_edge_idx_iter_poly_bm(const MeshRenderData * /*mr*/,
const BMFace *f,
- const int UNUSED(f_index),
+ const int /*f_index*/,
void *data)
{
BMLoop *l_iter, *l_first;
@@ -68,9 +68,9 @@ static void extract_edge_idx_iter_poly_bm(const MeshRenderData *UNUSED(mr),
} while ((l_iter = l_iter->next) != l_first);
}
-static void extract_vert_idx_iter_poly_bm(const MeshRenderData *UNUSED(mr),
+static void extract_vert_idx_iter_poly_bm(const MeshRenderData * /*mr*/,
const BMFace *f,
- const int UNUSED(f_index),
+ const int /*f_index*/,
void *data)
{
BMLoop *l_iter, *l_first;
@@ -122,7 +122,7 @@ static void extract_poly_idx_iter_poly_mesh(const MeshRenderData *mr,
static void extract_edge_idx_iter_poly_mesh(const MeshRenderData *mr,
const MPoly *mp,
- const int UNUSED(mp_index),
+ const int /*mp_index*/,
void *data)
{
const MLoop *mloop = mr->mloop;
@@ -135,7 +135,7 @@ static void extract_edge_idx_iter_poly_mesh(const MeshRenderData *mr,
static void extract_vert_idx_iter_poly_mesh(const MeshRenderData *mr,
const MPoly *mp,
- const int UNUSED(mp_index),
+ const int /*mp_index*/,
void *data)
{
const MLoop *mloop = mr->mloop;
@@ -147,7 +147,7 @@ static void extract_vert_idx_iter_poly_mesh(const MeshRenderData *mr,
}
static void extract_edge_idx_iter_ledge_mesh(const MeshRenderData *mr,
- const MEdge *UNUSED(med),
+ const MEdge * /*med*/,
const int ledge_index,
void *data)
{
@@ -169,7 +169,7 @@ static void extract_vert_idx_iter_ledge_mesh(const MeshRenderData *mr,
}
static void extract_vert_idx_iter_lvert_mesh(const MeshRenderData *mr,
- const MVert *UNUSED(mv),
+ const MVert * /*mv*/,
const int lvert_index,
void *data)
{
@@ -182,9 +182,9 @@ static void extract_vert_idx_iter_lvert_mesh(const MeshRenderData *mr,
static void extract_vert_idx_init_subdiv(const DRWSubdivCache *subdiv_cache,
const MeshRenderData *mr,
- MeshBatchCache *UNUSED(cache),
+ MeshBatchCache * /*cache*/,
void *buf,
- void *UNUSED(data))
+ void * /*data*/)
{
GPUVertBuf *vbo = static_cast<GPUVertBuf *>(buf);
const DRWSubdivLooseGeom &loose_geom = subdiv_cache->loose_geom;
@@ -213,7 +213,7 @@ static void extract_vert_idx_init_subdiv(const DRWSubdivCache *subdiv_cache,
static void extract_vert_idx_loose_geom_subdiv(const DRWSubdivCache *subdiv_cache,
const MeshRenderData *mr,
void *buffer,
- void *UNUSED(data))
+ void * /*data*/)
{
const DRWSubdivLooseGeom &loose_geom = subdiv_cache->loose_geom;
if (loose_geom.loop_len == 0) {
@@ -254,10 +254,10 @@ static void extract_vert_idx_loose_geom_subdiv(const DRWSubdivCache *subdiv_cach
}
static void extract_edge_idx_init_subdiv(const DRWSubdivCache *subdiv_cache,
- const MeshRenderData *UNUSED(mr),
- MeshBatchCache *UNUSED(cache),
+ const MeshRenderData * /*mr*/,
+ MeshBatchCache * /*cache*/,
void *buf,
- void *UNUSED(data))
+ void * /*data*/)
{
GPUVertBuf *vbo = static_cast<GPUVertBuf *>(buf);
const DRWSubdivLooseGeom &loose_geom = subdiv_cache->loose_geom;
@@ -271,7 +271,7 @@ static void extract_edge_idx_init_subdiv(const DRWSubdivCache *subdiv_cache,
static void extract_edge_idx_loose_geom_subdiv(const DRWSubdivCache *subdiv_cache,
const MeshRenderData *mr,
void *buffer,
- void *UNUSED(data))
+ void * /*data*/)
{
const DRWSubdivLooseGeom &loose_geom = subdiv_cache->loose_geom;
if (loose_geom.edge_len == 0) {
@@ -294,9 +294,9 @@ static void extract_edge_idx_loose_geom_subdiv(const DRWSubdivCache *subdiv_cach
static void extract_poly_idx_init_subdiv(const DRWSubdivCache *subdiv_cache,
const MeshRenderData *mr,
- MeshBatchCache *UNUSED(cache),
+ MeshBatchCache * /*cache*/,
void *buf,
- void *UNUSED(data))
+ void * /*data*/)
{
GPUVertBuf *vbo = static_cast<GPUVertBuf *>(buf);
draw_subdiv_init_origindex_buffer(
@@ -366,15 +366,15 @@ constexpr MeshExtract create_extractor_vert_idx()
}
static void extract_fdot_idx_init(const MeshRenderData *mr,
- MeshBatchCache *UNUSED(cache),
+ MeshBatchCache * /*cache*/,
void *buf,
void *tls_data)
{
extract_select_idx_init_impl(mr, mr->poly_len, buf, tls_data);
}
-static void extract_fdot_idx_iter_poly_bm(const MeshRenderData *UNUSED(mr),
- const BMFace *UNUSED(f),
+static void extract_fdot_idx_iter_poly_bm(const MeshRenderData * /*mr*/,
+ const BMFace * /*f*/,
const int f_index,
void *data)
{
@@ -382,7 +382,7 @@ static void extract_fdot_idx_iter_poly_bm(const MeshRenderData *UNUSED(mr),
}
static void extract_fdot_idx_iter_poly_mesh(const MeshRenderData *mr,
- const MPoly *UNUSED(mp),
+ const MPoly * /*mp*/,
const int mp_index,
void *data)
{
diff --git a/source/blender/draw/intern/mesh_extractors/extract_mesh_vbo_skin_roots.cc b/source/blender/draw/intern/mesh_extractors/extract_mesh_vbo_skin_roots.cc
index f7655658bdd..e8feb4bebce 100644
--- a/source/blender/draw/intern/mesh_extractors/extract_mesh_vbo_skin_roots.cc
+++ b/source/blender/draw/intern/mesh_extractors/extract_mesh_vbo_skin_roots.cc
@@ -19,9 +19,9 @@ struct SkinRootData {
};
static void extract_skin_roots_init(const MeshRenderData *mr,
- MeshBatchCache *UNUSED(cache),
+ MeshBatchCache * /*cache*/,
void *buf,
- void *UNUSED(tls_data))
+ void * /*tls_data*/)
{
GPUVertBuf *vbo = static_cast<GPUVertBuf *>(buf);
/* Exclusively for edit mode. */
diff --git a/source/blender/draw/intern/mesh_extractors/extract_mesh_vbo_tan.cc b/source/blender/draw/intern/mesh_extractors/extract_mesh_vbo_tan.cc
index 049fa416523..6f0c98c684b 100644
--- a/source/blender/draw/intern/mesh_extractors/extract_mesh_vbo_tan.cc
+++ b/source/blender/draw/intern/mesh_extractors/extract_mesh_vbo_tan.cc
@@ -237,7 +237,7 @@ static void extract_tan_ex_init(const MeshRenderData *mr,
static void extract_tan_init(const MeshRenderData *mr,
MeshBatchCache *cache,
void *buf,
- void *UNUSED(tls_data))
+ void * /*tls_data*/)
{
GPUVertBuf *vbo = static_cast<GPUVertBuf *>(buf);
extract_tan_ex_init(mr, cache, vbo, false);
@@ -256,7 +256,7 @@ static void extract_tan_init_subdiv(const DRWSubdivCache *subdiv_cache,
const MeshRenderData *mr,
MeshBatchCache *cache,
void *buffer,
- void *UNUSED(data))
+ void * /*data*/)
{
GPUVertCompType comp_type = GPU_COMP_F32;
GPUVertFetchMode fetch_mode = GPU_FETCH_FLOAT;
@@ -302,7 +302,7 @@ static void extract_tan_init_subdiv(const DRWSubdivCache *subdiv_cache,
/* Ensure data is uploaded properly. */
GPU_vertbuf_tag_dirty(coarse_vbo);
/* Include stride in offset. */
- const int dst_offset = (int)subdiv_cache->num_subdiv_loops * 4 * pack_layer_index++;
+ const int dst_offset = int(subdiv_cache->num_subdiv_loops) * 4 * pack_layer_index++;
draw_subdiv_interp_custom_data(subdiv_cache, coarse_vbo, dst_buffer, 4, dst_offset, false);
}
if (use_orco_tan) {
@@ -317,7 +317,7 @@ static void extract_tan_init_subdiv(const DRWSubdivCache *subdiv_cache,
/* Ensure data is uploaded properly. */
GPU_vertbuf_tag_dirty(coarse_vbo);
/* Include stride in offset. */
- const int dst_offset = (int)subdiv_cache->num_subdiv_loops * 4 * pack_layer_index++;
+ const int dst_offset = int(subdiv_cache->num_subdiv_loops) * 4 * pack_layer_index++;
draw_subdiv_interp_custom_data(subdiv_cache, coarse_vbo, dst_buffer, 4, dst_offset, false);
}
@@ -346,7 +346,7 @@ constexpr MeshExtract create_extractor_tan()
static void extract_tan_hq_init(const MeshRenderData *mr,
MeshBatchCache *cache,
void *buf,
- void *UNUSED(tls_data))
+ void * /*tls_data*/)
{
GPUVertBuf *vbo = static_cast<GPUVertBuf *>(buf);
extract_tan_ex_init(mr, cache, vbo, true);
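The tangent layers above are interpolated into one shared destination buffer, so each layer starts at a stride-sized offset ("include stride in offset"). A trivial C++ sketch of that offset computation:

/* Layer `layer_index` begins num_subdiv_loops * components elements into the shared buffer. */
static int packed_layer_offset_sketch(const int num_subdiv_loops,
                                      const int components_per_loop,
                                      const int layer_index)
{
  return num_subdiv_loops * components_per_loop * layer_index;
}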
diff --git a/source/blender/draw/intern/mesh_extractors/extract_mesh_vbo_uv.cc b/source/blender/draw/intern/mesh_extractors/extract_mesh_vbo_uv.cc
index 6606912850d..fe6e31af3c2 100644
--- a/source/blender/draw/intern/mesh_extractors/extract_mesh_vbo_uv.cc
+++ b/source/blender/draw/intern/mesh_extractors/extract_mesh_vbo_uv.cc
@@ -74,7 +74,7 @@ static bool mesh_extract_uv_format_init(GPUVertFormat *format,
static void extract_uv_init(const MeshRenderData *mr,
MeshBatchCache *cache,
void *buf,
- void *UNUSED(tls_data))
+ void * /*tls_data*/)
{
GPUVertBuf *vbo = static_cast<GPUVertBuf *>(buf);
GPUVertFormat format = {0};
@@ -119,10 +119,10 @@ static void extract_uv_init(const MeshRenderData *mr,
}
static void extract_uv_init_subdiv(const DRWSubdivCache *subdiv_cache,
- const MeshRenderData *UNUSED(mr),
+ const MeshRenderData * /*mr*/,
MeshBatchCache *cache,
void *buffer,
- void *UNUSED(data))
+ void * /*data*/)
{
Mesh *coarse_mesh = subdiv_cache->mesh;
GPUVertBuf *vbo = static_cast<GPUVertBuf *>(buffer);
@@ -146,7 +146,7 @@ static void extract_uv_init_subdiv(const DRWSubdivCache *subdiv_cache,
int pack_layer_index = 0;
for (int i = 0; i < MAX_MTFACE; i++) {
if (uv_layers & (1 << i)) {
- const int offset = (int)subdiv_cache->num_subdiv_loops * pack_layer_index++;
+ const int offset = int(subdiv_cache->num_subdiv_loops) * pack_layer_index++;
draw_subdiv_extract_uvs(subdiv_cache, vbo, i, offset);
}
}
diff --git a/source/blender/draw/intern/mesh_extractors/extract_mesh_vbo_weights.cc b/source/blender/draw/intern/mesh_extractors/extract_mesh_vbo_weights.cc
index 4db5a8c23a4..6ac498eae26 100644
--- a/source/blender/draw/intern/mesh_extractors/extract_mesh_vbo_weights.cc
+++ b/source/blender/draw/intern/mesh_extractors/extract_mesh_vbo_weights.cc
@@ -111,9 +111,9 @@ static void extract_weights_init(const MeshRenderData *mr,
}
}
-static void extract_weights_iter_poly_bm(const MeshRenderData *UNUSED(mr),
+static void extract_weights_iter_poly_bm(const MeshRenderData * /*mr*/,
const BMFace *f,
- const int UNUSED(f_index),
+ const int /*f_index*/,
void *_data)
{
MeshExtract_Weight_Data *data = static_cast<MeshExtract_Weight_Data *>(_data);
@@ -134,7 +134,7 @@ static void extract_weights_iter_poly_bm(const MeshRenderData *UNUSED(mr),
static void extract_weights_iter_poly_mesh(const MeshRenderData *mr,
const MPoly *mp,
- const int UNUSED(mp_index),
+ const int /*mp_index*/,
void *_data)
{
MeshExtract_Weight_Data *data = static_cast<MeshExtract_Weight_Data *>(_data);
diff --git a/source/blender/draw/intern/shaders/common_debug_print_lib.glsl b/source/blender/draw/intern/shaders/common_debug_print_lib.glsl
index 89d1729b52d..5dc8f490a79 100644
--- a/source/blender/draw/intern/shaders/common_debug_print_lib.glsl
+++ b/source/blender/draw/intern/shaders/common_debug_print_lib.glsl
@@ -21,7 +21,7 @@
*
* NOTE: Floating point representation might not be very precise (see drw_print_value(float)).
*
- * IMPORTANT: Multipler drawcalls can write to the buffer in sequence (if they are from different
+ * IMPORTANT: Multiple drawcalls can write to the buffer in sequence (if they are from different
* shgroups). However, we add barriers to support this case and it might change the application
* behavior. Uncomment DISABLE_DEBUG_SHADER_drw_print_BARRIER to remove the barriers if that
* happens. But then you are limited to a single invocation output.
@@ -92,7 +92,7 @@ void drw_print_char4(uint data)
* it is referenced as an index for char4 and thus do not capture the right
* reference. I do not know if this is undefined behavior. As a matter of
* precaution, we implement all the append function separately. This behavior
- * was observed on both Mesa & amdgpu-pro.
+ * was observed on both MESA & AMDGPU-PRO.
*/
/* Using ascii char code. Expect char1 to be less or equal to 0xFF. Appends chars to the right. */
void drw_print_append_char(uint char1, inout uint char4)
diff --git a/source/blender/draw/intern/shaders/common_fxaa_lib.glsl b/source/blender/draw/intern/shaders/common_fxaa_lib.glsl
index 599d875c500..3d8667e88c8 100644
--- a/source/blender/draw/intern/shaders/common_fxaa_lib.glsl
+++ b/source/blender/draw/intern/shaders/common_fxaa_lib.glsl
@@ -312,7 +312,7 @@ float FxaaLuma(vec4 rgba)
/*--------------------------------------------------------------------------*/
vec4 FxaaPixelShader(
/*
- * Use noperspective interpolation here (turn off perspective interpolation).
+ * Use no perspective interpolation here (turn off perspective interpolation).
* {xy} = center of pixel */
vec2 pos,
/*
diff --git a/source/blender/draw/intern/shaders/common_globals_lib.glsl b/source/blender/draw/intern/shaders/common_globals_lib.glsl
deleted file mode 100644
index a8931292064..00000000000
--- a/source/blender/draw/intern/shaders/common_globals_lib.glsl
+++ /dev/null
@@ -1,148 +0,0 @@
-#define COMMON_GLOBALS_LIB
-
-#ifdef USE_GPU_SHADER_CREATE_INFO
-# error Use draw_globals as additional_info instead of common_globals_lib.glsl
-#endif
-
-/* keep in sync with GlobalsUboStorage */
-layout(std140) uniform globalsBlock
-{
- vec4 colorWire;
- vec4 colorWireEdit;
- vec4 colorActive;
- vec4 colorSelect;
- vec4 colorLibrarySelect;
- vec4 colorLibrary;
- vec4 colorTransform;
- vec4 colorLight;
- vec4 colorSpeaker;
- vec4 colorCamera;
- vec4 colorCameraPath;
- vec4 colorEmpty;
- vec4 colorVertex;
- vec4 colorVertexSelect;
- vec4 colorVertexUnreferenced;
- vec4 colorVertexMissingData;
- vec4 colorEditMeshActive;
- vec4 colorEdgeSelect;
- vec4 colorEdgeSeam;
- vec4 colorEdgeSharp;
- vec4 colorEdgeCrease;
- vec4 colorEdgeBWeight;
- vec4 colorEdgeFaceSelect;
- vec4 colorEdgeFreestyle;
- vec4 colorFace;
- vec4 colorFaceSelect;
- vec4 colorFaceFreestyle;
- vec4 colorGpencilVertex;
- vec4 colorGpencilVertexSelect;
- vec4 colorNormal;
- vec4 colorVNormal;
- vec4 colorLNormal;
- vec4 colorFaceDot;
- vec4 colorSkinRoot;
- vec4 colorDeselect;
- vec4 colorOutline;
- vec4 colorLightNoAlpha;
-
- vec4 colorBackground;
- vec4 colorBackgroundGradient;
- vec4 colorCheckerPrimary;
- vec4 colorCheckerSecondary;
- vec4 colorClippingBorder;
- vec4 colorEditMeshMiddle;
-
- vec4 colorHandleFree;
- vec4 colorHandleAuto;
- vec4 colorHandleVect;
- vec4 colorHandleAlign;
- vec4 colorHandleAutoclamp;
- vec4 colorHandleSelFree;
- vec4 colorHandleSelAuto;
- vec4 colorHandleSelVect;
- vec4 colorHandleSelAlign;
- vec4 colorHandleSelAutoclamp;
- vec4 colorNurbUline;
- vec4 colorNurbVline;
- vec4 colorNurbSelUline;
- vec4 colorNurbSelVline;
- vec4 colorActiveSpline;
-
- vec4 colorBonePose;
- vec4 colorBonePoseActive;
- vec4 colorBonePoseActiveUnsel;
- vec4 colorBonePoseConstraint;
- vec4 colorBonePoseIK;
- vec4 colorBonePoseSplineIK;
- vec4 colorBonePoseTarget;
- vec4 colorBoneSolid;
- vec4 colorBoneLocked;
- vec4 colorBoneActive;
- vec4 colorBoneActiveUnsel;
- vec4 colorBoneSelect;
- vec4 colorBoneIKLine;
- vec4 colorBoneIKLineNoTarget;
- vec4 colorBoneIKLineSpline;
-
- vec4 colorText;
- vec4 colorTextHi;
-
- vec4 colorBundleSolid;
-
- vec4 colorMballRadius;
- vec4 colorMballRadiusSelect;
- vec4 colorMballStiffness;
- vec4 colorMballStiffnessSelect;
-
- vec4 colorCurrentFrame;
-
- vec4 colorGrid;
- vec4 colorGridEmphasis;
- vec4 colorGridAxisX;
- vec4 colorGridAxisY;
- vec4 colorGridAxisZ;
-
- vec4 colorFaceBack;
- vec4 colorFaceFront;
-
- vec4 colorUVShadow;
-
- vec4 screenVecs[2];
- vec4 sizeViewport; /* Inverted size in zw. */
-
- float sizePixel; /* This one is for DPI scaling. */
- float pixelFac; /* To use with mul_project_m4_v3_zfac() */
- float sizeObjectCenter;
- float sizeLightCenter;
- float sizeLightCircle;
- float sizeLightCircleShadow;
- float sizeVertex;
- float sizeEdge;
- float sizeEdgeFix;
- float sizeFaceDot;
- float sizeChecker;
- float sizeVertexGpencil;
-};
-
-#define sizeViewportInv (sizeViewport.zw)
-
-/* See: 'draw_cache_impl.h' for matching includes. */
-#define VERT_GPENCIL_BEZT_HANDLE (1 << 30)
-/* data[0] (1st byte flags) */
-#define FACE_ACTIVE (1 << 0)
-#define FACE_SELECTED (1 << 1)
-#define FACE_FREESTYLE (1 << 2)
-#define VERT_UV_SELECT (1 << 3)
-#define VERT_UV_PINNED (1 << 4)
-#define EDGE_UV_SELECT (1 << 5)
-#define FACE_UV_ACTIVE (1 << 6)
-#define FACE_UV_SELECT (1 << 7)
-/* data[1] (2st byte flags) */
-#define VERT_ACTIVE (1 << 0)
-#define VERT_SELECTED (1 << 1)
-#define VERT_SELECTED_BEZT_HANDLE (1 << 2)
-#define EDGE_ACTIVE (1 << 3)
-#define EDGE_SELECTED (1 << 4)
-#define EDGE_SEAM (1 << 5)
-#define EDGE_SHARP (1 << 6)
-#define EDGE_FREESTYLE (1 << 7)
diff --git a/source/blender/draw/intern/shaders/common_gpencil_lib.glsl b/source/blender/draw/intern/shaders/common_gpencil_lib.glsl
index 123c493b572..def841b07aa 100644
--- a/source/blender/draw/intern/shaders/common_gpencil_lib.glsl
+++ b/source/blender/draw/intern/shaders/common_gpencil_lib.glsl
@@ -90,10 +90,15 @@ float gpencil_clamp_small_stroke_thickness(float thickness, vec4 ndc_pos)
#ifdef GPU_VERTEX_SHADER
-/* Trick to detect if a drawcall is stroke or fill.
- * This does mean that we need to draw an empty stroke segment before starting
- * to draw the real stroke segments. */
-# define GPENCIL_IS_STROKE_VERTEX (gl_InstanceID != 0)
+int gpencil_stroke_point_id()
+{
+ return (gl_VertexID & ~GP_IS_STROKE_VERTEX_BIT) >> GP_VERTEX_ID_SHIFT;
+}
+
+bool gpencil_is_stroke_vertex()
+{
+ return flag_test(gl_VertexID, GP_IS_STROKE_VERTEX_BIT);
+}
/**
* Returns value of gl_Position.
@@ -120,20 +125,7 @@ float gpencil_clamp_small_stroke_thickness(float thickness, vec4 ndc_pos)
* WARNING: Max attribute count is actually 14 because OSX OpenGL implementation
* considers gl_VertexID and gl_InstanceID as vertex attribute. (see T74536)
*/
-vec4 gpencil_vertex(ivec4 ma,
- ivec4 ma1,
- ivec4 ma2,
- ivec4 ma3,
- vec4 pos,
- vec4 pos1,
- vec4 pos2,
- vec4 pos3,
- vec4 uv1,
- vec4 uv2,
- vec4 col1,
- vec4 col2,
- vec4 fcol1,
- vec4 viewport_size,
+vec4 gpencil_vertex(vec4 viewport_size,
gpMaterialFlag material_flags,
vec2 alignment_rot,
/* World Position. */
@@ -155,6 +147,24 @@ vec4 gpencil_vertex(ivec4 ma,
/* Stroke hardness. */
out float out_hardness)
{
+ int stroke_point_id = (gl_VertexID & ~GP_IS_STROKE_VERTEX_BIT) >> GP_VERTEX_ID_SHIFT;
+
+ /* Attribute Loading. */
+ vec4 pos = texelFetch(gp_pos_tx, (stroke_point_id - 1) * 3 + 0);
+ vec4 pos1 = texelFetch(gp_pos_tx, (stroke_point_id + 0) * 3 + 0);
+ vec4 pos2 = texelFetch(gp_pos_tx, (stroke_point_id + 1) * 3 + 0);
+ vec4 pos3 = texelFetch(gp_pos_tx, (stroke_point_id + 2) * 3 + 0);
+ ivec4 ma = floatBitsToInt(texelFetch(gp_pos_tx, (stroke_point_id - 1) * 3 + 1));
+ ivec4 ma1 = floatBitsToInt(texelFetch(gp_pos_tx, (stroke_point_id + 0) * 3 + 1));
+ ivec4 ma2 = floatBitsToInt(texelFetch(gp_pos_tx, (stroke_point_id + 1) * 3 + 1));
+ ivec4 ma3 = floatBitsToInt(texelFetch(gp_pos_tx, (stroke_point_id + 2) * 3 + 1));
+ vec4 uv1 = texelFetch(gp_pos_tx, (stroke_point_id + 0) * 3 + 2);
+ vec4 uv2 = texelFetch(gp_pos_tx, (stroke_point_id + 1) * 3 + 2);
+
+ vec4 col1 = texelFetch(gp_col_tx, (stroke_point_id + 0) * 2 + 0);
+ vec4 col2 = texelFetch(gp_col_tx, (stroke_point_id + 1) * 2 + 0);
+ vec4 fcol1 = texelFetch(gp_col_tx, (stroke_point_id + 0) * 2 + 1);
+
# define thickness1 pos1.w
# define thickness2 pos2.w
# define strength1 uv1.w
@@ -167,7 +177,7 @@ vec4 gpencil_vertex(ivec4 ma,
vec4 out_ndc;
- if (GPENCIL_IS_STROKE_VERTEX) {
+ if (gpencil_is_stroke_vertex()) {
bool is_dot = flag_test(material_flags, GP_STROKE_ALIGNMENT);
bool is_squares = !flag_test(material_flags, GP_STROKE_DOTS);
@@ -177,13 +187,6 @@ vec4 gpencil_vertex(ivec4 ma,
is_squares = false;
}
- /* Endpoints, we discard the vertices. */
- if (ma1.x == -1 || (!is_dot && ma2.x == -1)) {
- /* We set the vertex at the camera origin to generate 0 fragments. */
- out_ndc = vec4(0.0, 0.0, -3e36, 0.0);
- return out_ndc;
- }
-
/* Avoid using a vertex attribute for quad positioning. */
float x = float(gl_VertexID & 1) * 2.0 - 1.0; /* [-1..1] */
float y = float(gl_VertexID & 2) - 1.0; /* [-1..1] */
@@ -336,8 +339,7 @@ vec4 gpencil_vertex(ivec4 ma,
out_N = safe_normalize(N);
/* Decode fill opacity. */
- out_color = vec4(fcol1.rgb, floor(fcol1.a / 10.0));
- out_color.a /= 10000.0;
+ out_color = vec4(fcol1.rgb, floor(fcol1.a / 10.0) / 10000.0);
/* We still offset the fills a little to avoid overlaps */
out_ndc.z += 0.000002;
@@ -355,20 +357,7 @@ vec4 gpencil_vertex(ivec4 ma,
return out_ndc;
}
-vec4 gpencil_vertex(ivec4 ma,
- ivec4 ma1,
- ivec4 ma2,
- ivec4 ma3,
- vec4 pos,
- vec4 pos1,
- vec4 pos2,
- vec4 pos3,
- vec4 uv1,
- vec4 uv2,
- vec4 col1,
- vec4 col2,
- vec4 fcol1,
- vec4 viewport_size,
+vec4 gpencil_vertex(vec4 viewport_size,
out vec3 out_P,
out vec3 out_N,
out vec4 out_color,
@@ -379,20 +368,7 @@ vec4 gpencil_vertex(ivec4 ma,
out vec2 out_thickness,
out float out_hardness)
{
- return gpencil_vertex(ma,
- ma1,
- ma2,
- ma3,
- pos,
- pos1,
- pos2,
- pos3,
- uv1,
- uv2,
- col1,
- col2,
- fcol1,
- viewport_size,
+ return gpencil_vertex(viewport_size,
0u,
vec2(1.0, 0.0),
out_P,
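The new attribute-less path above packs everything into gl_VertexID: one bit distinguishes stroke from fill vertices, and the remaining bits, after a shift, index the per-point texels (three texels per point in gp_pos_tx, two in gp_col_tx). A CPU-side C++ sketch of that decoding; the two constants are placeholders, since the real bit layout lives in gpencil_shader_shared.h, which is not part of this diff:

/* Placeholder constants: the real values are defined in gpencil_shader_shared.h. */
constexpr int GP_IS_STROKE_VERTEX_BIT_SKETCH = 1 << 30;
constexpr int GP_VERTEX_ID_SHIFT_SKETCH = 2; /* Assumed: low bits address the quad corner. */

struct GPencilVertexDecodeSketch {
  int stroke_point_id; /* Texel index base: *3 into gp_pos_tx, *2 into gp_col_tx. */
  bool is_stroke;      /* False for fill-triangle vertices. */
  float corner_x;      /* Quad corner in [-1, 1], as computed in the shader. */
  float corner_y;
};

static GPencilVertexDecodeSketch decode_gpencil_vertex_id_sketch(const int vertex_id)
{
  GPencilVertexDecodeSketch d;
  d.is_stroke = (vertex_id & GP_IS_STROKE_VERTEX_BIT_SKETCH) != 0;
  d.stroke_point_id = (vertex_id & ~GP_IS_STROKE_VERTEX_BIT_SKETCH) >> GP_VERTEX_ID_SHIFT_SKETCH;
  d.corner_x = float(vertex_id & 1) * 2.0f - 1.0f;
  d.corner_y = float(vertex_id & 2) - 1.0f;
  return d;
}

With this layout the former vertex attributes become plain texelFetch calls at stroke_point_id * 3 + n and stroke_point_id * 2 + n, as the hunk above shows.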
diff --git a/source/blender/draw/intern/shaders/common_intersect_lib.glsl b/source/blender/draw/intern/shaders/common_intersect_lib.glsl
index 83223f89277..d1416e220a4 100644
--- a/source/blender/draw/intern/shaders/common_intersect_lib.glsl
+++ b/source/blender/draw/intern/shaders/common_intersect_lib.glsl
@@ -135,7 +135,7 @@ bool intersect_view(Pyramid pyramid)
for (int p = 0; p < 6; ++p) {
bool is_any_vertex_on_positive_side = false;
for (int v = 0; v < 5; ++v) {
- float test = dot(drw_view.frustum_planes[p], vec4(pyramid.corners[v], 1.0));
+ float test = dot(drw_view_culling.planes[p], vec4(pyramid.corners[v], 1.0));
if (test > 0.0) {
is_any_vertex_on_positive_side = true;
break;
@@ -157,7 +157,7 @@ bool intersect_view(Pyramid pyramid)
for (int p = 0; p < 5; ++p) {
bool is_any_vertex_on_positive_side = false;
for (int v = 0; v < 8; ++v) {
- float test = dot(i_pyramid.planes[p], vec4(drw_view.frustum_corners[v].xyz, 1.0));
+ float test = dot(i_pyramid.planes[p], vec4(drw_view_culling.corners[v].xyz, 1.0));
if (test > 0.0) {
is_any_vertex_on_positive_side = true;
break;
@@ -180,7 +180,7 @@ bool intersect_view(Box box)
for (int p = 0; p < 6; ++p) {
bool is_any_vertex_on_positive_side = false;
for (int v = 0; v < 8; ++v) {
- float test = dot(drw_view.frustum_planes[p], vec4(box.corners[v], 1.0));
+ float test = dot(drw_view_culling.planes[p], vec4(box.corners[v], 1.0));
if (test > 0.0) {
is_any_vertex_on_positive_side = true;
break;
@@ -202,7 +202,7 @@ bool intersect_view(Box box)
for (int p = 0; p < 6; ++p) {
bool is_any_vertex_on_positive_side = false;
for (int v = 0; v < 8; ++v) {
- float test = dot(i_box.planes[p], vec4(drw_view.frustum_corners[v].xyz, 1.0));
+ float test = dot(i_box.planes[p], vec4(drw_view_culling.corners[v].xyz, 1.0));
if (test > 0.0) {
is_any_vertex_on_positive_side = true;
break;
@@ -226,7 +226,7 @@ bool intersect_view(IsectBox i_box)
for (int p = 0; p < 6; ++p) {
bool is_any_vertex_on_positive_side = false;
for (int v = 0; v < 8; ++v) {
- float test = dot(drw_view.frustum_planes[p], vec4(i_box.corners[v], 1.0));
+ float test = dot(drw_view_culling.planes[p], vec4(i_box.corners[v], 1.0));
if (test > 0.0) {
is_any_vertex_on_positive_side = true;
break;
@@ -246,7 +246,7 @@ bool intersect_view(IsectBox i_box)
for (int p = 0; p < 6; ++p) {
bool is_any_vertex_on_positive_side = false;
for (int v = 0; v < 8; ++v) {
- float test = dot(i_box.planes[p], vec4(drw_view.frustum_corners[v].xyz, 1.0));
+ float test = dot(i_box.planes[p], vec4(drw_view_culling.corners[v].xyz, 1.0));
if (test > 0.0) {
is_any_vertex_on_positive_side = true;
break;
@@ -267,7 +267,7 @@ bool intersect_view(Sphere sphere)
bool intersects = true;
for (int p = 0; p < 6 && intersects; ++p) {
- float dist_to_plane = dot(drw_view.frustum_planes[p], vec4(sphere.center, 1.0));
+ float dist_to_plane = dot(drw_view_culling.planes[p], vec4(sphere.center, 1.0));
if (dist_to_plane < -sphere.radius) {
intersects = false;
}
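The hunks above only swap which UBO the culling data comes from; the rejection logic is unchanged: a convex shape is discarded as soon as all of its corners fall on the negative side of one culling plane. A CPU-side C++ sketch of that loop, assuming planes are stored as (nx, ny, nz, d) with the positive half-space counted as inside; the symmetric pass that tests the view corners against the shape's own planes is noted but omitted:

#include <array>
#include <cstddef>

struct Vec3Sketch { float x, y, z; };
struct PlaneSketch { float x, y, z, w; }; /* Plane equation evaluated as nx*x + ny*y + nz*z + d. */

static float plane_point_side(const PlaneSketch &plane, const Vec3Sketch &p)
{
  return plane.x * p.x + plane.y * p.y + plane.z * p.z + plane.w;
}

template<std::size_t NumPlanes, std::size_t NumCorners>
static bool intersect_view_sketch(const std::array<PlaneSketch, NumPlanes> &culling_planes,
                                  const std::array<Vec3Sketch, NumCorners> &shape_corners)
{
  for (const PlaneSketch &plane : culling_planes) {
    bool any_corner_on_positive_side = false;
    for (const Vec3Sketch &corner : shape_corners) {
      if (plane_point_side(plane, corner) > 0.0f) {
        any_corner_on_positive_side = true;
        break;
      }
    }
    if (!any_corner_on_positive_side) {
      return false; /* Every corner is behind this plane: the shape is culled. */
    }
  }
  return true; /* Possibly visible (conservative without the second, corner-vs-shape pass). */
}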
diff --git a/source/blender/draw/intern/shaders/common_pointcloud_lib.glsl b/source/blender/draw/intern/shaders/common_pointcloud_lib.glsl
index dd725ad327f..8725e036435 100644
--- a/source/blender/draw/intern/shaders/common_pointcloud_lib.glsl
+++ b/source/blender/draw/intern/shaders/common_pointcloud_lib.glsl
@@ -2,16 +2,10 @@
/* NOTE: To be used with UNIFORM_RESOURCE_ID and INSTANCED_ATTR as define. */
#pragma BLENDER_REQUIRE(common_view_lib.glsl)
-#ifndef DRW_SHADER_SHARED_H
-
-in vec4 pos; /* Position and radius. */
-
-/* ---- Instanced attribs ---- */
-
-in vec3 pos_inst;
-in vec3 nor;
-
-#endif
+int pointcloud_get_point_id()
+{
+ return gl_VertexID / 32;
+}
mat3 pointcloud_get_facing_matrix(vec3 p)
{
@@ -25,8 +19,10 @@ mat3 pointcloud_get_facing_matrix(vec3 p)
/* Returns world center position and radius. */
void pointcloud_get_pos_and_radius(out vec3 outpos, out float outradius)
{
- outpos = point_object_to_world(pos.xyz);
- outradius = dot(abs(mat3(ModelMatrix) * pos.www), vec3(1.0 / 3.0));
+ int id = pointcloud_get_point_id();
+ vec4 pos_rad = texelFetch(ptcloud_pos_rad_tx, id);
+ outpos = point_object_to_world(pos_rad.xyz);
+ outradius = dot(abs(mat3(ModelMatrix) * pos_rad.www), vec3(1.0 / 3.0));
}
/* Return world position and normal. */
@@ -38,15 +34,67 @@ void pointcloud_get_pos_and_nor(out vec3 outpos, out vec3 outnor)
mat3 facing_mat = pointcloud_get_facing_matrix(p);
+ /** \note: Avoid modulo by non-power-of-two in shader. See Index buffer setup. */
+ int vert_id = gl_VertexID % 32;
+ vec3 pos_inst = vec3(0.0);
+
+ switch (vert_id) {
+ case 0:
+ pos_inst.z = 1.0;
+ break;
+ case 1:
+ pos_inst.x = 1.0;
+ break;
+ case 2:
+ pos_inst.y = 1.0;
+ break;
+ case 3:
+ pos_inst.x = -1.0;
+ break;
+ case 4:
+ pos_inst.y = -1.0;
+ break;
+ }
+
/* TODO(fclem): remove multiplication here. Here only for keeping the size correct for now. */
radius *= 0.01;
- outpos = p + (facing_mat * pos_inst) * radius;
- outnor = facing_mat * nor;
+ outnor = facing_mat * pos_inst;
+ outpos = p + outnor * radius;
}
-vec3 pointcloud_get_pos(void)
+vec3 pointcloud_get_pos()
{
vec3 outpos, outnor;
pointcloud_get_pos_and_nor(outpos, outnor);
return outpos;
}
+
+float pointcloud_get_customdata_float(const samplerBuffer cd_buf)
+{
+ int id = pointcloud_get_point_id();
+ return texelFetch(cd_buf, id).r;
+}
+
+vec2 pointcloud_get_customdata_vec2(const samplerBuffer cd_buf)
+{
+ int id = pointcloud_get_point_id();
+ return texelFetch(cd_buf, id).rg;
+}
+
+vec3 pointcloud_get_customdata_vec3(const samplerBuffer cd_buf)
+{
+ int id = pointcloud_get_point_id();
+ return texelFetch(cd_buf, id).rgb;
+}
+
+vec4 pointcloud_get_customdata_vec4(const samplerBuffer cd_buf)
+{
+ int id = pointcloud_get_point_id();
+ return texelFetch(cd_buf, id).rgba;
+}
+
+vec2 pointcloud_get_barycentric(void)
+{
+ /* TODO: To be implemented. */
+ return vec2(0.0);
+}
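Point-cloud rendering above drops its per-instance attributes: every point now owns a fixed block of 32 vertex IDs, so the point index is gl_VertexID / 32 and the local corner is gl_VertexID % 32, with the first five corners forming the axis-aligned directions that build the billboarded shape. A C++ sketch of that addressing; how the index buffer strings these corners into triangles is set up elsewhere and is not assumed here:

struct Vec3Sketch { float x, y, z; };

/* The 32-vertex block size is a power of two so the GPU-side modulo stays cheap. */
static void decode_pointcloud_vertex_id_sketch(const int vertex_id, int &point_id, Vec3Sketch &corner)
{
  point_id = vertex_id / 32; /* Index into ptcloud_pos_rad_tx and the custom-data buffers. */
  corner = {0.0f, 0.0f, 0.0f};
  switch (vertex_id % 32) {
    case 0: corner.z = 1.0f; break;
    case 1: corner.x = 1.0f; break;
    case 2: corner.y = 1.0f; break;
    case 3: corner.x = -1.0f; break;
    case 4: corner.y = -1.0f; break;
    default: break; /* Remaining IDs in the block are assumed unused by this shape. */
  }
}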
diff --git a/source/blender/draw/intern/shaders/common_shape_lib.glsl b/source/blender/draw/intern/shaders/common_shape_lib.glsl
index f2c8bf0faaf..56722c417aa 100644
--- a/source/blender/draw/intern/shaders/common_shape_lib.glsl
+++ b/source/blender/draw/intern/shaders/common_shape_lib.glsl
@@ -188,7 +188,7 @@ Frustum shape_frustum(vec3 corners[8])
/** \name Cone
* \{ */
-/* Cone at orign with no height. */
+/* Cone at origin with no height. */
struct Cone {
vec3 direction;
float angle_cos;
diff --git a/source/blender/draw/intern/shaders/common_smaa_lib.glsl b/source/blender/draw/intern/shaders/common_smaa_lib.glsl
index dbc4c998b34..0c040c9acfe 100644
--- a/source/blender/draw/intern/shaders/common_smaa_lib.glsl
+++ b/source/blender/draw/intern/shaders/common_smaa_lib.glsl
@@ -588,15 +588,18 @@ SamplerState PointSampler
# else
# define mad(a, b, c) (a * b + c)
# endif
-# define float2 vec2
-# define float3 vec3
-# define float4 vec4
-# define int2 ivec2
-# define int3 ivec3
-# define int4 ivec4
-# define bool2 bvec2
-# define bool3 bvec3
-# define bool4 bvec4
+/* NOTE(Metal): Types already natively declared in MSL. */
+# ifndef GPU_METAL
+# define float2 vec2
+# define float3 vec3
+# define float4 vec4
+# define int2 ivec2
+# define int3 ivec3
+# define int4 ivec4
+# define bool2 bvec2
+# define bool3 bvec3
+# define bool4 bvec4
+# endif
#endif
/* clang-format off */
@@ -658,7 +661,14 @@ void SMAAMovc(bool4 cond, inout float4 variable, float4 value)
/**
* Edge Detection Vertex Shader
*/
+# ifdef GPU_METAL
+/* NOTE: Metal API requires explicit address space qualifiers for pointer types.
+ * Arrays in functions are passed as pointers, and thus require explicit address
+ * space. */
+void SMAAEdgeDetectionVS(float2 texcoord, thread float4 *offset)
+# else
void SMAAEdgeDetectionVS(float2 texcoord, out float4 offset[3])
+# endif
{
offset[0] = mad(SMAA_RT_METRICS.xyxy, float4(-1.0, 0.0, 0.0, -1.0), texcoord.xyxy);
offset[1] = mad(SMAA_RT_METRICS.xyxy, float4(1.0, 0.0, 0.0, 1.0), texcoord.xyxy);
@@ -668,7 +678,16 @@ void SMAAEdgeDetectionVS(float2 texcoord, out float4 offset[3])
/**
* Blend Weight Calculation Vertex Shader
*/
+# ifdef GPU_METAL
+/* NOTE: Metal API requires explicit address space qualifiers for pointer types.
+ * Arrays in functions are passed as pointers, and thus require explicit address
+ * space. */
+void SMAABlendingWeightCalculationVS(float2 texcoord,
+ thread float2 &pixcoord,
+ thread float4 *offset)
+# else
void SMAABlendingWeightCalculationVS(float2 texcoord, out float2 pixcoord, out float4 offset[3])
+# endif
{
pixcoord = texcoord * SMAA_RT_METRICS.zw;
diff --git a/source/blender/draw/intern/shaders/common_view_clipping_lib.glsl b/source/blender/draw/intern/shaders/common_view_clipping_lib.glsl
index d6dfa326511..4ff6a9a3778 100644
--- a/source/blender/draw/intern/shaders/common_view_clipping_lib.glsl
+++ b/source/blender/draw/intern/shaders/common_view_clipping_lib.glsl
@@ -7,12 +7,12 @@ void view_clipping_distances(vec3 wpos)
{
# ifdef USE_WORLD_CLIP_PLANES
vec4 pos_4d = vec4(wpos, 1.0);
- gl_ClipDistance[0] = dot(drw_view.clip_planes[0], pos_4d);
- gl_ClipDistance[1] = dot(drw_view.clip_planes[1], pos_4d);
- gl_ClipDistance[2] = dot(drw_view.clip_planes[2], pos_4d);
- gl_ClipDistance[3] = dot(drw_view.clip_planes[3], pos_4d);
- gl_ClipDistance[4] = dot(drw_view.clip_planes[4], pos_4d);
- gl_ClipDistance[5] = dot(drw_view.clip_planes[5], pos_4d);
+ gl_ClipDistance[0] = dot(drw_clipping[0], pos_4d);
+ gl_ClipDistance[1] = dot(drw_clipping[1], pos_4d);
+ gl_ClipDistance[2] = dot(drw_clipping[2], pos_4d);
+ gl_ClipDistance[3] = dot(drw_clipping[3], pos_4d);
+ gl_ClipDistance[4] = dot(drw_clipping[4], pos_4d);
+ gl_ClipDistance[5] = dot(drw_clipping[5], pos_4d);
# endif
}
diff --git a/source/blender/draw/intern/shaders/common_view_lib.glsl b/source/blender/draw/intern/shaders/common_view_lib.glsl
index 6521476c3a7..90c1e0490b8 100644
--- a/source/blender/draw/intern/shaders/common_view_lib.glsl
+++ b/source/blender/draw/intern/shaders/common_view_lib.glsl
@@ -11,22 +11,10 @@
/* keep in sync with DRWManager.view_data */
layout(std140) uniform viewBlock
{
- /* Same order as DRWViewportMatrixType */
- mat4 ViewProjectionMatrix;
- mat4 ViewProjectionMatrixInverse;
mat4 ViewMatrix;
mat4 ViewMatrixInverse;
mat4 ProjectionMatrix;
mat4 ProjectionMatrixInverse;
-
- vec4 clipPlanes[6];
-
- /* View frustum corners [NDC(-1.0, -1.0, -1.0) & NDC(1.0, 1.0, 1.0)].
- * Fourth components are near and far values. */
- vec4 ViewVecs[2];
-
- /* TODO: move it elsewhere. */
- vec4 CameraTexCoFactors;
};
#endif /* USE_GPU_SHADER_CREATE_INFO */
@@ -37,12 +25,10 @@ layout(std140) uniform viewBlock
# endif
#endif
-#define IS_DEBUG_MOUSE_FRAGMENT (ivec2(gl_FragCoord) == drw_view.mouse_pixel)
+/* Not supported anymore. TODO(fclem): Add back support. */
+// #define IS_DEBUG_MOUSE_FRAGMENT (ivec2(gl_FragCoord) == drw_view.mouse_pixel)
#define IS_FIRST_INVOCATION (gl_GlobalInvocationID == uvec3(0))
-#define ViewNear (ViewVecs[0].w)
-#define ViewFar (ViewVecs[1].w)
-
#define cameraForward ViewMatrixInverse[2].xyz
#define cameraPos ViewMatrixInverse[3].xyz
vec3 cameraVec(vec3 P)
@@ -51,18 +37,13 @@ vec3 cameraVec(vec3 P)
}
#define viewCameraVec(vP) ((ProjectionMatrix[3][3] == 0.0) ? normalize(-vP) : vec3(0.0, 0.0, 1.0))
-#ifdef world_clip_planes_calc_clip_distance
-# undef world_clip_planes_calc_clip_distance
-# define world_clip_planes_calc_clip_distance(p) \
- _world_clip_planes_calc_clip_distance(p, clipPlanes)
-#endif
-
#ifdef COMMON_GLOBALS_LIB
/* TODO move to overlay engine. */
float mul_project_m4_v3_zfac(in vec3 co)
{
- return pixelFac * ((ViewProjectionMatrix[0][3] * co.x) + (ViewProjectionMatrix[1][3] * co.y) +
- (ViewProjectionMatrix[2][3] * co.z) + ViewProjectionMatrix[3][3]);
+ vec3 vP = (ViewMatrix * vec4(co, 1.0)).xyz;
+ return pixelFac * ((ProjectionMatrix[0][3] * vP.x) + (ProjectionMatrix[1][3] * vP.y) +
+ (ProjectionMatrix[2][3] * vP.z) + ProjectionMatrix[3][3]);
}
#endif
@@ -93,7 +74,7 @@ vec4 pack_line_data(vec2 frag_co, vec2 edge_start, vec2 edge_pos)
edge /= len;
vec2 perp = vec2(-edge.y, edge.x);
float dist = dot(perp, frag_co - edge_start);
- /* Add 0.1 to diffenrentiate with cleared pixels. */
+ /* Add 0.1 to differentiate with cleared pixels. */
return vec4(perp * 0.5 + 0.5, dist * 0.25 + 0.5 + 0.1, 1.0);
}
else {
@@ -228,7 +209,7 @@ layout(std140) uniform modelBlock
# ifndef USE_GPU_SHADER_CREATE_INFO
/* Intel GPU seems to suffer performance impact when the model matrix is in UBO storage.
* So for now we just force using the legacy path. */
-/* Note that this is also a workaround of a problem on osx (amd or nvidia)
+/* Note that this is also a workaround of a problem on OSX (AMD or NVIDIA)
* and older amd driver on windows. */
uniform mat4 ModelMatrix;
uniform mat4 ModelMatrixInverse;
@@ -267,13 +248,14 @@ uniform mat4 ModelMatrixInverse;
#define normal_world_to_view(n) (mat3(ViewMatrix) * n)
#define normal_view_to_world(n) (mat3(ViewMatrixInverse) * n)
-#define point_object_to_ndc(p) (ViewProjectionMatrix * vec4((ModelMatrix * vec4(p, 1.0)).xyz, 1.0))
+#define point_object_to_ndc(p) \
+ (ProjectionMatrix * (ViewMatrix * vec4((ModelMatrix * vec4(p, 1.0)).xyz, 1.0)))
#define point_object_to_view(p) ((ViewMatrix * vec4((ModelMatrix * vec4(p, 1.0)).xyz, 1.0)).xyz)
#define point_object_to_world(p) ((ModelMatrix * vec4(p, 1.0)).xyz)
#define point_view_to_ndc(p) (ProjectionMatrix * vec4(p, 1.0))
#define point_view_to_object(p) ((ModelMatrixInverse * (ViewMatrixInverse * vec4(p, 1.0))).xyz)
#define point_view_to_world(p) ((ViewMatrixInverse * vec4(p, 1.0)).xyz)
-#define point_world_to_ndc(p) (ViewProjectionMatrix * vec4(p, 1.0))
+#define point_world_to_ndc(p) (ProjectionMatrix * (ViewMatrix * vec4(p, 1.0)))
#define point_world_to_object(p) ((ModelMatrixInverse * vec4(p, 1.0)).xyz)
#define point_world_to_view(p) ((ViewMatrix * vec4(p, 1.0)).xyz)
@@ -314,24 +296,26 @@ float buffer_depth(bool is_persp, float z, float zf, float zn)
float get_view_z_from_depth(float depth)
{
+ float d = 2.0 * depth - 1.0;
if (ProjectionMatrix[3][3] == 0.0) {
- float d = 2.0 * depth - 1.0;
- return -ProjectionMatrix[3][2] / (d + ProjectionMatrix[2][2]);
+ d = -ProjectionMatrix[3][2] / (d + ProjectionMatrix[2][2]);
}
else {
- return ViewVecs[0].z + depth * ViewVecs[1].z;
+ d = (d - ProjectionMatrix[3][2]) / ProjectionMatrix[2][2];
}
+ return d;
}
float get_depth_from_view_z(float z)
{
+ float d;
if (ProjectionMatrix[3][3] == 0.0) {
- float d = (-ProjectionMatrix[3][2] / z) - ProjectionMatrix[2][2];
- return d * 0.5 + 0.5;
+ d = (-ProjectionMatrix[3][2] / z) - ProjectionMatrix[2][2];
}
else {
- return (z - ViewVecs[0].z) / ViewVecs[1].z;
+ d = ProjectionMatrix[2][2] * z + ProjectionMatrix[3][2];
}
+ return d * 0.5 + 0.5;
}
vec2 get_uvs_from_view(vec3 view)
@@ -342,12 +326,9 @@ vec2 get_uvs_from_view(vec3 view)
vec3 get_view_space_from_depth(vec2 uvcoords, float depth)
{
- if (ProjectionMatrix[3][3] == 0.0) {
- return vec3(ViewVecs[0].xy + uvcoords * ViewVecs[1].xy, 1.0) * get_view_z_from_depth(depth);
- }
- else {
- return ViewVecs[0].xyz + vec3(uvcoords, depth) * ViewVecs[1].xyz;
- }
+ vec3 ndc = vec3(uvcoords, depth) * 2.0 - 1.0;
+ vec4 p = ProjectionMatrixInverse * vec4(ndc, 1.0);
+ return p.xyz / p.w;
}
vec3 get_world_space_from_depth(vec2 uvcoords, float depth)
@@ -355,14 +336,18 @@ vec3 get_world_space_from_depth(vec2 uvcoords, float depth)
return (ViewMatrixInverse * vec4(get_view_space_from_depth(uvcoords, depth), 1.0)).xyz;
}
-vec3 get_view_vector_from_screen_uv(vec2 uv)
+vec3 get_view_vector_from_screen_uv(vec2 uvcoords)
{
if (ProjectionMatrix[3][3] == 0.0) {
- return normalize(vec3(ViewVecs[0].xy + uv * ViewVecs[1].xy, 1.0));
- }
- else {
- return vec3(0.0, 0.0, 1.0);
+ vec2 ndc = vec2(uvcoords * 2.0 - 1.0);
+ /* This is the manual inversion of the ProjectionMatrix. */
+ vec3 vV = vec3((-ndc - ProjectionMatrix[2].xy) /
+ vec2(ProjectionMatrix[0][0], ProjectionMatrix[1][1]),
+ -ProjectionMatrix[2][2] - ProjectionMatrix[3][2]);
+ return normalize(vV);
}
+ /* Orthographic case. */
+ return vec3(0.0, 0.0, 1.0);
}
#endif /* COMMON_VIEW_LIB_GLSL */
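With ViewVecs gone, both depth conversions above are derived directly from the projection matrix. A scalar C++ sketch of the two directions, keeping GLSL column-major indexing, where p22 and p32 stand for ProjectionMatrix[2][2] and ProjectionMatrix[3][2] and is_perspective corresponds to ProjectionMatrix[3][3] == 0.0:

/* Sketch of get_view_z_from_depth() / get_depth_from_view_z() above. */
static float view_z_from_depth_sketch(float depth_0_1, float p22, float p32, bool is_perspective)
{
  const float ndc_z = 2.0f * depth_0_1 - 1.0f;
  if (is_perspective) {
    return -p32 / (ndc_z + p22);
  }
  return (ndc_z - p32) / p22; /* Orthographic: linear remap. */
}

static float depth_from_view_z_sketch(float view_z, float p22, float p32, bool is_perspective)
{
  const float ndc_z = is_perspective ? (-p32 / view_z) - p22 : p22 * view_z + p32;
  return ndc_z * 0.5f + 0.5f; /* Back to the [0, 1] depth-buffer range. */
}

Both branches invert each other exactly: in the perspective case ndc_z = -p22 - p32 / view_z, which the two functions solve in opposite directions.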
diff --git a/source/blender/draw/intern/shaders/draw_debug_info.hh b/source/blender/draw/intern/shaders/draw_debug_info.hh
index ce450bb1210..4b5590824ba 100644
--- a/source/blender/draw/intern/shaders/draw_debug_info.hh
+++ b/source/blender/draw/intern/shaders/draw_debug_info.hh
@@ -21,6 +21,7 @@ GPU_SHADER_CREATE_INFO(draw_debug_print_display)
.storage_buf(7, Qualifier::READ, "uint", "drw_debug_print_buf[]")
.vertex_out(draw_debug_print_display_iface)
.fragment_out(0, Type::VEC4, "out_color")
+ .push_constant(Type::VEC2, "viewport_size")
.vertex_source("draw_debug_print_display_vert.glsl")
.fragment_source("draw_debug_print_display_frag.glsl")
.additional_info("draw_view");
diff --git a/source/blender/draw/intern/shaders/draw_debug_print_display_vert.glsl b/source/blender/draw/intern/shaders/draw_debug_print_display_vert.glsl
index cb379056e2b..57f293cc6e8 100644
--- a/source/blender/draw/intern/shaders/draw_debug_print_display_vert.glsl
+++ b/source/blender/draw/intern/shaders/draw_debug_print_display_vert.glsl
@@ -23,7 +23,6 @@ void main()
float char_size = 16.0;
/* Change anchor point to the top left. */
vec2 pos_on_screen = char_size * vec2(col, row) + char_size * 4;
- gl_Position = vec4(
- pos_on_screen * drw_view.viewport_size_inverse * vec2(2.0, -2.0) - vec2(1.0, -1.0), 0, 1);
+ gl_Position = vec4((pos_on_screen / viewport_size) * vec2(2.0, -2.0) - vec2(1.0, -1.0), 0, 1);
gl_PointSize = char_size;
}
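The debug text vertex shader above now receives the viewport size as a push constant and maps a pixel position into NDC with a flipped Y so the origin sits at the top-left. A tiny C++ sketch of that mapping:

struct Vec2Sketch { float x, y; };

/* Equivalent to (pos_px / viewport_size) * vec2(2.0, -2.0) - vec2(1.0, -1.0). */
static Vec2Sketch pixel_to_ndc_top_left_sketch(const Vec2Sketch pos_px, const Vec2Sketch viewport_size)
{
  return {(pos_px.x / viewport_size.x) * 2.0f - 1.0f,
          (pos_px.y / viewport_size.y) * -2.0f + 1.0f};
}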
diff --git a/source/blender/draw/intern/shaders/draw_object_infos_info.hh b/source/blender/draw/intern/shaders/draw_object_infos_info.hh
index 31fee018fbc..33634fb5fcb 100644
--- a/source/blender/draw/intern/shaders/draw_object_infos_info.hh
+++ b/source/blender/draw/intern/shaders/draw_object_infos_info.hh
@@ -13,11 +13,19 @@ GPU_SHADER_CREATE_INFO(draw_object_infos)
GPU_SHADER_CREATE_INFO(draw_volume_infos)
.typedef_source("draw_shader_shared.h")
- .uniform_buf(2, "VolumeInfos", "drw_volume", Frequency::BATCH);
+ .uniform_buf(3, "VolumeInfos", "drw_volume", Frequency::BATCH);
GPU_SHADER_CREATE_INFO(draw_curves_infos)
.typedef_source("draw_shader_shared.h")
- .uniform_buf(2, "CurvesInfos", "drw_curves", Frequency::BATCH);
+ .uniform_buf(3, "CurvesInfos", "drw_curves", Frequency::BATCH);
+
+GPU_SHADER_CREATE_INFO(draw_layer_attributes)
+ .typedef_source("draw_shader_shared.h")
+ .define("VLATTR_LIB")
+ .uniform_buf(DRW_LAYER_ATTR_UBO_SLOT,
+ "LayerAttribute",
+ "drw_layer_attrs[DRW_RESOURCE_CHUNK_LEN]",
+ Frequency::BATCH);
GPU_SHADER_CREATE_INFO(draw_object_infos_new)
.typedef_source("draw_shader_shared.h")
diff --git a/source/blender/draw/intern/shaders/draw_view_info.hh b/source/blender/draw/intern/shaders/draw_view_info.hh
index c522c607791..23892a39062 100644
--- a/source/blender/draw/intern/shaders/draw_view_info.hh
+++ b/source/blender/draw/intern/shaders/draw_view_info.hh
@@ -45,7 +45,11 @@ GPU_SHADER_CREATE_INFO(draw_resource_handle)
* \{ */
GPU_SHADER_CREATE_INFO(draw_view)
- .uniform_buf(DRW_VIEW_UBO_SLOT, "ViewInfos", "drw_view", Frequency::PASS)
+ .uniform_buf(DRW_VIEW_UBO_SLOT, "ViewMatrices", "drw_view", Frequency::PASS)
+ .typedef_source("draw_shader_shared.h");
+
+GPU_SHADER_CREATE_INFO(draw_view_culling)
+ .uniform_buf(DRW_VIEW_CULLING_UBO_SLOT, "ViewCullingData", "drw_view_culling")
.typedef_source("draw_shader_shared.h");
GPU_SHADER_CREATE_INFO(draw_modelmat)
@@ -71,7 +75,10 @@ GPU_SHADER_CREATE_INFO(draw_modelmat_instanced_attr)
/** \name Draw View
* \{ */
-GPU_SHADER_CREATE_INFO(drw_clipped).define("USE_WORLD_CLIP_PLANES");
+GPU_SHADER_CREATE_INFO(drw_clipped)
+ /* TODO(fclem): Move to engine side. */
+ .uniform_buf(DRW_CLIPPING_UBO_SLOT, "vec4", "drw_clipping[6]", Frequency::PASS)
+ .define("USE_WORLD_CLIP_PLANES");
/** \} */
@@ -105,9 +112,7 @@ GPU_SHADER_CREATE_INFO(draw_hair)
.additional_info("draw_modelmat", "draw_resource_id");
GPU_SHADER_CREATE_INFO(draw_pointcloud)
- .vertex_in(0, Type::VEC4, "pos")
- .vertex_in(1, Type::VEC3, "pos_inst")
- .vertex_in(2, Type::VEC3, "nor")
+ .sampler(0, ImageType::FLOAT_BUFFER, "ptcloud_pos_rad_tx", Frequency::BATCH)
.additional_info("draw_modelmat_instanced_attr", "draw_resource_id_uniform");
GPU_SHADER_CREATE_INFO(draw_volume).additional_info("draw_modelmat", "draw_resource_id_uniform");
@@ -115,26 +120,15 @@ GPU_SHADER_CREATE_INFO(draw_volume).additional_info("draw_modelmat", "draw_resou
GPU_SHADER_CREATE_INFO(draw_gpencil)
.typedef_source("gpencil_shader_shared.h")
.define("DRW_GPENCIL_INFO")
- .vertex_in(0, Type::IVEC4, "ma")
- .vertex_in(1, Type::IVEC4, "ma1")
- .vertex_in(2, Type::IVEC4, "ma2")
- .vertex_in(3, Type::IVEC4, "ma3")
- .vertex_in(4, Type::VEC4, "pos")
- .vertex_in(5, Type::VEC4, "pos1")
- .vertex_in(6, Type::VEC4, "pos2")
- .vertex_in(7, Type::VEC4, "pos3")
- .vertex_in(8, Type::VEC4, "uv1")
- .vertex_in(9, Type::VEC4, "uv2")
- .vertex_in(10, Type::VEC4, "col1")
- .vertex_in(11, Type::VEC4, "col2")
- .vertex_in(12, Type::VEC4, "fcol1")
+ .sampler(0, ImageType::FLOAT_BUFFER, "gp_pos_tx")
+ .sampler(1, ImageType::FLOAT_BUFFER, "gp_col_tx")
/* Per Object */
.push_constant(Type::FLOAT, "gpThicknessScale") /* TODO(fclem): Replace with object info. */
.push_constant(Type::FLOAT, "gpThicknessWorldScale") /* TODO(fclem): Same as above. */
.define("gpThicknessIsScreenSpace", "(gpThicknessWorldScale < 0.0)")
/* Per Layer */
.push_constant(Type::FLOAT, "gpThicknessOffset")
- .additional_info("draw_modelmat", "draw_resource_id_uniform", "draw_object_infos");
+ .additional_info("draw_modelmat", "draw_object_infos");
/** \} */
@@ -160,7 +154,7 @@ GPU_SHADER_CREATE_INFO(draw_visibility_compute)
.storage_buf(1, Qualifier::READ_WRITE, "uint", "visibility_buf[]")
.push_constant(Type::INT, "resource_len")
.compute_source("draw_visibility_comp.glsl")
- .additional_info("draw_view");
+ .additional_info("draw_view", "draw_view_culling");
GPU_SHADER_CREATE_INFO(draw_command_generate)
.do_static_compilation(true)