git.blender.org/blender.git
Diffstat (limited to 'source/blender/draw/intern')
-rw-r--r-- source/blender/draw/intern/DRW_gpu_wrapper.hh | 84
-rw-r--r-- source/blender/draw/intern/DRW_render.h | 67
-rw-r--r-- source/blender/draw/intern/draw_cache.c | 42
-rw-r--r-- source/blender/draw/intern/draw_cache.h | 16
-rw-r--r-- source/blender/draw/intern/draw_cache_extract.h | 12
-rw-r--r-- source/blender/draw/intern/draw_cache_extract_mesh.cc | 7
-rw-r--r-- source/blender/draw/intern/draw_cache_extract_mesh_render_data.c | 14
-rw-r--r-- source/blender/draw/intern/draw_cache_impl.h | 46
-rw-r--r-- source/blender/draw/intern/draw_cache_impl_curves.cc (renamed from source/blender/draw/intern/draw_cache_impl_hair.cc) | 188
-rw-r--r-- source/blender/draw/intern/draw_cache_impl_mesh.c | 165
-rw-r--r-- source/blender/draw/intern/draw_cache_impl_subdivision.cc | 15
-rw-r--r-- source/blender/draw/intern/draw_common.c | 2
-rw-r--r-- source/blender/draw/intern/draw_hair.c | 4
-rw-r--r-- source/blender/draw/intern/draw_instance_data.c | 2
-rw-r--r-- source/blender/draw/intern/draw_manager.c | 24
-rw-r--r-- source/blender/draw/intern/draw_manager.h | 30
-rw-r--r-- source/blender/draw/intern/draw_manager_data.c | 110
-rw-r--r-- source/blender/draw/intern/draw_manager_exec.c | 18
-rw-r--r-- source/blender/draw/intern/draw_manager_shader.c | 20
-rw-r--r-- source/blender/draw/intern/draw_shader.c | 7
-rw-r--r-- source/blender/draw/intern/draw_shader_shared.h | 19
-rw-r--r-- source/blender/draw/intern/draw_view_data.h | 2
-rw-r--r-- source/blender/draw/intern/mesh_extractors/extract_mesh.h | 3
-rw-r--r-- source/blender/draw/intern/mesh_extractors/extract_mesh_ibo_points.cc | 19
-rw-r--r-- source/blender/draw/intern/mesh_extractors/extract_mesh_vbo_attributes.cc | 13
-rw-r--r-- source/blender/draw/intern/mesh_extractors/extract_mesh_vbo_edge_fac.cc | 2
-rw-r--r-- source/blender/draw/intern/mesh_extractors/extract_mesh_vbo_pos_nor.cc | 2
-rw-r--r-- source/blender/draw/intern/shaders/common_fxaa_lib.glsl | 2
-rw-r--r-- source/blender/draw/intern/shaders/common_hair_lib.glsl | 9
-rw-r--r-- source/blender/draw/intern/shaders/common_hair_refine_comp.glsl | 5
-rw-r--r-- source/blender/draw/intern/shaders/common_pointcloud_lib.glsl | 13
-rw-r--r-- source/blender/draw/intern/shaders/common_smaa_lib.glsl | 6
-rw-r--r-- source/blender/draw/intern/shaders/common_subdiv_lib.glsl | 2
-rw-r--r-- source/blender/draw/intern/shaders/common_view_clipping_lib.glsl | 33
-rw-r--r-- source/blender/draw/intern/shaders/common_view_lib.glsl | 130
-rw-r--r-- source/blender/draw/intern/shaders/draw_hair_refine_info.hh | 42
-rw-r--r-- source/blender/draw/intern/shaders/draw_object_infos_info.hh | 1
-rw-r--r-- source/blender/draw/intern/shaders/draw_view_info.hh | 89
38 files changed, 839 insertions, 426 deletions
diff --git a/source/blender/draw/intern/DRW_gpu_wrapper.hh b/source/blender/draw/intern/DRW_gpu_wrapper.hh
index 7a9bdb377fe..d4491223c10 100644
--- a/source/blender/draw/intern/DRW_gpu_wrapper.hh
+++ b/source/blender/draw/intern/DRW_gpu_wrapper.hh
@@ -72,10 +72,7 @@
#include "draw_texture_pool.h"
-#include "BLI_float4.hh"
-#include "BLI_int2.hh"
-#include "BLI_int3.hh"
-#include "BLI_int4.hh"
+#include "BLI_math_vec_types.hh"
#include "BLI_span.hh"
#include "BLI_utildefines.h"
#include "BLI_utility_mixins.hh"
@@ -105,7 +102,8 @@ class DataBuffer {
T *data_ = nullptr;
int64_t len_ = len;
- BLI_STATIC_ASSERT((sizeof(T) % 16) == 0, "Type need to be aligned to size of float4.");
+ BLI_STATIC_ASSERT(((sizeof(T) * len) % 16) == 0,
+ "Buffer size need to be aligned to size of float4.");
public:
/**
@@ -288,14 +286,18 @@ template<
/** The number of values that can be stored in this uniform buffer. */
int64_t len
/** True if the buffer only resides on GPU memory and cannot be accessed. */
- /* TODO(fclem): Currently unsupported. */
+ /* TODO(@fclem): Currently unsupported. */
/* bool device_only = false */>
class UniformArrayBuffer : public detail::UniformCommon<T, len, false> {
public:
UniformArrayBuffer()
{
- /* TODO(fclem) We should map memory instead. */
- this->data_ = MEM_mallocN_aligned(this->name_);
+ /* TODO(@fclem): We should map memory instead. */
+ this->data_ = (T *)MEM_mallocN_aligned(len * sizeof(T), 16, this->name_);
+ }
+ ~UniformArrayBuffer()
+ {
+ MEM_freeN(this->data_);
}
};
@@ -303,13 +305,13 @@ template<
/** Type of the values stored in this uniform buffer. */
typename T
/** True if the buffer only resides on GPU memory and cannot be accessed. */
- /* TODO(fclem): Currently unsupported. */
+ /* TODO(@fclem): Currently unsupported. */
/* bool device_only = false */>
class UniformBuffer : public T, public detail::UniformCommon<T, 1, false> {
public:
UniformBuffer()
{
- /* TODO(fclem) How could we map this? */
+ /* TODO(@fclem): How could we map this? */
this->data_ = static_cast<T *>(this);
}
@@ -484,7 +486,7 @@ class Texture : NonCopyable {
* Ensure the texture has the correct properties. Recreating it if needed.
* Return true if a texture has been created.
*/
- bool ensure_2d(eGPUTextureFormat format, const int2 &extent, float *data = nullptr, int mips = 1)
+ bool ensure_2d(eGPUTextureFormat format, int2 extent, float *data = nullptr, int mips = 1)
{
return ensure_impl(UNPACK2(extent), 0, mips, format, data, false, false);
}
@@ -493,11 +495,8 @@ class Texture : NonCopyable {
* Ensure the texture has the correct properties. Recreating it if needed.
* Return true if a texture has been created.
*/
- bool ensure_2d_array(eGPUTextureFormat format,
- const int2 &extent,
- int layers,
- float *data = nullptr,
- int mips = 1)
+ bool ensure_2d_array(
+ eGPUTextureFormat format, int2 extent, int layers, float *data = nullptr, int mips = 1)
{
return ensure_impl(UNPACK2(extent), layers, mips, format, data, true, false);
}
@@ -506,7 +505,7 @@ class Texture : NonCopyable {
* Ensure the texture has the correct properties. Recreating it if needed.
* Return true if a texture has been created.
*/
- bool ensure_3d(eGPUTextureFormat format, const int3 &extent, float *data = nullptr, int mips = 1)
+ bool ensure_3d(eGPUTextureFormat format, int3 extent, float *data = nullptr, int mips = 1)
{
return ensure_impl(UNPACK3(extent), mips, format, data, false, false);
}
@@ -599,14 +598,6 @@ class Texture : NonCopyable {
/**
* Clear the entirety of the texture using one pixel worth of data.
*/
- void clear(uchar4 values)
- {
- GPU_texture_clear(tx_, GPU_DATA_UBYTE, &values[0]);
- }
-
- /**
- * Clear the entirety of the texture using one pixel worth of data.
- */
void clear(int4 values)
{
GPU_texture_clear(tx_, GPU_DATA_INT, &values[0]);
@@ -634,6 +625,15 @@ class Texture : NonCopyable {
GPU_TEXTURE_FREE_SAFE(tx_);
}
+ /**
+ * Swap the content of the two textures.
+ */
+ static void swap(Texture &a, Texture &b)
+ {
+ SWAP(GPUTexture *, a.tx_, b.tx_);
+ SWAP(const char *, a.name_, b.name_);
+ }
+
private:
bool ensure_impl(int w,
int h = 0,
@@ -645,7 +645,7 @@ class Texture : NonCopyable {
bool cubemap = false)
{
- /* TODO(fclem) In the future, we need to check if mip_count did not change.
+ /* TODO(@fclem): In the future, we need to check if mip_count did not change.
* For now it's ok as we always define all MIP level. */
if (tx_) {
int3 size = this->size();
@@ -657,7 +657,7 @@ class Texture : NonCopyable {
if (tx_ == nullptr) {
tx_ = create(w, h, d, mips, format, data, layered, cubemap);
if (mips > 1) {
- /* TODO(fclem) Remove once we have immutable storage or when mips are
+ /* TODO(@fclem): Remove once we have immutable storage or when mips are
* generated on creation. */
GPU_texture_generate_mipmap(tx_);
}
@@ -678,20 +678,20 @@ class Texture : NonCopyable {
if (h == 0) {
return GPU_texture_create_1d(name_, w, mips, format, data);
}
- else if (d == 0) {
+ else if (cubemap) {
if (layered) {
- return GPU_texture_create_1d_array(name_, w, h, mips, format, data);
+ return GPU_texture_create_cube_array(name_, w, d, mips, format, data);
}
else {
- return GPU_texture_create_2d(name_, w, h, mips, format, data);
+ return GPU_texture_create_cube(name_, w, mips, format, data);
}
}
- else if (cubemap) {
+ else if (d == 0) {
if (layered) {
- return GPU_texture_create_cube_array(name_, w, d, mips, format, data);
+ return GPU_texture_create_1d_array(name_, w, h, mips, format, data);
}
else {
- return GPU_texture_create_cube(name_, w, mips, format, data);
+ return GPU_texture_create_2d(name_, w, h, mips, format, data);
}
}
else {
@@ -713,7 +713,7 @@ class TextureFromPool : public Texture, NonMovable {
TextureFromPool(const char *name = "gpu::Texture") : Texture(name){};
/* Always use `release()` after rendering. */
- void acquire(int w, int h, eGPUTextureFormat format, void *owner_)
+ void acquire(int2 extent, eGPUTextureFormat format, void *owner_)
{
if (this->tx_ == nullptr) {
if (tx_tmp_saved_ != nullptr) {
@@ -721,7 +721,7 @@ class TextureFromPool : public Texture, NonMovable {
return;
}
DrawEngineType *owner = (DrawEngineType *)owner_;
- this->tx_ = DRW_texture_pool_query_2d(w, h, format, owner);
+ this->tx_ = DRW_texture_pool_query_2d(UNPACK2(extent), format, owner);
}
}
@@ -750,11 +750,6 @@ class TextureFromPool : public Texture, NonMovable {
bool ensure_cube_array(int, int, int, eGPUTextureFormat, float *) = delete;
void filter_mode(bool) = delete;
void free() = delete;
- /**
- * Forbid the use of DRW_shgroup_uniform_texture.
- * Use DRW_shgroup_uniform_texture_ref instead.
- */
- operator GPUTexture *() const = delete;
};
/** \} */
@@ -805,6 +800,15 @@ class Framebuffer : NonCopyable {
{
return fb_;
}
+
+ /**
+ * Swap the content of the two framebuffer.
+ */
+ static void swap(Framebuffer &a, Framebuffer &b)
+ {
+ SWAP(GPUFrameBuffer *, a.fb_, b.fb_);
+ SWAP(const char *, a.name_, b.name_);
+ }
};
/** \} */
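
The wrapper changes above can be exercised with only the methods visible in this diff. Below is a minimal caller-side sketch, assuming the blender::draw wrappers declared in DRW_gpu_wrapper.hh; the function name, texture references, and extent are hypothetical, not part of the commit.

#include "DRW_gpu_wrapper.hh"

using namespace blender;

static void swap_color_targets(draw::Texture &tx_a, draw::Texture &tx_b)
{
  const int2 extent(512, 512);
  /* `ensure_2d()` now takes the extent by value as an `int2`. */
  tx_a.ensure_2d(GPU_RGBA16F, extent);
  tx_b.ensure_2d(GPU_RGBA16F, extent);
  /* The `uchar4` overload of `clear()` was removed; the `int4` overload remains. */
  tx_a.clear(int4(0));
  /* New in this diff: exchange the GPU handles and debug names without reallocating. */
  draw::Texture::swap(tx_a, tx_b);
}
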
diff --git a/source/blender/draw/intern/DRW_render.h b/source/blender/draw/intern/DRW_render.h
index b16caf49209..d531d8ad9f8 100644
--- a/source/blender/draw/intern/DRW_render.h
+++ b/source/blender/draw/intern/DRW_render.h
@@ -66,6 +66,15 @@
extern "C" {
#endif
+/* Uncomment to track unused resource bindings. */
+// #define DRW_UNUSED_RESOURCE_TRACKING
+
+#ifdef DRW_UNUSED_RESOURCE_TRACKING
+# define DRW_DEBUG_FILE_LINE_ARGS , const char *file, int line
+#else
+# define DRW_DEBUG_FILE_LINE_ARGS
+#endif
+
struct GPUBatch;
struct GPUMaterial;
struct GPUShader;
@@ -293,7 +302,9 @@ DRWShaderLibrary *DRW_shader_library_create(void);
/**
* \warning Each library must be added after all its dependencies.
*/
-void DRW_shader_library_add_file(DRWShaderLibrary *lib, char *lib_code, const char *lib_name);
+void DRW_shader_library_add_file(DRWShaderLibrary *lib,
+ const char *lib_code,
+ const char *lib_name);
#define DRW_SHADER_LIB_ADD(lib, lib_name) \
DRW_shader_library_add_file(lib, datatoc_##lib_name##_glsl, STRINGIFY(lib_name) ".glsl")
@@ -466,6 +477,10 @@ void DRW_shgroup_call_compute(DRWShadingGroup *shgroup,
int groups_x_len,
int groups_y_len,
int groups_z_len);
+/**
+ * \warning this keeps the ref to groups_ref until it actually dispatch.
+ */
+void DRW_shgroup_call_compute_ref(DRWShadingGroup *shgroup, int groups_ref[3]);
void DRW_shgroup_call_procedural_points(DRWShadingGroup *sh, Object *ob, uint point_count);
void DRW_shgroup_call_procedural_lines(DRWShadingGroup *sh, Object *ob, uint line_count);
void DRW_shgroup_call_procedural_triangles(DRWShadingGroup *sh, Object *ob, uint tri_count);
@@ -532,6 +547,11 @@ void DRW_shgroup_stencil_set(DRWShadingGroup *shgroup,
void DRW_shgroup_stencil_mask(DRWShadingGroup *shgroup, uint mask);
/**
+ * Issue a barrier command.
+ */
+void DRW_shgroup_barrier(DRWShadingGroup *shgroup, eGPUBarrier type);
+
+/**
* Issue a clear command.
*/
void DRW_shgroup_clear_framebuffer(DRWShadingGroup *shgroup,
@@ -557,12 +577,12 @@ void DRW_shgroup_uniform_texture(DRWShadingGroup *shgroup,
void DRW_shgroup_uniform_texture_ref(DRWShadingGroup *shgroup,
const char *name,
struct GPUTexture **tex);
-void DRW_shgroup_uniform_block(DRWShadingGroup *shgroup,
- const char *name,
- const struct GPUUniformBuf *ubo);
-void DRW_shgroup_uniform_block_ref(DRWShadingGroup *shgroup,
- const char *name,
- struct GPUUniformBuf **ubo);
+void DRW_shgroup_uniform_block_ex(DRWShadingGroup *shgroup,
+ const char *name,
+ const struct GPUUniformBuf *ubo DRW_DEBUG_FILE_LINE_ARGS);
+void DRW_shgroup_uniform_block_ref_ex(DRWShadingGroup *shgroup,
+ const char *name,
+ struct GPUUniformBuf **ubo DRW_DEBUG_FILE_LINE_ARGS);
void DRW_shgroup_uniform_float(DRWShadingGroup *shgroup,
const char *name,
const float *value,
@@ -622,9 +642,32 @@ void DRW_shgroup_uniform_vec4_array_copy(DRWShadingGroup *shgroup,
const char *name,
const float (*value)[4],
int arraysize);
-void DRW_shgroup_vertex_buffer(DRWShadingGroup *shgroup,
- const char *name,
- struct GPUVertBuf *vertex_buffer);
+void DRW_shgroup_vertex_buffer_ex(DRWShadingGroup *shgroup,
+ const char *name,
+ struct GPUVertBuf *vertex_buffer DRW_DEBUG_FILE_LINE_ARGS);
+void DRW_shgroup_vertex_buffer_ref_ex(DRWShadingGroup *shgroup,
+ const char *name,
+ struct GPUVertBuf **vertex_buffer DRW_DEBUG_FILE_LINE_ARGS);
+
+#ifdef DRW_UNUSED_RESOURCE_TRACKING
+# define DRW_shgroup_vertex_buffer(shgroup, name, vert) \
+ DRW_shgroup_vertex_buffer_ex(shgroup, name, vert, __FILE__, __LINE__)
+# define DRW_shgroup_vertex_buffer_ref(shgroup, name, vert) \
+ DRW_shgroup_vertex_buffer_ref_ex(shgroup, name, vert, __FILE__, __LINE__)
+# define DRW_shgroup_uniform_block(shgroup, name, ubo) \
+ DRW_shgroup_uniform_block_ex(shgroup, name, ubo, __FILE__, __LINE__)
+# define DRW_shgroup_uniform_block_ref(shgroup, name, ubo) \
+ DRW_shgroup_uniform_block_ref_ex(shgroup, name, ubo, __FILE__, __LINE__)
+#else
+# define DRW_shgroup_vertex_buffer(shgroup, name, vert) \
+ DRW_shgroup_vertex_buffer_ex(shgroup, name, vert)
+# define DRW_shgroup_vertex_buffer_ref(shgroup, name, vert) \
+ DRW_shgroup_vertex_buffer_ref_ex(shgroup, name, vert)
+# define DRW_shgroup_uniform_block(shgroup, name, ubo) \
+ DRW_shgroup_uniform_block_ex(shgroup, name, ubo)
+# define DRW_shgroup_uniform_block_ref(shgroup, name, ubo) \
+ DRW_shgroup_uniform_block_ref_ex(shgroup, name, ubo)
+#endif
bool DRW_shgroup_is_empty(DRWShadingGroup *shgroup);
@@ -696,7 +739,7 @@ const DRWView *DRW_view_default_get(void);
/**
* MUST only be called once per render and only in render mode. Sets default view.
*/
-void DRW_view_default_set(DRWView *view);
+void DRW_view_default_set(const DRWView *view);
/**
* \warning Only use in render AND only if you are going to set view_default again.
*/
@@ -704,7 +747,7 @@ void DRW_view_reset(void);
/**
* Set active view for rendering.
*/
-void DRW_view_set_active(DRWView *view);
+void DRW_view_set_active(const DRWView *view);
const DRWView *DRW_view_get_active(void);
/**
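
The DRW_UNUSED_RESOURCE_TRACKING block added above follows a common pattern: a trailing file/line parameter pair that call-site macros fill in with __FILE__/__LINE__ when tracking is enabled and omit otherwise. The following self-contained sketch illustrates that pattern with hypothetical names (TRACK_UNUSED, bind_buffer); it is not the DRW API itself.

#include <cstdio>

#define TRACK_UNUSED 1

#if TRACK_UNUSED
#  define FILE_LINE_ARGS , const char *file, int line
#  define bind_buffer(name, buf) bind_buffer_ex(name, buf, __FILE__, __LINE__)
#else
#  define FILE_LINE_ARGS
#  define bind_buffer(name, buf) bind_buffer_ex(name, buf)
#endif

static void bind_buffer_ex(const char *name, const void *buf FILE_LINE_ARGS)
{
  (void)name;
  (void)buf;
#if TRACK_UNUSED
  /* With tracking enabled, record where the binding was requested. */
  std::printf("%s bound from %s:%d\n", name, file, line);
#endif
}

int main()
{
  int dummy = 0;
  bind_buffer("view_ubo", &dummy); /* Call sites look the same either way. */
  return 0;
}
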
diff --git a/source/blender/draw/intern/draw_cache.c b/source/blender/draw/intern/draw_cache.c
index 1110658e3b2..430b28f1224 100644
--- a/source/blender/draw/intern/draw_cache.c
+++ b/source/blender/draw/intern/draw_cache.c
@@ -19,7 +19,7 @@
*/
#include "DNA_curve_types.h"
-#include "DNA_hair_types.h"
+#include "DNA_curves_types.h"
#include "DNA_lattice_types.h"
#include "DNA_mesh_types.h"
#include "DNA_meta_types.h"
@@ -835,7 +835,7 @@ GPUBatch *DRW_cache_object_edge_detection_get(Object *ob, bool *r_is_manifold)
return NULL;
case OB_MBALL:
return DRW_cache_mball_edge_detection_get(ob, r_is_manifold);
- case OB_HAIR:
+ case OB_CURVES:
return NULL;
case OB_POINTCLOUD:
return NULL;
@@ -859,7 +859,7 @@ GPUBatch *DRW_cache_object_face_wireframe_get(Object *ob)
return NULL;
case OB_MBALL:
return DRW_cache_mball_face_wireframe_get(ob);
- case OB_HAIR:
+ case OB_CURVES:
return NULL;
case OB_POINTCLOUD:
return DRW_pointcloud_batch_cache_get_dots(ob);
@@ -886,7 +886,7 @@ GPUBatch *DRW_cache_object_loose_edges_get(struct Object *ob)
return NULL;
case OB_MBALL:
return NULL;
- case OB_HAIR:
+ case OB_CURVES:
return NULL;
case OB_POINTCLOUD:
return NULL;
@@ -910,7 +910,7 @@ GPUBatch *DRW_cache_object_surface_get(Object *ob)
return NULL;
case OB_MBALL:
return DRW_cache_mball_surface_get(ob);
- case OB_HAIR:
+ case OB_CURVES:
return NULL;
case OB_POINTCLOUD:
return DRW_cache_pointcloud_surface_get(ob);
@@ -935,7 +935,7 @@ GPUVertBuf *DRW_cache_object_pos_vertbuf_get(Object *ob)
return DRW_curve_batch_cache_pos_vertbuf_get(ob->data);
case OB_MBALL:
return DRW_mball_batch_cache_pos_vertbuf_get(ob);
- case OB_HAIR:
+ case OB_CURVES:
return NULL;
case OB_POINTCLOUD:
return NULL;
@@ -960,15 +960,15 @@ int DRW_cache_object_material_count_get(struct Object *ob)
switch (type) {
case OB_MESH:
- return DRW_mesh_material_count_get((me != NULL) ? me : ob->data);
+ return DRW_mesh_material_count_get(ob, (me != NULL) ? me : ob->data);
case OB_CURVE:
case OB_SURF:
case OB_FONT:
return DRW_curve_material_count_get(ob->data);
case OB_MBALL:
return DRW_metaball_material_count_get(ob->data);
- case OB_HAIR:
- return DRW_hair_material_count_get(ob->data);
+ case OB_CURVES:
+ return DRW_curves_material_count_get(ob->data);
case OB_POINTCLOUD:
return DRW_pointcloud_material_count_get(ob->data);
case OB_VOLUME:
@@ -994,7 +994,7 @@ GPUBatch **DRW_cache_object_surface_material_get(struct Object *ob,
return NULL;
case OB_MBALL:
return DRW_cache_mball_surface_shaded_get(ob, gpumat_array, gpumat_array_len);
- case OB_HAIR:
+ case OB_CURVES:
return NULL;
case OB_POINTCLOUD:
return DRW_cache_pointcloud_surface_shaded_get(ob, gpumat_array, gpumat_array_len);
@@ -2875,7 +2875,7 @@ GPUBatch *DRW_cache_mesh_surface_get(Object *ob)
GPUBatch *DRW_cache_mesh_surface_edges_get(Object *ob)
{
BLI_assert(ob->type == OB_MESH);
- return DRW_mesh_batch_cache_get_surface_edges(ob->data);
+ return DRW_mesh_batch_cache_get_surface_edges(ob, ob->data);
}
GPUBatch **DRW_cache_mesh_surface_shaded_get(Object *ob,
@@ -2883,31 +2883,31 @@ GPUBatch **DRW_cache_mesh_surface_shaded_get(Object *ob,
uint gpumat_array_len)
{
BLI_assert(ob->type == OB_MESH);
- return DRW_mesh_batch_cache_get_surface_shaded(ob->data, gpumat_array, gpumat_array_len);
+ return DRW_mesh_batch_cache_get_surface_shaded(ob, ob->data, gpumat_array, gpumat_array_len);
}
GPUBatch **DRW_cache_mesh_surface_texpaint_get(Object *ob)
{
BLI_assert(ob->type == OB_MESH);
- return DRW_mesh_batch_cache_get_surface_texpaint(ob->data);
+ return DRW_mesh_batch_cache_get_surface_texpaint(ob, ob->data);
}
GPUBatch *DRW_cache_mesh_surface_texpaint_single_get(Object *ob)
{
BLI_assert(ob->type == OB_MESH);
- return DRW_mesh_batch_cache_get_surface_texpaint_single(ob->data);
+ return DRW_mesh_batch_cache_get_surface_texpaint_single(ob, ob->data);
}
GPUBatch *DRW_cache_mesh_surface_vertpaint_get(Object *ob)
{
BLI_assert(ob->type == OB_MESH);
- return DRW_mesh_batch_cache_get_surface_vertpaint(ob->data);
+ return DRW_mesh_batch_cache_get_surface_vertpaint(ob, ob->data);
}
GPUBatch *DRW_cache_mesh_surface_sculptcolors_get(Object *ob)
{
BLI_assert(ob->type == OB_MESH);
- return DRW_mesh_batch_cache_get_surface_sculpt(ob->data);
+ return DRW_mesh_batch_cache_get_surface_sculpt(ob, ob->data);
}
GPUBatch *DRW_cache_mesh_surface_weights_get(Object *ob)
@@ -3091,7 +3091,7 @@ GPUBatch **DRW_cache_surf_surface_shaded_get(Object *ob,
struct Curve *cu = ob->data;
struct Mesh *mesh_eval = BKE_object_get_evaluated_mesh_no_subsurf(ob);
if (mesh_eval != NULL) {
- return DRW_mesh_batch_cache_get_surface_shaded(mesh_eval, gpumat_array, gpumat_array_len);
+ return DRW_mesh_batch_cache_get_surface_shaded(ob, mesh_eval, gpumat_array, gpumat_array_len);
}
return DRW_curve_batch_cache_get_surface_shaded(cu, gpumat_array, gpumat_array_len);
@@ -3385,7 +3385,7 @@ void drw_batch_cache_validate(Object *ob)
struct Mesh *mesh_eval = BKE_object_get_evaluated_mesh_no_subsurf(ob);
switch (ob->type) {
case OB_MESH:
- DRW_mesh_batch_cache_validate((Mesh *)ob->data);
+ DRW_mesh_batch_cache_validate(ob, (Mesh *)ob->data);
break;
case OB_CURVE:
case OB_FONT:
@@ -3393,7 +3393,7 @@ void drw_batch_cache_validate(Object *ob)
break;
case OB_SURF:
if (mesh_eval != NULL) {
- DRW_mesh_batch_cache_validate(mesh_eval);
+ DRW_mesh_batch_cache_validate(ob, mesh_eval);
}
DRW_curve_batch_cache_validate((Curve *)ob->data);
break;
@@ -3403,8 +3403,8 @@ void drw_batch_cache_validate(Object *ob)
case OB_LATTICE:
DRW_lattice_batch_cache_validate((Lattice *)ob->data);
break;
- case OB_HAIR:
- DRW_hair_batch_cache_validate((Hair *)ob->data);
+ case OB_CURVES:
+ DRW_curves_batch_cache_validate((Curves *)ob->data);
break;
case OB_POINTCLOUD:
DRW_pointcloud_batch_cache_validate((PointCloud *)ob->data);
diff --git a/source/blender/draw/intern/draw_cache.h b/source/blender/draw/intern/draw_cache.h
index 30e5a10df91..b94dd466364 100644
--- a/source/blender/draw/intern/draw_cache.h
+++ b/source/blender/draw/intern/draw_cache.h
@@ -246,14 +246,14 @@ struct GPUBatch **DRW_cache_mball_surface_shaded_get(struct Object *ob,
struct GPUBatch *DRW_cache_mball_face_wireframe_get(struct Object *ob);
struct GPUBatch *DRW_cache_mball_edge_detection_get(struct Object *ob, bool *r_is_manifold);
-/* Hair */
-
-struct GPUBatch *DRW_cache_hair_surface_get(struct Object *ob);
-struct GPUBatch **DRW_cache_hair_surface_shaded_get(struct Object *ob,
- struct GPUMaterial **gpumat_array,
- uint gpumat_array_len);
-struct GPUBatch *DRW_cache_hair_face_wireframe_get(struct Object *ob);
-struct GPUBatch *DRW_cache_hair_edge_detection_get(struct Object *ob, bool *r_is_manifold);
+/* Curves */
+
+struct GPUBatch *DRW_cache_curves_surface_get(struct Object *ob);
+struct GPUBatch **DRW_cache_curves_surface_shaded_get(struct Object *ob,
+ struct GPUMaterial **gpumat_array,
+ uint gpumat_array_len);
+struct GPUBatch *DRW_cache_curves_face_wireframe_get(struct Object *ob);
+struct GPUBatch *DRW_cache_curves_edge_detection_get(struct Object *ob, bool *r_is_manifold);
/* PointCloud */
diff --git a/source/blender/draw/intern/draw_cache_extract.h b/source/blender/draw/intern/draw_cache_extract.h
index bd4edaf09fb..e7f66ebacd0 100644
--- a/source/blender/draw/intern/draw_cache_extract.h
+++ b/source/blender/draw/intern/draw_cache_extract.h
@@ -28,6 +28,7 @@ struct TaskGraph;
#include "DNA_customdata_types.h"
#include "BKE_attribute.h"
+#include "BKE_object.h"
#include "GPU_batch.h"
#include "GPU_index_buffer.h"
@@ -107,11 +108,13 @@ ENUM_OPERATORS(eMRDataType, MR_DATA_POLYS_SORTED)
extern "C" {
#endif
-BLI_INLINE int mesh_render_mat_len_get(const Mesh *me)
+BLI_INLINE int mesh_render_mat_len_get(const Object *object, const Mesh *me)
{
- /* In edit mode, the displayed mesh is stored in the edit-mesh. */
- if (me->edit_mesh && me->edit_mesh->mesh_eval_final) {
- return MAX2(1, me->edit_mesh->mesh_eval_final->totcol);
+ if (me->edit_mesh != NULL) {
+ const Mesh *editmesh_eval_final = BKE_object_get_editmesh_eval_final(object);
+ if (editmesh_eval_final != NULL) {
+ return MAX2(1, editmesh_eval_final->totcol);
+ }
}
return MAX2(1, me->totcol);
}
@@ -328,6 +331,7 @@ typedef struct MeshBatchCache {
void mesh_buffer_cache_create_requested(struct TaskGraph *task_graph,
MeshBatchCache *cache,
MeshBufferCache *mbc,
+ Object *object,
Mesh *me,
bool is_editmode,
bool is_paint_mode,
diff --git a/source/blender/draw/intern/draw_cache_extract_mesh.cc b/source/blender/draw/intern/draw_cache_extract_mesh.cc
index fe55778527b..987ddf3a938 100644
--- a/source/blender/draw/intern/draw_cache_extract_mesh.cc
+++ b/source/blender/draw/intern/draw_cache_extract_mesh.cc
@@ -570,6 +570,7 @@ static struct TaskNode *mesh_extract_render_data_node_create(struct TaskGraph *t
static void mesh_buffer_cache_create_requested(struct TaskGraph *task_graph,
MeshBatchCache *cache,
MeshBufferCache *mbc,
+ Object *object,
Mesh *me,
const bool is_editmode,
@@ -615,7 +616,7 @@ static void mesh_buffer_cache_create_requested(struct TaskGraph *task_graph,
*/
const bool do_hq_normals = (scene->r.perf_flag & SCE_PERF_HQ_NORMALS) != 0 ||
GPU_use_hq_normals_workaround();
- const bool override_single_mat = mesh_render_mat_len_get(me) <= 1;
+ const bool override_single_mat = mesh_render_mat_len_get(object, me) <= 1;
/* Create an array containing all the extractors that needs to be executed. */
ExtractorRunDatas extractors;
@@ -700,7 +701,7 @@ static void mesh_buffer_cache_create_requested(struct TaskGraph *task_graph,
#endif
MeshRenderData *mr = mesh_render_data_create(
- me, is_editmode, is_paint_mode, is_mode_active, obmat, do_final, do_uvedit, ts);
+ object, me, is_editmode, is_paint_mode, is_mode_active, obmat, do_final, do_uvedit, ts);
mr->use_hide = use_hide;
mr->use_subsurf_fdots = use_subsurf_fdots;
mr->use_final_mesh = do_final;
@@ -902,6 +903,7 @@ extern "C" {
void mesh_buffer_cache_create_requested(struct TaskGraph *task_graph,
MeshBatchCache *cache,
MeshBufferCache *mbc,
+ Object *object,
Mesh *me,
const bool is_editmode,
@@ -918,6 +920,7 @@ void mesh_buffer_cache_create_requested(struct TaskGraph *task_graph,
blender::draw::mesh_buffer_cache_create_requested(task_graph,
cache,
mbc,
+ object,
me,
is_editmode,
is_paint_mode,
diff --git a/source/blender/draw/intern/draw_cache_extract_mesh_render_data.c b/source/blender/draw/intern/draw_cache_extract_mesh_render_data.c
index 29430c46d93..a47a124bd24 100644
--- a/source/blender/draw/intern/draw_cache_extract_mesh_render_data.c
+++ b/source/blender/draw/intern/draw_cache_extract_mesh_render_data.c
@@ -438,7 +438,8 @@ void mesh_render_data_update_normals(MeshRenderData *mr, const eMRDataType data_
}
}
-MeshRenderData *mesh_render_data_create(Mesh *me,
+MeshRenderData *mesh_render_data_create(Object *object,
+ Mesh *me,
const bool is_editmode,
const bool is_paint_mode,
const bool is_mode_active,
@@ -449,15 +450,18 @@ MeshRenderData *mesh_render_data_create(Mesh *me,
{
MeshRenderData *mr = MEM_callocN(sizeof(*mr), __func__);
mr->toolsettings = ts;
- mr->mat_len = mesh_render_mat_len_get(me);
+ mr->mat_len = mesh_render_mat_len_get(object, me);
copy_m4_m4(mr->obmat, obmat);
if (is_editmode) {
- BLI_assert(me->edit_mesh->mesh_eval_cage && me->edit_mesh->mesh_eval_final);
+ Mesh *editmesh_eval_final = BKE_object_get_editmesh_eval_final(object);
+ Mesh *editmesh_eval_cage = BKE_object_get_editmesh_eval_cage(object);
+
+ BLI_assert(editmesh_eval_cage && editmesh_eval_final);
mr->bm = me->edit_mesh->bm;
mr->edit_bmesh = me->edit_mesh;
- mr->me = (do_final) ? me->edit_mesh->mesh_eval_final : me->edit_mesh->mesh_eval_cage;
+ mr->me = (do_final) ? editmesh_eval_final : editmesh_eval_cage;
mr->edit_data = is_mode_active ? mr->me->runtime.edit_data : NULL;
if (mr->edit_data) {
@@ -507,7 +511,7 @@ MeshRenderData *mesh_render_data_create(Mesh *me,
/* Seems like the mesh_eval_final do not have the right origin indices.
* Force not mapped in this case. */
- if (has_mdata && do_final && me->edit_mesh->mesh_eval_final != me->edit_mesh->mesh_eval_cage) {
+ if (has_mdata && do_final && editmesh_eval_final != editmesh_eval_cage) {
// mr->edit_bmesh = NULL;
mr->extract_type = MR_EXTRACT_MESH;
}
diff --git a/source/blender/draw/intern/draw_cache_impl.h b/source/blender/draw/intern/draw_cache_impl.h
index 77b507bc2b6..9c6814d910e 100644
--- a/source/blender/draw/intern/draw_cache_impl.h
+++ b/source/blender/draw/intern/draw_cache_impl.h
@@ -33,7 +33,7 @@ struct ParticleSystem;
struct TaskGraph;
struct Curve;
-struct Hair;
+struct Curves;
struct Lattice;
struct Mesh;
struct MetaBall;
@@ -60,7 +60,7 @@ void DRW_curve_batch_cache_validate(struct Curve *cu);
void DRW_curve_batch_cache_free(struct Curve *cu);
void DRW_mesh_batch_cache_dirty_tag(struct Mesh *me, eMeshBatchDirtyMode mode);
-void DRW_mesh_batch_cache_validate(struct Mesh *me);
+void DRW_mesh_batch_cache_validate(struct Object *object, struct Mesh *me);
void DRW_mesh_batch_cache_free(struct Mesh *me);
void DRW_lattice_batch_cache_dirty_tag(struct Lattice *lt, int mode);
@@ -73,9 +73,9 @@ void DRW_particle_batch_cache_free(struct ParticleSystem *psys);
void DRW_gpencil_batch_cache_dirty_tag(struct bGPdata *gpd);
void DRW_gpencil_batch_cache_free(struct bGPdata *gpd);
-void DRW_hair_batch_cache_dirty_tag(struct Hair *hair, int mode);
-void DRW_hair_batch_cache_validate(struct Hair *hair);
-void DRW_hair_batch_cache_free(struct Hair *hair);
+void DRW_curves_batch_cache_dirty_tag(struct Curves *curves, int mode);
+void DRW_curves_batch_cache_validate(struct Curves *curves);
+void DRW_curves_batch_cache_free(struct Curves *curves);
void DRW_pointcloud_batch_cache_dirty_tag(struct PointCloud *pointcloud, int mode);
void DRW_pointcloud_batch_cache_validate(struct PointCloud *pointcloud);
@@ -188,7 +188,7 @@ struct GPUBatch *DRW_lattice_batch_cache_get_edit_verts(struct Lattice *lt);
/** \name Hair
* \{ */
-int DRW_hair_material_count_get(struct Hair *hair);
+int DRW_curves_material_count_get(struct Curves *curves);
/** \} */
@@ -236,14 +236,18 @@ struct GPUBatch *DRW_mesh_batch_cache_get_all_edges(struct Mesh *me);
struct GPUBatch *DRW_mesh_batch_cache_get_loose_edges(struct Mesh *me);
struct GPUBatch *DRW_mesh_batch_cache_get_edge_detection(struct Mesh *me, bool *r_is_manifold);
struct GPUBatch *DRW_mesh_batch_cache_get_surface(struct Mesh *me);
-struct GPUBatch *DRW_mesh_batch_cache_get_surface_edges(struct Mesh *me);
-struct GPUBatch **DRW_mesh_batch_cache_get_surface_shaded(struct Mesh *me,
+struct GPUBatch *DRW_mesh_batch_cache_get_surface_edges(struct Object *object, struct Mesh *me);
+struct GPUBatch **DRW_mesh_batch_cache_get_surface_shaded(struct Object *object,
+ struct Mesh *me,
struct GPUMaterial **gpumat_array,
uint gpumat_array_len);
-struct GPUBatch **DRW_mesh_batch_cache_get_surface_texpaint(struct Mesh *me);
-struct GPUBatch *DRW_mesh_batch_cache_get_surface_texpaint_single(struct Mesh *me);
-struct GPUBatch *DRW_mesh_batch_cache_get_surface_vertpaint(struct Mesh *me);
-struct GPUBatch *DRW_mesh_batch_cache_get_surface_sculpt(struct Mesh *me);
+struct GPUBatch **DRW_mesh_batch_cache_get_surface_texpaint(struct Object *object,
+ struct Mesh *me);
+struct GPUBatch *DRW_mesh_batch_cache_get_surface_texpaint_single(struct Object *object,
+ struct Mesh *me);
+struct GPUBatch *DRW_mesh_batch_cache_get_surface_vertpaint(struct Object *object,
+ struct Mesh *me);
+struct GPUBatch *DRW_mesh_batch_cache_get_surface_sculpt(struct Object *object, struct Mesh *me);
struct GPUBatch *DRW_mesh_batch_cache_get_surface_weights(struct Mesh *me);
struct GPUBatch *DRW_mesh_batch_cache_get_sculpt_overlays(struct Mesh *me);
@@ -293,14 +297,16 @@ struct GPUBatch *DRW_mesh_batch_cache_get_wireframes_face(struct Mesh *me);
* The `cache->tot_area` and cache->tot_uv_area` update are calculation are
* only valid after calling `DRW_mesh_batch_cache_create_requested`.
*/
-struct GPUBatch *DRW_mesh_batch_cache_get_edituv_faces_stretch_area(struct Mesh *me,
+struct GPUBatch *DRW_mesh_batch_cache_get_edituv_faces_stretch_area(struct Object *object,
+ struct Mesh *me,
float **tot_area,
float **tot_uv_area);
-struct GPUBatch *DRW_mesh_batch_cache_get_edituv_faces_stretch_angle(struct Mesh *me);
-struct GPUBatch *DRW_mesh_batch_cache_get_edituv_faces(struct Mesh *me);
-struct GPUBatch *DRW_mesh_batch_cache_get_edituv_edges(struct Mesh *me);
-struct GPUBatch *DRW_mesh_batch_cache_get_edituv_verts(struct Mesh *me);
-struct GPUBatch *DRW_mesh_batch_cache_get_edituv_facedots(struct Mesh *me);
+struct GPUBatch *DRW_mesh_batch_cache_get_edituv_faces_stretch_angle(struct Object *object,
+ struct Mesh *me);
+struct GPUBatch *DRW_mesh_batch_cache_get_edituv_faces(struct Object *object, struct Mesh *me);
+struct GPUBatch *DRW_mesh_batch_cache_get_edituv_edges(struct Object *object, struct Mesh *me);
+struct GPUBatch *DRW_mesh_batch_cache_get_edituv_verts(struct Object *object, struct Mesh *me);
+struct GPUBatch *DRW_mesh_batch_cache_get_edituv_facedots(struct Object *object, struct Mesh *me);
/** \} */
@@ -308,7 +314,7 @@ struct GPUBatch *DRW_mesh_batch_cache_get_edituv_facedots(struct Mesh *me);
/** \name For Image UV Editor
* \{ */
-struct GPUBatch *DRW_mesh_batch_cache_get_uv_edges(struct Mesh *me);
+struct GPUBatch *DRW_mesh_batch_cache_get_uv_edges(struct Object *object, struct Mesh *me);
struct GPUBatch *DRW_mesh_batch_cache_get_edit_mesh_analysis(struct Mesh *me);
/** \} */
@@ -321,7 +327,7 @@ struct GPUVertBuf *DRW_mesh_batch_cache_pos_vertbuf_get(struct Mesh *me);
struct GPUVertBuf *DRW_curve_batch_cache_pos_vertbuf_get(struct Curve *cu);
struct GPUVertBuf *DRW_mball_batch_cache_pos_vertbuf_get(struct Object *ob);
-int DRW_mesh_material_count_get(const struct Mesh *me);
+int DRW_mesh_material_count_get(const struct Object *object, const struct Mesh *me);
/* See 'common_globals_lib.glsl' for duplicate defines. */
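
With the signature changes above, the mesh batch-cache getters receive the owning Object alongside the Mesh so they can reach the evaluated edit-mesh data stored on the object. A hypothetical caller-side sketch using the updated declarations (the helper name is illustrative only):

#include "DNA_object_types.h"
#include "draw_cache_impl.h"

/* Hypothetical helper; `ob` is assumed to be a mesh object. */
static struct GPUBatch *texpaint_batch_get(struct Object *ob)
{
  struct Mesh *me = (struct Mesh *)ob->data;
  /* Both the object and its mesh data are now required. */
  return DRW_mesh_batch_cache_get_surface_texpaint_single(ob, me);
}
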
diff --git a/source/blender/draw/intern/draw_cache_impl_hair.cc b/source/blender/draw/intern/draw_cache_impl_curves.cc
index d236f0c2f59..8ec97495fcf 100644
--- a/source/blender/draw/intern/draw_cache_impl_hair.cc
+++ b/source/blender/draw/intern/draw_cache_impl_curves.cc
@@ -29,13 +29,16 @@
#include "BLI_listbase.h"
#include "BLI_math_base.h"
+#include "BLI_math_vec_types.hh"
#include "BLI_math_vector.h"
+#include "BLI_math_vector.hh"
+#include "BLI_span.hh"
#include "BLI_utildefines.h"
-#include "DNA_hair_types.h"
+#include "DNA_curves_types.h"
#include "DNA_object_types.h"
-#include "BKE_hair.h"
+#include "BKE_curves.h"
#include "GPU_batch.h"
#include "GPU_material.h"
@@ -44,7 +47,11 @@
#include "draw_cache_impl.h" /* own include */
#include "draw_hair_private.h" /* own include */
-static void hair_batch_cache_clear(Hair *hair);
+using blender::float3;
+using blender::IndexRange;
+using blender::Span;
+
+static void curves_batch_cache_clear(Curves *curves);
/* ---------------------------------------------------------------------- */
/* Hair GPUBatch Cache */
@@ -58,19 +65,19 @@ struct HairBatchCache {
/* GPUBatch cache management. */
-static bool hair_batch_cache_valid(Hair *hair)
+static bool curves_batch_cache_valid(Curves *curves)
{
- HairBatchCache *cache = static_cast<HairBatchCache *>(hair->batch_cache);
+ HairBatchCache *cache = static_cast<HairBatchCache *>(curves->batch_cache);
return (cache && cache->is_dirty == false);
}
-static void hair_batch_cache_init(Hair *hair)
+static void curves_batch_cache_init(Curves *curves)
{
- HairBatchCache *cache = static_cast<HairBatchCache *>(hair->batch_cache);
+ HairBatchCache *cache = static_cast<HairBatchCache *>(curves->batch_cache);
if (!cache) {
cache = MEM_cnew<HairBatchCache>(__func__);
- hair->batch_cache = cache;
+ curves->batch_cache = cache;
}
else {
memset(cache, 0, sizeof(*cache));
@@ -79,28 +86,28 @@ static void hair_batch_cache_init(Hair *hair)
cache->is_dirty = false;
}
-void DRW_hair_batch_cache_validate(Hair *hair)
+void DRW_curves_batch_cache_validate(Curves *curves)
{
- if (!hair_batch_cache_valid(hair)) {
- hair_batch_cache_clear(hair);
- hair_batch_cache_init(hair);
+ if (!curves_batch_cache_valid(curves)) {
+ curves_batch_cache_clear(curves);
+ curves_batch_cache_init(curves);
}
}
-static HairBatchCache *hair_batch_cache_get(Hair *hair)
+static HairBatchCache *curves_batch_cache_get(Curves *curves)
{
- DRW_hair_batch_cache_validate(hair);
- return static_cast<HairBatchCache *>(hair->batch_cache);
+ DRW_curves_batch_cache_validate(curves);
+ return static_cast<HairBatchCache *>(curves->batch_cache);
}
-void DRW_hair_batch_cache_dirty_tag(Hair *hair, int mode)
+void DRW_curves_batch_cache_dirty_tag(Curves *curves, int mode)
{
- HairBatchCache *cache = static_cast<HairBatchCache *>(hair->batch_cache);
+ HairBatchCache *cache = static_cast<HairBatchCache *>(curves->batch_cache);
if (cache == nullptr) {
return;
}
switch (mode) {
- case BKE_HAIR_BATCH_DIRTY_ALL:
+ case BKE_CURVES_BATCH_DIRTY_ALL:
cache->is_dirty = true;
break;
default:
@@ -108,9 +115,9 @@ void DRW_hair_batch_cache_dirty_tag(Hair *hair, int mode)
}
}
-static void hair_batch_cache_clear(Hair *hair)
+static void curves_batch_cache_clear(Curves *curves)
{
- HairBatchCache *cache = static_cast<HairBatchCache *>(hair->batch_cache);
+ HairBatchCache *cache = static_cast<HairBatchCache *>(curves->batch_cache);
if (!cache) {
return;
}
@@ -118,69 +125,67 @@ static void hair_batch_cache_clear(Hair *hair)
particle_batch_cache_clear_hair(&cache->hair);
}
-void DRW_hair_batch_cache_free(Hair *hair)
+void DRW_curves_batch_cache_free(Curves *curves)
{
- hair_batch_cache_clear(hair);
- MEM_SAFE_FREE(hair->batch_cache);
+ curves_batch_cache_clear(curves);
+ MEM_SAFE_FREE(curves->batch_cache);
}
-static void ensure_seg_pt_count(Hair *hair, ParticleHairCache *hair_cache)
+static void ensure_seg_pt_count(Curves *curves, ParticleHairCache *curves_cache)
{
- if ((hair_cache->pos != nullptr && hair_cache->indices != nullptr) ||
- (hair_cache->proc_point_buf != nullptr)) {
+ if ((curves_cache->pos != nullptr && curves_cache->indices != nullptr) ||
+ (curves_cache->proc_point_buf != nullptr)) {
return;
}
- hair_cache->strands_len = 0;
- hair_cache->elems_len = 0;
- hair_cache->point_len = 0;
-
- HairCurve *curve = hair->curves;
- int num_curves = hair->totcurve;
- for (int i = 0; i < num_curves; i++, curve++) {
- hair_cache->strands_len++;
- hair_cache->elems_len += curve->numpoints + 1;
- hair_cache->point_len += curve->numpoints;
- }
+ curves_cache->strands_len = curves->geometry.curve_size;
+ curves_cache->elems_len = curves->geometry.point_size + curves->geometry.curve_size;
+ curves_cache->point_len = curves->geometry.point_size;
}
-static void hair_batch_cache_fill_segments_proc_pos(Hair *hair,
- GPUVertBufRaw *attr_step,
- GPUVertBufRaw *length_step)
+static void curves_batch_cache_fill_segments_proc_pos(Curves *curves,
+ GPUVertBufRaw *attr_step,
+ GPUVertBufRaw *length_step)
{
/* TODO: use hair radius layer if available. */
- HairCurve *curve = hair->curves;
- int num_curves = hair->totcurve;
- for (int i = 0; i < num_curves; i++, curve++) {
- float(*curve_co)[3] = hair->co + curve->firstpoint;
+ const int curve_size = curves->geometry.curve_size;
+ Span<int> offsets{curves->geometry.offsets, curves->geometry.curve_size + 1};
+
+ Span<float3> positions{(float3 *)curves->geometry.position, curves->geometry.point_size};
+
+ for (const int i : IndexRange(curve_size)) {
+ const IndexRange curve_range(offsets[i], offsets[i + 1] - offsets[i]);
+
+ Span<float3> spline_positions = positions.slice(curve_range);
float total_len = 0.0f;
- float *co_prev = nullptr, *seg_data_first;
- for (int j = 0; j < curve->numpoints; j++) {
+ float *seg_data_first;
+ for (const int i_spline : spline_positions.index_range()) {
float *seg_data = (float *)GPU_vertbuf_raw_step(attr_step);
- copy_v3_v3(seg_data, curve_co[j]);
- if (co_prev) {
- total_len += len_v3v3(co_prev, curve_co[j]);
+ copy_v3_v3(seg_data, spline_positions[i_spline]);
+ if (i_spline == 0) {
+ seg_data_first = seg_data;
}
else {
- seg_data_first = seg_data;
+ total_len += blender::math::distance(spline_positions[i_spline - 1],
+ spline_positions[i_spline]);
}
seg_data[3] = total_len;
- co_prev = curve_co[j];
}
/* Assign length value. */
*(float *)GPU_vertbuf_raw_step(length_step) = total_len;
if (total_len > 0.0f) {
/* Divide by total length to have a [0-1] number. */
- for (int j = 0; j < curve->numpoints; j++, seg_data_first += 4) {
+ for ([[maybe_unused]] const int i_spline : spline_positions.index_range()) {
seg_data_first[3] /= total_len;
+ seg_data_first += 4;
}
}
}
}
-static void hair_batch_cache_ensure_procedural_pos(Hair *hair,
- ParticleHairCache *cache,
- GPUMaterial *gpu_material)
+static void curves_batch_cache_ensure_procedural_pos(Curves *curves,
+ ParticleHairCache *cache,
+ GPUMaterial *gpu_material)
{
if (cache->proc_point_buf == nullptr) {
/* initialize vertex format */
@@ -203,7 +208,7 @@ static void hair_batch_cache_ensure_procedural_pos(Hair *hair,
GPUVertBufRaw length_step;
GPU_vertbuf_attr_get_raw_data(cache->proc_length_buf, length_id, &length_step);
- hair_batch_cache_fill_segments_proc_pos(hair, &point_step, &length_step);
+ curves_batch_cache_fill_segments_proc_pos(curves, &point_step, &length_step);
/* Create vbo immediately to bind to texture buffer. */
GPU_vertbuf_use(cache->proc_point_buf);
@@ -222,19 +227,23 @@ static void hair_batch_cache_ensure_procedural_pos(Hair *hair,
}
}
-static void hair_batch_cache_fill_strands_data(Hair *hair,
- GPUVertBufRaw *data_step,
- GPUVertBufRaw *seg_step)
+static void curves_batch_cache_fill_strands_data(Curves *curves,
+ GPUVertBufRaw *data_step,
+ GPUVertBufRaw *seg_step)
{
- HairCurve *curve = hair->curves;
- int num_curves = hair->totcurve;
- for (int i = 0; i < num_curves; i++, curve++) {
- *(uint *)GPU_vertbuf_raw_step(data_step) = curve->firstpoint;
- *(ushort *)GPU_vertbuf_raw_step(seg_step) = curve->numpoints - 1;
+ const int curve_size = curves->geometry.curve_size;
+ Span<int> offsets{curves->geometry.offsets, curves->geometry.curve_size + 1};
+
+ for (const int i : IndexRange(curve_size)) {
+ const IndexRange curve_range(offsets[i], offsets[i + 1] - offsets[i]);
+
+ *(uint *)GPU_vertbuf_raw_step(data_step) = curve_range.start();
+ *(ushort *)GPU_vertbuf_raw_step(seg_step) = curve_range.size() - 1;
}
}
-static void hair_batch_cache_ensure_procedural_strand_data(Hair *hair, ParticleHairCache *cache)
+static void curves_batch_cache_ensure_procedural_strand_data(Curves *curves,
+ ParticleHairCache *cache)
{
GPUVertBufRaw data_step, seg_step;
@@ -253,18 +262,18 @@ static void hair_batch_cache_ensure_procedural_strand_data(Hair *hair, ParticleH
GPU_vertbuf_data_alloc(cache->proc_strand_seg_buf, cache->strands_len);
GPU_vertbuf_attr_get_raw_data(cache->proc_strand_seg_buf, seg_id, &seg_step);
- hair_batch_cache_fill_strands_data(hair, &data_step, &seg_step);
+ curves_batch_cache_fill_strands_data(curves, &data_step, &seg_step);
/* Create vbo immediately to bind to texture buffer. */
GPU_vertbuf_use(cache->proc_strand_buf);
- cache->strand_tex = GPU_texture_create_from_vertbuf("hair_strand", cache->proc_strand_buf);
+ cache->strand_tex = GPU_texture_create_from_vertbuf("curves_strand", cache->proc_strand_buf);
GPU_vertbuf_use(cache->proc_strand_seg_buf);
- cache->strand_seg_tex = GPU_texture_create_from_vertbuf("hair_strand_seg",
+ cache->strand_seg_tex = GPU_texture_create_from_vertbuf("curves_strand_seg",
cache->proc_strand_seg_buf);
}
-static void hair_batch_cache_ensure_procedural_final_points(ParticleHairCache *cache, int subdiv)
+static void curves_batch_cache_ensure_procedural_final_points(ParticleHairCache *cache, int subdiv)
{
/* Same format as point_tex. */
GPUVertFormat format = {0};
@@ -285,14 +294,15 @@ static void hair_batch_cache_ensure_procedural_final_points(ParticleHairCache *c
cache->final[subdiv].proc_buf);
}
-static void hair_batch_cache_fill_segments_indices(Hair *hair,
- const int res,
- GPUIndexBufBuilder *elb)
+static void curves_batch_cache_fill_segments_indices(Curves *curves,
+ const int res,
+ GPUIndexBufBuilder *elb)
{
- HairCurve *curve = hair->curves;
- int num_curves = hair->totcurve;
+ const int curve_size = curves->geometry.curve_size;
+
uint curr_point = 0;
- for (int i = 0; i < num_curves; i++, curve++) {
+
+ for ([[maybe_unused]] const int i : IndexRange(curve_size)) {
for (int k = 0; k < res; k++) {
GPU_indexbuf_add_generic_vert(elb, curr_point++);
}
@@ -300,10 +310,10 @@ static void hair_batch_cache_fill_segments_indices(Hair *hair,
}
}
-static void hair_batch_cache_ensure_procedural_indices(Hair *hair,
- ParticleHairCache *cache,
- int thickness_res,
- int subdiv)
+static void curves_batch_cache_ensure_procedural_indices(Curves *curves,
+ ParticleHairCache *cache,
+ int thickness_res,
+ int subdiv)
{
BLI_assert(thickness_res <= MAX_THICKRES); /* Cylinder strip not currently supported. */
@@ -328,7 +338,7 @@ static void hair_batch_cache_ensure_procedural_indices(Hair *hair,
GPUIndexBufBuilder elb;
GPU_indexbuf_init_ex(&elb, prim_type, element_count, element_count);
- hair_batch_cache_fill_segments_indices(hair, verts_per_hair, &elb);
+ curves_batch_cache_fill_segments_indices(curves, verts_per_hair, &elb);
cache->final[subdiv].proc_hairs[thickness_res - 1] = GPU_batch_create_ex(
prim_type, vbo, GPU_indexbuf_build(&elb), GPU_BATCH_OWNS_VBO | GPU_BATCH_OWNS_INDEX);
@@ -341,9 +351,9 @@ bool hair_ensure_procedural_data(Object *object,
int thickness_res)
{
bool need_ft_update = false;
- Hair *hair = static_cast<Hair *>(object->data);
+ Curves *curves = static_cast<Curves *>(object->data);
- HairBatchCache *cache = hair_batch_cache_get(hair);
+ HairBatchCache *cache = curves_batch_cache_get(curves);
*r_hair_cache = &cache->hair;
const int steps = 2; /* TODO: don't hard-code? */
@@ -351,29 +361,29 @@ bool hair_ensure_procedural_data(Object *object,
/* Refreshed on combing and simulation. */
if ((*r_hair_cache)->proc_point_buf == nullptr) {
- ensure_seg_pt_count(hair, &cache->hair);
- hair_batch_cache_ensure_procedural_pos(hair, &cache->hair, gpu_material);
+ ensure_seg_pt_count(curves, &cache->hair);
+ curves_batch_cache_ensure_procedural_pos(curves, &cache->hair, gpu_material);
need_ft_update = true;
}
/* Refreshed if active layer or custom data changes. */
if ((*r_hair_cache)->strand_tex == nullptr) {
- hair_batch_cache_ensure_procedural_strand_data(hair, &cache->hair);
+ curves_batch_cache_ensure_procedural_strand_data(curves, &cache->hair);
}
/* Refreshed only on subdiv count change. */
if ((*r_hair_cache)->final[subdiv].proc_buf == nullptr) {
- hair_batch_cache_ensure_procedural_final_points(&cache->hair, subdiv);
+ curves_batch_cache_ensure_procedural_final_points(&cache->hair, subdiv);
need_ft_update = true;
}
if ((*r_hair_cache)->final[subdiv].proc_hairs[thickness_res - 1] == nullptr) {
- hair_batch_cache_ensure_procedural_indices(hair, &cache->hair, thickness_res, subdiv);
+ curves_batch_cache_ensure_procedural_indices(curves, &cache->hair, thickness_res, subdiv);
}
return need_ft_update;
}
-int DRW_hair_material_count_get(Hair *hair)
+int DRW_curves_material_count_get(Curves *curves)
{
- return max_ii(1, hair->totcol);
+ return max_ii(1, curves->totcol);
}
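
The rewritten fill functions above iterate curves through the geometry offsets array: curve i owns the points [offsets[i], offsets[i + 1]), and each point stores its arc length along the curve, normalized to [0, 1]. The following standalone sketch shows that indexing and normalization logic with plain std:: types instead of the BLI containers; names and the output layout are simplified for illustration.

#include <cmath>
#include <vector>

struct float3 {
  float x, y, z;
};

static float distance(const float3 &a, const float3 &b)
{
  const float dx = a.x - b.x, dy = a.y - b.y, dz = a.z - b.z;
  return std::sqrt(dx * dx + dy * dy + dz * dz);
}

/* `offsets` has one entry per curve plus a trailing total point count;
 * `r_lengths` receives one normalized arc length per point. */
static void fill_normalized_lengths(const std::vector<float3> &positions,
                                    const std::vector<int> &offsets,
                                    std::vector<float> &r_lengths)
{
  for (size_t curve = 0; curve + 1 < offsets.size(); curve++) {
    const int start = offsets[curve];
    const int end = offsets[curve + 1];
    float total = 0.0f;
    for (int i = start; i < end; i++) {
      if (i > start) {
        total += distance(positions[i - 1], positions[i]);
      }
      r_lengths[i] = total; /* Cumulative length so far. */
    }
    if (total > 0.0f) {
      for (int i = start; i < end; i++) {
        r_lengths[i] /= total; /* Last point of each curve maps to 1.0. */
      }
    }
  }
}
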
diff --git a/source/blender/draw/intern/draw_cache_impl_mesh.c b/source/blender/draw/intern/draw_cache_impl_mesh.c
index 1e5ffc14911..e7e0e97499f 100644
--- a/source/blender/draw/intern/draw_cache_impl_mesh.c
+++ b/source/blender/draw/intern/draw_cache_impl_mesh.c
@@ -280,9 +280,16 @@ BLI_INLINE void mesh_cd_layers_type_clear(DRW_MeshCDMask *a)
*((uint32_t *)a) = 0;
}
-BLI_INLINE const Mesh *editmesh_final_or_this(const Mesh *me)
+BLI_INLINE const Mesh *editmesh_final_or_this(const Object *object, const Mesh *me)
{
- return (me->edit_mesh && me->edit_mesh->mesh_eval_final) ? me->edit_mesh->mesh_eval_final : me;
+ if (me->edit_mesh != NULL) {
+ Mesh *editmesh_eval_final = BKE_object_get_editmesh_eval_final(object);
+ if (editmesh_eval_final != NULL) {
+ return editmesh_eval_final;
+ }
+ }
+
+ return me;
}
static void mesh_cd_calc_edit_uv_layer(const Mesh *UNUSED(me), DRW_MeshCDMask *cd_used)
@@ -443,9 +450,11 @@ BLI_INLINE const CustomData *mesh_cd_vdata_get_from_mesh(const Mesh *me)
return &me->vdata;
}
-static void mesh_cd_calc_active_uv_layer(const Mesh *me, DRW_MeshCDMask *cd_used)
+static void mesh_cd_calc_active_uv_layer(const Object *object,
+ const Mesh *me,
+ DRW_MeshCDMask *cd_used)
{
- const Mesh *me_final = editmesh_final_or_this(me);
+ const Mesh *me_final = editmesh_final_or_this(object, me);
const CustomData *cd_ldata = mesh_cd_ldata_get_from_mesh(me_final);
int layer = CustomData_get_active_layer(cd_ldata, CD_MLOOPUV);
if (layer != -1) {
@@ -453,9 +462,11 @@ static void mesh_cd_calc_active_uv_layer(const Mesh *me, DRW_MeshCDMask *cd_used
}
}
-static void mesh_cd_calc_active_mask_uv_layer(const Mesh *me, DRW_MeshCDMask *cd_used)
+static void mesh_cd_calc_active_mask_uv_layer(const Object *object,
+ const Mesh *me,
+ DRW_MeshCDMask *cd_used)
{
- const Mesh *me_final = editmesh_final_or_this(me);
+ const Mesh *me_final = editmesh_final_or_this(object, me);
const CustomData *cd_ldata = mesh_cd_ldata_get_from_mesh(me_final);
int layer = CustomData_get_stencil_layer(cd_ldata, CD_MLOOPUV);
if (layer != -1) {
@@ -463,9 +474,11 @@ static void mesh_cd_calc_active_mask_uv_layer(const Mesh *me, DRW_MeshCDMask *cd
}
}
-static void mesh_cd_calc_active_vcol_layer(const Mesh *me, DRW_MeshAttributes *attrs_used)
+static void mesh_cd_calc_active_vcol_layer(const Object *object,
+ const Mesh *me,
+ DRW_MeshAttributes *attrs_used)
{
- const Mesh *me_final = editmesh_final_or_this(me);
+ const Mesh *me_final = editmesh_final_or_this(object, me);
const CustomData *cd_vdata = mesh_cd_vdata_get_from_mesh(me_final);
int layer = CustomData_get_active_layer(cd_vdata, CD_PROP_COLOR);
@@ -474,9 +487,11 @@ static void mesh_cd_calc_active_vcol_layer(const Mesh *me, DRW_MeshAttributes *a
}
}
-static void mesh_cd_calc_active_mloopcol_layer(const Mesh *me, DRW_MeshCDMask *cd_used)
+static void mesh_cd_calc_active_mloopcol_layer(const Object *object,
+ const Mesh *me,
+ DRW_MeshCDMask *cd_used)
{
- const Mesh *me_final = editmesh_final_or_this(me);
+ const Mesh *me_final = editmesh_final_or_this(object, me);
const CustomData *cd_ldata = mesh_cd_ldata_get_from_mesh(me_final);
int layer = CustomData_get_active_layer(cd_ldata, CD_MLOOPCOL);
@@ -490,8 +505,9 @@ static bool custom_data_match_attribute(const CustomData *custom_data,
int *r_layer_index,
int *r_type)
{
- const int possible_attribute_types[6] = {
+ const int possible_attribute_types[7] = {
CD_PROP_BOOL,
+ CD_PROP_INT8,
CD_PROP_INT32,
CD_PROP_FLOAT,
CD_PROP_FLOAT2,
@@ -514,12 +530,13 @@ static bool custom_data_match_attribute(const CustomData *custom_data,
return false;
}
-static DRW_MeshCDMask mesh_cd_calc_used_gpu_layers(const Mesh *me,
+static DRW_MeshCDMask mesh_cd_calc_used_gpu_layers(const Object *object,
+ const Mesh *me,
struct GPUMaterial **gpumat_array,
int gpumat_array_len,
DRW_MeshAttributes *attributes)
{
- const Mesh *me_final = editmesh_final_or_this(me);
+ const Mesh *me_final = editmesh_final_or_this(object, me);
const CustomData *cd_ldata = mesh_cd_ldata_get_from_mesh(me_final);
const CustomData *cd_pdata = mesh_cd_pdata_get_from_mesh(me_final);
const CustomData *cd_vdata = mesh_cd_vdata_get_from_mesh(me_final);
@@ -639,6 +656,7 @@ static DRW_MeshCDMask mesh_cd_calc_used_gpu_layers(const Mesh *me,
break;
}
case CD_PROP_BOOL:
+ case CD_PROP_INT8:
case CD_PROP_INT32:
case CD_PROP_FLOAT:
case CD_PROP_FLOAT2:
@@ -793,7 +811,7 @@ BLI_INLINE void mesh_batch_cache_add_request(MeshBatchCache *cache, DRWBatchFlag
/* GPUBatch cache management. */
-static bool mesh_batch_cache_valid(Mesh *me)
+static bool mesh_batch_cache_valid(Object *object, Mesh *me)
{
MeshBatchCache *cache = me->runtime.batch_cache;
@@ -809,14 +827,14 @@ static bool mesh_batch_cache_valid(Mesh *me)
return false;
}
- if (cache->mat_len != mesh_render_mat_len_get(me)) {
+ if (cache->mat_len != mesh_render_mat_len_get(object, me)) {
return false;
}
return true;
}
-static void mesh_batch_cache_init(Mesh *me)
+static void mesh_batch_cache_init(Object *object, Mesh *me)
{
MeshBatchCache *cache = me->runtime.batch_cache;
@@ -836,7 +854,7 @@ static void mesh_batch_cache_init(Mesh *me)
// cache->vert_len = mesh_render_verts_len_get(me);
}
- cache->mat_len = mesh_render_mat_len_get(me);
+ cache->mat_len = mesh_render_mat_len_get(object, me);
cache->surface_per_mat = MEM_callocN(sizeof(*cache->surface_per_mat) * cache->mat_len, __func__);
cache->tris_per_mat = MEM_callocN(sizeof(*cache->tris_per_mat) * cache->mat_len, __func__);
@@ -847,11 +865,11 @@ static void mesh_batch_cache_init(Mesh *me)
drw_mesh_weight_state_clear(&cache->weight_state);
}
-void DRW_mesh_batch_cache_validate(Mesh *me)
+void DRW_mesh_batch_cache_validate(Object *object, Mesh *me)
{
- if (!mesh_batch_cache_valid(me)) {
+ if (!mesh_batch_cache_valid(object, me)) {
mesh_batch_cache_clear(me);
- mesh_batch_cache_init(me);
+ mesh_batch_cache_init(object, me);
}
}
@@ -1095,24 +1113,24 @@ void DRW_mesh_batch_cache_free(Mesh *me)
/** \name Public API
* \{ */
-static void texpaint_request_active_uv(MeshBatchCache *cache, Mesh *me)
+static void texpaint_request_active_uv(MeshBatchCache *cache, Object *object, Mesh *me)
{
DRW_MeshCDMask cd_needed;
mesh_cd_layers_type_clear(&cd_needed);
- mesh_cd_calc_active_uv_layer(me, &cd_needed);
+ mesh_cd_calc_active_uv_layer(object, me, &cd_needed);
BLI_assert(cd_needed.uv != 0 &&
"No uv layer available in texpaint, but batches requested anyway!");
- mesh_cd_calc_active_mask_uv_layer(me, &cd_needed);
+ mesh_cd_calc_active_mask_uv_layer(object, me, &cd_needed);
mesh_cd_layers_type_merge(&cache->cd_needed, cd_needed);
}
-static void texpaint_request_active_vcol(MeshBatchCache *cache, Mesh *me)
+static void texpaint_request_active_vcol(MeshBatchCache *cache, Object *object, Mesh *me)
{
DRW_MeshCDMask cd_needed;
mesh_cd_layers_type_clear(&cd_needed);
- mesh_cd_calc_active_mloopcol_layer(me, &cd_needed);
+ mesh_cd_calc_active_mloopcol_layer(object, me, &cd_needed);
BLI_assert(cd_needed.vcol != 0 &&
"No MLOOPCOL layer available in vertpaint, but batches requested anyway!");
@@ -1120,11 +1138,11 @@ static void texpaint_request_active_vcol(MeshBatchCache *cache, Mesh *me)
mesh_cd_layers_type_merge(&cache->cd_needed, cd_needed);
}
-static void sculpt_request_active_vcol(MeshBatchCache *cache, Mesh *me)
+static void sculpt_request_active_vcol(MeshBatchCache *cache, Object *object, Mesh *me)
{
DRW_MeshAttributes attrs_needed;
drw_mesh_attributes_clear(&attrs_needed);
- mesh_cd_calc_active_vcol_layer(me, &attrs_needed);
+ mesh_cd_calc_active_vcol_layer(object, me, &attrs_needed);
BLI_assert(attrs_needed.num_requests != 0 &&
"No MPropCol layer available in Sculpt, but batches requested anyway!");
@@ -1197,7 +1215,8 @@ GPUBatch *DRW_mesh_batch_cache_get_edit_mesh_analysis(Mesh *me)
return DRW_batch_request(&cache->batch.edit_mesh_analysis);
}
-GPUBatch **DRW_mesh_batch_cache_get_surface_shaded(Mesh *me,
+GPUBatch **DRW_mesh_batch_cache_get_surface_shaded(Object *object,
+ Mesh *me,
struct GPUMaterial **gpumat_array,
uint gpumat_array_len)
{
@@ -1205,7 +1224,7 @@ GPUBatch **DRW_mesh_batch_cache_get_surface_shaded(Mesh *me,
DRW_MeshAttributes attrs_needed;
drw_mesh_attributes_clear(&attrs_needed);
DRW_MeshCDMask cd_needed = mesh_cd_calc_used_gpu_layers(
- me, gpumat_array, gpumat_array_len, &attrs_needed);
+ object, me, gpumat_array, gpumat_array_len, &attrs_needed);
BLI_assert(gpumat_array_len == cache->mat_len);
@@ -1216,41 +1235,41 @@ GPUBatch **DRW_mesh_batch_cache_get_surface_shaded(Mesh *me,
return cache->surface_per_mat;
}
-GPUBatch **DRW_mesh_batch_cache_get_surface_texpaint(Mesh *me)
+GPUBatch **DRW_mesh_batch_cache_get_surface_texpaint(Object *object, Mesh *me)
{
MeshBatchCache *cache = mesh_batch_cache_get(me);
- texpaint_request_active_uv(cache, me);
+ texpaint_request_active_uv(cache, object, me);
mesh_batch_cache_request_surface_batches(cache);
return cache->surface_per_mat;
}
-GPUBatch *DRW_mesh_batch_cache_get_surface_texpaint_single(Mesh *me)
+GPUBatch *DRW_mesh_batch_cache_get_surface_texpaint_single(Object *object, Mesh *me)
{
MeshBatchCache *cache = mesh_batch_cache_get(me);
- texpaint_request_active_uv(cache, me);
+ texpaint_request_active_uv(cache, object, me);
mesh_batch_cache_request_surface_batches(cache);
return cache->batch.surface;
}
-GPUBatch *DRW_mesh_batch_cache_get_surface_vertpaint(Mesh *me)
+GPUBatch *DRW_mesh_batch_cache_get_surface_vertpaint(Object *object, Mesh *me)
{
MeshBatchCache *cache = mesh_batch_cache_get(me);
- texpaint_request_active_vcol(cache, me);
+ texpaint_request_active_vcol(cache, object, me);
mesh_batch_cache_request_surface_batches(cache);
return cache->batch.surface;
}
-GPUBatch *DRW_mesh_batch_cache_get_surface_sculpt(Mesh *me)
+GPUBatch *DRW_mesh_batch_cache_get_surface_sculpt(Object *object, Mesh *me)
{
MeshBatchCache *cache = mesh_batch_cache_get(me);
- sculpt_request_active_vcol(cache, me);
+ sculpt_request_active_vcol(cache, object, me);
mesh_batch_cache_request_surface_batches(cache);
return cache->batch.surface;
}
-int DRW_mesh_material_count_get(const Mesh *me)
+int DRW_mesh_material_count_get(const Object *object, const Mesh *me)
{
- return mesh_render_mat_len_get(me);
+ return mesh_render_mat_len_get(object, me);
}
GPUBatch *DRW_mesh_batch_cache_get_sculpt_overlays(Mesh *me)
@@ -1375,26 +1394,27 @@ GPUBatch *DRW_mesh_batch_cache_get_verts_with_select_id(Mesh *me)
/** \name UV Image editor API
* \{ */
-static void edituv_request_active_uv(MeshBatchCache *cache, Mesh *me)
+static void edituv_request_active_uv(MeshBatchCache *cache, Object *object, Mesh *me)
{
DRW_MeshCDMask cd_needed;
mesh_cd_layers_type_clear(&cd_needed);
- mesh_cd_calc_active_uv_layer(me, &cd_needed);
+ mesh_cd_calc_active_uv_layer(object, me, &cd_needed);
mesh_cd_calc_edit_uv_layer(me, &cd_needed);
BLI_assert(cd_needed.edit_uv != 0 &&
"No uv layer available in edituv, but batches requested anyway!");
- mesh_cd_calc_active_mask_uv_layer(me, &cd_needed);
+ mesh_cd_calc_active_mask_uv_layer(object, me, &cd_needed);
mesh_cd_layers_type_merge(&cache->cd_needed, cd_needed);
}
-GPUBatch *DRW_mesh_batch_cache_get_edituv_faces_stretch_area(Mesh *me,
+GPUBatch *DRW_mesh_batch_cache_get_edituv_faces_stretch_area(Object *object,
+ Mesh *me,
float **tot_area,
float **tot_uv_area)
{
MeshBatchCache *cache = mesh_batch_cache_get(me);
- edituv_request_active_uv(cache, me);
+ edituv_request_active_uv(cache, object, me);
mesh_batch_cache_add_request(cache, MBC_EDITUV_FACES_STRETCH_AREA);
if (tot_area != NULL) {
@@ -1406,58 +1426,58 @@ GPUBatch *DRW_mesh_batch_cache_get_edituv_faces_stretch_area(Mesh *me,
return DRW_batch_request(&cache->batch.edituv_faces_stretch_area);
}
-GPUBatch *DRW_mesh_batch_cache_get_edituv_faces_stretch_angle(Mesh *me)
+GPUBatch *DRW_mesh_batch_cache_get_edituv_faces_stretch_angle(Object *object, Mesh *me)
{
MeshBatchCache *cache = mesh_batch_cache_get(me);
- edituv_request_active_uv(cache, me);
+ edituv_request_active_uv(cache, object, me);
mesh_batch_cache_add_request(cache, MBC_EDITUV_FACES_STRETCH_ANGLE);
return DRW_batch_request(&cache->batch.edituv_faces_stretch_angle);
}
-GPUBatch *DRW_mesh_batch_cache_get_edituv_faces(Mesh *me)
+GPUBatch *DRW_mesh_batch_cache_get_edituv_faces(Object *object, Mesh *me)
{
MeshBatchCache *cache = mesh_batch_cache_get(me);
- edituv_request_active_uv(cache, me);
+ edituv_request_active_uv(cache, object, me);
mesh_batch_cache_add_request(cache, MBC_EDITUV_FACES);
return DRW_batch_request(&cache->batch.edituv_faces);
}
-GPUBatch *DRW_mesh_batch_cache_get_edituv_edges(Mesh *me)
+GPUBatch *DRW_mesh_batch_cache_get_edituv_edges(Object *object, Mesh *me)
{
MeshBatchCache *cache = mesh_batch_cache_get(me);
- edituv_request_active_uv(cache, me);
+ edituv_request_active_uv(cache, object, me);
mesh_batch_cache_add_request(cache, MBC_EDITUV_EDGES);
return DRW_batch_request(&cache->batch.edituv_edges);
}
-GPUBatch *DRW_mesh_batch_cache_get_edituv_verts(Mesh *me)
+GPUBatch *DRW_mesh_batch_cache_get_edituv_verts(Object *object, Mesh *me)
{
MeshBatchCache *cache = mesh_batch_cache_get(me);
- edituv_request_active_uv(cache, me);
+ edituv_request_active_uv(cache, object, me);
mesh_batch_cache_add_request(cache, MBC_EDITUV_VERTS);
return DRW_batch_request(&cache->batch.edituv_verts);
}
-GPUBatch *DRW_mesh_batch_cache_get_edituv_facedots(Mesh *me)
+GPUBatch *DRW_mesh_batch_cache_get_edituv_facedots(Object *object, Mesh *me)
{
MeshBatchCache *cache = mesh_batch_cache_get(me);
- edituv_request_active_uv(cache, me);
+ edituv_request_active_uv(cache, object, me);
mesh_batch_cache_add_request(cache, MBC_EDITUV_FACEDOTS);
return DRW_batch_request(&cache->batch.edituv_fdots);
}
-GPUBatch *DRW_mesh_batch_cache_get_uv_edges(Mesh *me)
+GPUBatch *DRW_mesh_batch_cache_get_uv_edges(Object *object, Mesh *me)
{
MeshBatchCache *cache = mesh_batch_cache_get(me);
- edituv_request_active_uv(cache, me);
+ edituv_request_active_uv(cache, object, me);
mesh_batch_cache_add_request(cache, MBC_WIRE_LOOPS_UVS);
return DRW_batch_request(&cache->batch.wire_loops_uvs);
}
-GPUBatch *DRW_mesh_batch_cache_get_surface_edges(Mesh *me)
+GPUBatch *DRW_mesh_batch_cache_get_surface_edges(Object *object, Mesh *me)
{
MeshBatchCache *cache = mesh_batch_cache_get(me);
- texpaint_request_active_uv(cache, me);
+ texpaint_request_active_uv(cache, object, me);
mesh_batch_cache_add_request(cache, MBC_WIRE_LOOPS);
return DRW_batch_request(&cache->batch.wire_loops);
}
@@ -1560,20 +1580,12 @@ void DRW_mesh_batch_cache_create_requested(struct TaskGraph *task_graph,
/* Sanity check. */
if ((me->edit_mesh != NULL) && (ob->mode & OB_MODE_EDIT)) {
- BLI_assert(me->edit_mesh->mesh_eval_final != NULL);
+ BLI_assert(BKE_object_get_editmesh_eval_final(ob) != NULL);
}
- /* Don't check `DRW_object_is_in_edit_mode(ob)` here because it means the same mesh
- * may draw with edit-mesh data and regular mesh data.
- * In this case the custom-data layers used won't always match in `me->runtime.batch_cache`.
- * If we want to display regular mesh data, we should have a separate cache for the edit-mesh.
- * See T77359. */
const bool is_editmode = (me->edit_mesh != NULL) &&
- /* In rare cases we have the edit-mode data but not the generated cache.
- * This can happen when switching an objects data to a mesh which
- * happens to be in edit-mode in another scene, see: T82952. */
- (me->edit_mesh->mesh_eval_final !=
- NULL) /* && DRW_object_is_in_edit_mode(ob) */;
+ (BKE_object_get_editmesh_eval_final(ob) != NULL) &&
+ DRW_object_is_in_edit_mode(ob);
/* This could be set for paint mode too, currently it's only used for edit-mode. */
const bool is_mode_active = is_editmode && DRW_object_is_in_edit_mode(ob);
@@ -1599,7 +1611,7 @@ void DRW_mesh_batch_cache_create_requested(struct TaskGraph *task_graph,
/* Modifiers will only generate an orco layer if the mesh is deformed. */
if (cache->cd_needed.orco != 0) {
/* Orco is always extracted from final mesh. */
- Mesh *me_final = (me->edit_mesh) ? me->edit_mesh->mesh_eval_final : me;
+ Mesh *me_final = (me->edit_mesh) ? BKE_object_get_editmesh_eval_final(ob) : me;
if (CustomData_get_layer(&me_final->vdata, CD_ORCO) == NULL) {
/* Skip orco calculation */
cache->cd_needed.orco = 0;
@@ -1705,10 +1717,14 @@ void DRW_mesh_batch_cache_create_requested(struct TaskGraph *task_graph,
cache->batch_ready |= batch_requested;
- const bool do_cage = (is_editmode &&
- (me->edit_mesh->mesh_eval_final != me->edit_mesh->mesh_eval_cage));
+ bool do_cage = false, do_uvcage = false;
+ if (is_editmode) {
+ Mesh *editmesh_eval_final = BKE_object_get_editmesh_eval_final(ob);
+ Mesh *editmesh_eval_cage = BKE_object_get_editmesh_eval_cage(ob);
- const bool do_uvcage = is_editmode && !me->edit_mesh->mesh_eval_final->runtime.is_original;
+ do_cage = editmesh_eval_final != editmesh_eval_cage;
+ do_uvcage = !editmesh_eval_final->runtime.is_original;
+ }
const int required_mode = BKE_subsurf_modifier_eval_required_mode(DRW_state_is_scene_render(),
is_editmode);
@@ -2029,6 +2045,7 @@ void DRW_mesh_batch_cache_create_requested(struct TaskGraph *task_graph,
mesh_buffer_cache_create_requested(task_graph,
cache,
&cache->uv_cage,
+ ob,
me,
is_editmode,
is_paint_mode,
@@ -2046,6 +2063,7 @@ void DRW_mesh_batch_cache_create_requested(struct TaskGraph *task_graph,
mesh_buffer_cache_create_requested(task_graph,
cache,
&cache->cage,
+ ob,
me,
is_editmode,
is_paint_mode,
@@ -2071,6 +2089,7 @@ void DRW_mesh_batch_cache_create_requested(struct TaskGraph *task_graph,
mesh_buffer_cache_create_requested(task_graph,
cache,
&cache->final,
+ ob,
me,
is_editmode,
is_paint_mode,
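
All of these mesh batch-cache getters now receive the evaluated Object alongside its Mesh, since material counts and active-layer lookups can depend on object-level data (e.g. material slots stored on the object rather than the mesh). A minimal sketch of a call site updated to the new signatures, with hypothetical engine-side context:

/* Hypothetical engine cache_populate(); `ob` is the evaluated Object. */
Mesh *me = (Mesh *)ob->data;
const int mat_len = DRW_mesh_material_count_get(ob, me);
GPUBatch **surf_per_mat = DRW_mesh_batch_cache_get_surface_texpaint(ob, me);
for (int i = 0; i < mat_len; i++) {
  if (surf_per_mat[i] != NULL) {
    /* Create one shading group per material and add a draw call for it. */
  }
}
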
diff --git a/source/blender/draw/intern/draw_cache_impl_subdivision.cc b/source/blender/draw/intern/draw_cache_impl_subdivision.cc
index 37bc9435c76..4a8670a9ee2 100644
--- a/source/blender/draw/intern/draw_cache_impl_subdivision.cc
+++ b/source/blender/draw/intern/draw_cache_impl_subdivision.cc
@@ -573,6 +573,9 @@ void draw_subdiv_cache_free(DRWSubdivCache *cache)
GPU_VERTBUF_DISCARD_SAFE(cache->subdiv_vertex_face_adjacency);
cache->resolution = 0;
cache->num_subdiv_loops = 0;
+ cache->num_subdiv_edges = 0;
+ cache->num_subdiv_verts = 0;
+ cache->num_subdiv_triangles = 0;
cache->num_coarse_poly = 0;
cache->num_subdiv_quads = 0;
draw_subdiv_free_edit_mode_cache(cache);
@@ -749,9 +752,10 @@ static bool draw_subdiv_topology_info_cb(const SubdivForeachContext *foreach_con
cache->subdiv_loop_poly_index = static_cast<int *>(
MEM_mallocN(cache->num_subdiv_loops * sizeof(int), "subdiv_loop_poly_index"));
+ const int num_coarse_vertices = ctx->coarse_mesh->totvert;
cache->point_indices = static_cast<int *>(
- MEM_mallocN(cache->num_subdiv_verts * sizeof(int), "point_indices"));
- for (int i = 0; i < num_vertices; i++) {
+ MEM_mallocN(num_coarse_vertices * sizeof(int), "point_indices"));
+ for (int i = 0; i < num_coarse_vertices; i++) {
cache->point_indices[i] = -1;
}
@@ -976,6 +980,7 @@ static bool draw_subdiv_build_cache(DRWSubdivCache *cache,
}
DRWCacheBuildingContext cache_building_context;
+ memset(&cache_building_context, 0, sizeof(DRWCacheBuildingContext));
cache_building_context.coarse_mesh = mesh_eval;
cache_building_context.settings = &to_mesh_settings;
cache_building_context.cache = cache;
@@ -994,7 +999,7 @@ static bool draw_subdiv_build_cache(DRWSubdivCache *cache,
cache->face_ptex_offset = BKE_subdiv_face_ptex_offset_get(subdiv);
- // Build patch coordinates for all the face dots
+ /* Build patch coordinates for all the face dots. */
cache->fdots_patch_coords = gpu_vertbuf_create_from_format(get_blender_patch_coords_format(),
mesh_eval->totpoly);
CompressedPatchCoord *blender_fdots_patch_coords = (CompressedPatchCoord *)GPU_vertbuf_get_data(
@@ -1755,7 +1760,7 @@ static void draw_subdiv_cache_ensure_mat_offsets(DRWSubdivCache *cache,
int *mat_start = static_cast<int *>(MEM_callocN(sizeof(int) * mat_len, "subdiv mat_start"));
int *subdiv_polygon_offset = cache->subdiv_polygon_offset;
- // TODO: parallel_reduce?
+ /* TODO: parallel_reduce? */
for (int i = 0; i < mesh_eval->totpoly; i++) {
const MPoly *mpoly = &mesh_eval->mpoly[i];
const int next_offset = (i == mesh_eval->totpoly - 1) ? number_of_quads :
@@ -1823,7 +1828,7 @@ static bool draw_subdiv_create_requested_buffers(const Scene *scene,
Mesh *mesh_eval = mesh;
BMesh *bm = nullptr;
if (mesh->edit_mesh) {
- mesh_eval = mesh->edit_mesh->mesh_eval_final;
+ mesh_eval = BKE_object_get_editmesh_eval_final(ob);
bm = mesh->edit_mesh->bm;
}
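
The added memset() makes the stack-allocated building context start from a known zeroed state instead of leftover stack contents. Assuming DRWCacheBuildingContext stays a plain aggregate, C++ value-initialization would be an equivalent way to express this:

/* Zero-initializes every member, then fills in the used fields. */
DRWCacheBuildingContext cache_building_context{};
cache_building_context.coarse_mesh = mesh_eval;
cache_building_context.settings = &to_mesh_settings;
cache_building_context.cache = cache;
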
diff --git a/source/blender/draw/intern/draw_common.c b/source/blender/draw/intern/draw_common.c
index 65afc5ed3d8..82b830f6799 100644
--- a/source/blender/draw/intern/draw_common.c
+++ b/source/blender/draw/intern/draw_common.c
@@ -431,7 +431,7 @@ bool DRW_object_is_flat(Object *ob, int *r_axis)
OB_SURF,
OB_FONT,
OB_MBALL,
- OB_HAIR,
+ OB_CURVES,
OB_POINTCLOUD,
OB_VOLUME)) {
/* Non-meshes object cannot be considered as flat. */
diff --git a/source/blender/draw/intern/draw_hair.c b/source/blender/draw/intern/draw_hair.c
index 0abb00a71a9..e9010d7a81a 100644
--- a/source/blender/draw/intern/draw_hair.c
+++ b/source/blender/draw/intern/draw_hair.c
@@ -54,7 +54,7 @@
BLI_INLINE eParticleRefineShaderType drw_hair_shader_type_get(void)
{
#ifdef USE_COMPUTE_SHADERS
- if (GPU_compute_shader_support()) {
+ if (GPU_compute_shader_support() && GPU_shader_storage_buffer_objects_support()) {
return PART_REFINE_SHADER_COMPUTE;
}
#endif
@@ -130,7 +130,7 @@ static void drw_hair_particle_cache_update_compute(ParticleHairCache *cache, con
GPUShader *shader = hair_refine_shader_get(PART_REFINE_CATMULL_ROM);
DRWShadingGroup *shgrp = DRW_shgroup_create(shader, g_tf_pass);
drw_hair_particle_cache_shgrp_attach_resources(shgrp, cache, subdiv);
- DRW_shgroup_vertex_buffer(shgrp, "hairPointOutputBuffer", cache->final[subdiv].proc_buf);
+ DRW_shgroup_vertex_buffer(shgrp, "posTime", cache->final[subdiv].proc_buf);
const int max_strands_per_call = GPU_max_work_group_count(0);
int strands_start = 0;
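
The compute path for hair refinement is now only taken when shader storage buffers are supported as well, because the refined points are written through an SSBO (bound under the GLSL variable name `posTime`, matching the create-info in draw_hair_refine_info.hh further down). A sketch of the guard pattern, with the fallback branches left as comments:

/* Pick the compute path only when both features are available. */
if (GPU_compute_shader_support() && GPU_shader_storage_buffer_objects_support()) {
  /* Dispatch the refine shader; the result vertex buffer is bound as an SSBO. */
}
else {
  /* Fall back to the transform-feedback or CPU refinement path. */
}
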
diff --git a/source/blender/draw/intern/draw_instance_data.c b/source/blender/draw/intern/draw_instance_data.c
index 7e1d1208698..ca01a99c0d3 100644
--- a/source/blender/draw/intern/draw_instance_data.c
+++ b/source/blender/draw/intern/draw_instance_data.c
@@ -80,7 +80,7 @@ typedef struct DRWTempInstancingHandle {
GPUBatch *batch;
/** Batch containing instancing attributes. */
GPUBatch *instancer;
- /** Callbuffer to be used instead of instancer. */
+ /** Call-buffer to be used instead of instancer. */
GPUVertBuf *buf;
/** Original non-instanced batch pointer. */
GPUBatch *geom;
diff --git a/source/blender/draw/intern/draw_manager.c b/source/blender/draw/intern/draw_manager.c
index 276a8cc3a13..039fae43329 100644
--- a/source/blender/draw/intern/draw_manager.c
+++ b/source/blender/draw/intern/draw_manager.c
@@ -35,11 +35,11 @@
#include "BKE_colortools.h"
#include "BKE_context.h"
#include "BKE_curve.h"
+#include "BKE_curves.h"
#include "BKE_duplilist.h"
#include "BKE_editmesh.h"
#include "BKE_global.h"
#include "BKE_gpencil.h"
-#include "BKE_hair.h"
#include "BKE_lattice.h"
#include "BKE_main.h"
#include "BKE_mball.h"
@@ -212,21 +212,7 @@ bool DRW_object_is_in_edit_mode(const Object *ob)
if (BKE_object_is_in_editmode(ob)) {
if (ob->type == OB_MESH) {
if ((ob->mode & OB_MODE_EDIT) == 0) {
- Mesh *me = (Mesh *)ob->data;
- BMEditMesh *embm = me->edit_mesh;
- /* Sanity check when rendering in multiple windows. */
- if (embm && embm->mesh_eval_final == NULL) {
- return false;
- }
- /* Do not draw ob with edit overlay when edit data is present and is modified. */
- if (embm && embm->mesh_eval_cage && (embm->mesh_eval_cage != embm->mesh_eval_final)) {
- return false;
- }
- /* Check if the object that we are drawing is modified. */
- if (!DEG_is_original_id(&me->id)) {
- return false;
- }
- return true;
+ return false;
}
}
return true;
@@ -1496,7 +1482,7 @@ void DRW_draw_callbacks_post_scene(void)
* Don't trust them! */
DRW_state_reset();
- /* needed so gizmo isn't obscured */
+ /* Needed so gizmo isn't occluded. */
if ((v3d->gizmo_flag & V3D_GIZMO_HIDE) == 0) {
GPU_depth_test(GPU_DEPTH_NONE);
DRW_draw_gizmo_3d();
@@ -2962,8 +2948,8 @@ void DRW_engines_register(void)
BKE_gpencil_batch_cache_dirty_tag_cb = DRW_gpencil_batch_cache_dirty_tag;
BKE_gpencil_batch_cache_free_cb = DRW_gpencil_batch_cache_free;
- BKE_hair_batch_cache_dirty_tag_cb = DRW_hair_batch_cache_dirty_tag;
- BKE_hair_batch_cache_free_cb = DRW_hair_batch_cache_free;
+ BKE_curves_batch_cache_dirty_tag_cb = DRW_curves_batch_cache_dirty_tag;
+ BKE_curves_batch_cache_free_cb = DRW_curves_batch_cache_free;
BKE_pointcloud_batch_cache_dirty_tag_cb = DRW_pointcloud_batch_cache_dirty_tag;
BKE_pointcloud_batch_cache_free_cb = DRW_pointcloud_batch_cache_free;
diff --git a/source/blender/draw/intern/draw_manager.h b/source/blender/draw/intern/draw_manager.h
index d27eb8be9e0..73fd157426c 100644
--- a/source/blender/draw/intern/draw_manager.h
+++ b/source/blender/draw/intern/draw_manager.h
@@ -205,8 +205,10 @@ typedef enum {
/* Compute Commands. */
DRW_CMD_COMPUTE = 8,
+ DRW_CMD_COMPUTE_REF = 9,
/* Other Commands */
+ DRW_CMD_BARRIER = 11,
DRW_CMD_CLEAR = 12,
DRW_CMD_DRWSTATE = 13,
DRW_CMD_STENCIL = 14,
@@ -249,6 +251,14 @@ typedef struct DRWCommandCompute {
int groups_z_len;
} DRWCommandCompute;
+typedef struct DRWCommandComputeRef {
+ int *groups_ref;
+} DRWCommandComputeRef;
+
+typedef struct DRWCommandBarrier {
+ eGPUBarrier type;
+} DRWCommandBarrier;
+
typedef struct DRWCommandDrawProcedural {
GPUBatch *batch;
DRWResourceHandle handle;
@@ -286,6 +296,8 @@ typedef union DRWCommand {
DRWCommandDrawInstanceRange instance_range;
DRWCommandDrawProcedural procedural;
DRWCommandCompute compute;
+ DRWCommandComputeRef compute_ref;
+ DRWCommandBarrier barrier;
DRWCommandSetMutableState state;
DRWCommandSetStencil stencil;
DRWCommandSetSelectID select_id;
@@ -300,7 +312,7 @@ struct DRWCallBuffer {
};
/** Used by #DRWUniform.type */
-/* TODO(jbakker): rename to DRW_RESOURCE/DRWResourceType. */
+/* TODO(@jbakker): rename to DRW_RESOURCE/DRWResourceType. */
typedef enum {
DRW_UNIFORM_INT = 0,
DRW_UNIFORM_INT_COPY,
@@ -314,6 +326,7 @@ typedef enum {
DRW_UNIFORM_BLOCK_REF,
DRW_UNIFORM_TFEEDBACK_TARGET,
DRW_UNIFORM_VERTEX_BUFFER_AS_STORAGE,
+ DRW_UNIFORM_VERTEX_BUFFER_AS_STORAGE_REF,
/** Per drawcall uniforms/UBO */
DRW_UNIFORM_BLOCK_OBMATS,
DRW_UNIFORM_BLOCK_OBINFOS,
@@ -345,6 +358,11 @@ struct DRWUniform {
GPUUniformBuf *block;
GPUUniformBuf **block_ref;
};
+ /* DRW_UNIFORM_VERTEX_BUFFER_AS_STORAGE */
+ union {
+ GPUVertBuf *vertbuf;
+ GPUVertBuf **vertbuf_ref;
+ };
/* DRW_UNIFORM_FLOAT_COPY */
float fvalue[4];
/* DRW_UNIFORM_INT_COPY */
@@ -529,7 +547,7 @@ typedef struct DRWData {
struct GHash *obattrs_ubo_pool;
uint ubo_len;
/** Texture pool to reuse temp texture across engines. */
- /* TODO(fclem) the pool could be shared even between viewports. */
+ /* TODO(@fclem): The pool could be shared even between view-ports. */
struct DRWTexturePool *texture_pool;
/** Per stereo view data. Contains engine data and default framebuffers. */
struct DRWViewData *view_data[2];
@@ -549,7 +567,7 @@ typedef struct DupliKey {
typedef struct DRWManager {
/* TODO: clean up this struct a bit. */
/* Cache generation */
- /* TODO(fclem) Rename to data. */
+ /* TODO(@fclem): Rename to data. */
DRWData *vmempool;
/** Active view data structure for one of the 2 stereo view. Not related to DRWView. */
struct DRWViewData *view_data_active;
@@ -572,7 +590,7 @@ typedef struct DRWManager {
struct ID *dupli_origin_data;
/** Hash-map: #DupliKey -> void pointer for each enabled engine. */
struct GHash *dupli_ghash;
- /** TODO(fclem): try to remove usage of this. */
+ /** TODO(@fclem): try to remove usage of this. */
DRWInstanceData *object_instance_data[MAX_INSTANCE_DATA_SIZE];
/* Dupli data for the current dupli for each enabled engine. */
void **dupli_datas;
@@ -615,7 +633,7 @@ typedef struct DRWManager {
DRWView *view_active;
DRWView *view_previous;
uint primary_view_ct;
- /** TODO(fclem): Remove this. Only here to support
+ /** TODO(@fclem): Remove this. Only here to support
* shaders without common_view_lib.glsl */
DRWViewUboStorage view_storage_cpy;
@@ -640,7 +658,7 @@ typedef struct DRWManager {
GPUDrawList *draw_list;
struct {
- /* TODO(fclem): optimize: use chunks. */
+ /* TODO(@fclem): optimize: use chunks. */
DRWDebugLine *lines;
DRWDebugSphere *spheres;
} debug;
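
The new DRW_UNIFORM_VERTEX_BUFFER_AS_STORAGE_REF type (and the vertbuf / vertbuf_ref union members) stores the address of the engine's buffer pointer instead of the buffer itself, so a GPUVertBuf that is only created or replaced after the shading group is recorded is still the one bound at draw time. A minimal sketch, assuming the usual non-_ex wrapper exists in DRW_render.h (not shown in this section) and hypothetical engine data:

typedef struct MyEngineData {
  GPUVertBuf *result_buf; /* May only be (re)allocated later, e.g. in cache_finish(). */
} MyEngineData;

/* Only &edata->result_buf is stored; the dereference happens at draw time. */
DRW_shgroup_vertex_buffer_ref(shgrp, "my_output_buf", &edata->result_buf);
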
diff --git a/source/blender/draw/intern/draw_manager_data.c b/source/blender/draw/intern/draw_manager_data.c
index f1c13bc039a..3f299e878c4 100644
--- a/source/blender/draw/intern/draw_manager_data.c
+++ b/source/blender/draw/intern/draw_manager_data.c
@@ -283,19 +283,45 @@ void DRW_shgroup_uniform_image_ref(DRWShadingGroup *shgroup, const char *name, G
drw_shgroup_uniform_create_ex(shgroup, loc, DRW_UNIFORM_IMAGE_REF, tex, 0, 0, 1);
}
-void DRW_shgroup_uniform_block(DRWShadingGroup *shgroup,
- const char *name,
- const GPUUniformBuf *ubo)
+void DRW_shgroup_uniform_block_ex(DRWShadingGroup *shgroup,
+ const char *name,
+ const GPUUniformBuf *ubo DRW_DEBUG_FILE_LINE_ARGS)
{
BLI_assert(ubo != NULL);
int loc = GPU_shader_get_uniform_block_binding(shgroup->shader, name);
+ if (loc == -1) {
+#ifdef DRW_UNUSED_RESOURCE_TRACKING
+ printf("%s:%d: Unable to locate binding of shader uniform buffer object: %s.\n",
+ file,
+ line,
+ name);
+#else
+ /* TODO(@fclem): Would be good to have, but eevee has too much of this for the moment. */
+ // BLI_assert_msg(0, "Unable to locate binding of shader uniform buffer objects.");
+#endif
+ return;
+ }
drw_shgroup_uniform_create_ex(shgroup, loc, DRW_UNIFORM_BLOCK, ubo, 0, 0, 1);
}
-void DRW_shgroup_uniform_block_ref(DRWShadingGroup *shgroup, const char *name, GPUUniformBuf **ubo)
+void DRW_shgroup_uniform_block_ref_ex(DRWShadingGroup *shgroup,
+ const char *name,
+ GPUUniformBuf **ubo DRW_DEBUG_FILE_LINE_ARGS)
{
BLI_assert(ubo != NULL);
int loc = GPU_shader_get_uniform_block_binding(shgroup->shader, name);
+ if (loc == -1) {
+#ifdef DRW_UNUSED_RESOURCE_TRACKING
+ printf("%s:%d: Unable to locate binding of shader uniform buffer object: %s.\n",
+ file,
+ line,
+ name);
+#else
+ /* TODO(@fclem): Would be good to have, but eevee has too much of this for the moment. */
+ // BLI_assert_msg(0, "Unable to locate binding of shader uniform buffer objects.");
+#endif
+ return;
+ }
drw_shgroup_uniform_create_ex(shgroup, loc, DRW_UNIFORM_BLOCK_REF, ubo, 0, 0, 1);
}
@@ -447,19 +473,46 @@ void DRW_shgroup_uniform_vec4_array_copy(DRWShadingGroup *shgroup,
}
}
-void DRW_shgroup_vertex_buffer(DRWShadingGroup *shgroup,
- const char *name,
- GPUVertBuf *vertex_buffer)
+void DRW_shgroup_vertex_buffer_ex(DRWShadingGroup *shgroup,
+ const char *name,
+ GPUVertBuf *vertex_buffer DRW_DEBUG_FILE_LINE_ARGS)
{
int location = GPU_shader_get_ssbo(shgroup->shader, name);
if (location == -1) {
+#ifdef DRW_UNUSED_RESOURCE_TRACKING
+ printf("%s:%d: Unable to locate binding of shader storage buffer object: %s.\n",
+ file,
+ line,
+ name);
+#else
BLI_assert_msg(0, "Unable to locate binding of shader storage buffer objects.");
+#endif
return;
}
drw_shgroup_uniform_create_ex(
shgroup, location, DRW_UNIFORM_VERTEX_BUFFER_AS_STORAGE, vertex_buffer, 0, 0, 1);
}
+void DRW_shgroup_vertex_buffer_ref_ex(DRWShadingGroup *shgroup,
+ const char *name,
+ GPUVertBuf **vertex_buffer DRW_DEBUG_FILE_LINE_ARGS)
+{
+ int location = GPU_shader_get_ssbo(shgroup->shader, name);
+ if (location == -1) {
+#ifdef DRW_UNUSED_RESOURCE_TRACKING
+ printf("%s:%d: Unable to locate binding of shader storage buffer object: %s.\n",
+ file,
+ line,
+ name);
+#else
+ BLI_assert_msg(0, "Unable to locate binding of shader storage buffer objects.");
+#endif
+ return;
+ }
+ drw_shgroup_uniform_create_ex(
+ shgroup, location, DRW_UNIFORM_VERTEX_BUFFER_AS_STORAGE_REF, vertex_buffer, 0, 0, 1);
+}
+
/** \} */
/* -------------------------------------------------------------------- */
@@ -730,6 +783,18 @@ static void drw_command_compute(DRWShadingGroup *shgroup,
cmd->groups_z_len = groups_z_len;
}
+static void drw_command_compute_ref(DRWShadingGroup *shgroup, int groups_ref[3])
+{
+ DRWCommandComputeRef *cmd = drw_command_create(shgroup, DRW_CMD_COMPUTE_REF);
+ cmd->groups_ref = groups_ref;
+}
+
+static void drw_command_barrier(DRWShadingGroup *shgroup, eGPUBarrier type)
+{
+ DRWCommandBarrier *cmd = drw_command_create(shgroup, DRW_CMD_BARRIER);
+ cmd->type = type;
+}
+
static void drw_command_draw_procedural(DRWShadingGroup *shgroup,
GPUBatch *batch,
DRWResourceHandle handle,
@@ -816,7 +881,7 @@ void DRW_shgroup_call_ex(DRWShadingGroup *shgroup,
culling->user_data = user_data;
}
if (bypass_culling) {
- /* NOTE this will disable culling for the whole object. */
+ /* NOTE: this will disable culling for the whole object. */
culling->bsphere.radius = -1.0f;
}
}
@@ -855,6 +920,20 @@ void DRW_shgroup_call_compute(DRWShadingGroup *shgroup,
drw_command_compute(shgroup, groups_x_len, groups_y_len, groups_z_len);
}
+void DRW_shgroup_call_compute_ref(DRWShadingGroup *shgroup, int groups_ref[3])
+{
+ BLI_assert(GPU_compute_shader_support());
+
+ drw_command_compute_ref(shgroup, groups_ref);
+}
+
+void DRW_shgroup_barrier(DRWShadingGroup *shgroup, eGPUBarrier type)
+{
+ BLI_assert(GPU_compute_shader_support());
+
+ drw_command_barrier(shgroup, type);
+}
+
static void drw_shgroup_call_procedural_add_ex(DRWShadingGroup *shgroup,
GPUBatch *geom,
Object *ob,
@@ -1248,6 +1327,17 @@ static void drw_shgroup_init(DRWShadingGroup *shgroup, GPUShader *shader)
int chunkid_location = GPU_shader_get_builtin_uniform(shader, GPU_UNIFORM_RESOURCE_CHUNK);
int resourceid_location = GPU_shader_get_builtin_uniform(shader, GPU_UNIFORM_RESOURCE_ID);
+ /* TODO(@fclem): Will take the place of the above after the GPUShaderCreateInfo port. */
+ if (view_ubo_location == -1) {
+ view_ubo_location = GPU_shader_get_builtin_block(shader, GPU_UNIFORM_BLOCK_DRW_VIEW);
+ }
+ if (model_ubo_location == -1) {
+ model_ubo_location = GPU_shader_get_builtin_block(shader, GPU_UNIFORM_BLOCK_DRW_MODEL);
+ }
+ if (info_ubo_location == -1) {
+ info_ubo_location = GPU_shader_get_builtin_block(shader, GPU_UNIFORM_BLOCK_DRW_INFOS);
+ }
+
if (chunkid_location != -1) {
drw_shgroup_uniform_create_ex(
shgroup, chunkid_location, DRW_UNIFORM_RESOURCE_CHUNK, NULL, 0, 0, 1);
@@ -1893,10 +1983,10 @@ void DRW_view_reset(void)
DST.view_previous = NULL;
}
-void DRW_view_default_set(DRWView *view)
+void DRW_view_default_set(const DRWView *view)
{
BLI_assert(DST.view_default == NULL);
- DST.view_default = view;
+ DST.view_default = (DRWView *)view;
}
void DRW_view_clip_planes_set(DRWView *view, float (*planes)[4], int plane_len)
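
DRW_DEBUG_FILE_LINE_ARGS and the _ex suffix follow the usual pattern of forwarding the call site so the DRW_UNUSED_RESOURCE_TRACKING print can name it. The wrapper macros live in DRW_render.h, which is not part of this section, so the following is only an assumed sketch of that side:

/* Assumed shape of the DRW_render.h counterpart (not part of this diff hunk). */
#ifdef DRW_UNUSED_RESOURCE_TRACKING
#  define DRW_DEBUG_FILE_LINE_ARGS , const char *file, int line
#  define DRW_shgroup_vertex_buffer(shgroup, name, vbo) \
    DRW_shgroup_vertex_buffer_ex(shgroup, name, vbo, __FILE__, __LINE__)
#else
#  define DRW_DEBUG_FILE_LINE_ARGS
#  define DRW_shgroup_vertex_buffer(shgroup, name, vbo) \
    DRW_shgroup_vertex_buffer_ex(shgroup, name, vbo)
#endif
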
diff --git a/source/blender/draw/intern/draw_manager_exec.c b/source/blender/draw/intern/draw_manager_exec.c
index 8dd24c01337..a579403975f 100644
--- a/source/blender/draw/intern/draw_manager_exec.c
+++ b/source/blender/draw/intern/draw_manager_exec.c
@@ -354,9 +354,9 @@ static bool draw_call_is_culled(const DRWResourceHandle *handle, DRWView *view)
return (culling->mask & view->culling_mask) != 0;
}
-void DRW_view_set_active(DRWView *view)
+void DRW_view_set_active(const DRWView *view)
{
- DST.view_active = (view) ? view : DST.view_default;
+ DST.view_active = (view != NULL) ? ((DRWView *)view) : DST.view_default;
}
const DRWView *DRW_view_get_active(void)
@@ -662,8 +662,11 @@ static void draw_update_uniforms(DRWShadingGroup *shgroup,
*use_tfeedback = GPU_shader_transform_feedback_enable(shgroup->shader,
((GPUVertBuf *)uni->pvalue));
break;
+ case DRW_UNIFORM_VERTEX_BUFFER_AS_STORAGE_REF:
+ GPU_vertbuf_bind_as_ssbo(*uni->vertbuf_ref, uni->location);
+ break;
case DRW_UNIFORM_VERTEX_BUFFER_AS_STORAGE:
- GPU_vertbuf_bind_as_ssbo((GPUVertBuf *)uni->pvalue, uni->location);
+ GPU_vertbuf_bind_as_ssbo(uni->vertbuf, uni->location);
break;
/* Legacy/Fallback support. */
case DRW_UNIFORM_BASE_INSTANCE:
@@ -1049,6 +1052,15 @@ static void draw_shgroup(DRWShadingGroup *shgroup, DRWState pass_state)
cmd->compute.groups_y_len,
cmd->compute.groups_z_len);
break;
+ case DRW_CMD_COMPUTE_REF:
+ GPU_compute_dispatch(shgroup->shader,
+ cmd->compute_ref.groups_ref[0],
+ cmd->compute_ref.groups_ref[1],
+ cmd->compute_ref.groups_ref[2]);
+ break;
+ case DRW_CMD_BARRIER:
+ GPU_memory_barrier(cmd->barrier.type);
+ break;
}
}
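
DRW_CMD_COMPUTE_REF reads the group counts through the stored pointer only at submission time, and DRW_CMD_BARRIER issues GPU_memory_barrier() between commands, so an engine can record a dispatch before its final size is known and still order the compute writes against whatever consumes them. A sketch with hypothetical engine-side names:

/* During cache population: record the commands. */
DRW_shgroup_call_compute_ref(grp, edata->dispatch_size);
/* Barrier type depends on how the result is consumed; SSBO reads here. */
DRW_shgroup_barrier(grp, GPU_BARRIER_SHADER_STORAGE);

/* Later, still before drawing, the counts can be finalized: */
edata->dispatch_size[0] = divide_ceil_u(point_count, GROUP_SIZE); /* GROUP_SIZE is hypothetical. */
edata->dispatch_size[1] = 1;
edata->dispatch_size[2] = 1;
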
diff --git a/source/blender/draw/intern/draw_manager_shader.c b/source/blender/draw/intern/draw_manager_shader.c
index 84440a8effe..3cfbb3fe416 100644
--- a/source/blender/draw/intern/draw_manager_shader.c
+++ b/source/blender/draw/intern/draw_manager_shader.c
@@ -46,8 +46,10 @@
#include "draw_manager.h"
-extern char datatoc_gpu_shader_2D_vert_glsl[];
-extern char datatoc_gpu_shader_3D_vert_glsl[];
+#include "CLG_log.h"
+
+static CLG_LogRef LOG = {"draw.manager.shader"};
+
extern char datatoc_gpu_shader_depth_only_frag_glsl[];
extern char datatoc_common_fullscreen_vert_glsl[];
@@ -567,7 +569,7 @@ void DRW_shader_free(GPUShader *shader)
#define MAX_LIB_DEPS 8
struct DRWShaderLibrary {
- char *libs[MAX_LIB];
+ const char *libs[MAX_LIB];
char libs_name[MAX_LIB][MAX_LIB_NAME];
uint32_t libs_deps[MAX_LIB];
};
@@ -616,11 +618,11 @@ static uint32_t drw_shader_dependencies_get(const DRWShaderLibrary *lib, const c
}
dbg_name[i + 1] = '\0';
- printf(
- "Error: Dependency not found: %s\n"
- "This might be due to bad lib ordering.\n",
- dbg_name);
- BLI_assert(0);
+ CLOG_INFO(&LOG,
+ 0,
+ "Dependency '%s' not found\n"
+ "This might be due to bad lib ordering or overriding a builtin shader.\n",
+ dbg_name);
}
else {
deps |= 1u << (uint32_t)dep;
@@ -629,7 +631,7 @@ static uint32_t drw_shader_dependencies_get(const DRWShaderLibrary *lib, const c
return deps;
}
-void DRW_shader_library_add_file(DRWShaderLibrary *lib, char *lib_code, const char *lib_name)
+void DRW_shader_library_add_file(DRWShaderLibrary *lib, const char *lib_code, const char *lib_name)
{
int index = -1;
for (int i = 0; i < MAX_LIB; i++) {
diff --git a/source/blender/draw/intern/draw_shader.c b/source/blender/draw/intern/draw_shader.c
index 121a0acd059..28ac76ccd7f 100644
--- a/source/blender/draw/intern/draw_shader.c
+++ b/source/blender/draw/intern/draw_shader.c
@@ -47,12 +47,7 @@ static struct {
static GPUShader *hair_refine_shader_compute_create(ParticleRefineShader UNUSED(refinement))
{
- GPUShader *sh = NULL;
- sh = GPU_shader_create_compute(datatoc_common_hair_refine_comp_glsl,
- datatoc_common_hair_lib_glsl,
- "#define HAIR_PHASE_SUBDIV\n",
- __func__);
- return sh;
+ return GPU_shader_create_from_info_name("draw_hair_refine_compute");
}
static GPUShader *hair_refine_shader_transform_feedback_create(
diff --git a/source/blender/draw/intern/draw_shader_shared.h b/source/blender/draw/intern/draw_shader_shared.h
index aa117f44e84..fba8f91b2f6 100644
--- a/source/blender/draw/intern/draw_shader_shared.h
+++ b/source/blender/draw/intern/draw_shader_shared.h
@@ -1,6 +1,6 @@
#ifndef GPU_SHADER
-# include "gpu_shader_shared_utils.h"
+# include "GPU_shader_shared_utils.h"
#endif
#define DRW_SHADER_SHARED_H
@@ -16,21 +16,21 @@ struct ViewInfos {
float4x4 winmat;
float4x4 wininv;
- float4 clipplanes[6];
+ float4 clip_planes[6];
float4 viewvecs[2];
/* Should not be here. Not view dependent (only main view). */
float4 viewcamtexcofac;
};
BLI_STATIC_ASSERT_ALIGN(ViewInfos, 16)
-/* TODO(fclem) Mass rename. */
+/* TODO(@fclem): Mass rename. */
#define ViewProjectionMatrix drw_view.persmat
#define ViewProjectionMatrixInverse drw_view.persinv
#define ViewMatrix drw_view.viewmat
#define ViewMatrixInverse drw_view.viewinv
#define ProjectionMatrix drw_view.winmat
#define ProjectionMatrixInverse drw_view.wininv
-#define clipPlanes drw_view.clipplanes
+#define clipPlanes drw_view.clip_planes
#define ViewVecs drw_view.viewvecs
#define CameraTexCoFactors drw_view.viewcamtexcofac
@@ -39,3 +39,14 @@ struct ObjectMatrices {
float4x4 drw_modelMatrixInverse;
};
BLI_STATIC_ASSERT_ALIGN(ViewInfos, 16)
+
+struct ObjectInfos {
+ float4 drw_OrcoTexCoFactors[2];
+ float4 drw_ObjectColor;
+ float4 drw_Infos;
+};
+BLI_STATIC_ASSERT_ALIGN(ViewInfos, 16)
+
+#define OrcoTexCoFactors (drw_infos[resource_id].drw_OrcoTexCoFactors)
+#define ObjectInfo (drw_infos[resource_id].drw_Infos)
+#define ObjectColor (drw_infos[resource_id].drw_ObjectColor)
diff --git a/source/blender/draw/intern/draw_view_data.h b/source/blender/draw/intern/draw_view_data.h
index b37e48c11e8..98ada5a59cb 100644
--- a/source/blender/draw/intern/draw_view_data.h
+++ b/source/blender/draw/intern/draw_view_data.h
@@ -35,7 +35,7 @@ struct DRWRegisteredDrawEngine;
struct DrawEngineType;
struct GPUViewport;
-/* NOTE these structs are only here for reading the actual lists from the engine.
+/* NOTE: these structs are only here for reading the actual lists from the engine.
* The actual length of them is stored in a ViewportEngineData_Info.
* The length of 1 is just here to avoid compiler warning. */
typedef struct FramebufferList {
diff --git a/source/blender/draw/intern/mesh_extractors/extract_mesh.h b/source/blender/draw/intern/mesh_extractors/extract_mesh.h
index bb55d0c5b2c..37eb4f80442 100644
--- a/source/blender/draw/intern/mesh_extractors/extract_mesh.h
+++ b/source/blender/draw/intern/mesh_extractors/extract_mesh.h
@@ -281,7 +281,8 @@ typedef struct MeshExtract {
* \param is_mode_active: When true, use the modifiers from the edit-data,
* otherwise don't use modifiers as they are not from this object.
*/
-MeshRenderData *mesh_render_data_create(Mesh *me,
+MeshRenderData *mesh_render_data_create(Object *object,
+ Mesh *me,
bool is_editmode,
bool is_paint_mode,
bool is_mode_active,
diff --git a/source/blender/draw/intern/mesh_extractors/extract_mesh_ibo_points.cc b/source/blender/draw/intern/mesh_extractors/extract_mesh_ibo_points.cc
index 3ea3e67a8da..c3f89ab96ee 100644
--- a/source/blender/draw/intern/mesh_extractors/extract_mesh_ibo_points.cc
+++ b/source/blender/draw/intern/mesh_extractors/extract_mesh_ibo_points.cc
@@ -157,7 +157,7 @@ static void extract_points_finish(const MeshRenderData *UNUSED(mr),
}
static void extract_points_init_subdiv(const DRWSubdivCache *subdiv_cache,
- const MeshRenderData *UNUSED(mr),
+ const MeshRenderData *mr,
struct MeshBatchCache *UNUSED(cache),
void *UNUSED(buffer),
void *data)
@@ -165,9 +165,9 @@ static void extract_points_init_subdiv(const DRWSubdivCache *subdiv_cache,
GPUIndexBufBuilder *elb = static_cast<GPUIndexBufBuilder *>(data);
/* Copy the points as the data upload will free them. */
elb->data = (uint *)MEM_dupallocN(subdiv_cache->point_indices);
- elb->index_len = subdiv_cache->num_subdiv_verts;
+ elb->index_len = mr->vert_len;
elb->index_min = 0;
- elb->index_max = subdiv_cache->num_subdiv_loops - 1;
+ elb->index_max = subdiv_cache->num_subdiv_loops + mr->loop_loose_len;
elb->prim_type = GPU_PRIM_POINTS;
}
@@ -184,9 +184,6 @@ static void extract_points_loose_geom_subdiv(const DRWSubdivCache *subdiv_cache,
GPUIndexBufBuilder *elb = static_cast<GPUIndexBufBuilder *>(data);
- elb->data = static_cast<uint32_t *>(
- MEM_reallocN(elb->data, sizeof(uint) * (subdiv_cache->num_subdiv_loops + loop_loose_len)));
-
const Mesh *coarse_mesh = subdiv_cache->mesh;
const MEdge *coarse_edges = coarse_mesh->medge;
@@ -195,22 +192,18 @@ static void extract_points_loose_geom_subdiv(const DRWSubdivCache *subdiv_cache,
for (int i = 0; i < loose_geom->edge_len; i++) {
const MEdge *loose_edge = &coarse_edges[loose_geom->edges[i]];
if (elb->data[loose_edge->v1] == -1u) {
- elb->data[loose_edge->v1] = offset;
+ GPU_indexbuf_set_point_vert(elb, loose_edge->v1, offset);
}
if (elb->data[loose_edge->v2] == -1u) {
- elb->data[loose_edge->v2] = offset + 1;
+ GPU_indexbuf_set_point_vert(elb, loose_edge->v2, offset + 1);
}
- elb->index_max += 2;
- elb->index_len += 2;
offset += 2;
}
for (int i = 0; i < loose_geom->vert_len; i++) {
if (elb->data[loose_geom->verts[i]] == -1u) {
- elb->data[loose_geom->verts[i]] = offset;
+ GPU_indexbuf_set_point_vert(elb, loose_geom->verts[i], offset);
}
- elb->index_max += 1;
- elb->index_len += 1;
offset += 1;
}
}
diff --git a/source/blender/draw/intern/mesh_extractors/extract_mesh_vbo_attributes.cc b/source/blender/draw/intern/mesh_extractors/extract_mesh_vbo_attributes.cc
index b846da3f016..b27633405b9 100644
--- a/source/blender/draw/intern/mesh_extractors/extract_mesh_vbo_attributes.cc
+++ b/source/blender/draw/intern/mesh_extractors/extract_mesh_vbo_attributes.cc
@@ -99,9 +99,10 @@ static uint gpu_component_size_for_attribute_type(CustomDataType type)
{
switch (type) {
case CD_PROP_BOOL:
+ case CD_PROP_INT8:
case CD_PROP_INT32:
case CD_PROP_FLOAT: {
- /* TODO(kevindietrich) : should be 1 when scalar attributes conversion is handled by us. See
+ /* TODO(@kevindietrich): should be 1 when scalar attributes conversion is handled by us. See
* comment #extract_attr_init. */
return 3;
}
@@ -317,7 +318,7 @@ static void extract_attr_init(const MeshRenderData *mr,
init_vbo_for_attribute(mr, vbo, request, false, static_cast<uint32_t>(mr->loop_len));
- /* TODO(kevindietrich) : float3 is used for scalar attributes as the implicit conversion done by
+ /* TODO(@kevindietrich): float3 is used for scalar attributes as the implicit conversion done by
* OpenGL to vec4 for a scalar `s` will produce a `vec4(s, 0, 0, 1)`. However, following the
* Blender convention, it should be `vec4(s, s, s, 1)`. This could be resolved using a similar
* texture as for volume attribute, so we can control the conversion ourselves. */
@@ -326,6 +327,10 @@ static void extract_attr_init(const MeshRenderData *mr,
extract_attr_generic<bool, float3>(mr, vbo, request);
break;
}
+ case CD_PROP_INT8: {
+ extract_attr_generic<int8_t, float3>(mr, vbo, request);
+ break;
+ }
case CD_PROP_INT32: {
extract_attr_generic<int32_t, float3>(mr, vbo, request);
break;
@@ -378,6 +383,10 @@ static void extract_attr_init_subdiv(const DRWSubdivCache *subdiv_cache,
extract_attr_generic<bool, float3>(mr, src_data, request);
break;
}
+ case CD_PROP_INT8: {
+ extract_attr_generic<int8_t, float3>(mr, src_data, request);
+ break;
+ }
case CD_PROP_INT32: {
extract_attr_generic<int32_t, float3>(mr, src_data, request);
break;
diff --git a/source/blender/draw/intern/mesh_extractors/extract_mesh_vbo_edge_fac.cc b/source/blender/draw/intern/mesh_extractors/extract_mesh_vbo_edge_fac.cc
index 8470a71059f..4185f2f84a2 100644
--- a/source/blender/draw/intern/mesh_extractors/extract_mesh_vbo_edge_fac.cc
+++ b/source/blender/draw/intern/mesh_extractors/extract_mesh_vbo_edge_fac.cc
@@ -78,7 +78,7 @@ static void extract_edge_fac_init(const MeshRenderData *mr,
data->edge_loop_count = static_cast<uchar *>(
MEM_callocN(sizeof(uint32_t) * mr->edge_len, __func__));
- /* HACK(fclem) Detecting the need for edge render.
+ /* HACK(@fclem): Detecting the need for edge render.
* We could have a flag in the mesh instead or check the modifier stack. */
const MEdge *med = mr->medge;
for (int e_index = 0; e_index < mr->edge_len; e_index++, med++) {
diff --git a/source/blender/draw/intern/mesh_extractors/extract_mesh_vbo_pos_nor.cc b/source/blender/draw/intern/mesh_extractors/extract_mesh_vbo_pos_nor.cc
index 5d2ea923658..11f1515275c 100644
--- a/source/blender/draw/intern/mesh_extractors/extract_mesh_vbo_pos_nor.cc
+++ b/source/blender/draw/intern/mesh_extractors/extract_mesh_vbo_pos_nor.cc
@@ -270,7 +270,7 @@ static void extract_pos_nor_loose_geom_subdiv(const DRWSubdivCache *subdiv_cache
const MVert *coarse_verts = coarse_mesh->mvert;
uint offset = subdiv_cache->num_subdiv_loops;
- /* TODO(kevindietrich) : replace this when compressed normals are supported. */
+ /* TODO(@kevindietrich): replace this when compressed normals are supported. */
struct SubdivPosNorLoop {
float pos[3];
float nor[3];
diff --git a/source/blender/draw/intern/shaders/common_fxaa_lib.glsl b/source/blender/draw/intern/shaders/common_fxaa_lib.glsl
index bf59972fbaa..599d875c500 100644
--- a/source/blender/draw/intern/shaders/common_fxaa_lib.glsl
+++ b/source/blender/draw/intern/shaders/common_fxaa_lib.glsl
@@ -51,7 +51,7 @@
/*============================================================================
FXAA QUALITY - TUNING KNOBS
------------------------------------------------------------------------------
-NOTE the other tuning knobs are now in the shader function inputs!
+NOTE: the other tuning knobs are now in the shader function inputs!
============================================================================*/
#ifndef FXAA_QUALITY__PRESET
/*
diff --git a/source/blender/draw/intern/shaders/common_hair_lib.glsl b/source/blender/draw/intern/shaders/common_hair_lib.glsl
index 6cc7f09a852..ed8b8aeb849 100644
--- a/source/blender/draw/intern/shaders/common_hair_lib.glsl
+++ b/source/blender/draw/intern/shaders/common_hair_lib.glsl
@@ -5,6 +5,9 @@
* of data the CPU has to precompute and transfer for each update.
*/
+/* TODO(fclem): Keep documentation but remove the uniform declaration. */
+#ifndef USE_GPU_SHADER_CREATE_INFO
+
/**
* hairStrandsRes: Number of points per hair strand.
* 2 - no subdivision
@@ -33,8 +36,6 @@ uniform int hairStrandOffset = 0;
/* -- Per control points -- */
uniform samplerBuffer hairPointBuffer; /* RGBA32F */
-#define point_position xyz
-#define point_time w /* Position along the hair length */
/* -- Per strands data -- */
uniform usamplerBuffer hairStrandBuffer; /* R32UI */
@@ -43,6 +44,10 @@ uniform usamplerBuffer hairStrandSegBuffer; /* R16UI */
/* Not used, use one buffer per uv layer */
// uniform samplerBuffer hairUVBuffer; /* RG32F */
// uniform samplerBuffer hairColBuffer; /* RGBA16 linear color */
+#endif
+
+#define point_position xyz
+#define point_time w /* Position along the hair length */
/* -- Subdivision stage -- */
/**
diff --git a/source/blender/draw/intern/shaders/common_hair_refine_comp.glsl b/source/blender/draw/intern/shaders/common_hair_refine_comp.glsl
index 4dcde4b0245..6a3a7815cdc 100644
--- a/source/blender/draw/intern/shaders/common_hair_refine_comp.glsl
+++ b/source/blender/draw/intern/shaders/common_hair_refine_comp.glsl
@@ -1,4 +1,6 @@
+#pragma BLENDER_REQUIRE(common_hair_lib.glsl)
+#ifndef USE_GPU_SHADER_CREATE_INFO
/*
* To be compiled with common_hair_lib.glsl.
*/
@@ -9,6 +11,7 @@ layout(std430, binding = 0) writeonly buffer hairPointOutputBuffer
vec4 posTime[];
}
out_vertbuf;
+#endif
void main(void)
{
@@ -20,5 +23,5 @@ void main(void)
vec4 result = hair_interp_data(data0, data1, data2, data3, weights);
uint index = uint(hair_get_id() * hairStrandsRes) + gl_GlobalInvocationID.y;
- out_vertbuf.posTime[index] = result;
+ posTime[index] = result;
}
diff --git a/source/blender/draw/intern/shaders/common_pointcloud_lib.glsl b/source/blender/draw/intern/shaders/common_pointcloud_lib.glsl
index 2da16d3f6a9..dd725ad327f 100644
--- a/source/blender/draw/intern/shaders/common_pointcloud_lib.glsl
+++ b/source/blender/draw/intern/shaders/common_pointcloud_lib.glsl
@@ -22,13 +22,22 @@ mat3 pointcloud_get_facing_matrix(vec3 p)
return facing_mat;
}
+/* Returns world center position and radius. */
+void pointcloud_get_pos_and_radius(out vec3 outpos, out float outradius)
+{
+ outpos = point_object_to_world(pos.xyz);
+ outradius = dot(abs(mat3(ModelMatrix) * pos.www), vec3(1.0 / 3.0));
+}
+
/* Return world position and normal. */
void pointcloud_get_pos_and_nor(out vec3 outpos, out vec3 outnor)
{
- vec3 p = point_object_to_world(pos.xyz);
+ vec3 p;
+ float radius;
+ pointcloud_get_pos_and_radius(p, radius);
+
mat3 facing_mat = pointcloud_get_facing_matrix(p);
- float radius = dot(abs(mat3(ModelMatrix) * pos.www), vec3(1.0 / 3.0));
/* TODO(fclem): remove multiplication here. Here only for keeping the size correct for now. */
radius *= 0.01;
outpos = p + (facing_mat * pos_inst) * radius;
diff --git a/source/blender/draw/intern/shaders/common_smaa_lib.glsl b/source/blender/draw/intern/shaders/common_smaa_lib.glsl
index 36ffb4d8b32..73f65fb0799 100644
--- a/source/blender/draw/intern/shaders/common_smaa_lib.glsl
+++ b/source/blender/draw/intern/shaders/common_smaa_lib.glsl
@@ -736,9 +736,11 @@ float2 SMAALumaEdgeDetectionPS(float2 texcoord,
float2 edges = step(threshold, delta.xy);
# ifndef SMAA_NO_DISCARD
+# ifdef GPU_FRAGMENT_SHADER
// Then discard if there is no edge:
if (dot(edges, float2(1.0, 1.0)) == 0.0)
discard;
+# endif
# endif
// Calculate right and bottom deltas:
@@ -804,9 +806,11 @@ float2 SMAAColorEdgeDetectionPS(float2 texcoord,
float2 edges = step(threshold, delta.xy);
# ifndef SMAA_NO_DISCARD
+# ifdef GPU_FRAGMENT_SHADER
// Then discard if there is no edge:
if (dot(edges, float2(1.0, 1.0)) == 0.0)
discard;
+# endif
# endif
// Calculate right and bottom deltas:
@@ -851,8 +855,10 @@ float2 SMAADepthEdgeDetectionPS(float2 texcoord, float4 offset[3], SMAATexture2D
float2 delta = abs(neighbours.xx - float2(neighbours.y, neighbours.z));
float2 edges = step(SMAA_DEPTH_THRESHOLD, delta);
+# ifdef GPU_FRAGMENT_SHADER
if (dot(edges, float2(1.0, 1.0)) == 0.0)
discard;
+# endif
return edges;
}
diff --git a/source/blender/draw/intern/shaders/common_subdiv_lib.glsl b/source/blender/draw/intern/shaders/common_subdiv_lib.glsl
index 9dd86c35ee4..e6538d80111 100644
--- a/source/blender/draw/intern/shaders/common_subdiv_lib.glsl
+++ b/source/blender/draw/intern/shaders/common_subdiv_lib.glsl
@@ -101,7 +101,7 @@ uint get_index(uint i)
* the format. */
struct PosNorLoop {
float x, y, z;
- /* TODO(kevindietrich) : figure how to compress properly as GLSL does not have char/short types,
+ /* TODO(@kevindietrich): figure how to compress properly as GLSL does not have char/short types,
* bit operations get tricky. */
float nx, ny, nz;
float flag;
diff --git a/source/blender/draw/intern/shaders/common_view_clipping_lib.glsl b/source/blender/draw/intern/shaders/common_view_clipping_lib.glsl
new file mode 100644
index 00000000000..d55808c42d2
--- /dev/null
+++ b/source/blender/draw/intern/shaders/common_view_clipping_lib.glsl
@@ -0,0 +1,33 @@
+
+#pragma BLENDER_REQUIRE(common_view_lib.glsl)
+
+#if defined(GPU_VERTEX_SHADER) || defined(GPU_GEOMETRY_SHADER)
+
+void view_clipping_distances(vec3 wpos)
+{
+# ifdef USE_WORLD_CLIP_PLANES
+ vec4 pos = vec4(wpos, 1.0);
+ gl_ClipDistance[0] = dot(drw_view.clip_planes[0], pos);
+ gl_ClipDistance[1] = dot(drw_view.clip_planes[1], pos);
+ gl_ClipDistance[2] = dot(drw_view.clip_planes[2], pos);
+ gl_ClipDistance[3] = dot(drw_view.clip_planes[3], pos);
+ gl_ClipDistance[4] = dot(drw_view.clip_planes[4], pos);
+ gl_ClipDistance[5] = dot(drw_view.clip_planes[5], pos);
+# endif
+}
+
+/* Kept as define for compiler compatibility. */
+# ifdef USE_WORLD_CLIP_PLANES
+# define view_clipping_distances_set(c) \
+ gl_ClipDistance[0] = (c).gl_ClipDistance[0]; \
+ gl_ClipDistance[1] = (c).gl_ClipDistance[1]; \
+ gl_ClipDistance[2] = (c).gl_ClipDistance[2]; \
+ gl_ClipDistance[3] = (c).gl_ClipDistance[3]; \
+ gl_ClipDistance[4] = (c).gl_ClipDistance[4]; \
+ gl_ClipDistance[5] = (c).gl_ClipDistance[5];
+
+# else
+# define view_clipping_distances_set(c)
+# endif
+
+#endif
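
These helpers centralize the gl_ClipDistance writes; they only become active when USE_WORLD_CLIP_PLANES is defined, which with the create-info system comes from the drw_clipped info declared in draw_view_info.hh below. A sketch of a clipped shader variant built by composition (shader names are hypothetical):

/* Adding "drw_clipped" defines USE_WORLD_CLIP_PLANES, turning
 * view_clipping_distances() into actual gl_ClipDistance writes. */
GPU_SHADER_CREATE_INFO(my_engine_surface_clipped)
    .additional_info("my_engine_surface", "drw_clipped")
    .do_static_compilation(true);
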
diff --git a/source/blender/draw/intern/shaders/common_view_lib.glsl b/source/blender/draw/intern/shaders/common_view_lib.glsl
index b0d405165f2..2ac157ad208 100644
--- a/source/blender/draw/intern/shaders/common_view_lib.glsl
+++ b/source/blender/draw/intern/shaders/common_view_lib.glsl
@@ -1,5 +1,5 @@
/* Temporary until we fully make the switch. */
-#ifndef DRW_SHADER_SHARED_H
+#ifndef USE_GPU_SHADER_CREATE_INFO
# define DRW_RESOURCE_CHUNK_LEN 512
@@ -24,7 +24,13 @@ layout(std140) uniform viewBlock
vec4 CameraTexCoFactors;
};
-#endif /* DRW_SHADER_SHARED_H */
+#endif /* USE_GPU_SHADER_CREATE_INFO */
+
+#ifdef USE_GPU_SHADER_CREATE_INFO
+# ifndef DRW_RESOURCE_CHUNK_LEN
+# error "Missing draw_view additional create info on shader create info"
+# endif
+#endif
#define ViewNear (ViewVecs[0].w)
#define ViewFar (ViewVecs[1].w)
@@ -84,66 +90,91 @@ vec4 pack_line_data(vec2 frag_co, vec2 edge_start, vec2 edge_pos)
}
}
-uniform int resourceChunk;
+/* Temporary until we fully make the switch. */
+#ifndef DRW_SHADER_SHARED_H
+uniform int drw_resourceChunk;
+#endif /* DRW_SHADER_SHARED_H */
#ifdef GPU_VERTEX_SHADER
-# ifdef GPU_ARB_shader_draw_parameters
-# define baseInstance gl_BaseInstanceARB
-# else /* no ARB_shader_draw_parameters */
-uniform int baseInstance;
-# endif
-# if defined(IN_PLACE_INSTANCES) || defined(INSTANCED_ATTR)
-/* When drawing instances of an object at the same position. */
-# define instanceId 0
-# elif defined(GPU_DEPRECATED_AMD_DRIVER)
-/* A driver bug make it so that when using an attribute with GL_INT_2_10_10_10_REV as format,
- * the gl_InstanceID is incremented by the 2 bit component of the attribute.
- * Ignore gl_InstanceID then. */
-# define instanceId 0
-# else
-# define instanceId gl_InstanceID
-# endif
+/* Temporary until we fully make the switch. */
+# ifndef DRW_SHADER_SHARED_H
-# ifdef UNIFORM_RESOURCE_ID
-/* This is in the case we want to do a special instance drawcall but still want to have the
- * right resourceId and all the correct ubo datas. */
-uniform int resourceId;
-# define resource_id resourceId
-# else
-# define resource_id (baseInstance + instanceId)
-# endif
+/* clang-format off */
+# if defined(IN_PLACE_INSTANCES) || defined(INSTANCED_ATTR) || defined(DRW_LEGACY_MODEL_MATRIX) || defined(GPU_DEPRECATED_AMD_DRIVER)
+/* clang-format on */
+/* When drawing instances of an object at the same position. */
+# define instanceId 0
+# else
+# define instanceId gl_InstanceID
+# endif
+
+# if defined(UNIFORM_RESOURCE_ID)
+/* This is in the case we want to do a special instance drawcall for one object but still want to
+ * have the right resourceId and all the correct ubo datas. */
+uniform int drw_ResourceID;
+# define resource_id drw_ResourceID
+# else
+# define resource_id (gpu_BaseInstance + instanceId)
+# endif
/* Use this to declare and pass the value if
* the fragment shader uses the resource_id. */
-# ifdef USE_GEOMETRY_SHADER
-# define RESOURCE_ID_VARYING flat out int resourceIDGeom;
-# define PASS_RESOURCE_ID resourceIDGeom = resource_id;
-# else
-# define RESOURCE_ID_VARYING flat out int resourceIDFrag;
-# define PASS_RESOURCE_ID resourceIDFrag = resource_id;
+# ifdef USE_GEOMETRY_SHADER
+# define RESOURCE_ID_VARYING flat out int resourceIDGeom;
+# define PASS_RESOURCE_ID resourceIDGeom = resource_id;
+# else
+# define RESOURCE_ID_VARYING flat out int resourceIDFrag;
+# define PASS_RESOURCE_ID resourceIDFrag = resource_id;
+# endif
+
+# endif /* DRW_SHADER_SHARED_H */
+
+#endif /* GPU_VERTEX_SHADER */
+
+/* Temporary until we fully make the switch. */
+#ifdef DRW_SHADER_SHARED_H
+/* TODO(fclem): Rename PASS_RESOURCE_ID to DRW_RESOURCE_ID_VARYING_SET */
+# if defined(UNIFORM_RESOURCE_ID)
+# define resource_id drw_ResourceID
+# define PASS_RESOURCE_ID
+
+# elif defined(GPU_VERTEX_SHADER)
+# define resource_id gpu_InstanceIndex
+# define PASS_RESOURCE_ID drw_ResourceID_iface.resource_index = resource_id;
+
+# elif defined(GPU_GEOMETRY_SHADER)
+# define resource_id drw_ResourceID_iface_in[0].index
+# define PASS_RESOURCE_ID drw_ResourceID_iface_out.resource_index = resource_id;
+
+# elif defined(GPU_FRAGMENT_SHADER)
+# define resource_id drw_ResourceID_iface.resource_index
# endif
-#endif
+/* TODO(fclem): Remove. */
+# define RESOURCE_ID_VARYING
+
+#else
/* If used in a fragment / geometry shader, we pass
* resource_id as varying. */
-#ifdef GPU_GEOMETRY_SHADER
-# define RESOURCE_ID_VARYING \
- flat out int resourceIDFrag; \
- flat in int resourceIDGeom[];
+# ifdef GPU_GEOMETRY_SHADER
+# define RESOURCE_ID_VARYING \
+ flat out int resourceIDFrag; \
+ flat in int resourceIDGeom[];
-# define resource_id resourceIDGeom
-# define PASS_RESOURCE_ID resourceIDFrag = resource_id[0];
-#endif
+# define resource_id resourceIDGeom
+# define PASS_RESOURCE_ID resourceIDFrag = resource_id[0];
+# endif
-#ifdef GPU_FRAGMENT_SHADER
+# ifdef GPU_FRAGMENT_SHADER
flat in int resourceIDFrag;
-# define resource_id resourceIDFrag
+# define resource_id resourceIDFrag
+# endif
#endif
/* Breaking this across multiple lines causes issues for some older GLSL compilers. */
/* clang-format off */
-#if !defined(GPU_INTEL) && !defined(GPU_DEPRECATED_AMD_DRIVER) && !defined(OS_MAC) && !defined(INSTANCED_ATTR)
+#if !defined(GPU_INTEL) && !defined(GPU_DEPRECATED_AMD_DRIVER) && !defined(OS_MAC) && !defined(INSTANCED_ATTR) && !defined(DRW_LEGACY_MODEL_MATRIX)
/* clang-format on */
/* Temporary until we fully make the switch. */
@@ -158,10 +189,10 @@ layout(std140) uniform modelBlock
{
ObjectMatrices drw_matrices[DRW_RESOURCE_CHUNK_LEN];
};
-# endif /* DRW_SHADER_SHARED_H */
-# define ModelMatrix (drw_matrices[resource_id].drw_modelMatrix)
-# define ModelMatrixInverse (drw_matrices[resource_id].drw_modelMatrixInverse)
+# define ModelMatrix (drw_matrices[resource_id].drw_modelMatrix)
+# define ModelMatrixInverse (drw_matrices[resource_id].drw_modelMatrixInverse)
+# endif /* DRW_SHADER_SHARED_H */
#else /* GPU_INTEL */
@@ -177,7 +208,10 @@ uniform mat4 ModelMatrixInverse;
#endif
-#define resource_handle (resourceChunk * DRW_RESOURCE_CHUNK_LEN + resource_id)
+/* Temporary until we fully make the switch. */
+#ifndef DRW_SHADER_SHARED_H
+# define resource_handle (drw_resourceChunk * DRW_RESOURCE_CHUNK_LEN + resource_id)
+#endif
/** Transform shortcuts. */
/* Rule of thumb: Try to reuse world positions and normals because converting through viewspace
diff --git a/source/blender/draw/intern/shaders/draw_hair_refine_info.hh b/source/blender/draw/intern/shaders/draw_hair_refine_info.hh
new file mode 100644
index 00000000000..bdfc26b7dcd
--- /dev/null
+++ b/source/blender/draw/intern/shaders/draw_hair_refine_info.hh
@@ -0,0 +1,42 @@
+/*
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * The Original Code is Copyright (C) 2022 Blender Foundation.
+ * All rights reserved.
+ */
+
+/** \file
+ * \ingroup draw
+ */
+
+#include "gpu_shader_create_info.hh"
+
+GPU_SHADER_CREATE_INFO(draw_hair_refine_compute)
+ .local_group_size(1, 1)
+ .storage_buf(0, Qualifier::WRITE, "vec4", "posTime[]")
+ .sampler(0, ImageType::FLOAT_BUFFER, "hairPointBuffer")
+ .sampler(1, ImageType::UINT_BUFFER, "hairStrandBuffer")
+ .sampler(2, ImageType::UINT_BUFFER, "hairStrandSegBuffer")
+ .push_constant(Type::VEC4, "hairDupliMatrix", 4)
+ .push_constant(Type::BOOL, "hairCloseTip")
+ .push_constant(Type::FLOAT, "hairRadShape")
+ .push_constant(Type::FLOAT, "hairRadTip")
+ .push_constant(Type::FLOAT, "hairRadRoot")
+ .push_constant(Type::INT, "hairThicknessRes")
+ .push_constant(Type::INT, "hairStrandsRes")
+ .push_constant(Type::INT, "hairStrandOffset")
+ .compute_source("common_hair_refine_comp.glsl")
+ .define("HAIR_PHASE_SUBDIV")
+ .do_static_compilation(true);
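
This create-info replaces the hand-rolled compute shader setup: draw_shader.c above now fetches the shader by name, and because the storage buffer at binding 0 is declared through its GLSL variable posTime[], draw_hair.c binds the output vertex buffer under that name. The two relevant calls, as they appear in the hunks above:

GPUShader *sh = GPU_shader_create_from_info_name("draw_hair_refine_compute");
/* ... */
DRW_shgroup_vertex_buffer(shgrp, "posTime", cache->final[subdiv].proc_buf);
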
diff --git a/source/blender/draw/intern/shaders/draw_object_infos_info.hh b/source/blender/draw/intern/shaders/draw_object_infos_info.hh
index 10b3754eebb..17a32c7d5ed 100644
--- a/source/blender/draw/intern/shaders/draw_object_infos_info.hh
+++ b/source/blender/draw/intern/shaders/draw_object_infos_info.hh
@@ -2,4 +2,5 @@
#include "gpu_shader_create_info.hh"
GPU_SHADER_CREATE_INFO(draw_object_infos)
+ .typedef_source("draw_shader_shared.h")
.uniform_buf(1, "ObjectInfos", "drw_infos[DRW_RESOURCE_CHUNK_LEN]", Frequency::BATCH);
diff --git a/source/blender/draw/intern/shaders/draw_view_info.hh b/source/blender/draw/intern/shaders/draw_view_info.hh
index a92284efa5b..f9dcf291f95 100644
--- a/source/blender/draw/intern/shaders/draw_view_info.hh
+++ b/source/blender/draw/intern/shaders/draw_view_info.hh
@@ -2,6 +2,43 @@
#include "gpu_shader_create_info.hh"
/* -------------------------------------------------------------------- */
+/** \name Resource ID
+ *
+ * This is used to fetch per object data in drw_matrices and other object indexed
+ * buffers. There is multiple possibilities depending on how we are drawing the object.
+ *
+ * \{ */
+
+/* Standard way. Use gpu_InstanceIndex to index the object data. */
+GPU_SHADER_CREATE_INFO(draw_resource_id).define("DYNAMIC_RESOURCE_ID");
+
+/**
+ * Used if the resource index needs to be passed to the fragment shader.
+ * IMPORTANT: Vertex and Geometry shaders need to use PASS_RESOURCE_ID in main().
+ */
+GPU_SHADER_INTERFACE_INFO(draw_resource_id_iface, "drw_ResourceID_iface")
+ .flat(Type::INT, "resource_index");
+
+GPU_SHADER_CREATE_INFO(draw_resource_id_varying)
+ .vertex_out(draw_resource_id_iface)
+ .geometry_out(draw_resource_id_iface); /* Used if needed. */
+
+/* Variation used when drawing multiple instances for one object. */
+GPU_SHADER_CREATE_INFO(draw_resource_id_uniform)
+ .define("UNIFORM_RESOURCE_ID")
+ .push_constant(Type::INT, "drw_ResourceID");
+
+/**
+ * Declare a resource handle that identify a unique object.
+ * Requires draw_resource_id[_uniform].
+ */
+GPU_SHADER_CREATE_INFO(draw_resource_handle)
+ .define("resource_handle (drw_resourceChunk * DRW_RESOURCE_CHUNK_LEN + resource_id)")
+ .push_constant(Type::INT, "drw_resourceChunk");
+
+/** \} */
+
+/* -------------------------------------------------------------------- */
/** \name Draw View
* \{ */
@@ -9,32 +46,60 @@ GPU_SHADER_CREATE_INFO(draw_view)
.uniform_buf(0, "ViewInfos", "drw_view", Frequency::PASS)
.typedef_source("draw_shader_shared.h");
-GPU_SHADER_CREATE_INFO(draw_view_instanced_attr)
- .push_constant(0, Type::MAT4, "ModelMatrix")
- .push_constant(16, Type::MAT4, "ModelMatrixInverse")
+GPU_SHADER_CREATE_INFO(draw_modelmat)
+ .uniform_buf(8, "ObjectMatrices", "drw_matrices[DRW_RESOURCE_CHUNK_LEN]", Frequency::BATCH)
+ .define("ModelMatrix", "(drw_matrices[resource_id].drw_modelMatrix)")
+ .define("ModelMatrixInverse", "(drw_matrices[resource_id].drw_modelMatrixInverse)")
.additional_info("draw_view");
+GPU_SHADER_CREATE_INFO(draw_modelmat_legacy)
+ .define("DRW_LEGACY_MODEL_MATRIX")
+ .push_constant(Type::MAT4, "ModelMatrix")
+ .push_constant(Type::MAT4, "ModelMatrixInverse")
+ .additional_info("draw_view");
+
+GPU_SHADER_CREATE_INFO(draw_modelmat_instanced_attr)
+ .push_constant(Type::MAT4, "ModelMatrix")
+ .push_constant(Type::MAT4, "ModelMatrixInverse")
+ .additional_info("draw_view");
+
+/** \} */
+
+/* -------------------------------------------------------------------- */
+/** \name Draw View
+ * \{ */
+
+GPU_SHADER_CREATE_INFO(drw_clipped).define("USE_WORLD_CLIP_PLANES");
+
/** \} */
/* -------------------------------------------------------------------- */
/** \name Geometry Type
* \{ */
-GPU_SHADER_CREATE_INFO(draw_mesh)
- .uniform_buf(8, "ObjectMatrices", "drw_matrices[DRW_RESOURCE_CHUNK_LEN]", Frequency::BATCH)
- .additional_info("draw_view");
+GPU_SHADER_CREATE_INFO(draw_mesh).additional_info("draw_modelmat", "draw_resource_id");
GPU_SHADER_CREATE_INFO(draw_hair)
- /* TODO(fclem) Finish */
- .uniform_buf(8, "ObjectMatrices", "drw_matrices[DRW_RESOURCE_CHUNK_LEN]", Frequency::BATCH)
- .additional_info("draw_view");
+ .sampler(15, ImageType::FLOAT_BUFFER, "hairPointBuffer")
+ .sampler(14, ImageType::UINT_BUFFER, "hairStrandBuffer")
+ .sampler(13, ImageType::UINT_BUFFER, "hairStrandSegBuffer")
+ /* TODO(@fclem): Pack these into one UBO. */
+ .push_constant(Type::INT, "hairStrandsRes")
+ .push_constant(Type::INT, "hairThicknessRes")
+ .push_constant(Type::FLOAT, "hairRadRoot")
+ .push_constant(Type::FLOAT, "hairRadTip")
+ .push_constant(Type::FLOAT, "hairRadShape")
+ .push_constant(Type::BOOL, "hairCloseTip")
+ .push_constant(Type::INT, "hairStrandOffset")
+ .push_constant(Type::VEC4, "hairDupliMatrix", 4)
+ .additional_info("draw_modelmat", "draw_resource_id");
GPU_SHADER_CREATE_INFO(draw_pointcloud)
.vertex_in(0, Type::VEC4, "pos")
.vertex_in(1, Type::VEC3, "pos_inst")
.vertex_in(2, Type::VEC3, "nor")
- .define("UNIFORM_RESOURCE_ID")
- .define("INSTANCED_ATTR")
- .additional_info("draw_view_instanced_attr");
+ .additional_info("draw_modelmat_instanced_attr", "draw_resource_id_uniform");
+
+GPU_SHADER_CREATE_INFO(draw_volume).additional_info("draw_modelmat", "draw_resource_id_uniform");
/** \} */
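
With the model-matrix and resource-id pieces split out, an engine shader is assembled by listing the infos it needs; when the fragment stage needs resource_id, it additionally includes draw_resource_id_varying and sets the varying with PASS_RESOURCE_ID in the vertex stage, as the comment above notes. A sketch with hypothetical names; draw_mesh already pulls in draw_modelmat and draw_resource_id:

GPU_SHADER_CREATE_INFO(my_engine_mesh)
    .additional_info("draw_mesh", "draw_resource_id_varying")
    .vertex_in(0, Type::VEC3, "pos")
    .vertex_source("my_engine_mesh_vert.glsl")   /* Must call PASS_RESOURCE_ID in main(). */
    .fragment_source("my_engine_mesh_frag.glsl") /* Can then read resource_id. */
    .do_static_compilation(true);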