git.blender.org/blender.git
Diffstat (limited to 'source/blender/gpu/intern')
 source/blender/gpu/intern/gpu_batch.cc               |  20
 source/blender/gpu/intern/gpu_codegen.c              |  21
 source/blender/gpu/intern/gpu_context.cc             |  16
 source/blender/gpu/intern/gpu_framebuffer.cc         |  33
 source/blender/gpu/intern/gpu_immediate.cc           |  34
 source/blender/gpu/intern/gpu_immediate_util.c       |  12
 source/blender/gpu/intern/gpu_index_buffer.cc        |   6
 source/blender/gpu/intern/gpu_material.c             |  10
 source/blender/gpu/intern/gpu_matrix.cc              |  17
 source/blender/gpu/intern/gpu_node_graph.c           | 154
 source/blender/gpu/intern/gpu_node_graph.h           |  10
 source/blender/gpu/intern/gpu_platform.cc            |   6
 source/blender/gpu/intern/gpu_select_sample_query.cc |   2
 source/blender/gpu/intern/gpu_shader.cc              |  33
 source/blender/gpu/intern/gpu_shader_interface.cc    |   8
 source/blender/gpu/intern/gpu_state.cc               |  20
 source/blender/gpu/intern/gpu_texture.cc             |  16
 source/blender/gpu/intern/gpu_uniform_buffer.cc      |  16
 source/blender/gpu/intern/gpu_vertex_buffer.cc       |  20
 source/blender/gpu/intern/gpu_viewport.c             |   3
 20 files changed, 327 insertions(+), 130 deletions(-)
diff --git a/source/blender/gpu/intern/gpu_batch.cc b/source/blender/gpu/intern/gpu_batch.cc
index 511ddd210af..3bf1233c000 100644
--- a/source/blender/gpu/intern/gpu_batch.cc
+++ b/source/blender/gpu/intern/gpu_batch.cc
@@ -73,21 +73,21 @@ void GPU_batch_init_ex(GPUBatch *batch,
GPUIndexBuf *elem,
eGPUBatchFlag owns_flag)
{
- BLI_assert(verts != NULL);
+ BLI_assert(verts != nullptr);
/* Do not pass any other flag */
BLI_assert((owns_flag & ~(GPU_BATCH_OWNS_VBO | GPU_BATCH_OWNS_INDEX)) == 0);
batch->verts[0] = verts;
for (int v = 1; v < GPU_BATCH_VBO_MAX_LEN; v++) {
- batch->verts[v] = NULL;
+ batch->verts[v] = nullptr;
}
for (int v = 0; v < GPU_BATCH_INST_VBO_MAX_LEN; v++) {
- batch->inst[v] = NULL;
+ batch->inst[v] = nullptr;
}
batch->elem = elem;
batch->prim_type = prim_type;
batch->flag = owns_flag | GPU_BATCH_INIT | GPU_BATCH_DIRTY;
- batch->shader = NULL;
+ batch->shader = nullptr;
}
/* This will share the VBOs with the new batch. */
@@ -171,7 +171,7 @@ int GPU_batch_instbuf_add_ex(GPUBatch *batch, GPUVertBuf *insts, bool own_vbo)
batch->flag |= GPU_BATCH_DIRTY;
for (uint v = 0; v < GPU_BATCH_INST_VBO_MAX_LEN; v++) {
- if (batch->inst[v] == NULL) {
+ if (batch->inst[v] == nullptr) {
/* for now all VertexBuffers must have same vertex_len */
if (batch->inst[0]) {
/* Allow for different size of vertex buffer (will choose the smallest number of verts). */
@@ -195,9 +195,9 @@ int GPU_batch_vertbuf_add_ex(GPUBatch *batch, GPUVertBuf *verts, bool own_vbo)
batch->flag |= GPU_BATCH_DIRTY;
for (uint v = 0; v < GPU_BATCH_VBO_MAX_LEN; v++) {
- if (batch->verts[v] == NULL) {
+ if (batch->verts[v] == nullptr) {
/* for now all VertexBuffers must have same vertex_len */
- if (batch->verts[0] != NULL) {
+ if (batch->verts[0] != nullptr) {
/* This is an issue for the HACK inside DRW_vbo_request(). */
// BLI_assert(verts->vertex_len == batch->verts[0]->vertex_len);
}
@@ -246,7 +246,7 @@ void GPU_batch_draw_range(GPUBatch *batch, int v_first, int v_count)
/* Draw multiple instance of a batch without having any instance attributes. */
void GPU_batch_draw_instanced(GPUBatch *batch, int i_count)
{
- BLI_assert(batch->inst[0] == NULL);
+ BLI_assert(batch->inst[0] == nullptr);
GPU_shader_bind(batch->shader);
GPU_batch_draw_advanced(batch, 0, 0, 0, i_count);
@@ -255,7 +255,7 @@ void GPU_batch_draw_instanced(GPUBatch *batch, int i_count)
void GPU_batch_draw_advanced(
GPUBatch *gpu_batch, int v_first, int v_count, int i_first, int i_count)
{
- BLI_assert(Context::get()->shader != NULL);
+ BLI_assert(Context::get()->shader != nullptr);
Batch *batch = static_cast<Batch *>(gpu_batch);
if (v_count == 0) {
@@ -269,7 +269,7 @@ void GPU_batch_draw_advanced(
if (i_count == 0) {
i_count = (batch->inst[0]) ? batch->inst_(0)->vertex_len : 1;
/* Meh. This is to be able to use different numbers of verts in instance vbos. */
- if (batch->inst[1] != NULL) {
+ if (batch->inst[1] != nullptr) {
i_count = min_ii(i_count, batch->inst_(1)->vertex_len);
}
}
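
A note on the instancing hunks above: when a batch carries a second instance
VBO of a different length, GPU_batch_draw_advanced() clamps the instance count
to the smaller buffer. A minimal sketch with hypothetical lengths (min_ii() is
Blender's integer-min helper):

    /* Sketch only: buffer lengths are illustrative, not from this commit. */
    int i_count = 100;             /* batch->inst_(0)->vertex_len */
    /* batch->inst[1] != nullptr and holds 60 instance verts: */
    i_count = min_ii(i_count, 60); /* 60 instances drawn; the surplus
                                    * 40 entries of inst[0] are never read. */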
diff --git a/source/blender/gpu/intern/gpu_codegen.c b/source/blender/gpu/intern/gpu_codegen.c
index 2d76e793fc0..3ebe2edc89e 100644
--- a/source/blender/gpu/intern/gpu_codegen.c
+++ b/source/blender/gpu/intern/gpu_codegen.c
@@ -377,6 +377,19 @@ static int codegen_process_uniforms_functions(GPUMaterial *material,
BLI_freelistN(&ubo_inputs);
}
+ /* Generate the uniform attribute UBO if necessary. */
+ if (!BLI_listbase_is_empty(&graph->uniform_attrs.list)) {
+ BLI_dynstr_append(ds, "\nstruct UniformAttributes {\n");
+ LISTBASE_FOREACH (GPUUniformAttr *, attr, &graph->uniform_attrs.list) {
+ BLI_dynstr_appendf(ds, " vec4 attr%d;\n", attr->id);
+ }
+ BLI_dynstr_append(ds, "};\n");
+ BLI_dynstr_appendf(ds, "layout (std140) uniform %s {\n", GPU_ATTRIBUTE_UBO_BLOCK_NAME);
+ BLI_dynstr_append(ds, " UniformAttributes uniform_attrs[DRW_RESOURCE_CHUNK_LEN];\n");
+ BLI_dynstr_append(ds, "};\n");
+ BLI_dynstr_append(ds, "#define GET_UNIFORM_ATTR(name) (uniform_attrs[resource_id].name)\n");
+ }
+
BLI_dynstr_append(ds, "\n");
return builtins;
@@ -478,7 +491,10 @@ static void codegen_call_functions(DynStr *ds, GPUNodeGraph *graph, GPUOutput *f
BLI_dynstr_appendf(ds, "cons%d", input->id);
}
else if (input->source == GPU_SOURCE_ATTR) {
- BLI_dynstr_appendf(ds, "var%d", input->attr->id);
+ codegen_convert_datatype(ds, input->attr->gputype, input->type, "var", input->attr->id);
+ }
+ else if (input->source == GPU_SOURCE_UNIFORM_ATTR) {
+ BLI_dynstr_appendf(ds, "GET_UNIFORM_ATTR(attr%d)", input->uniform_attr->id);
}
BLI_dynstr_append(ds, ", ");
@@ -799,6 +815,7 @@ GPUPass *GPU_generate_pass(GPUMaterial *material,
/* Prune the unused nodes and extract attributes before compiling so the
* generated VBOs are ready to accept the future shader. */
gpu_node_graph_prune_unused(graph);
+ gpu_node_graph_finalize_uniform_attrs(graph);
int builtins = 0;
LISTBASE_FOREACH (GPUNode *, node, &graph->nodes) {
@@ -914,7 +931,7 @@ static int count_active_texture_sampler(GPUShader *shader, char *source)
/* Move past "uniform". */
code += 7;
/* Skip sampler type suffix. */
- while (*code != ' ' && *code != '\0') {
+ while (!ELEM(*code, ' ', '\0')) {
code++;
}
/* Skip following spaces. */
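
To make the uniform-attribute codegen above concrete: for a material whose
graph ends up with two uniform attributes, the DynStr calls emit roughly the
following GLSL. The block name is a placeholder here (the real one comes from
GPU_ATTRIBUTE_UBO_BLOCK_NAME); resource_id and DRW_RESOURCE_CHUNK_LEN are
supplied by the draw manager's resource-chunking scheme:

    struct UniformAttributes {
      vec4 attr0;
      vec4 attr1;
    };
    /* Block name stands in for GPU_ATTRIBUTE_UBO_BLOCK_NAME. */
    layout (std140) uniform UniformAttrBlock {
      UniformAttributes uniform_attrs[DRW_RESOURCE_CHUNK_LEN];
    };
    #define GET_UNIFORM_ATTR(name) (uniform_attrs[resource_id].name)

Each input with source GPU_SOURCE_UNIFORM_ATTR is then pasted into the
generated function-call argument list as GET_UNIFORM_ATTR(attrN).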
diff --git a/source/blender/gpu/intern/gpu_context.cc b/source/blender/gpu/intern/gpu_context.cc
index 119c1ef9c55..a9d32dcf297 100644
--- a/source/blender/gpu/intern/gpu_context.cc
+++ b/source/blender/gpu/intern/gpu_context.cc
@@ -54,7 +54,7 @@
using namespace blender::gpu;
-static thread_local Context *active_ctx = NULL;
+static thread_local Context *active_ctx = nullptr;
/* -------------------------------------------------------------------- */
/** \name gpu::Context methods
@@ -80,12 +80,12 @@ Context::~Context()
delete imm;
}
-bool Context::is_active_on_thread(void)
+bool Context::is_active_on_thread()
{
return (this == active_ctx) && pthread_equal(pthread_self(), thread_);
}
-Context *Context::get(void)
+Context *Context::get()
{
return active_ctx;
}
@@ -98,7 +98,7 @@ Context *Context::get(void)
GPUContext *GPU_context_create(void *ghost_window)
{
- if (GPUBackend::get() == NULL) {
+ if (GPUBackend::get() == nullptr) {
/* TODO move where it make sense. */
GPU_backend_init(GPU_BACKEND_OPENGL);
}
@@ -114,7 +114,7 @@ void GPU_context_discard(GPUContext *ctx_)
{
Context *ctx = unwrap(ctx_);
delete ctx;
- active_ctx = NULL;
+ active_ctx = nullptr;
}
/* ctx can be NULL */
@@ -166,7 +166,7 @@ static GPUBackend *g_backend;
void GPU_backend_init(eGPUBackendType backend_type)
{
- BLI_assert(g_backend == NULL);
+ BLI_assert(g_backend == nullptr);
switch (backend_type) {
#if WITH_OPENGL_BACKEND
@@ -185,10 +185,10 @@ void GPU_backend_exit(void)
/* TODO assert no resource left. Currently UI textures are still not freed in their context
* correctly. */
delete g_backend;
- g_backend = NULL;
+ g_backend = nullptr;
}
-GPUBackend *GPUBackend::get(void)
+GPUBackend *GPUBackend::get()
{
return g_backend;
}
diff --git a/source/blender/gpu/intern/gpu_framebuffer.cc b/source/blender/gpu/intern/gpu_framebuffer.cc
index 8d9a1301be0..f11f1cea753 100644
--- a/source/blender/gpu/intern/gpu_framebuffer.cc
+++ b/source/blender/gpu/intern/gpu_framebuffer.cc
@@ -58,7 +58,7 @@ FrameBuffer::FrameBuffer(const char *name)
dirty_state_ = true;
for (int i = 0; i < ARRAY_SIZE(attachments_); i++) {
- attachments_[i].tex = NULL;
+ attachments_[i].tex = nullptr;
attachments_[i].mip = -1;
attachments_[i].layer = -1;
}
@@ -67,7 +67,7 @@ FrameBuffer::FrameBuffer(const char *name)
FrameBuffer::~FrameBuffer()
{
for (int i = 0; i < ARRAY_SIZE(attachments_); i++) {
- if (attachments_[i].tex != NULL) {
+ if (attachments_[i].tex != nullptr) {
reinterpret_cast<Texture *>(attachments_[i].tex)->detach_from(this);
}
}
@@ -150,7 +150,7 @@ void FrameBuffer::recursive_downsample(int max_lvl,
/* Replace attached mip-level for each attachment. */
for (int att = 0; att < ARRAY_SIZE(attachments_); att++) {
Texture *tex = reinterpret_cast<Texture *>(attachments_[att].tex);
- if (tex != NULL) {
+ if (tex != nullptr) {
/* Some Intel HDXXX have issue with rendering to a mipmap that is below
* the texture GL_TEXTURE_MAX_LEVEL. So even if it not correct, in this case
* we allow GL_TEXTURE_MAX_LEVEL to be one level lower. In practice it does work! */
@@ -169,7 +169,7 @@ void FrameBuffer::recursive_downsample(int max_lvl,
}
for (int att = 0; att < ARRAY_SIZE(attachments_); att++) {
- if (attachments_[att].tex != NULL) {
+ if (attachments_[att].tex != nullptr) {
/* Reset mipmap level range. */
reinterpret_cast<Texture *>(attachments_[att].tex)->mip_range_set(0, max_lvl);
/* Reset base level. NOTE: might not be the one bound at the start of this function. */
@@ -242,14 +242,14 @@ void GPU_framebuffer_restore(void)
GPUFrameBuffer *GPU_framebuffer_active_get(void)
{
Context *ctx = Context::get();
- return wrap(ctx ? ctx->active_fb : NULL);
+ return wrap(ctx ? ctx->active_fb : nullptr);
}
/* Returns the default frame-buffer. Will always exists even if it's just a dummy. */
GPUFrameBuffer *GPU_framebuffer_back_get(void)
{
Context *ctx = Context::get();
- return wrap(ctx ? ctx->back_left : NULL);
+ return wrap(ctx ? ctx->back_left : nullptr);
}
bool GPU_framebuffer_bound(GPUFrameBuffer *gpu_fb)
@@ -314,7 +314,7 @@ void GPU_framebuffer_config_array(GPUFrameBuffer *gpu_fb,
if (depth_attachment.mip == -1) {
/* GPU_ATTACHMENT_LEAVE */
}
- else if (depth_attachment.tex == NULL) {
+ else if (depth_attachment.tex == nullptr) {
/* GPU_ATTACHMENT_NONE: Need to clear both targets. */
fb->attachment_set(GPU_FB_DEPTH_STENCIL_ATTACHMENT, depth_attachment);
fb->attachment_set(GPU_FB_DEPTH_ATTACHMENT, depth_attachment);
@@ -487,7 +487,7 @@ void GPU_framebuffer_recursive_downsample(GPUFrameBuffer *gpu_fb,
static struct {
GPUFrameBuffer *framebuffers[FRAMEBUFFER_STACK_DEPTH];
uint top;
-} FrameBufferStack = {{0}};
+} FrameBufferStack = {{nullptr}};
static void gpuPushFrameBuffer(GPUFrameBuffer *fb)
{
@@ -496,7 +496,7 @@ static void gpuPushFrameBuffer(GPUFrameBuffer *fb)
FrameBufferStack.top++;
}
-static GPUFrameBuffer *gpuPopFrameBuffer(void)
+static GPUFrameBuffer *gpuPopFrameBuffer()
{
BLI_assert(FrameBufferStack.top > 0);
FrameBufferStack.top--;
@@ -526,7 +526,7 @@ static GPUFrameBuffer *gpu_offscreen_fb_get(GPUOffScreen *ofs)
BLI_assert(ctx);
for (int i = 0; i < MAX_CTX_FB_LEN; i++) {
- if (ofs->framebuffers[i].fb == NULL) {
+ if (ofs->framebuffers[i].fb == nullptr) {
ofs->framebuffers[i].ctx = ctx;
GPU_framebuffer_ensure_config(&ofs->framebuffers[i].fb,
{
@@ -552,7 +552,7 @@ static GPUFrameBuffer *gpu_offscreen_fb_get(GPUOffScreen *ofs)
for (int i = 0; i < MAX_CTX_FB_LEN; i++) {
GPU_framebuffer_free(ofs->framebuffers[i].fb);
- ofs->framebuffers[i].fb = NULL;
+ ofs->framebuffers[i].fb = nullptr;
}
return gpu_offscreen_fb_get(ofs);
@@ -569,16 +569,17 @@ GPUOffScreen *GPU_offscreen_create(
width = max_ii(1, width);
ofs->color = GPU_texture_create_2d(
- "ofs_color", width, height, 1, (high_bitdepth) ? GPU_RGBA16F : GPU_RGBA8, NULL);
+ "ofs_color", width, height, 1, (high_bitdepth) ? GPU_RGBA16F : GPU_RGBA8, nullptr);
if (depth) {
- ofs->depth = GPU_texture_create_2d("ofs_depth", width, height, 1, GPU_DEPTH24_STENCIL8, NULL);
+ ofs->depth = GPU_texture_create_2d(
+ "ofs_depth", width, height, 1, GPU_DEPTH24_STENCIL8, nullptr);
}
if ((depth && !ofs->depth) || !ofs->color) {
BLI_snprintf(err_out, 256, "GPUTexture: Texture allocation failed.");
GPU_offscreen_free(ofs);
- return NULL;
+ return nullptr;
}
GPUFrameBuffer *fb = gpu_offscreen_fb_get(ofs);
@@ -586,7 +587,7 @@ GPUOffScreen *GPU_offscreen_create(
/* check validity at the very end! */
if (!GPU_framebuffer_check_valid(fb, err_out)) {
GPU_offscreen_free(ofs);
- return NULL;
+ return nullptr;
}
GPU_framebuffer_restore();
return ofs;
@@ -620,7 +621,7 @@ void GPU_offscreen_bind(GPUOffScreen *ofs, bool save)
void GPU_offscreen_unbind(GPUOffScreen *UNUSED(ofs), bool restore)
{
- GPUFrameBuffer *fb = NULL;
+ GPUFrameBuffer *fb = nullptr;
if (restore) {
fb = gpuPopFrameBuffer();
}
diff --git a/source/blender/gpu/intern/gpu_immediate.cc b/source/blender/gpu/intern/gpu_immediate.cc
index 979b3cbb557..95718391165 100644
--- a/source/blender/gpu/intern/gpu_immediate.cc
+++ b/source/blender/gpu/intern/gpu_immediate.cc
@@ -39,19 +39,19 @@
using namespace blender::gpu;
-static thread_local Immediate *imm = NULL;
+static thread_local Immediate *imm = nullptr;
-void immActivate(void)
+void immActivate()
{
imm = Context::get()->imm;
}
-void immDeactivate(void)
+void immDeactivate()
{
- imm = NULL;
+ imm = nullptr;
}
-GPUVertFormat *immVertexFormat(void)
+GPUVertFormat *immVertexFormat()
{
GPU_vertformat_clear(&imm->vertex_format);
return &imm->vertex_format;
@@ -59,7 +59,7 @@ GPUVertFormat *immVertexFormat(void)
void immBindShader(GPUShader *shader)
{
- BLI_assert(imm->shader == NULL);
+ BLI_assert(imm->shader == nullptr);
imm->shader = shader;
imm->builtin_shader_bound = GPU_SHADER_TEXT; /* Default value. */
@@ -81,16 +81,16 @@ void immBindBuiltinProgram(eGPUBuiltinShader shader_id)
imm->builtin_shader_bound = shader_id;
}
-void immUnbindProgram(void)
+void immUnbindProgram()
{
- BLI_assert(imm->shader != NULL);
+ BLI_assert(imm->shader != nullptr);
GPU_shader_unbind();
- imm->shader = NULL;
+ imm->shader = nullptr;
}
/* XXX do not use it. Special hack to use OCIO with batch API. */
-GPUShader *immGetShader(void)
+GPUShader *immGetShader()
{
return imm->shader;
}
@@ -192,7 +192,7 @@ static void wide_line_workaround_start(GPUPrimType prim_type)
}
}
-static void wide_line_workaround_end(void)
+static void wide_line_workaround_end()
{
if (imm->prev_shader) {
if (GPU_blend_get() == GPU_BLEND_NONE) {
@@ -202,7 +202,7 @@ static void wide_line_workaround_end(void)
immUnbindProgram();
immBindShader(imm->prev_shader);
- imm->prev_shader = NULL;
+ imm->prev_shader = nullptr;
}
}
@@ -245,7 +245,7 @@ GPUBatch *immBeginBatch(GPUPrimType prim_type, uint vertex_len)
imm->vertex_data = (uchar *)GPU_vertbuf_get_data(verts);
- imm->batch = GPU_batch_create_ex(prim_type, verts, NULL, GPU_BATCH_OWNS_VBO);
+ imm->batch = GPU_batch_create_ex(prim_type, verts, nullptr, GPU_BATCH_OWNS_VBO);
imm->batch->flag |= GPU_BATCH_BUILDING;
return imm->batch;
@@ -258,7 +258,7 @@ GPUBatch *immBeginBatchAtMost(GPUPrimType prim_type, uint vertex_len)
return immBeginBatch(prim_type, vertex_len);
}
-void immEnd(void)
+void immEnd()
{
BLI_assert(imm->prim_type != GPU_PRIM_NONE); /* Make sure we're between a Begin/End pair. */
BLI_assert(imm->vertex_data || imm->batch);
@@ -279,7 +279,7 @@ void immEnd(void)
}
GPU_batch_set_shader(imm->batch, imm->shader);
imm->batch->flag &= ~GPU_BATCH_BUILDING;
- imm->batch = NULL; /* don't free, batch belongs to caller */
+ imm->batch = nullptr; /* don't free, batch belongs to caller */
}
else {
imm->end();
@@ -288,7 +288,7 @@ void immEnd(void)
/* Prepare for next immBegin. */
imm->prim_type = GPU_PRIM_NONE;
imm->strict_vertex_len = true;
- imm->vertex_data = NULL;
+ imm->vertex_data = nullptr;
wide_line_workaround_end();
}
@@ -489,7 +489,7 @@ void immAttrSkip(uint attr_id)
setAttrValueBit(attr_id);
}
-static void immEndVertex(void) /* and move on to the next vertex */
+static void immEndVertex() /* and move on to the next vertex */
{
BLI_assert(imm->prim_type != GPU_PRIM_NONE); /* make sure we're between a Begin/End pair */
BLI_assert(imm->vertex_idx < imm->vertex_len);
diff --git a/source/blender/gpu/intern/gpu_immediate_util.c b/source/blender/gpu/intern/gpu_immediate_util.c
index b8cd9fe356d..d18dc862ce7 100644
--- a/source/blender/gpu/intern/gpu_immediate_util.c
+++ b/source/blender/gpu/intern/gpu_immediate_util.c
@@ -455,8 +455,8 @@ void imm_draw_cylinder_fill_normal_3d(
for (int j = 0; j < stacks; j++) {
float fac1 = (float)j / (float)stacks;
float fac2 = (float)(j + 1) / (float)stacks;
- float r1 = base * (1.f - fac1) + top * fac1;
- float r2 = base * (1.f - fac2) + top * fac2;
+ float r1 = base * (1.0f - fac1) + top * fac1;
+ float r2 = base * (1.0f - fac2) + top * fac2;
float h1 = height * ((float)j / (float)stacks);
float h2 = height * ((float)(j + 1) / (float)stacks);
@@ -511,8 +511,8 @@ void imm_draw_cylinder_wire_3d(
for (int j = 0; j < stacks; j++) {
float fac1 = (float)j / (float)stacks;
float fac2 = (float)(j + 1) / (float)stacks;
- float r1 = base * (1.f - fac1) + top * fac1;
- float r2 = base * (1.f - fac2) + top * fac2;
+ float r1 = base * (1.0f - fac1) + top * fac1;
+ float r2 = base * (1.0f - fac2) + top * fac2;
float h1 = height * ((float)j / (float)stacks);
float h2 = height * ((float)(j + 1) / (float)stacks);
@@ -549,8 +549,8 @@ void imm_draw_cylinder_fill_3d(
for (int j = 0; j < stacks; j++) {
float fac1 = (float)j / (float)stacks;
float fac2 = (float)(j + 1) / (float)stacks;
- float r1 = base * (1.f - fac1) + top * fac1;
- float r2 = base * (1.f - fac2) + top * fac2;
+ float r1 = base * (1.0f - fac1) + top * fac1;
+ float r2 = base * (1.0f - fac2) + top * fac2;
float h1 = height * ((float)j / (float)stacks);
float h2 = height * ((float)(j + 1) / (float)stacks);
diff --git a/source/blender/gpu/intern/gpu_index_buffer.cc b/source/blender/gpu/intern/gpu_index_buffer.cc
index 36f18f2da49..65932d2dbf4 100644
--- a/source/blender/gpu/intern/gpu_index_buffer.cc
+++ b/source/blender/gpu/intern/gpu_index_buffer.cc
@@ -69,7 +69,7 @@ void GPU_indexbuf_init(GPUIndexBufBuilder *builder,
void GPU_indexbuf_add_generic_vert(GPUIndexBufBuilder *builder, uint v)
{
#if TRUST_NO_ONE
- assert(builder->data != NULL);
+ assert(builder->data != nullptr);
assert(builder->index_len < builder->max_index_len);
assert(v <= builder->max_allowed_index);
#endif
@@ -79,7 +79,7 @@ void GPU_indexbuf_add_generic_vert(GPUIndexBufBuilder *builder, uint v)
void GPU_indexbuf_add_primitive_restart(GPUIndexBufBuilder *builder)
{
#if TRUST_NO_ONE
- assert(builder->data != NULL);
+ assert(builder->data != nullptr);
assert(builder->index_len < builder->max_index_len);
#endif
builder->data[builder->index_len++] = RESTART_INDEX;
@@ -336,7 +336,7 @@ GPUIndexBuf *GPU_indexbuf_create_subrange(GPUIndexBuf *elem_src, uint start, uin
void GPU_indexbuf_build_in_place(GPUIndexBufBuilder *builder, GPUIndexBuf *elem)
{
- BLI_assert(builder->data != NULL);
+ BLI_assert(builder->data != nullptr);
/* Transfer data ownership to GPUIndexBuf.
* It will be uploaded upon first use. */
unwrap(elem)->init(builder->index_len, builder->data);
diff --git a/source/blender/gpu/intern/gpu_material.c b/source/blender/gpu/intern/gpu_material.c
index 011d14673b4..a0fe77598f2 100644
--- a/source/blender/gpu/intern/gpu_material.c
+++ b/source/blender/gpu/intern/gpu_material.c
@@ -313,7 +313,7 @@ static float eval_profile(float r, short falloff_type, float sharpness, float pa
{
r = fabsf(r);
- if (falloff_type == SHD_SUBSURFACE_BURLEY || falloff_type == SHD_SUBSURFACE_RANDOM_WALK) {
+ if (ELEM(falloff_type, SHD_SUBSURFACE_BURLEY, SHD_SUBSURFACE_RANDOM_WALK)) {
return burley_profile(r, param) / BURLEY_TRUNCATE_CDF;
}
if (falloff_type == SHD_SUBSURFACE_CUBIC) {
@@ -353,7 +353,7 @@ static void compute_sss_kernel(
/* Christensen-Burley fitting */
float l[3], d[3];
- if (falloff_type == SHD_SUBSURFACE_BURLEY || falloff_type == SHD_SUBSURFACE_RANDOM_WALK) {
+ if (ELEM(falloff_type, SHD_SUBSURFACE_BURLEY, SHD_SUBSURFACE_RANDOM_WALK)) {
mul_v3_v3fl(l, rad, 0.25f * M_1_PI);
const float A = 1.0f;
const float s = 1.9f - A + 3.5f * (A - 0.8f) * (A - 0.8f);
@@ -584,6 +584,12 @@ ListBase GPU_material_volume_grids(GPUMaterial *material)
return material->graph.volume_grids;
}
+GPUUniformAttrList *GPU_material_uniform_attributes(GPUMaterial *material)
+{
+ GPUUniformAttrList *attrs = &material->graph.uniform_attrs;
+ return attrs->count > 0 ? attrs : NULL;
+}
+
void GPU_material_output_link(GPUMaterial *material, GPUNodeLink *link)
{
if (!material->graph.outlink) {
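
The new GPU_material_uniform_attributes() accessor returns NULL when the
finalized set is empty, so callers only allocate a per-object UBO for
materials that actually use uniform attributes. A hedged usage sketch (the
pool-ensure helper is hypothetical, named after the
DRW_uniform_attrs_pool_free() call this commit adds to gpu_viewport.c):

    GPUUniformAttrList *attrs = GPU_material_uniform_attributes(material);
    if (attrs != NULL) {
      /* Hypothetical draw-manager helper: fetch or build the UBO matching
       * this attribute set for the current object. */
      GPUUniformBuf *ubo = drw_uniform_attrs_pool_ensure(pool, attrs, ob);
      GPU_uniformbuf_bind(ubo, slot);
    }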
diff --git a/source/blender/gpu/intern/gpu_matrix.cc b/source/blender/gpu/intern/gpu_matrix.cc
index 0274966d4b9..dae56e39db6 100644
--- a/source/blender/gpu/intern/gpu_matrix.cc
+++ b/source/blender/gpu/intern/gpu_matrix.cc
@@ -606,7 +606,7 @@ const float (*GPU_matrix_projection_get(float m[4][4]))[4]
const float (*GPU_matrix_model_view_projection_get(float m[4][4]))[4]
{
- if (m == NULL) {
+ if (m == nullptr) {
static Mat4 temp;
m = temp;
}
@@ -617,12 +617,12 @@ const float (*GPU_matrix_model_view_projection_get(float m[4][4]))[4]
const float (*GPU_matrix_normal_get(float m[3][3]))[3]
{
- if (m == NULL) {
+ if (m == nullptr) {
static Mat3 temp3;
m = temp3;
}
- copy_m3_m4(m, (const float(*)[4])GPU_matrix_model_view_get(NULL));
+ copy_m3_m4(m, (const float(*)[4])GPU_matrix_model_view_get(nullptr));
invert_m3(m);
transpose_m3(m);
@@ -632,7 +632,7 @@ const float (*GPU_matrix_normal_get(float m[3][3]))[3]
const float (*GPU_matrix_normal_inverse_get(float m[3][3]))[3]
{
- if (m == NULL) {
+ if (m == nullptr) {
static Mat3 temp3;
m = temp3;
}
@@ -658,17 +658,18 @@ void GPU_matrix_bind(GPUShader *shader)
int32_t P_inv = GPU_shader_get_builtin_uniform(shader, GPU_UNIFORM_PROJECTION_INV);
if (MV != -1) {
- GPU_shader_uniform_vector(shader, MV, 16, 1, (const float *)GPU_matrix_model_view_get(NULL));
+ GPU_shader_uniform_vector(
+ shader, MV, 16, 1, (const float *)GPU_matrix_model_view_get(nullptr));
}
if (P != -1) {
- GPU_shader_uniform_vector(shader, P, 16, 1, (const float *)GPU_matrix_projection_get(NULL));
+ GPU_shader_uniform_vector(shader, P, 16, 1, (const float *)GPU_matrix_projection_get(nullptr));
}
if (MVP != -1) {
GPU_shader_uniform_vector(
- shader, MVP, 16, 1, (const float *)GPU_matrix_model_view_projection_get(NULL));
+ shader, MVP, 16, 1, (const float *)GPU_matrix_model_view_projection_get(nullptr));
}
if (N != -1) {
- GPU_shader_uniform_vector(shader, N, 9, 1, (const float *)GPU_matrix_normal_get(NULL));
+ GPU_shader_uniform_vector(shader, N, 9, 1, (const float *)GPU_matrix_normal_get(nullptr));
}
if (MV_inv != -1) {
Mat4 m;
diff --git a/source/blender/gpu/intern/gpu_node_graph.c b/source/blender/gpu/intern/gpu_node_graph.c
index c890d56994f..2a2a51e32b3 100644
--- a/source/blender/gpu/intern/gpu_node_graph.c
+++ b/source/blender/gpu/intern/gpu_node_graph.c
@@ -132,7 +132,14 @@ static void gpu_node_input_link(GPUNode *node, GPUNodeLink *link, const eGPUType
case GPU_NODE_LINK_ATTR:
input->source = GPU_SOURCE_ATTR;
input->attr = link->attr;
- input->attr->gputype = type;
+ /* Failsafe handling if the same attribute is used with different datatypes for
+ * some reason (only really makes sense with float/vec2/vec3/vec4 though). This
+ * can happen if mixing the generic Attribute node with specialized ones. */
+ CLAMP_MIN(input->attr->gputype, type);
+ break;
+ case GPU_NODE_LINK_UNIFORM_ATTR:
+ input->source = GPU_SOURCE_UNIFORM_ATTR;
+ input->uniform_attr = link->uniform_attr;
break;
case GPU_NODE_LINK_CONSTANT:
input->source = (type == GPU_CLOSURE) ? GPU_SOURCE_STRUCT : GPU_SOURCE_CONSTANT;
@@ -259,8 +266,90 @@ static void gpu_node_output(GPUNode *node, const eGPUType type, GPUNodeLink **li
BLI_addtail(&node->outputs, output);
}
+/* Uniform Attribute Functions */
+
+static int uniform_attr_sort_cmp(const void *a, const void *b)
+{
+ const GPUUniformAttr *attr_a = a, *attr_b = b;
+
+ int cmps = strcmp(attr_a->name, attr_b->name);
+ if (cmps != 0) {
+ return cmps > 0 ? 1 : 0;
+ }
+
+ return (attr_a->use_dupli && !attr_b->use_dupli);
+}
+
+static unsigned int uniform_attr_list_hash(const void *key)
+{
+ const GPUUniformAttrList *attrs = key;
+ return attrs->hash_code;
+}
+
+static bool uniform_attr_list_cmp(const void *a, const void *b)
+{
+ const GPUUniformAttrList *set_a = a, *set_b = b;
+
+ if (set_a->hash_code != set_b->hash_code || set_a->count != set_b->count) {
+ return true;
+ }
+
+ GPUUniformAttr *attr_a = set_a->list.first, *attr_b = set_b->list.first;
+
+ for (; attr_a && attr_b; attr_a = attr_a->next, attr_b = attr_b->next) {
+ if (!STREQ(attr_a->name, attr_b->name) || attr_a->use_dupli != attr_b->use_dupli) {
+ return true;
+ }
+ }
+
+ return attr_a || attr_b;
+}
+
+struct GHash *GPU_uniform_attr_list_hash_new(const char *info)
+{
+ return BLI_ghash_new(uniform_attr_list_hash, uniform_attr_list_cmp, info);
+}
+
+void GPU_uniform_attr_list_copy(GPUUniformAttrList *dest, GPUUniformAttrList *src)
+{
+ dest->count = src->count;
+ dest->hash_code = src->hash_code;
+ BLI_duplicatelist(&dest->list, &src->list);
+}
+
+void GPU_uniform_attr_list_free(GPUUniformAttrList *set)
+{
+ set->count = 0;
+ set->hash_code = 0;
+ BLI_freelistN(&set->list);
+}
+
+void gpu_node_graph_finalize_uniform_attrs(GPUNodeGraph *graph)
+{
+ GPUUniformAttrList *attrs = &graph->uniform_attrs;
+ BLI_assert(attrs->count == BLI_listbase_count(&attrs->list));
+
+ /* Sort the attributes by name to ensure a stable order. */
+ BLI_listbase_sort(&attrs->list, uniform_attr_sort_cmp);
+
+ /* Compute the indices and the hash code. */
+ int next_id = 0;
+ attrs->hash_code = 0;
+
+ LISTBASE_FOREACH (GPUUniformAttr *, attr, &attrs->list) {
+ attr->id = next_id++;
+
+ attrs->hash_code ^= BLI_ghashutil_strhash_p(attr->name);
+
+ if (attr->use_dupli) {
+ attrs->hash_code ^= BLI_ghashutil_uinthash(attr->id);
+ }
+ }
+}
+
/* Attributes and Textures */
+/** Add a new varying attribute of given type and name. Returns NULL if out of slots. */
static GPUMaterialAttribute *gpu_node_graph_add_attribute(GPUNodeGraph *graph,
CustomDataType type,
const char *name)
@@ -296,6 +385,38 @@ static GPUMaterialAttribute *gpu_node_graph_add_attribute(GPUNodeGraph *graph,
return attr;
}
+/** Add a new uniform attribute of given type and name. Returns NULL if out of slots. */
+static GPUUniformAttr *gpu_node_graph_add_uniform_attribute(GPUNodeGraph *graph,
+ const char *name,
+ bool use_dupli)
+{
+ /* Find existing attribute. */
+ GPUUniformAttrList *attrs = &graph->uniform_attrs;
+ GPUUniformAttr *attr = attrs->list.first;
+
+ for (; attr; attr = attr->next) {
+ if (STREQ(attr->name, name) && attr->use_dupli == use_dupli) {
+ break;
+ }
+ }
+
+ /* Add new requested attribute if it's within GPU limits. */
+ if (attr == NULL && attrs->count < GPU_MAX_UNIFORM_ATTR) {
+ attr = MEM_callocN(sizeof(*attr), __func__);
+ STRNCPY(attr->name, name);
+ attr->use_dupli = use_dupli;
+ attr->id = -1;
+ BLI_addtail(&attrs->list, attr);
+ attrs->count++;
+ }
+
+ if (attr != NULL) {
+ attr->users++;
+ }
+
+ return attr;
+}
+
static GPUMaterialTexture *gpu_node_graph_add_texture(GPUNodeGraph *graph,
Image *ima,
ImageUser *iuser,
@@ -369,6 +490,7 @@ GPUNodeLink *GPU_attribute(GPUMaterial *mat, const CustomDataType type, const ch
GPUNodeGraph *graph = gpu_material_node_graph(mat);
GPUMaterialAttribute *attr = gpu_node_graph_add_attribute(graph, type, name);
+ /* Dummy fallback if out of slots. */
if (attr == NULL) {
static const float zero_data[GPU_MAX_CONSTANT_DATA] = {0.0f};
return GPU_constant(zero_data);
@@ -380,6 +502,23 @@ GPUNodeLink *GPU_attribute(GPUMaterial *mat, const CustomDataType type, const ch
return link;
}
+GPUNodeLink *GPU_uniform_attribute(GPUMaterial *mat, const char *name, bool use_dupli)
+{
+ GPUNodeGraph *graph = gpu_material_node_graph(mat);
+ GPUUniformAttr *attr = gpu_node_graph_add_uniform_attribute(graph, name, use_dupli);
+
+ /* Dummy fallback if out of slots. */
+ if (attr == NULL) {
+ static const float zero_data[GPU_MAX_CONSTANT_DATA] = {0.0f};
+ return GPU_constant(zero_data);
+ }
+
+ GPUNodeLink *link = gpu_node_link_create();
+ link->link_type = GPU_NODE_LINK_UNIFORM_ATTR;
+ link->uniform_attr = attr;
+ return link;
+}
+
GPUNodeLink *GPU_constant(const float *num)
{
GPUNodeLink *link = gpu_node_link_create();
@@ -616,6 +755,9 @@ static void gpu_inputs_free(ListBase *inputs)
if (input->source == GPU_SOURCE_ATTR) {
input->attr->users--;
}
+ else if (input->source == GPU_SOURCE_UNIFORM_ATTR) {
+ input->uniform_attr->users--;
+ }
else if (ELEM(input->source, GPU_SOURCE_TEX, GPU_SOURCE_TEX_TILED_MAPPING)) {
input->texture->users--;
}
@@ -671,6 +813,7 @@ void gpu_node_graph_free(GPUNodeGraph *graph)
BLI_freelistN(&graph->volume_grids);
BLI_freelistN(&graph->textures);
BLI_freelistN(&graph->attributes);
+ GPU_uniform_attr_list_free(&graph->uniform_attrs);
}
/* Prune Unused Nodes */
@@ -735,4 +878,13 @@ void gpu_node_graph_prune_unused(GPUNodeGraph *graph)
BLI_freelinkN(&graph->volume_grids, grid);
}
}
+
+ GPUUniformAttrList *uattrs = &graph->uniform_attrs;
+
+ LISTBASE_FOREACH_MUTABLE (GPUUniformAttr *, attr, &uattrs->list) {
+ if (attr->users == 0) {
+ BLI_freelinkN(&uattrs->list, attr);
+ uattrs->count--;
+ }
+ }
}
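
The finalize step above is easiest to see with a worked example: attributes
are sorted by name (dupli variants after non-dupli ones among equal names),
numbered sequentially, and XOR-hashed, so the resulting hash is independent
of the order the shader nodes requested them in. A standalone C sketch of
that property, with qsort() and a djb2 hash standing in for
BLI_listbase_sort() and BLI_ghashutil_strhash_p():

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    static unsigned int strhash(const char *s) /* djb2 stand-in */
    {
      unsigned int h = 5381;
      for (; *s; s++) {
        h = h * 33u + (unsigned char)*s;
      }
      return h;
    }

    static int cmp_names(const void *a, const void *b)
    {
      return strcmp(*(const char *const *)a, *(const char *const *)b);
    }

    int main(void)
    {
      /* Two materials request the same attributes in different orders. */
      const char *mat_a[] = {"scale", "color"};
      const char *mat_b[] = {"color", "scale"};

      qsort(mat_a, 2, sizeof(mat_a[0]), cmp_names);
      qsort(mat_b, 2, sizeof(mat_b[0]), cmp_names);

      unsigned int hash_a = 0, hash_b = 0;
      for (int i = 0; i < 2; i++) {
        /* After sorting, attr->id would be i in both graphs. */
        hash_a ^= strhash(mat_a[i]);
        hash_b ^= strhash(mat_b[i]);
      }
      /* Equal sets give equal hashes, so uniform_attr_list_cmp() reports
       * a match and one UBO layout can be shared. */
      printf("%u %u\n", hash_a, hash_b); /* prints two identical values */
      return 0;
    }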
diff --git a/source/blender/gpu/intern/gpu_node_graph.h b/source/blender/gpu/intern/gpu_node_graph.h
index 7265abf4d65..a0e6298cd92 100644
--- a/source/blender/gpu/intern/gpu_node_graph.h
+++ b/source/blender/gpu/intern/gpu_node_graph.h
@@ -42,6 +42,7 @@ typedef enum eGPUDataSource {
GPU_SOURCE_CONSTANT,
GPU_SOURCE_UNIFORM,
GPU_SOURCE_ATTR,
+ GPU_SOURCE_UNIFORM_ATTR,
GPU_SOURCE_BUILTIN,
GPU_SOURCE_STRUCT,
GPU_SOURCE_TEX,
@@ -53,6 +54,7 @@ typedef enum eGPUDataSource {
typedef enum {
GPU_NODE_LINK_NONE = 0,
GPU_NODE_LINK_ATTR,
+ GPU_NODE_LINK_UNIFORM_ATTR,
GPU_NODE_LINK_BUILTIN,
GPU_NODE_LINK_COLORBAND,
GPU_NODE_LINK_CONSTANT,
@@ -96,6 +98,8 @@ struct GPUNodeLink {
struct GPUOutput *output;
/* GPU_NODE_LINK_ATTR */
struct GPUMaterialAttribute *attr;
+ /* GPU_NODE_LINK_UNIFORM_ATTR */
+ struct GPUUniformAttr *uniform_attr;
/* GPU_NODE_LINK_IMAGE_BLENDER */
struct GPUMaterialTexture *texture;
};
@@ -130,6 +134,8 @@ typedef struct GPUInput {
struct GPUMaterialTexture *texture;
/* GPU_SOURCE_ATTR */
struct GPUMaterialAttribute *attr;
+ /* GPU_SOURCE_UNIFORM_ATTR */
+ struct GPUUniformAttr *uniform_attr;
/* GPU_SOURCE_VOLUME_GRID | GPU_SOURCE_VOLUME_GRID_TRANSFORM */
struct GPUMaterialVolumeGrid *volume_grid;
};
@@ -146,11 +152,15 @@ typedef struct GPUNodeGraph {
ListBase attributes;
ListBase textures;
ListBase volume_grids;
+
+ /* The list of uniform attributes. */
+ GPUUniformAttrList uniform_attrs;
} GPUNodeGraph;
/* Node Graph */
void gpu_node_graph_prune_unused(GPUNodeGraph *graph);
+void gpu_node_graph_finalize_uniform_attrs(GPUNodeGraph *graph);
void gpu_node_graph_free_nodes(GPUNodeGraph *graph);
void gpu_node_graph_free(GPUNodeGraph *graph);
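
The header also exposes GPU_uniform_attr_list_hash_new(), which creates a
GHash keyed by the set's hash_code with uniform_attr_list_cmp() as the
collision check, so the contents of an attribute set (not its pointer) act
as the key. A hedged sketch of deduplicating layouts this way (make_ubo_for()
is a hypothetical placeholder):

    GHash *layouts = GPU_uniform_attr_list_hash_new("uniform attr layouts");
    GPUUniformAttrList *key = GPU_material_uniform_attributes(material);
    if (key && !BLI_ghash_haskey(layouts, key)) {
      GPUUniformAttrList *copy = MEM_callocN(sizeof(*copy), __func__);
      GPU_uniform_attr_list_copy(copy, key);
      BLI_ghash_insert(layouts, copy, make_ubo_for(copy));
    }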
diff --git a/source/blender/gpu/intern/gpu_platform.cc b/source/blender/gpu/intern/gpu_platform.cc
index ad7142878e7..6b9878f2ba4 100644
--- a/source/blender/gpu/intern/gpu_platform.cc
+++ b/source/blender/gpu/intern/gpu_platform.cc
@@ -77,7 +77,7 @@ void GPUPlatformGlobal::create_gpu_name(const char *vendor,
BLI_str_replace_char(gpu_name, '\r', ' ');
}
-void GPUPlatformGlobal::clear(void)
+void GPUPlatformGlobal::clear()
{
MEM_SAFE_FREE(GPG.support_key);
MEM_SAFE_FREE(GPG.gpu_name);
@@ -94,12 +94,12 @@ void GPUPlatformGlobal::clear(void)
using namespace blender::gpu;
-eGPUSupportLevel GPU_platform_support_level(void)
+eGPUSupportLevel GPU_platform_support_level()
{
return GPG.support_level;
}
-const char *GPU_platform_support_level_key(void)
+const char *GPU_platform_support_level_key()
{
return GPG.support_key;
}
diff --git a/source/blender/gpu/intern/gpu_select_sample_query.cc b/source/blender/gpu/intern/gpu_select_sample_query.cc
index 5bbf3bd05d3..6ca811895a5 100644
--- a/source/blender/gpu/intern/gpu_select_sample_query.cc
+++ b/source/blender/gpu/intern/gpu_select_sample_query.cc
@@ -70,7 +70,7 @@ typedef struct GPUSelectQueryState {
eGPUDepthTest depth_test;
} GPUSelectQueryState;
-static GPUSelectQueryState g_query_state = {0};
+static GPUSelectQueryState g_query_state = {false};
void gpu_select_query_begin(
uint (*buffer)[4], uint bufsize, const rcti *input, char mode, int oldhits)
diff --git a/source/blender/gpu/intern/gpu_shader.cc b/source/blender/gpu/intern/gpu_shader.cc
index c13321ed205..49f96cb652c 100644
--- a/source/blender/gpu/intern/gpu_shader.cc
+++ b/source/blender/gpu/intern/gpu_shader.cc
@@ -291,7 +291,7 @@ GPUShader *GPU_shader_create_ex(const char *vertcode,
const char *shname)
{
/* At least a vertex shader and a fragment shader are required. */
- BLI_assert((fragcode != NULL) && (vertcode != NULL));
+ BLI_assert((fragcode != nullptr) && (vertcode != nullptr));
Shader *shader = GPUBackend::get()->shader_alloc(shname);
@@ -342,14 +342,14 @@ GPUShader *GPU_shader_create_ex(const char *vertcode,
shader->geometry_shader_from_glsl(sources);
}
- if (tf_names != NULL && tf_count > 0) {
+ if (tf_names != nullptr && tf_count > 0) {
BLI_assert(tf_type != GPU_SHADER_TFB_NONE);
shader->transform_feedback_names_set(Span<const char *>(tf_names, tf_count), tf_type);
}
if (!shader->finalize()) {
delete shader;
- return NULL;
+ return nullptr;
};
return wrap(shader);
@@ -374,7 +374,7 @@ GPUShader *GPU_shader_create(const char *vertcode,
const char *shname)
{
return GPU_shader_create_ex(
- vertcode, fragcode, geomcode, libcode, defines, GPU_SHADER_TFB_NONE, NULL, 0, shname);
+ vertcode, fragcode, geomcode, libcode, defines, GPU_SHADER_TFB_NONE, nullptr, 0, shname);
}
GPUShader *GPU_shader_create_from_python(const char *vertcode,
@@ -383,17 +383,24 @@ GPUShader *GPU_shader_create_from_python(const char *vertcode,
const char *libcode,
const char *defines)
{
- char *libcodecat = NULL;
+ char *libcodecat = nullptr;
- if (libcode == NULL) {
+ if (libcode == nullptr) {
libcode = datatoc_gpu_shader_colorspace_lib_glsl;
}
else {
libcode = libcodecat = BLI_strdupcat(libcode, datatoc_gpu_shader_colorspace_lib_glsl);
}
- GPUShader *sh = GPU_shader_create_ex(
- vertcode, fragcode, geomcode, libcode, defines, GPU_SHADER_TFB_NONE, NULL, 0, "pyGPUShader");
+ GPUShader *sh = GPU_shader_create_ex(vertcode,
+ fragcode,
+ geomcode,
+ libcode,
+ defines,
+ GPU_SHADER_TFB_NONE,
+ nullptr,
+ 0,
+ "pyGPUShader");
MEM_SAFE_FREE(libcodecat);
return sh;
@@ -402,9 +409,9 @@ GPUShader *GPU_shader_create_from_python(const char *vertcode,
static const char *string_join_array_maybe_alloc(const char **str_arr, bool *r_is_alloc)
{
bool is_alloc = false;
- if (str_arr == NULL) {
+ if (str_arr == nullptr) {
*r_is_alloc = false;
- return NULL;
+ return nullptr;
}
/* Skip empty strings (avoid alloc if we can). */
while (str_arr[0] && str_arr[0][0] == '\0') {
@@ -450,7 +457,7 @@ struct GPUShader *GPU_shader_create_from_arrays_impl(
struct {
const char *str;
bool is_alloc;
- } str_dst[4] = {{0}};
+ } str_dst[4] = {{nullptr}};
const char **str_src[4] = {params->vert, params->frag, params->geom, params->defs};
for (int i = 0; i < ARRAY_SIZE(str_src); i++) {
@@ -461,7 +468,7 @@ struct GPUShader *GPU_shader_create_from_arrays_impl(
BLI_snprintf(name, sizeof(name), "%s_%d", func, line);
GPUShader *sh = GPU_shader_create(
- str_dst[0].str, str_dst[1].str, str_dst[2].str, NULL, str_dst[3].str, name);
+ str_dst[0].str, str_dst[1].str, str_dst[2].str, nullptr, str_dst[3].str, name);
for (int i = 0; i < ARRAY_SIZE(str_dst); i++) {
if (str_dst[i].is_alloc) {
@@ -502,7 +509,7 @@ void GPU_shader_unbind(void)
if (ctx->shader) {
ctx->shader->unbind();
}
- ctx->shader = NULL;
+ ctx->shader = nullptr;
#endif
}
diff --git a/source/blender/gpu/intern/gpu_shader_interface.cc b/source/blender/gpu/intern/gpu_shader_interface.cc
index e5fb8025e7f..81c1e013877 100644
--- a/source/blender/gpu/intern/gpu_shader_interface.cc
+++ b/source/blender/gpu/intern/gpu_shader_interface.cc
@@ -32,12 +32,12 @@
namespace blender::gpu {
-ShaderInterface::ShaderInterface(void)
+ShaderInterface::ShaderInterface()
{
/* TODO(fclem): add unique ID for debugging. */
}
-ShaderInterface::~ShaderInterface(void)
+ShaderInterface::~ShaderInterface()
{
/* Free memory used by name_buffer. */
MEM_freeN(name_buffer_);
@@ -70,14 +70,14 @@ static void sort_input_list(MutableSpan<ShaderInput> dst)
/* Sorts all inputs inside their respective array.
* This is to allow fast hash collision detection.
* See ShaderInterface::input_lookup for more details. */
-void ShaderInterface::sort_inputs(void)
+void ShaderInterface::sort_inputs()
{
sort_input_list(MutableSpan<ShaderInput>(inputs_, attr_len_));
sort_input_list(MutableSpan<ShaderInput>(inputs_ + attr_len_, ubo_len_));
sort_input_list(MutableSpan<ShaderInput>(inputs_ + attr_len_ + ubo_len_, uniform_len_));
}
-void ShaderInterface::debug_print(void)
+void ShaderInterface::debug_print()
{
Span<ShaderInput> attrs = Span<ShaderInput>(inputs_, attr_len_);
Span<ShaderInput> ubos = Span<ShaderInput>(inputs_ + attr_len_, ubo_len_);
diff --git a/source/blender/gpu/intern/gpu_state.cc b/source/blender/gpu/intern/gpu_state.cc
index 0b2e4989a33..407a8dd6e2b 100644
--- a/source/blender/gpu/intern/gpu_state.cc
+++ b/source/blender/gpu/intern/gpu_state.cc
@@ -260,7 +260,7 @@ eGPUStencilTest GPU_stencil_test_get()
}
/* NOTE: Already premultiplied by U.pixelsize. */
-float GPU_line_width_get(void)
+float GPU_line_width_get()
{
GPUStateMutable &state = Context::get()->state_manager->mutable_state;
return state.line_width;
@@ -285,13 +285,13 @@ void GPU_viewport_size_get_i(int coords[4])
Context::get()->active_fb->viewport_get(coords);
}
-bool GPU_depth_mask_get(void)
+bool GPU_depth_mask_get()
{
GPUState &state = Context::get()->state_manager->state;
return (state.write_mask & GPU_WRITE_DEPTH) != 0;
}
-bool GPU_mipmap_enabled(void)
+bool GPU_mipmap_enabled()
{
/* TODO(fclem): this used to be a userdef option. */
return true;
@@ -303,17 +303,17 @@ bool GPU_mipmap_enabled(void)
/** \name Context Utils
* \{ */
-void GPU_flush(void)
+void GPU_flush()
{
Context::get()->flush();
}
-void GPU_finish(void)
+void GPU_finish()
{
Context::get()->finish();
}
-void GPU_apply_state(void)
+void GPU_apply_state()
{
Context::get()->state_manager->apply_state();
}
@@ -328,7 +328,7 @@ void GPU_apply_state(void)
* bgl functions.
* \{ */
-void GPU_bgl_start(void)
+void GPU_bgl_start()
{
Context *ctx = Context::get();
if (!(ctx && ctx->state_manager)) {
@@ -345,7 +345,7 @@ void GPU_bgl_start(void)
}
/* Just turn off the bgl safeguard system. Can be called even without GPU_bgl_start. */
-void GPU_bgl_end(void)
+void GPU_bgl_end()
{
Context *ctx = Context::get();
if (!(ctx && ctx->state_manager)) {
@@ -359,7 +359,7 @@ void GPU_bgl_end(void)
}
}
-bool GPU_bgl_get(void)
+bool GPU_bgl_get()
{
return Context::get()->state_manager->use_bgl;
}
@@ -381,7 +381,7 @@ void GPU_memory_barrier(eGPUBarrier barrier)
/** \name Default State
* \{ */
-StateManager::StateManager(void)
+StateManager::StateManager()
{
/* Set default state. */
state.write_mask = GPU_WRITE_COLOR;
diff --git a/source/blender/gpu/intern/gpu_texture.cc b/source/blender/gpu/intern/gpu_texture.cc
index 99d286c3abd..d134d718cbe 100644
--- a/source/blender/gpu/intern/gpu_texture.cc
+++ b/source/blender/gpu/intern/gpu_texture.cc
@@ -49,14 +49,14 @@ Texture::Texture(const char *name)
}
for (int i = 0; i < ARRAY_SIZE(fb_); i++) {
- fb_[i] = NULL;
+ fb_[i] = nullptr;
}
}
Texture::~Texture()
{
for (int i = 0; i < ARRAY_SIZE(fb_); i++) {
- if (fb_[i] != NULL) {
+ if (fb_[i] != nullptr) {
fb_[i]->attachment_remove(fb_attachment_[i]);
}
}
@@ -142,7 +142,7 @@ bool Texture::init_buffer(GPUVertBuf *vbo, eGPUTextureFormat format)
void Texture::attach_to(FrameBuffer *fb, GPUAttachmentType type)
{
for (int i = 0; i < ARRAY_SIZE(fb_); i++) {
- if (fb_[i] == NULL) {
+ if (fb_[i] == nullptr) {
fb_attachment_[i] = type;
fb_[i] = fb;
return;
@@ -156,7 +156,7 @@ void Texture::detach_from(FrameBuffer *fb)
for (int i = 0; i < ARRAY_SIZE(fb_); i++) {
if (fb_[i] == fb) {
fb_[i]->attachment_remove(fb_attachment_[i]);
- fb_[i] = NULL;
+ fb_[i] = nullptr;
return;
}
}
@@ -226,7 +226,7 @@ static inline GPUTexture *gpu_texture_create(const char *name,
if (!success) {
delete tex;
- return NULL;
+ return nullptr;
}
if (pixels) {
tex->update(data_format, pixels);
@@ -295,7 +295,7 @@ GPUTexture *GPU_texture_create_compressed_2d(
if (!success) {
delete tex;
- return NULL;
+ return nullptr;
}
if (data) {
size_t ofs = 0;
@@ -320,7 +320,7 @@ GPUTexture *GPU_texture_create_from_vertbuf(const char *name, GPUVertBuf *vert)
bool success = tex->init_buffer(vert, tex_format);
if (!success) {
delete tex;
- return NULL;
+ return nullptr;
}
return reinterpret_cast<GPUTexture *>(tex);
}
@@ -383,7 +383,7 @@ void *GPU_texture_read(GPUTexture *tex_, eGPUDataFormat data_format, int miplvl)
*/
void GPU_texture_clear(GPUTexture *tex, eGPUDataFormat data_format, const void *data)
{
- BLI_assert(data != NULL); /* Do not accept NULL as parameter. */
+ BLI_assert(data != nullptr); /* Do not accept NULL as parameter. */
reinterpret_cast<Texture *>(tex)->clear(data_format, data);
}
diff --git a/source/blender/gpu/intern/gpu_uniform_buffer.cc b/source/blender/gpu/intern/gpu_uniform_buffer.cc
index 2dea98f03ca..89c70c47e4a 100644
--- a/source/blender/gpu/intern/gpu_uniform_buffer.cc
+++ b/source/blender/gpu/intern/gpu_uniform_buffer.cc
@@ -73,7 +73,7 @@ static eGPUType get_padded_gpu_type(LinkData *link)
GPUInput *input = (GPUInput *)link->data;
eGPUType gputype = input->type;
/* Unless the vec3 is followed by a float we need to treat it as a vec4. */
- if (gputype == GPU_VEC3 && (link->next != NULL) &&
+ if (gputype == GPU_VEC3 && (link->next != nullptr) &&
(((GPUInput *)link->next->data)->type != GPU_FLOAT)) {
gputype = GPU_VEC4;
}
@@ -106,7 +106,7 @@ static void buffer_from_list_inputs_sort(ListBase *inputs)
BLI_listbase_sort(inputs, inputs_cmp);
/* Creates a lookup table for the different types; */
- LinkData *inputs_lookup[MAX_UBO_GPU_TYPE + 1] = {NULL};
+ LinkData *inputs_lookup[MAX_UBO_GPU_TYPE + 1] = {nullptr};
eGPUType cur_type = static_cast<eGPUType>(MAX_UBO_GPU_TYPE + 1);
LISTBASE_FOREACH (LinkData *, link, inputs) {
@@ -131,21 +131,21 @@ static void buffer_from_list_inputs_sort(ListBase *inputs)
}
/* If there is no GPU_VEC3 there is no need for alignment. */
- if (inputs_lookup[GPU_VEC3] == NULL) {
+ if (inputs_lookup[GPU_VEC3] == nullptr) {
return;
}
LinkData *link = inputs_lookup[GPU_VEC3];
- while (link != NULL && ((GPUInput *)link->data)->type == GPU_VEC3) {
+ while (link != nullptr && ((GPUInput *)link->data)->type == GPU_VEC3) {
LinkData *link_next = link->next;
/* If GPU_VEC3 is followed by nothing or a GPU_FLOAT, no need for alignment. */
- if ((link_next == NULL) || ((GPUInput *)link_next->data)->type == GPU_FLOAT) {
+ if ((link_next == nullptr) || ((GPUInput *)link_next->data)->type == GPU_FLOAT) {
break;
}
/* If there is a float, move it next to current vec3. */
- if (inputs_lookup[GPU_FLOAT] != NULL) {
+ if (inputs_lookup[GPU_FLOAT] != nullptr) {
LinkData *float_input = inputs_lookup[GPU_FLOAT];
inputs_lookup[GPU_FLOAT] = float_input->next;
@@ -195,7 +195,7 @@ GPUUniformBuf *GPU_uniformbuf_create_ex(size_t size, const void *data, const cha
{
UniformBuf *ubo = GPUBackend::get()->uniformbuf_alloc(size, name);
/* Direct init. */
- if (data != NULL) {
+ if (data != nullptr) {
ubo->update(data);
}
return wrap(ubo);
@@ -211,7 +211,7 @@ GPUUniformBuf *GPU_uniformbuf_create_from_list(ListBase *inputs, const char *nam
{
/* There is no point on creating an UBO if there is no arguments. */
if (BLI_listbase_is_empty(inputs)) {
- return NULL;
+ return nullptr;
}
buffer_from_list_inputs_sort(inputs);
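
For context on the sorting pass above: under std140 a vec3 has 16-byte
alignment but only 12 bytes of data, and in this type system the only
following member that can pack into its tail is a float. The declarations
below illustrate the two layouts involved (member names are illustrative):

    /* A vec3 followed by anything but a float leaves a 4-byte hole,
     * which is why get_padded_gpu_type() promotes it to vec4: */
    vec3 color;    /* bytes 0..11, bytes 12..15 wasted   */
    vec2 uv_shift; /* aligns to 16 -> starts at byte 16  */

    /* The sorter instead moves a lone float in behind the vec3,
     * filling the hole so CPU and GPU layouts agree trivially: */
    vec3 color;    /* bytes 0..11         */
    float radius;  /* byte 12, no padding */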
diff --git a/source/blender/gpu/intern/gpu_vertex_buffer.cc b/source/blender/gpu/intern/gpu_vertex_buffer.cc
index 29fa8a89d27..09b9eba9f95 100644
--- a/source/blender/gpu/intern/gpu_vertex_buffer.cc
+++ b/source/blender/gpu/intern/gpu_vertex_buffer.cc
@@ -66,13 +66,13 @@ void VertBuf::init(const GPUVertFormat *format, GPUUsageType usage)
flag |= GPU_VERTBUF_INIT;
}
-void VertBuf::clear(void)
+void VertBuf::clear()
{
this->release_data();
flag = GPU_VERTBUF_INVALID;
}
-VertBuf *VertBuf::duplicate(void)
+VertBuf *VertBuf::duplicate()
{
VertBuf *dst = GPUBackend::get()->vertbuf_alloc();
/* Full copy. */
@@ -107,7 +107,7 @@ void VertBuf::resize(uint vert_len)
flag |= GPU_VERTBUF_DATA_DIRTY;
}
-void VertBuf::upload(void)
+void VertBuf::upload()
{
this->upload_data();
}
@@ -125,7 +125,7 @@ using namespace blender::gpu;
/* -------- Creation & deletion -------- */
-GPUVertBuf *GPU_vertbuf_calloc(void)
+GPUVertBuf *GPU_vertbuf_calloc()
{
return wrap(GPUBackend::get()->vertbuf_alloc());
}
@@ -191,7 +191,7 @@ void GPU_vertbuf_data_resize(GPUVertBuf *verts, uint v_len)
void GPU_vertbuf_data_len_set(GPUVertBuf *verts_, uint v_len)
{
VertBuf *verts = unwrap(verts_);
- BLI_assert(verts->data != NULL); /* Only for dynamic data. */
+ BLI_assert(verts->data != nullptr); /* Only for dynamic data. */
BLI_assert(v_len <= verts->vertex_alloc);
verts->vertex_len = v_len;
}
@@ -203,7 +203,7 @@ void GPU_vertbuf_attr_set(GPUVertBuf *verts_, uint a_idx, uint v_idx, const void
const GPUVertAttr *a = &format->attrs[a_idx];
BLI_assert(v_idx < verts->vertex_alloc);
BLI_assert(a_idx < format->attr_len);
- BLI_assert(verts->data != NULL);
+ BLI_assert(verts->data != nullptr);
verts->flag |= GPU_VERTBUF_DATA_DIRTY;
memcpy(verts->data + a->offset + v_idx * format->stride, data, a->sz);
}
@@ -225,7 +225,7 @@ void GPU_vertbuf_vert_set(GPUVertBuf *verts_, uint v_idx, const void *data)
VertBuf *verts = unwrap(verts_);
const GPUVertFormat *format = &verts->format;
BLI_assert(v_idx < verts->vertex_alloc);
- BLI_assert(verts->data != NULL);
+ BLI_assert(verts->data != nullptr);
verts->flag |= GPU_VERTBUF_DATA_DIRTY;
memcpy(verts->data + v_idx * format->stride, data, format->stride);
}
@@ -236,7 +236,7 @@ void GPU_vertbuf_attr_fill_stride(GPUVertBuf *verts_, uint a_idx, uint stride, c
const GPUVertFormat *format = &verts->format;
const GPUVertAttr *a = &format->attrs[a_idx];
BLI_assert(a_idx < format->attr_len);
- BLI_assert(verts->data != NULL);
+ BLI_assert(verts->data != nullptr);
verts->flag |= GPU_VERTBUF_DATA_DIRTY;
const uint vertex_len = verts->vertex_len;
@@ -259,7 +259,7 @@ void GPU_vertbuf_attr_get_raw_data(GPUVertBuf *verts_, uint a_idx, GPUVertBufRaw
const GPUVertFormat *format = &verts->format;
const GPUVertAttr *a = &format->attrs[a_idx];
BLI_assert(a_idx < format->attr_len);
- BLI_assert(verts->data != NULL);
+ BLI_assert(verts->data != nullptr);
verts->flag |= GPU_VERTBUF_DATA_DIRTY;
verts->flag &= ~GPU_VERTBUF_DATA_UPLOADED;
@@ -313,7 +313,7 @@ GPUVertBufStatus GPU_vertbuf_get_status(const GPUVertBuf *verts)
return unwrap(verts)->flag;
}
-uint GPU_vertbuf_get_memory_usage(void)
+uint GPU_vertbuf_get_memory_usage()
{
return VertBuf::memory_usage;
}
diff --git a/source/blender/gpu/intern/gpu_viewport.c b/source/blender/gpu/intern/gpu_viewport.c
index 9063c8bdbce..188c8786665 100644
--- a/source/blender/gpu/intern/gpu_viewport.c
+++ b/source/blender/gpu/intern/gpu_viewport.c
@@ -1023,6 +1023,9 @@ void GPU_viewport_free(GPUViewport *viewport)
}
BLI_memblock_destroy(viewport->vmempool.images, NULL);
}
+ if (viewport->vmempool.obattrs_ubo_pool != NULL) {
+ DRW_uniform_attrs_pool_free(viewport->vmempool.obattrs_ubo_pool);
+ }
for (int i = 0; i < viewport->vmempool.ubo_len; i++) {
GPU_uniformbuf_free(viewport->vmempool.matrices_ubo[i]);