git.blender.org/blender.git
author    Clément Foucault <foucault.clem@gmail.com>  2018-07-18 01:12:21 +0300
committer Clément Foucault <foucault.clem@gmail.com>  2018-07-18 12:49:15 +0300
commit    8cd7828792419fb4eac9a2a477968535b4c71535 (patch)
tree      8fc733149fe07b7d9edd4b8b1e709519b4481887 /source/blender/gpu/intern
parent    247ad2034de2c33a6d9cb7d3b6f1ef7ffa5b859d (diff)
GWN: Port to GPU module: Replace GWN prefix by GPU
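
The rename is mechanical: struct typedefs drop the Gwn_ prefix and underscore (Gwn_Batch -> GPUBatch), function and enum prefixes swap GWN_ for GPU_, and internal lower-case gwn_ symbols become gpu_. A minimal before/after sketch of the convention (vbo stands in for an existing vertex buffer; it is not part of this diff):

    /* Gawain naming (before) */
    Gwn_VertFormat format = {0};
    uint pos = GWN_vertformat_attr_add(&format, "pos", GWN_COMP_F32, 3, GWN_FETCH_FLOAT);
    Gwn_Batch *batch = GWN_batch_create_ex(GWN_PRIM_TRIS, vbo, NULL, GWN_BATCH_OWNS_VBO);

    /* GPU module naming (after) */
    GPUVertFormat format = {0};
    uint pos = GPU_vertformat_attr_add(&format, "pos", GPU_COMP_F32, 3, GPU_FETCH_FLOAT);
    GPUBatch *batch = GPU_batch_create_ex(GPU_PRIM_TRIS, vbo, NULL, GPU_BATCH_OWNS_VBO);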
Diffstat (limited to 'source/blender/gpu/intern')
-rw-r--r--  source/blender/gpu/intern/gpu_attr_binding.c            26
-rw-r--r--  source/blender/gpu/intern/gpu_attr_binding_private.h    16
-rw-r--r--  source/blender/gpu/intern/gpu_batch.c                  240
-rw-r--r--  source/blender/gpu/intern/gpu_batch_presets.c           76
-rw-r--r--  source/blender/gpu/intern/gpu_batch_private.h           16
-rw-r--r--  source/blender/gpu/intern/gpu_batch_utils.c             54
-rw-r--r--  source/blender/gpu/intern/gpu_buffer_id.cpp              8
-rw-r--r--  source/blender/gpu/intern/gpu_buffers.c                154
-rw-r--r--  source/blender/gpu/intern/gpu_codegen.c                  2
-rw-r--r--  source/blender/gpu/intern/gpu_element.c                120
-rw-r--r--  source/blender/gpu/intern/gpu_immediate.c              170
-rw-r--r--  source/blender/gpu/intern/gpu_immediate_util.c          50
-rw-r--r--  source/blender/gpu/intern/gpu_matrix.c                  14
-rw-r--r--  source/blender/gpu/intern/gpu_primitive.c               60
-rw-r--r--  source/blender/gpu/intern/gpu_primitive_private.h       12
-rw-r--r--  source/blender/gpu/intern/gpu_shader.c                  20
-rw-r--r--  source/blender/gpu/intern/gpu_shader_interface.c       154
-rw-r--r--  source/blender/gpu/intern/gpu_shader_private.h           2
-rw-r--r--  source/blender/gpu/intern/gpu_texture.c                 20
-rw-r--r--  source/blender/gpu/intern/gpu_vertex_array_id.cpp       40
-rw-r--r--  source/blender/gpu/intern/gpu_vertex_buffer.c           98
-rw-r--r--  source/blender/gpu/intern/gpu_vertex_format.c           96
-rw-r--r--  source/blender/gpu/intern/gpu_vertex_format_private.h   14
-rw-r--r--  source/blender/gpu/intern/gpu_viewport.c                 4
24 files changed, 733 insertions(+), 733 deletions(-)
diff --git a/source/blender/gpu/intern/gpu_attr_binding.c b/source/blender/gpu/intern/gpu_attr_binding.c
index e7eba369335..9ac38578792 100644
--- a/source/blender/gpu/intern/gpu_attr_binding.c
+++ b/source/blender/gpu/intern/gpu_attr_binding.c
@@ -23,10 +23,10 @@
* ***** END GPL LICENSE BLOCK *****
*/
-/** \file blender/gpu/intern/gwn_attr_binding.c
+/** \file blender/gpu/intern/gpu_attr_binding.c
* \ingroup gpu
*
- * Gawain vertex attribute binding
+ * GPU vertex attribute binding
*/
#include "GPU_attr_binding.h"
@@ -34,30 +34,30 @@
#include <stddef.h>
#include <stdlib.h>
-#if GWN_VERT_ATTR_MAX_LEN != 16
- #error "attrib binding code assumes GWN_VERT_ATTR_MAX_LEN = 16"
+#if GPU_VERT_ATTR_MAX_LEN != 16
+ #error "attrib binding code assumes GPU_VERT_ATTR_MAX_LEN = 16"
#endif
-void AttribBinding_clear(Gwn_AttrBinding* binding)
+void AttribBinding_clear(GPUAttrBinding* binding)
{
binding->loc_bits = 0;
binding->enabled_bits = 0;
}
-uint read_attrib_location(const Gwn_AttrBinding* binding, uint a_idx)
+uint read_attrib_location(const GPUAttrBinding* binding, uint a_idx)
{
#if TRUST_NO_ONE
- assert(a_idx < GWN_VERT_ATTR_MAX_LEN);
+ assert(a_idx < GPU_VERT_ATTR_MAX_LEN);
assert(binding->enabled_bits & (1 << a_idx));
#endif
return (binding->loc_bits >> (4 * a_idx)) & 0xF;
}
-static void write_attrib_location(Gwn_AttrBinding* binding, uint a_idx, uint location)
+static void write_attrib_location(GPUAttrBinding* binding, uint a_idx, uint location)
{
#if TRUST_NO_ONE
- assert(a_idx < GWN_VERT_ATTR_MAX_LEN);
- assert(location < GWN_VERT_ATTR_MAX_LEN);
+ assert(a_idx < GPU_VERT_ATTR_MAX_LEN);
+ assert(location < GPU_VERT_ATTR_MAX_LEN);
#endif
const uint shift = 4 * a_idx;
const uint64_t mask = ((uint64_t)0xF) << shift;
@@ -67,14 +67,14 @@ static void write_attrib_location(Gwn_AttrBinding* binding, uint a_idx, uint loc
binding->enabled_bits |= 1 << a_idx;
}
-void get_attrib_locations(const Gwn_VertFormat* format, Gwn_AttrBinding* binding, const Gwn_ShaderInterface* shaderface)
+void get_attrib_locations(const GPUVertFormat* format, GPUAttrBinding* binding, const GPUShaderInterface* shaderface)
{
AttribBinding_clear(binding);
for (uint a_idx = 0; a_idx < format->attr_len; ++a_idx) {
- const Gwn_VertAttr* a = format->attribs + a_idx;
+ const GPUVertAttr* a = format->attribs + a_idx;
for (uint n_idx = 0; n_idx < a->name_len; ++n_idx) {
- const Gwn_ShaderInput* input = GWN_shaderinterface_attr(shaderface, a->name[n_idx]);
+ const GPUShaderInput* input = GPU_shaderinterface_attr(shaderface, a->name[n_idx]);
#if TRUST_NO_ONE
assert(input != NULL);
/* TODO: make this a recoverable runtime error? indicates mismatch between vertex format and program */
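
For orientation, since only the renames are shown here: this file packs each attribute's shader location into four bits of binding->loc_bits, with binding->enabled_bits flagging which slots are valid — hence the #error guard above, since 16 attributes x 4 bits fill a 64-bit field and every location must itself be < 16. A standalone sketch of the same packing scheme (names are illustrative, not Blender's):

    #include <assert.h>
    #include <stdint.h>

    /* Store a 4-bit location for attribute a_idx, mirroring write_attrib_location(). */
    static void write_loc(uint64_t *loc_bits, uint16_t *enabled_bits, uint32_t a_idx, uint32_t location)
    {
    	assert(a_idx < 16 && location < 16);
    	const uint32_t shift = 4 * a_idx;
    	*loc_bits = (*loc_bits & ~((uint64_t)0xF << shift)) | ((uint64_t)location << shift);
    	*enabled_bits |= 1u << a_idx;
    }

    /* Mirror of read_attrib_location(). */
    static uint32_t read_loc(uint64_t loc_bits, uint32_t a_idx)
    {
    	return (loc_bits >> (4 * a_idx)) & 0xF;
    }

    /* e.g. write_loc(&bits, &en, 2, 5) sets bits 8..11 to 5, so read_loc(bits, 2) == 5. */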
diff --git a/source/blender/gpu/intern/gpu_attr_binding_private.h b/source/blender/gpu/intern/gpu_attr_binding_private.h
index 0e0bf89178a..240509de0d4 100644
--- a/source/blender/gpu/intern/gpu_attr_binding_private.h
+++ b/source/blender/gpu/intern/gpu_attr_binding_private.h
@@ -23,21 +23,21 @@
* ***** END GPL LICENSE BLOCK *****
*/
-/** \file blender/gpu/gwn_attr_binding_private.h
+/** \file blender/gpu/intern/gpu_attr_binding_private.h
* \ingroup gpu
*
- * Gawain vertex attribute binding
+ * GPU vertex attribute binding
*/
-#ifndef __GWN_ATTR_BINDING_PRIVATE_H__
-#define __GWN_ATTR_BINDING_PRIVATE_H__
+#ifndef __GPU_ATTR_BINDING_PRIVATE_H__
+#define __GPU_ATTR_BINDING_PRIVATE_H__
#include "GPU_vertex_format.h"
#include "GPU_shader_interface.h"
-void AttribBinding_clear(Gwn_AttrBinding*);
+void AttribBinding_clear(GPUAttrBinding*);
-void get_attrib_locations(const Gwn_VertFormat*, Gwn_AttrBinding*, const Gwn_ShaderInterface*);
-unsigned read_attrib_location(const Gwn_AttrBinding*, unsigned a_idx);
+void get_attrib_locations(const GPUVertFormat*, GPUAttrBinding*, const GPUShaderInterface*);
+unsigned read_attrib_location(const GPUAttrBinding*, unsigned a_idx);
-#endif /* __GWN_ATTR_BINDING_PRIVATE_H__ */
+#endif /* __GPU_ATTR_BINDING_PRIVATE_H__ */
diff --git a/source/blender/gpu/intern/gpu_batch.c b/source/blender/gpu/intern/gpu_batch.c
index 90f30930884..4d455f6f464 100644
--- a/source/blender/gpu/intern/gpu_batch.c
+++ b/source/blender/gpu/intern/gpu_batch.c
@@ -23,10 +23,10 @@
* ***** END GPL LICENSE BLOCK *****
*/
-/** \file blender/gpu/intern/gwn_batch.c
+/** \file blender/gpu/intern/gpu_batch.c
* \ingroup gpu
*
- * Gawain geometry batch
+ * GPU geometry batch
* Contains VAOs + VBOs + Shader representing a drawable entity.
*/
@@ -44,9 +44,9 @@
#include <stdlib.h>
#include <string.h>
-static void batch_update_program_bindings(Gwn_Batch* batch, uint v_first);
+static void batch_update_program_bindings(GPUBatch* batch, uint v_first);
-void gwn_batch_vao_cache_clear(Gwn_Batch* batch)
+void GPU_batch_vao_cache_clear(GPUBatch* batch)
{
if (batch->context == NULL) {
return;
@@ -54,45 +54,45 @@ void gwn_batch_vao_cache_clear(Gwn_Batch* batch)
if (batch->is_dynamic_vao_count) {
for (int i = 0; i < batch->dynamic_vaos.count; ++i) {
if (batch->dynamic_vaos.vao_ids[i]) {
- GWN_vao_free(batch->dynamic_vaos.vao_ids[i], batch->context);
+ GPU_vao_free(batch->dynamic_vaos.vao_ids[i], batch->context);
}
if (batch->dynamic_vaos.interfaces[i]) {
- GWN_shaderinterface_remove_batch_ref((Gwn_ShaderInterface *)batch->dynamic_vaos.interfaces[i], batch);
+ GPU_shaderinterface_remove_batch_ref((GPUShaderInterface *)batch->dynamic_vaos.interfaces[i], batch);
}
}
free(batch->dynamic_vaos.interfaces);
free(batch->dynamic_vaos.vao_ids);
}
else {
- for (int i = 0; i < GWN_BATCH_VAO_STATIC_LEN; ++i) {
+ for (int i = 0; i < GPU_BATCH_VAO_STATIC_LEN; ++i) {
if (batch->static_vaos.vao_ids[i]) {
- GWN_vao_free(batch->static_vaos.vao_ids[i], batch->context);
+ GPU_vao_free(batch->static_vaos.vao_ids[i], batch->context);
}
if (batch->static_vaos.interfaces[i]) {
- GWN_shaderinterface_remove_batch_ref((Gwn_ShaderInterface *)batch->static_vaos.interfaces[i], batch);
+ GPU_shaderinterface_remove_batch_ref((GPUShaderInterface *)batch->static_vaos.interfaces[i], batch);
}
}
}
batch->is_dynamic_vao_count = false;
- for (int i = 0; i < GWN_BATCH_VAO_STATIC_LEN; ++i) {
+ for (int i = 0; i < GPU_BATCH_VAO_STATIC_LEN; ++i) {
batch->static_vaos.vao_ids[i] = 0;
batch->static_vaos.interfaces[i] = NULL;
}
- gwn_context_remove_batch(batch->context, batch);
+ gpu_context_remove_batch(batch->context, batch);
batch->context = NULL;
}
-Gwn_Batch* GWN_batch_create_ex(
- Gwn_PrimType prim_type, Gwn_VertBuf* verts, Gwn_IndexBuf* elem,
+GPUBatch* GPU_batch_create_ex(
+ GPUPrimType prim_type, GPUVertBuf* verts, GPUIndexBuf* elem,
uint owns_flag)
{
- Gwn_Batch* batch = calloc(1, sizeof(Gwn_Batch));
- GWN_batch_init_ex(batch, prim_type, verts, elem, owns_flag);
+ GPUBatch* batch = calloc(1, sizeof(GPUBatch));
+ GPU_batch_init_ex(batch, prim_type, verts, elem, owns_flag);
return batch;
}
-void GWN_batch_init_ex(
- Gwn_Batch* batch, Gwn_PrimType prim_type, Gwn_VertBuf* verts, Gwn_IndexBuf* elem,
+void GPU_batch_init_ex(
+ GPUBatch* batch, GPUPrimType prim_type, GPUVertBuf* verts, GPUIndexBuf* elem,
uint owns_flag)
{
#if TRUST_NO_ONE
@@ -100,49 +100,49 @@ void GWN_batch_init_ex(
#endif
batch->verts[0] = verts;
- for (int v = 1; v < GWN_BATCH_VBO_MAX_LEN; ++v) {
+ for (int v = 1; v < GPU_BATCH_VBO_MAX_LEN; ++v) {
batch->verts[v] = NULL;
}
batch->inst = NULL;
batch->elem = elem;
batch->gl_prim_type = convert_prim_type_to_gl(prim_type);
- batch->phase = GWN_BATCH_READY_TO_DRAW;
+ batch->phase = GPU_BATCH_READY_TO_DRAW;
batch->is_dynamic_vao_count = false;
batch->owns_flag = owns_flag;
batch->free_callback = NULL;
}
/* This will share the VBOs with the new batch. */
-Gwn_Batch* GWN_batch_duplicate(Gwn_Batch* batch_src)
+GPUBatch* GPU_batch_duplicate(GPUBatch* batch_src)
{
- Gwn_Batch* batch = GWN_batch_create_ex(GWN_PRIM_POINTS, batch_src->verts[0], batch_src->elem, 0);
+ GPUBatch* batch = GPU_batch_create_ex(GPU_PRIM_POINTS, batch_src->verts[0], batch_src->elem, 0);
batch->gl_prim_type = batch_src->gl_prim_type;
- for (int v = 1; v < GWN_BATCH_VBO_MAX_LEN; ++v) {
+ for (int v = 1; v < GPU_BATCH_VBO_MAX_LEN; ++v) {
batch->verts[v] = batch_src->verts[v];
}
return batch;
}
-void GWN_batch_discard(Gwn_Batch* batch)
+void GPU_batch_discard(GPUBatch* batch)
{
- if (batch->owns_flag & GWN_BATCH_OWNS_INDEX) {
- GWN_indexbuf_discard(batch->elem);
+ if (batch->owns_flag & GPU_BATCH_OWNS_INDEX) {
+ GPU_indexbuf_discard(batch->elem);
}
- if (batch->owns_flag & GWN_BATCH_OWNS_INSTANCES) {
- GWN_vertbuf_discard(batch->inst);
+ if (batch->owns_flag & GPU_BATCH_OWNS_INSTANCES) {
+ GPU_vertbuf_discard(batch->inst);
}
- if ((batch->owns_flag & ~GWN_BATCH_OWNS_INDEX) != 0) {
- for (int v = 0; v < GWN_BATCH_VBO_MAX_LEN; ++v) {
+ if ((batch->owns_flag & ~GPU_BATCH_OWNS_INDEX) != 0) {
+ for (int v = 0; v < GPU_BATCH_VBO_MAX_LEN; ++v) {
if (batch->verts[v] == NULL) {
break;
}
if (batch->owns_flag & (1 << v)) {
- GWN_vertbuf_discard(batch->verts[v]);
+ GPU_vertbuf_discard(batch->verts[v]);
}
}
}
- gwn_batch_vao_cache_clear(batch);
+ GPU_batch_vao_cache_clear(batch);
if (batch->free_callback) {
batch->free_callback(batch, batch->callback_data);
@@ -150,42 +150,42 @@ void GWN_batch_discard(Gwn_Batch* batch)
free(batch);
}
-void GWN_batch_callback_free_set(Gwn_Batch* batch, void (*callback)(Gwn_Batch*, void*), void* user_data)
+void GPU_batch_callback_free_set(GPUBatch* batch, void (*callback)(GPUBatch*, void*), void* user_data)
{
batch->free_callback = callback;
batch->callback_data = user_data;
}
-void GWN_batch_instbuf_set(Gwn_Batch* batch, Gwn_VertBuf* inst, bool own_vbo)
+void GPU_batch_instbuf_set(GPUBatch* batch, GPUVertBuf* inst, bool own_vbo)
{
#if TRUST_NO_ONE
assert(inst != NULL);
#endif
/* redo the bindings */
- gwn_batch_vao_cache_clear(batch);
+ GPU_batch_vao_cache_clear(batch);
- if (batch->inst != NULL && (batch->owns_flag & GWN_BATCH_OWNS_INSTANCES)) {
- GWN_vertbuf_discard(batch->inst);
+ if (batch->inst != NULL && (batch->owns_flag & GPU_BATCH_OWNS_INSTANCES)) {
+ GPU_vertbuf_discard(batch->inst);
}
batch->inst = inst;
if (own_vbo) {
- batch->owns_flag |= GWN_BATCH_OWNS_INSTANCES;
+ batch->owns_flag |= GPU_BATCH_OWNS_INSTANCES;
}
else {
- batch->owns_flag &= ~GWN_BATCH_OWNS_INSTANCES;
+ batch->owns_flag &= ~GPU_BATCH_OWNS_INSTANCES;
}
}
/* Returns the index of verts in the batch. */
-int GWN_batch_vertbuf_add_ex(
- Gwn_Batch* batch, Gwn_VertBuf* verts,
+int GPU_batch_vertbuf_add_ex(
+ GPUBatch* batch, GPUVertBuf* verts,
bool own_vbo)
{
/* redo the bindings */
- gwn_batch_vao_cache_clear(batch);
+ GPU_batch_vao_cache_clear(batch);
- for (uint v = 0; v < GWN_BATCH_VBO_MAX_LEN; ++v) {
+ for (uint v = 0; v < GPU_BATCH_VBO_MAX_LEN; ++v) {
if (batch->verts[v] == NULL) {
#if TRUST_NO_ONE
/* for now all VertexBuffers must have same vertex_len */
@@ -199,14 +199,14 @@ int GWN_batch_vertbuf_add_ex(
}
}
- /* we only make it this far if there is no room for another Gwn_VertBuf */
+ /* we only make it this far if there is no room for another GPUVertBuf */
#if TRUST_NO_ONE
assert(false);
#endif
return -1;
}
-static GLuint batch_vao_get(Gwn_Batch *batch)
+static GLuint batch_vao_get(GPUBatch *batch)
{
/* Search through cache */
if (batch->is_dynamic_vao_count) {
@@ -215,22 +215,22 @@ static GLuint batch_vao_get(Gwn_Batch *batch)
return batch->dynamic_vaos.vao_ids[i];
}
else {
- for (int i = 0; i < GWN_BATCH_VAO_STATIC_LEN; ++i)
+ for (int i = 0; i < GPU_BATCH_VAO_STATIC_LEN; ++i)
if (batch->static_vaos.interfaces[i] == batch->interface)
return batch->static_vaos.vao_ids[i];
}
/* Set context of this batch.
- * It will be bound to it until gwn_batch_vao_cache_clear is called.
+ * It will be bound to it until GPU_batch_vao_cache_clear is called.
* Until then it can only be drawn with this context. */
if (batch->context == NULL) {
- batch->context = GWN_context_active_get();
- gwn_context_add_batch(batch->context, batch);
+ batch->context = GPU_context_active_get();
+ gpu_context_add_batch(batch->context, batch);
}
#if TRUST_NO_ONE
else {
/* Make sure you are not trying to draw this batch in another context. */
- assert(batch->context == GWN_context_active_get());
+ assert(batch->context == GPU_context_active_get());
}
#endif
@@ -238,25 +238,25 @@ static GLuint batch_vao_get(Gwn_Batch *batch)
GLuint new_vao = 0;
if (!batch->is_dynamic_vao_count) {
int i; /* find first unused slot */
- for (i = 0; i < GWN_BATCH_VAO_STATIC_LEN; ++i)
+ for (i = 0; i < GPU_BATCH_VAO_STATIC_LEN; ++i)
if (batch->static_vaos.vao_ids[i] == 0)
break;
- if (i < GWN_BATCH_VAO_STATIC_LEN) {
+ if (i < GPU_BATCH_VAO_STATIC_LEN) {
batch->static_vaos.interfaces[i] = batch->interface;
- batch->static_vaos.vao_ids[i] = new_vao = GWN_vao_alloc();
+ batch->static_vaos.vao_ids[i] = new_vao = GPU_vao_alloc();
}
else {
/* Not enough place switch to dynamic. */
batch->is_dynamic_vao_count = true;
/* Erase previous entries, they will be added back if drawn again. */
- for (int j = 0; j < GWN_BATCH_VAO_STATIC_LEN; ++j) {
- GWN_shaderinterface_remove_batch_ref((Gwn_ShaderInterface*)batch->static_vaos.interfaces[j], batch);
- GWN_vao_free(batch->static_vaos.vao_ids[j], batch->context);
+ for (int j = 0; j < GPU_BATCH_VAO_STATIC_LEN; ++j) {
+ GPU_shaderinterface_remove_batch_ref((GPUShaderInterface*)batch->static_vaos.interfaces[j], batch);
+ GPU_vao_free(batch->static_vaos.vao_ids[j], batch->context);
}
/* Init dynamic arrays and let the branch below set the values. */
- batch->dynamic_vaos.count = GWN_BATCH_VAO_DYN_ALLOC_COUNT;
- batch->dynamic_vaos.interfaces = calloc(batch->dynamic_vaos.count, sizeof(Gwn_ShaderInterface*));
+ batch->dynamic_vaos.count = GPU_BATCH_VAO_DYN_ALLOC_COUNT;
+ batch->dynamic_vaos.interfaces = calloc(batch->dynamic_vaos.count, sizeof(GPUShaderInterface*));
batch->dynamic_vaos.vao_ids = calloc(batch->dynamic_vaos.count, sizeof(GLuint));
}
}
@@ -270,17 +270,17 @@ static GLuint batch_vao_get(Gwn_Batch *batch)
if (i == batch->dynamic_vaos.count) {
/* Not enough place, realloc the array. */
i = batch->dynamic_vaos.count;
- batch->dynamic_vaos.count += GWN_BATCH_VAO_DYN_ALLOC_COUNT;
- batch->dynamic_vaos.interfaces = realloc(batch->dynamic_vaos.interfaces, sizeof(Gwn_ShaderInterface*) * batch->dynamic_vaos.count);
+ batch->dynamic_vaos.count += GPU_BATCH_VAO_DYN_ALLOC_COUNT;
+ batch->dynamic_vaos.interfaces = realloc(batch->dynamic_vaos.interfaces, sizeof(GPUShaderInterface*) * batch->dynamic_vaos.count);
batch->dynamic_vaos.vao_ids = realloc(batch->dynamic_vaos.vao_ids, sizeof(GLuint) * batch->dynamic_vaos.count);
- memset(batch->dynamic_vaos.interfaces + i, 0, sizeof(Gwn_ShaderInterface*) * GWN_BATCH_VAO_DYN_ALLOC_COUNT);
- memset(batch->dynamic_vaos.vao_ids + i, 0, sizeof(GLuint) * GWN_BATCH_VAO_DYN_ALLOC_COUNT);
+ memset(batch->dynamic_vaos.interfaces + i, 0, sizeof(GPUShaderInterface*) * GPU_BATCH_VAO_DYN_ALLOC_COUNT);
+ memset(batch->dynamic_vaos.vao_ids + i, 0, sizeof(GLuint) * GPU_BATCH_VAO_DYN_ALLOC_COUNT);
}
batch->dynamic_vaos.interfaces[i] = batch->interface;
- batch->dynamic_vaos.vao_ids[i] = new_vao = GWN_vao_alloc();
+ batch->dynamic_vaos.vao_ids[i] = new_vao = GPU_vao_alloc();
}
- GWN_shaderinterface_add_batch_ref((Gwn_ShaderInterface*)batch->interface, batch);
+ GPU_shaderinterface_add_batch_ref((GPUShaderInterface*)batch->interface, batch);
#if TRUST_NO_ONE
assert(new_vao != 0);
@@ -294,7 +294,7 @@ static GLuint batch_vao_get(Gwn_Batch *batch)
return new_vao;
}
-void GWN_batch_program_set_no_use(Gwn_Batch* batch, uint32_t program, const Gwn_ShaderInterface* shaderface)
+void GPU_batch_program_set_no_use(GPUBatch* batch, uint32_t program, const GPUShaderInterface* shaderface)
{
#if TRUST_NO_ONE
assert(glIsProgram(shaderface->program));
@@ -305,18 +305,18 @@ void GWN_batch_program_set_no_use(Gwn_Batch* batch, uint32_t program, const Gwn_
batch->vao_id = batch_vao_get(batch);
}
-void GWN_batch_program_set(Gwn_Batch* batch, uint32_t program, const Gwn_ShaderInterface* shaderface)
+void GPU_batch_program_set(GPUBatch* batch, uint32_t program, const GPUShaderInterface* shaderface)
{
- GWN_batch_program_set_no_use(batch, program, shaderface);
- GWN_batch_program_use_begin(batch); /* hack! to make Batch_Uniform* simpler */
+ GPU_batch_program_set_no_use(batch, program, shaderface);
+ GPU_batch_program_use_begin(batch); /* hack! to make Batch_Uniform* simpler */
}
-void gwn_batch_remove_interface_ref(Gwn_Batch* batch, const Gwn_ShaderInterface* interface)
+void gpu_batch_remove_interface_ref(GPUBatch* batch, const GPUShaderInterface* interface)
{
if (batch->is_dynamic_vao_count) {
for (int i = 0; i < batch->dynamic_vaos.count; ++i) {
if (batch->dynamic_vaos.interfaces[i] == interface) {
- GWN_vao_free(batch->dynamic_vaos.vao_ids[i], batch->context);
+ GPU_vao_free(batch->dynamic_vaos.vao_ids[i], batch->context);
batch->dynamic_vaos.vao_ids[i] = 0;
batch->dynamic_vaos.interfaces[i] = NULL;
break; /* cannot have duplicates */
@@ -325,9 +325,9 @@ void gwn_batch_remove_interface_ref(Gwn_Batch* batch, const Gwn_ShaderInterface*
}
else {
int i;
- for (i = 0; i < GWN_BATCH_VAO_STATIC_LEN; ++i) {
+ for (i = 0; i < GPU_BATCH_VAO_STATIC_LEN; ++i) {
if (batch->static_vaos.interfaces[i] == interface) {
- GWN_vao_free(batch->static_vaos.vao_ids[i], batch->context);
+ GPU_vao_free(batch->static_vaos.vao_ids[i], batch->context);
batch->static_vaos.vao_ids[i] = 0;
batch->static_vaos.interfaces[i] = NULL;
break; /* cannot have duplicates */
@@ -337,28 +337,28 @@ void gwn_batch_remove_interface_ref(Gwn_Batch* batch, const Gwn_ShaderInterface*
}
static void create_bindings(
- Gwn_VertBuf* verts, const Gwn_ShaderInterface* interface,
+ GPUVertBuf* verts, const GPUShaderInterface* interface,
uint v_first, const bool use_instancing)
{
- const Gwn_VertFormat* format = &verts->format;
+ const GPUVertFormat* format = &verts->format;
const uint attr_len = format->attr_len;
const uint stride = format->stride;
- GWN_vertbuf_use(verts);
+ GPU_vertbuf_use(verts);
for (uint a_idx = 0; a_idx < attr_len; ++a_idx) {
- const Gwn_VertAttr* a = format->attribs + a_idx;
+ const GPUVertAttr* a = format->attribs + a_idx;
const GLvoid* pointer = (const GLubyte*)0 + a->offset + v_first * stride;
for (uint n_idx = 0; n_idx < a->name_len; ++n_idx) {
- const Gwn_ShaderInput* input = GWN_shaderinterface_attr(interface, a->name[n_idx]);
+ const GPUShaderInput* input = GPU_shaderinterface_attr(interface, a->name[n_idx]);
if (input == NULL) continue;
if (a->comp_len == 16 || a->comp_len == 12 || a->comp_len == 8) {
#if TRUST_NO_ONE
- assert(a->fetch_mode == GWN_FETCH_FLOAT);
+ assert(a->fetch_mode == GPU_FETCH_FLOAT);
assert(a->gl_comp_type == GL_FLOAT);
#endif
for (int i = 0; i < a->comp_len / 4; ++i) {
@@ -374,14 +374,14 @@ static void create_bindings(
glVertexAttribDivisor(input->location, (use_instancing) ? 1 : 0);
switch (a->fetch_mode) {
- case GWN_FETCH_FLOAT:
- case GWN_FETCH_INT_TO_FLOAT:
+ case GPU_FETCH_FLOAT:
+ case GPU_FETCH_INT_TO_FLOAT:
glVertexAttribPointer(input->location, a->comp_len, a->gl_comp_type, GL_FALSE, stride, pointer);
break;
- case GWN_FETCH_INT_TO_FLOAT_UNIT:
+ case GPU_FETCH_INT_TO_FLOAT_UNIT:
glVertexAttribPointer(input->location, a->comp_len, a->gl_comp_type, GL_TRUE, stride, pointer);
break;
- case GWN_FETCH_INT:
+ case GPU_FETCH_INT:
glVertexAttribIPointer(input->location, a->comp_len, a->gl_comp_type, stride, pointer);
break;
}
@@ -390,20 +390,20 @@ static void create_bindings(
}
}
-static void batch_update_program_bindings(Gwn_Batch* batch, uint v_first)
+static void batch_update_program_bindings(GPUBatch* batch, uint v_first)
{
- for (int v = 0; v < GWN_BATCH_VBO_MAX_LEN && batch->verts[v] != NULL; ++v) {
+ for (int v = 0; v < GPU_BATCH_VBO_MAX_LEN && batch->verts[v] != NULL; ++v) {
create_bindings(batch->verts[v], batch->interface, (batch->inst) ? 0 : v_first, false);
}
if (batch->inst) {
create_bindings(batch->inst, batch->interface, v_first, true);
}
if (batch->elem) {
- GWN_indexbuf_use(batch->elem);
+ GPU_indexbuf_use(batch->elem);
}
}
-void GWN_batch_program_use_begin(Gwn_Batch* batch)
+void GPU_batch_program_use_begin(GPUBatch* batch)
{
/* NOTE: use_program & done_using_program are fragile, depend on staying in sync with
* the GL context's active program. use_program doesn't mark other programs as "not used". */
@@ -415,7 +415,7 @@ void GWN_batch_program_use_begin(Gwn_Batch* batch)
}
}
-void GWN_batch_program_use_end(Gwn_Batch* batch)
+void GPU_batch_program_use_end(GPUBatch* batch)
{
if (batch->program_in_use) {
#if PROGRAM_NO_OPTI
@@ -426,99 +426,99 @@ void GWN_batch_program_use_end(Gwn_Batch* batch)
}
#if TRUST_NO_ONE
- #define GET_UNIFORM const Gwn_ShaderInput* uniform = GWN_shaderinterface_uniform(batch->interface, name); assert(uniform);
+ #define GET_UNIFORM const GPUShaderInput* uniform = GPU_shaderinterface_uniform(batch->interface, name); assert(uniform);
#else
- #define GET_UNIFORM const Gwn_ShaderInput* uniform = GWN_shaderinterface_uniform(batch->interface, name);
+ #define GET_UNIFORM const GPUShaderInput* uniform = GPU_shaderinterface_uniform(batch->interface, name);
#endif
-void GWN_batch_uniform_1ui(Gwn_Batch* batch, const char* name, int value)
+void GPU_batch_uniform_1ui(GPUBatch* batch, const char* name, int value)
{
GET_UNIFORM
glUniform1ui(uniform->location, value);
}
-void GWN_batch_uniform_1i(Gwn_Batch* batch, const char* name, int value)
+void GPU_batch_uniform_1i(GPUBatch* batch, const char* name, int value)
{
GET_UNIFORM
glUniform1i(uniform->location, value);
}
-void GWN_batch_uniform_1b(Gwn_Batch* batch, const char* name, bool value)
+void GPU_batch_uniform_1b(GPUBatch* batch, const char* name, bool value)
{
GET_UNIFORM
glUniform1i(uniform->location, value ? GL_TRUE : GL_FALSE);
}
-void GWN_batch_uniform_2f(Gwn_Batch* batch, const char* name, float x, float y)
+void GPU_batch_uniform_2f(GPUBatch* batch, const char* name, float x, float y)
{
GET_UNIFORM
glUniform2f(uniform->location, x, y);
}
-void GWN_batch_uniform_3f(Gwn_Batch* batch, const char* name, float x, float y, float z)
+void GPU_batch_uniform_3f(GPUBatch* batch, const char* name, float x, float y, float z)
{
GET_UNIFORM
glUniform3f(uniform->location, x, y, z);
}
-void GWN_batch_uniform_4f(Gwn_Batch* batch, const char* name, float x, float y, float z, float w)
+void GPU_batch_uniform_4f(GPUBatch* batch, const char* name, float x, float y, float z, float w)
{
GET_UNIFORM
glUniform4f(uniform->location, x, y, z, w);
}
-void GWN_batch_uniform_1f(Gwn_Batch* batch, const char* name, float x)
+void GPU_batch_uniform_1f(GPUBatch* batch, const char* name, float x)
{
GET_UNIFORM
glUniform1f(uniform->location, x);
}
-void GWN_batch_uniform_2fv(Gwn_Batch* batch, const char* name, const float data[2])
+void GPU_batch_uniform_2fv(GPUBatch* batch, const char* name, const float data[2])
{
GET_UNIFORM
glUniform2fv(uniform->location, 1, data);
}
-void GWN_batch_uniform_3fv(Gwn_Batch* batch, const char* name, const float data[3])
+void GPU_batch_uniform_3fv(GPUBatch* batch, const char* name, const float data[3])
{
GET_UNIFORM
glUniform3fv(uniform->location, 1, data);
}
-void GWN_batch_uniform_4fv(Gwn_Batch* batch, const char* name, const float data[4])
+void GPU_batch_uniform_4fv(GPUBatch* batch, const char* name, const float data[4])
{
GET_UNIFORM
glUniform4fv(uniform->location, 1, data);
}
-void GWN_batch_uniform_2fv_array(Gwn_Batch* batch, const char* name, const int len, const float *data)
+void GPU_batch_uniform_2fv_array(GPUBatch* batch, const char* name, const int len, const float *data)
{
GET_UNIFORM
glUniform2fv(uniform->location, len, data);
}
-void GWN_batch_uniform_4fv_array(Gwn_Batch* batch, const char* name, const int len, const float *data)
+void GPU_batch_uniform_4fv_array(GPUBatch* batch, const char* name, const int len, const float *data)
{
GET_UNIFORM
glUniform4fv(uniform->location, len, data);
}
-void GWN_batch_uniform_mat4(Gwn_Batch* batch, const char* name, const float data[4][4])
+void GPU_batch_uniform_mat4(GPUBatch* batch, const char* name, const float data[4][4])
{
GET_UNIFORM
glUniformMatrix4fv(uniform->location, 1, GL_FALSE, (const float *)data);
}
-static void primitive_restart_enable(const Gwn_IndexBuf *el)
+static void primitive_restart_enable(const GPUIndexBuf *el)
{
// TODO(fclem) Replace by GL_PRIMITIVE_RESTART_FIXED_INDEX when we have ogl 4.3
glEnable(GL_PRIMITIVE_RESTART);
GLuint restart_index = (GLuint)0xFFFFFFFF;
-#if GWN_TRACK_INDEX_RANGE
- if (el->index_type == GWN_INDEX_U8)
+#if GPU_TRACK_INDEX_RANGE
+ if (el->index_type == GPU_INDEX_U8)
restart_index = (GLuint)0xFF;
- else if (el->index_type == GWN_INDEX_U16)
+ else if (el->index_type == GPU_INDEX_U16)
restart_index = (GLuint)0xFFFF;
#endif
@@ -530,21 +530,21 @@ static void primitive_restart_disable(void)
glDisable(GL_PRIMITIVE_RESTART);
}
-void GWN_batch_draw(Gwn_Batch* batch)
+void GPU_batch_draw(GPUBatch* batch)
{
#if TRUST_NO_ONE
- assert(batch->phase == GWN_BATCH_READY_TO_DRAW);
+ assert(batch->phase == GPU_BATCH_READY_TO_DRAW);
assert(batch->verts[0]->vbo_id != 0);
#endif
- GWN_batch_program_use_begin(batch);
+ GPU_batch_program_use_begin(batch);
GPU_matrix_bind(batch->interface); // external call.
- GWN_batch_draw_range_ex(batch, 0, 0, false);
+ GPU_batch_draw_range_ex(batch, 0, 0, false);
- GWN_batch_program_use_end(batch);
+ GPU_batch_program_use_end(batch);
}
-void GWN_batch_draw_range_ex(Gwn_Batch* batch, int v_first, int v_count, bool force_instance)
+void GPU_batch_draw_range_ex(GPUBatch* batch, int v_first, int v_count, bool force_instance)
{
#if TRUST_NO_ONE
assert(!(force_instance && (batch->inst == NULL)) || v_count > 0); // we cannot infer length if force_instance
@@ -553,7 +553,7 @@ void GWN_batch_draw_range_ex(Gwn_Batch* batch, int v_first, int v_count, bool fo
// If using offset drawing, use the default VAO and redo bindings.
if (v_first != 0 && (do_instance || batch->elem)) {
- glBindVertexArray(GWN_vao_default());
+ glBindVertexArray(GPU_vao_default());
batch_update_program_bindings(batch, v_first);
}
else {
@@ -567,12 +567,12 @@ void GWN_batch_draw_range_ex(Gwn_Batch* batch, int v_first, int v_count, bool fo
}
if (batch->elem) {
- const Gwn_IndexBuf* el = batch->elem;
+ const GPUIndexBuf* el = batch->elem;
if (el->use_prim_restart) {
primitive_restart_enable(el);
}
-#if GWN_TRACK_INDEX_RANGE
+#if GPU_TRACK_INDEX_RANGE
glDrawElementsInstancedBaseVertex(batch->gl_prim_type,
el->index_len,
el->gl_index_type,
@@ -597,13 +597,13 @@ void GWN_batch_draw_range_ex(Gwn_Batch* batch, int v_first, int v_count, bool fo
}
if (batch->elem) {
- const Gwn_IndexBuf* el = batch->elem;
+ const GPUIndexBuf* el = batch->elem;
if (el->use_prim_restart) {
primitive_restart_enable(el);
}
-#if GWN_TRACK_INDEX_RANGE
+#if GPU_TRACK_INDEX_RANGE
if (el->base_index) {
glDrawRangeElementsBaseVertex(batch->gl_prim_type,
el->min_index,
@@ -634,10 +634,10 @@ void GWN_batch_draw_range_ex(Gwn_Batch* batch, int v_first, int v_count, bool fo
}
/* just draw some vertices and let shader place them where we want. */
-void GWN_draw_primitive(Gwn_PrimType prim_type, int v_count)
+void GPU_draw_primitive(GPUPrimType prim_type, int v_count)
{
/* we cannot draw without vao ... annoying ... */
- glBindVertexArray(GWN_vao_default());
+ glBindVertexArray(GPU_vao_default());
GLenum type = convert_prim_type_to_gl(prim_type);
glDrawArrays(type, 0, v_count);
@@ -652,10 +652,10 @@ void GWN_draw_primitive(Gwn_PrimType prim_type, int v_count)
/** \name Utilities
* \{ */
-void GWN_batch_program_set_builtin(Gwn_Batch *batch, GPUBuiltinShader shader_id)
+void GPU_batch_program_set_builtin(GPUBatch *batch, GPUBuiltinShader shader_id)
{
GPUShader *shader = GPU_shader_get_builtin_shader(shader_id);
- GWN_batch_program_set(batch, shader->program, shader->interface);
+ GPU_batch_program_set(batch, shader->program, shader->interface);
}
/** \} */
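
Put together, the renamed batch API is used roughly like this — a sketch assembled from the signatures above, where vbo is an existing GPUVertBuf, and the builtin-shader enum value and uniform name are assumptions for illustration:

    GPUBatch *batch = GPU_batch_create_ex(GPU_PRIM_TRIS, vbo, NULL, GPU_BATCH_OWNS_VBO);

    /* Sets program + interface and begins program use
     * (the "hack! to make Batch_Uniform* simpler" noted above). */
    GPU_batch_program_set_builtin(batch, GPU_SHADER_3D_UNIFORM_COLOR); /* enum value assumed */
    GPU_batch_uniform_4f(batch, "color", 1.0f, 0.5f, 0.0f, 1.0f);      /* uniform name assumed */

    GPU_batch_draw(batch);    /* binds or lazily creates a VAO for the active GPUContext */

    GPU_batch_discard(batch); /* also discards the VBO, owned via GPU_BATCH_OWNS_VBO */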
diff --git a/source/blender/gpu/intern/gpu_batch_presets.c b/source/blender/gpu/intern/gpu_batch_presets.c
index fb696fd09a5..83287c57441 100644
--- a/source/blender/gpu/intern/gpu_batch_presets.c
+++ b/source/blender/gpu/intern/gpu_batch_presets.c
@@ -45,14 +45,14 @@
/* Struct to store 3D Batches and their format */
static struct {
struct {
- Gwn_Batch *sphere_high;
- Gwn_Batch *sphere_med;
- Gwn_Batch *sphere_low;
- Gwn_Batch *sphere_wire_low;
- Gwn_Batch *sphere_wire_med;
+ GPUBatch *sphere_high;
+ GPUBatch *sphere_med;
+ GPUBatch *sphere_low;
+ GPUBatch *sphere_wire_low;
+ GPUBatch *sphere_wire_med;
} batch;
- Gwn_VertFormat format;
+ GPUVertFormat format;
struct {
uint pos, nor;
@@ -66,28 +66,28 @@ static ListBase presets_list = {NULL, NULL};
/** \name 3D Primitives
* \{ */
-static Gwn_VertFormat *preset_3d_format(void)
+static GPUVertFormat *preset_3d_format(void)
{
if (g_presets_3d.format.attr_len == 0) {
- Gwn_VertFormat *format = &g_presets_3d.format;
- g_presets_3d.attr_id.pos = GWN_vertformat_attr_add(format, "pos", GWN_COMP_F32, 3, GWN_FETCH_FLOAT);
- g_presets_3d.attr_id.nor = GWN_vertformat_attr_add(format, "nor", GWN_COMP_F32, 3, GWN_FETCH_FLOAT);
+ GPUVertFormat *format = &g_presets_3d.format;
+ g_presets_3d.attr_id.pos = GPU_vertformat_attr_add(format, "pos", GPU_COMP_F32, 3, GPU_FETCH_FLOAT);
+ g_presets_3d.attr_id.nor = GPU_vertformat_attr_add(format, "nor", GPU_COMP_F32, 3, GPU_FETCH_FLOAT);
}
return &g_presets_3d.format;
}
static void batch_sphere_lat_lon_vert(
- Gwn_VertBufRaw *pos_step, Gwn_VertBufRaw *nor_step,
+ GPUVertBufRaw *pos_step, GPUVertBufRaw *nor_step,
float lat, float lon)
{
float pos[3];
pos[0] = sinf(lat) * cosf(lon);
pos[1] = cosf(lat);
pos[2] = sinf(lat) * sinf(lon);
- copy_v3_v3(GWN_vertbuf_raw_step(pos_step), pos);
- copy_v3_v3(GWN_vertbuf_raw_step(nor_step), pos);
+ copy_v3_v3(GPU_vertbuf_raw_step(pos_step), pos);
+ copy_v3_v3(GPU_vertbuf_raw_step(nor_step), pos);
}
-Gwn_Batch *GPU_batch_preset_sphere(int lod)
+GPUBatch *GPU_batch_preset_sphere(int lod)
{
BLI_assert(lod >= 0 && lod <= 2);
BLI_assert(BLI_thread_is_main());
@@ -103,7 +103,7 @@ Gwn_Batch *GPU_batch_preset_sphere(int lod)
}
}
-Gwn_Batch *GPU_batch_preset_sphere_wire(int lod)
+GPUBatch *GPU_batch_preset_sphere_wire(int lod)
{
BLI_assert(lod >= 0 && lod <= 1);
BLI_assert(BLI_thread_is_main());
@@ -123,19 +123,19 @@ Gwn_Batch *GPU_batch_preset_sphere_wire(int lod)
* \{ */
/* Replacement for gluSphere */
-Gwn_Batch *gpu_batch_sphere(int lat_res, int lon_res)
+GPUBatch *gpu_batch_sphere(int lat_res, int lon_res)
{
const float lon_inc = 2 * M_PI / lon_res;
const float lat_inc = M_PI / lat_res;
float lon, lat;
- Gwn_VertBuf *vbo = GWN_vertbuf_create_with_format(preset_3d_format());
+ GPUVertBuf *vbo = GPU_vertbuf_create_with_format(preset_3d_format());
const uint vbo_len = (lat_res - 1) * lon_res * 6;
- GWN_vertbuf_data_alloc(vbo, vbo_len);
+ GPU_vertbuf_data_alloc(vbo, vbo_len);
- Gwn_VertBufRaw pos_step, nor_step;
- GWN_vertbuf_attr_get_raw_data(vbo, g_presets_3d.attr_id.pos, &pos_step);
- GWN_vertbuf_attr_get_raw_data(vbo, g_presets_3d.attr_id.nor, &nor_step);
+ GPUVertBufRaw pos_step, nor_step;
+ GPU_vertbuf_attr_get_raw_data(vbo, g_presets_3d.attr_id.pos, &pos_step);
+ GPU_vertbuf_attr_get_raw_data(vbo, g_presets_3d.attr_id.nor, &nor_step);
lon = 0.0f;
for (int i = 0; i < lon_res; i++, lon += lon_inc) {
@@ -155,25 +155,25 @@ Gwn_Batch *gpu_batch_sphere(int lat_res, int lon_res)
}
}
- BLI_assert(vbo_len == GWN_vertbuf_raw_used(&pos_step));
- BLI_assert(vbo_len == GWN_vertbuf_raw_used(&nor_step));
+ BLI_assert(vbo_len == GPU_vertbuf_raw_used(&pos_step));
+ BLI_assert(vbo_len == GPU_vertbuf_raw_used(&nor_step));
- return GWN_batch_create_ex(GWN_PRIM_TRIS, vbo, NULL, GWN_BATCH_OWNS_VBO);
+ return GPU_batch_create_ex(GPU_PRIM_TRIS, vbo, NULL, GPU_BATCH_OWNS_VBO);
}
-static Gwn_Batch *batch_sphere_wire(int lat_res, int lon_res)
+static GPUBatch *batch_sphere_wire(int lat_res, int lon_res)
{
const float lon_inc = 2 * M_PI / lon_res;
const float lat_inc = M_PI / lat_res;
float lon, lat;
- Gwn_VertBuf *vbo = GWN_vertbuf_create_with_format(preset_3d_format());
+ GPUVertBuf *vbo = GPU_vertbuf_create_with_format(preset_3d_format());
const uint vbo_len = (lat_res * lon_res * 2) + ((lat_res - 1) * lon_res * 2);
- GWN_vertbuf_data_alloc(vbo, vbo_len);
+ GPU_vertbuf_data_alloc(vbo, vbo_len);
- Gwn_VertBufRaw pos_step, nor_step;
- GWN_vertbuf_attr_get_raw_data(vbo, g_presets_3d.attr_id.pos, &pos_step);
- GWN_vertbuf_attr_get_raw_data(vbo, g_presets_3d.attr_id.nor, &nor_step);
+ GPUVertBufRaw pos_step, nor_step;
+ GPU_vertbuf_attr_get_raw_data(vbo, g_presets_3d.attr_id.pos, &pos_step);
+ GPU_vertbuf_attr_get_raw_data(vbo, g_presets_3d.attr_id.nor, &nor_step);
lon = 0.0f;
for (int i = 0; i < lon_res; i++, lon += lon_inc) {
@@ -189,10 +189,10 @@ static Gwn_Batch *batch_sphere_wire(int lat_res, int lon_res)
}
}
- BLI_assert(vbo_len == GWN_vertbuf_raw_used(&pos_step));
- BLI_assert(vbo_len == GWN_vertbuf_raw_used(&nor_step));
+ BLI_assert(vbo_len == GPU_vertbuf_raw_used(&pos_step));
+ BLI_assert(vbo_len == GPU_vertbuf_raw_used(&nor_step));
- return GWN_batch_create_ex(GWN_PRIM_LINES, vbo, NULL, GWN_BATCH_OWNS_VBO);
+ return GPU_batch_create_ex(GPU_PRIM_LINES, vbo, NULL, GPU_BATCH_OWNS_VBO);
}
/** \} */
@@ -216,7 +216,7 @@ void gpu_batch_presets_init(void)
gpu_batch_presets_register(g_presets_3d.batch.sphere_wire_med);
}
-void gpu_batch_presets_register(Gwn_Batch *preset_batch)
+void gpu_batch_presets_register(GPUBatch *preset_batch)
{
BLI_addtail(&presets_list, BLI_genericNodeN(preset_batch));
}
@@ -227,8 +227,8 @@ void gpu_batch_presets_reset(void)
* This way they will draw correctly for each window. */
LinkData *link = presets_list.first;
for (link = presets_list.first; link; link = link->next) {
- Gwn_Batch *preset = link->data;
- gwn_batch_vao_cache_clear(preset);
+ GPUBatch *preset = link->data;
+ GPU_batch_vao_cache_clear(preset);
}
}
@@ -236,8 +236,8 @@ void gpu_batch_presets_exit(void)
{
LinkData *link;
while ((link = BLI_pophead(&presets_list))) {
- Gwn_Batch *preset = link->data;
- GWN_batch_discard(preset);
+ GPUBatch *preset = link->data;
+ GPU_batch_discard(preset);
MEM_freeN(link);
}
}
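
Callers consume these presets with a single call; per the assertions above, this must happen on the main thread, with lod in 0..2 for solid spheres and 0..1 for wire spheres:

    GPUBatch *sphere = GPU_batch_preset_sphere(2);      /* highest LOD */
    GPUBatch *wire   = GPU_batch_preset_sphere_wire(1); /* medium LOD */
    /* Presets are shared: gpu_batch_presets_reset/exit manage their
     * VAO caches and lifetime, so callers must not discard them. */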
diff --git a/source/blender/gpu/intern/gpu_batch_private.h b/source/blender/gpu/intern/gpu_batch_private.h
index 1e72bae503f..0b25c0aef05 100644
--- a/source/blender/gpu/intern/gpu_batch_private.h
+++ b/source/blender/gpu/intern/gpu_batch_private.h
@@ -23,15 +23,15 @@
* ***** END GPL LICENSE BLOCK *****
*/
-/** \file blender/gpu/gwn_batch_private.h
+/** \file blender/gpu/intern/gpu_batch_private.h
* \ingroup gpu
*
- * Gawain geometry batch
+ * GPU geometry batch
* Contains VAOs + VBOs + Shader representing a drawable entity.
*/
-#ifndef __GWN_BATCH_PRIVATE_H__
-#define __GWN_BATCH_PRIVATE_H__
+#ifndef __GPU_BATCH_PRIVATE_H__
+#define __GPU_BATCH_PRIVATE_H__
#ifdef __cplusplus
extern "C" {
@@ -41,13 +41,13 @@ extern "C" {
#include "GPU_context.h"
#include "GPU_shader_interface.h"
-void gwn_batch_remove_interface_ref(Gwn_Batch*, const Gwn_ShaderInterface*);
+void gpu_batch_remove_interface_ref(GPUBatch*, const GPUShaderInterface*);
-void gwn_context_add_batch(Gwn_Context*, Gwn_Batch*);
-void gwn_context_remove_batch(Gwn_Context*, Gwn_Batch*);
+void gpu_context_add_batch(GPUContext*, GPUBatch*);
+void gpu_context_remove_batch(GPUContext*, GPUBatch*);
#ifdef __cplusplus
}
#endif
-#endif /* __GWN_BATCH_PRIVATE_H__ */
+#endif /* __GPU_BATCH_PRIVATE_H__ */
diff --git a/source/blender/gpu/intern/gpu_batch_utils.c b/source/blender/gpu/intern/gpu_batch_utils.c
index d6d82ac18b6..0a7f1ca901d 100644
--- a/source/blender/gpu/intern/gpu_batch_utils.c
+++ b/source/blender/gpu/intern/gpu_batch_utils.c
@@ -47,7 +47,7 @@
* \param polys_flat_len: Length of the array (must be an even number).
* \param rect: Optional region to map the byte 0..255 coords to. When not set use -1..1.
*/
-Gwn_Batch *GPU_batch_tris_from_poly_2d_encoded(
+GPUBatch *GPU_batch_tris_from_poly_2d_encoded(
const uchar *polys_flat, uint polys_flat_len, const rctf *rect)
{
const uchar (*polys)[2] = (const void *)polys_flat;
@@ -103,41 +103,41 @@ Gwn_Batch *GPU_batch_tris_from_poly_2d_encoded(
}
/* We have vertices and tris, make a batch from this. */
- static Gwn_VertFormat format = {0};
+ static GPUVertFormat format = {0};
static struct { uint pos; } attr_id;
if (format.attr_len == 0) {
- attr_id.pos = GWN_vertformat_attr_add(&format, "pos", GWN_COMP_F32, 2, GWN_FETCH_FLOAT);
+ attr_id.pos = GPU_vertformat_attr_add(&format, "pos", GPU_COMP_F32, 2, GPU_FETCH_FLOAT);
}
const uint verts_len = (verts_step - verts);
const uint tris_len = (tris_step - tris);
- Gwn_VertBuf *vbo = GWN_vertbuf_create_with_format(&format);
- GWN_vertbuf_data_alloc(vbo, verts_len);
+ GPUVertBuf *vbo = GPU_vertbuf_create_with_format(&format);
+ GPU_vertbuf_data_alloc(vbo, verts_len);
- Gwn_VertBufRaw pos_step;
- GWN_vertbuf_attr_get_raw_data(vbo, attr_id.pos, &pos_step);
+ GPUVertBufRaw pos_step;
+ GPU_vertbuf_attr_get_raw_data(vbo, attr_id.pos, &pos_step);
for (uint i = 0; i < verts_len; i++) {
- copy_v2_v2(GWN_vertbuf_raw_step(&pos_step), verts[i]);
+ copy_v2_v2(GPU_vertbuf_raw_step(&pos_step), verts[i]);
}
- Gwn_IndexBufBuilder elb;
- GWN_indexbuf_init(&elb, GWN_PRIM_TRIS, tris_len, verts_len);
+ GPUIndexBufBuilder elb;
+ GPU_indexbuf_init(&elb, GPU_PRIM_TRIS, tris_len, verts_len);
for (uint i = 0; i < tris_len; i++) {
- GWN_indexbuf_add_tri_verts(&elb, UNPACK3(tris[i]));
+ GPU_indexbuf_add_tri_verts(&elb, UNPACK3(tris[i]));
}
- Gwn_IndexBuf *indexbuf = GWN_indexbuf_build(&elb);
+ GPUIndexBuf *indexbuf = GPU_indexbuf_build(&elb);
MEM_freeN(tris);
MEM_freeN(verts);
- return GWN_batch_create_ex(
- GWN_PRIM_TRIS, vbo,
+ return GPU_batch_create_ex(
+ GPU_PRIM_TRIS, vbo,
indexbuf,
- GWN_BATCH_OWNS_VBO | GWN_BATCH_OWNS_INDEX);
+ GPU_BATCH_OWNS_VBO | GPU_BATCH_OWNS_INDEX);
}
-Gwn_Batch *GPU_batch_wire_from_poly_2d_encoded(
+GPUBatch *GPU_batch_wire_from_poly_2d_encoded(
const uchar *polys_flat, uint polys_flat_len, const rctf *rect)
{
const uchar (*polys)[2] = (const void *)polys_flat;
@@ -206,18 +206,18 @@ Gwn_Batch *GPU_batch_wire_from_poly_2d_encoded(
}
/* We have vertices and tris, make a batch from this. */
- static Gwn_VertFormat format = {0};
+ static GPUVertFormat format = {0};
static struct { uint pos; } attr_id;
if (format.attr_len == 0) {
- attr_id.pos = GWN_vertformat_attr_add(&format, "pos", GWN_COMP_F32, 2, GWN_FETCH_FLOAT);
+ attr_id.pos = GPU_vertformat_attr_add(&format, "pos", GPU_COMP_F32, 2, GPU_FETCH_FLOAT);
}
- Gwn_VertBuf *vbo = GWN_vertbuf_create_with_format(&format);
+ GPUVertBuf *vbo = GPU_vertbuf_create_with_format(&format);
const uint vbo_len_capacity = lines_len * 2;
- GWN_vertbuf_data_alloc(vbo, vbo_len_capacity);
+ GPU_vertbuf_data_alloc(vbo, vbo_len_capacity);
- Gwn_VertBufRaw pos_step;
- GWN_vertbuf_attr_get_raw_data(vbo, attr_id.pos, &pos_step);
+ GPUVertBufRaw pos_step;
+ GPU_vertbuf_attr_get_raw_data(vbo, attr_id.pos, &pos_step);
for (uint i = 0; i < lines_len; i++) {
union {
@@ -226,18 +226,18 @@ Gwn_Batch *GPU_batch_wire_from_poly_2d_encoded(
} data;
data.as_u32 = lines[i];
for (uint k = 0; k < 2; k++) {
- float *pos_v2 = GWN_vertbuf_raw_step(&pos_step);
+ float *pos_v2 = GPU_vertbuf_raw_step(&pos_step);
for (uint j = 0; j < 2; j++) {
pos_v2[j] = min_uchar[j] + ((float)data.as_u8_pair[k][j] * range_uchar[j]);
}
}
}
- BLI_assert(vbo_len_capacity == GWN_vertbuf_raw_used(&pos_step));
+ BLI_assert(vbo_len_capacity == GPU_vertbuf_raw_used(&pos_step));
MEM_freeN(lines);
- return GWN_batch_create_ex(
- GWN_PRIM_LINES, vbo,
+ return GPU_batch_create_ex(
+ GPU_PRIM_LINES, vbo,
NULL,
- GWN_BATCH_OWNS_VBO);
+ GPU_BATCH_OWNS_VBO);
}
/** \} */
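
Both constructors above follow the index-buffer builder pattern that recurs throughout this commit: initialize a builder with a primitive type and index/vertex budgets, append, then build. Distilled (tri and the counts are placeholders):

    GPUIndexBufBuilder elb;
    GPU_indexbuf_init(&elb, GPU_PRIM_TRIS, tri_len, vert_len);
    for (uint i = 0; i < tri_len; i++) {
    	GPU_indexbuf_add_tri_verts(&elb, tri[i][0], tri[i][1], tri[i][2]);
    }
    GPUIndexBuf *ibo = GPU_indexbuf_build(&elb);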
diff --git a/source/blender/gpu/intern/gpu_buffer_id.cpp b/source/blender/gpu/intern/gpu_buffer_id.cpp
index 0c442f687a0..c1aaf1945aa 100644
--- a/source/blender/gpu/intern/gpu_buffer_id.cpp
+++ b/source/blender/gpu/intern/gpu_buffer_id.cpp
@@ -23,10 +23,10 @@
* ***** END GPL LICENSE BLOCK *****
*/
-/** \file blender/gpu/intern/gwn_buffer_id.cpp
+/** \file blender/gpu/intern/gpu_buffer_id.cpp
* \ingroup gpu
*
- * Gawain buffer IDs
+ * GPU buffer IDs
*/
#include "GPU_buffer_id.h"
@@ -54,7 +54,7 @@ static bool thread_is_main()
return BLI_thread_is_main();
}
-GLuint GWN_buf_id_alloc()
+GLuint GPU_buf_id_alloc()
{
/* delete orphaned IDs */
orphan_mutex.lock();
@@ -73,7 +73,7 @@ GLuint GWN_buf_id_alloc()
return new_buffer_id;
}
-void GWN_buf_id_free(GLuint buffer_id)
+void GPU_buf_id_free(GLuint buffer_id)
{
if (thread_is_main()) {
glDeleteBuffers(1, &buffer_id);
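
The branch shown covers the main thread; the orphan list it pairs with (guarded by orphan_mutex and drained at the top of GPU_buf_id_alloc) is the usual deferred-deletion pattern, since GL objects can only be deleted on a thread holding the GL context. A generic sketch of that pattern with hypothetical names, not the exact Blender code:

    #include <mutex>
    #include <vector>

    extern bool thread_is_main();           /* as defined earlier in this file */
    extern void delete_buffer(unsigned id); /* stands in for glDeleteBuffers */

    static std::mutex orphan_mutex;
    static std::vector<unsigned> orphaned_buffer_ids;

    void buf_id_free(unsigned buffer_id)
    {
    	if (thread_is_main()) {
    		delete_buffer(buffer_id);
    	}
    	else {
    		/* Defer: the main thread frees these on its next alloc. */
    		std::lock_guard<std::mutex> lock(orphan_mutex);
    		orphaned_buffer_ids.push_back(buffer_id);
    	}
    }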
diff --git a/source/blender/gpu/intern/gpu_buffers.c b/source/blender/gpu/intern/gpu_buffers.c
index 1a4750652cc..d466fa87388 100644
--- a/source/blender/gpu/intern/gpu_buffers.c
+++ b/source/blender/gpu/intern/gpu_buffers.c
@@ -62,7 +62,7 @@ static ThreadMutex buffer_mutex = BLI_MUTEX_INITIALIZER;
/* multires global buffer, can be used for many grids having the same grid size */
typedef struct GridCommonGPUBuffer {
- Gwn_IndexBuf *mres_buffer;
+ GPUIndexBuf *mres_buffer;
int mres_prev_gridsize;
unsigned mres_prev_totquad;
} GridCommonGPUBuffer;
@@ -71,11 +71,11 @@ typedef struct GridCommonGPUBuffer {
* drawing and doesn't interact at all with the buffer code above */
struct GPU_PBVH_Buffers {
- Gwn_IndexBuf *index_buf, *index_buf_fast;
- Gwn_VertBuf *vert_buf;
+ GPUIndexBuf *index_buf, *index_buf_fast;
+ GPUVertBuf *vert_buf;
- Gwn_Batch *triangles;
- Gwn_Batch *triangles_fast;
+ GPUBatch *triangles;
+ GPUBatch *triangles_fast;
/* mesh pointers in case buffer allocation fails */
const MPoly *mpoly;
@@ -132,23 +132,23 @@ static bool gpu_pbvh_vert_buf_data_set(GPU_PBVH_Buffers *buffers, unsigned int v
/* Initialize vertex buffer */
/* match 'VertexBufferFormat' */
- static Gwn_VertFormat format = {0};
+ static GPUVertFormat format = {0};
if (format.attr_len == 0) {
- g_vbo_id.pos = GWN_vertformat_attr_add(&format, "pos", GWN_COMP_F32, 3, GWN_FETCH_FLOAT);
- g_vbo_id.nor = GWN_vertformat_attr_add(&format, "nor", GWN_COMP_I16, 3, GWN_FETCH_INT_TO_FLOAT_UNIT);
- g_vbo_id.col = GWN_vertformat_attr_add(&format, "color", GWN_COMP_U8, 3, GWN_FETCH_INT_TO_FLOAT_UNIT);
+ g_vbo_id.pos = GPU_vertformat_attr_add(&format, "pos", GPU_COMP_F32, 3, GPU_FETCH_FLOAT);
+ g_vbo_id.nor = GPU_vertformat_attr_add(&format, "nor", GPU_COMP_I16, 3, GPU_FETCH_INT_TO_FLOAT_UNIT);
+ g_vbo_id.col = GPU_vertformat_attr_add(&format, "color", GPU_COMP_U8, 3, GPU_FETCH_INT_TO_FLOAT_UNIT);
}
#if 0
- buffers->vert_buf = GWN_vertbuf_create_with_format_ex(&format, GWN_USAGE_DYNAMIC);
- GWN_vertbuf_data_alloc(buffers->vert_buf, vert_len);
+ buffers->vert_buf = GPU_vertbuf_create_with_format_ex(&format, GPU_USAGE_DYNAMIC);
+ GPU_vertbuf_data_alloc(buffers->vert_buf, vert_len);
}
else if (vert_len != buffers->vert_buf->vertex_len) {
- GWN_vertbuf_data_resize(buffers->vert_buf, vert_len);
+ GPU_vertbuf_data_resize(buffers->vert_buf, vert_len);
}
#else
- buffers->vert_buf = GWN_vertbuf_create_with_format_ex(&format, GWN_USAGE_STATIC);
+ buffers->vert_buf = GPU_vertbuf_create_with_format_ex(&format, GPU_USAGE_STATIC);
}
- GWN_vertbuf_data_alloc(buffers->vert_buf, vert_len);
+ GPU_vertbuf_data_alloc(buffers->vert_buf, vert_len);
#endif
return buffers->vert_buf->data != NULL;
}
@@ -157,19 +157,19 @@ static void gpu_pbvh_batch_init(GPU_PBVH_Buffers *buffers)
{
/* force flushing to the GPU */
if (buffers->vert_buf->data) {
- GWN_vertbuf_use(buffers->vert_buf);
+ GPU_vertbuf_use(buffers->vert_buf);
}
if (buffers->triangles == NULL) {
- buffers->triangles = GWN_batch_create(
- GWN_PRIM_TRIS, buffers->vert_buf,
+ buffers->triangles = GPU_batch_create(
+ GPU_PRIM_TRIS, buffers->vert_buf,
/* can be NULL */
buffers->index_buf);
}
if ((buffers->triangles_fast == NULL) && buffers->index_buf_fast) {
- buffers->triangles_fast = GWN_batch_create(
- GWN_PRIM_TRIS, buffers->vert_buf,
+ buffers->triangles_fast = GPU_batch_create(
+ GPU_PRIM_TRIS, buffers->vert_buf,
/* can be NULL */
buffers->index_buf_fast);
}
@@ -245,8 +245,8 @@ void GPU_pbvh_mesh_buffers_update(
if (buffers->smooth) {
for (uint i = 0; i < totvert; ++i) {
const MVert *v = &mvert[vert_indices[i]];
- GWN_vertbuf_attr_set(buffers->vert_buf, g_vbo_id.pos, i, v->co);
- GWN_vertbuf_attr_set(buffers->vert_buf, g_vbo_id.nor, i, v->no);
+ GPU_vertbuf_attr_set(buffers->vert_buf, g_vbo_id.pos, i, v->co);
+ GPU_vertbuf_attr_set(buffers->vert_buf, g_vbo_id.nor, i, v->no);
}
for (uint i = 0; i < buffers->face_indices_len; i++) {
@@ -257,10 +257,10 @@ void GPU_pbvh_mesh_buffers_update(
int v_index = buffers->mloop[lt->tri[j]].v;
uchar color_ub[3];
gpu_color_from_mask_copy(vmask[v_index], diffuse_color, color_ub);
- GWN_vertbuf_attr_set(buffers->vert_buf, g_vbo_id.col, vidx, color_ub);
+ GPU_vertbuf_attr_set(buffers->vert_buf, g_vbo_id.col, vidx, color_ub);
}
else {
- GWN_vertbuf_attr_set(buffers->vert_buf, g_vbo_id.col, vidx, diffuse_color_ub);
+ GPU_vertbuf_attr_set(buffers->vert_buf, g_vbo_id.col, vidx, diffuse_color_ub);
}
}
}
@@ -303,9 +303,9 @@ void GPU_pbvh_mesh_buffers_update(
for (uint j = 0; j < 3; j++) {
const MVert *v = &mvert[vtri[j]];
- GWN_vertbuf_attr_set(buffers->vert_buf, g_vbo_id.pos, vbo_index, v->co);
- GWN_vertbuf_attr_set(buffers->vert_buf, g_vbo_id.nor, vbo_index, no);
- GWN_vertbuf_attr_set(buffers->vert_buf, g_vbo_id.col, vbo_index, color_ub);
+ GPU_vertbuf_attr_set(buffers->vert_buf, g_vbo_id.pos, vbo_index, v->co);
+ GPU_vertbuf_attr_set(buffers->vert_buf, g_vbo_id.nor, vbo_index, no);
+ GPU_vertbuf_attr_set(buffers->vert_buf, g_vbo_id.col, vbo_index, color_ub);
vbo_index++;
}
@@ -367,8 +367,8 @@ GPU_PBVH_Buffers *GPU_pbvh_mesh_buffers_build(
if (buffers->smooth) {
/* Fill the triangle buffer */
buffers->index_buf = NULL;
- Gwn_IndexBufBuilder elb;
- GWN_indexbuf_init(&elb, GWN_PRIM_TRIS, tottri, INT_MAX);
+ GPUIndexBufBuilder elb;
+ GPU_indexbuf_init(&elb, GPU_PRIM_TRIS, tottri, INT_MAX);
for (i = 0; i < face_indices_len; ++i) {
const MLoopTri *lt = &looptri[face_indices[i]];
@@ -377,13 +377,13 @@ GPU_PBVH_Buffers *GPU_pbvh_mesh_buffers_build(
if (paint_is_face_hidden(lt, mvert, mloop))
continue;
- GWN_indexbuf_add_tri_verts(&elb, UNPACK3(face_vert_indices[i]));
+ GPU_indexbuf_add_tri_verts(&elb, UNPACK3(face_vert_indices[i]));
}
- buffers->index_buf = GWN_indexbuf_build(&elb);
+ buffers->index_buf = GPU_indexbuf_build(&elb);
}
else {
if (!buffers->is_index_buf_global) {
- GWN_INDEXBUF_DISCARD_SAFE(buffers->index_buf);
+ GPU_INDEXBUF_DISCARD_SAFE(buffers->index_buf);
}
buffers->index_buf = NULL;
buffers->is_index_buf_global = false;
@@ -438,12 +438,12 @@ void GPU_pbvh_grid_buffers_update(
for (y = 0; y < key->grid_size; y++) {
for (x = 0; x < key->grid_size; x++) {
CCGElem *elem = CCG_grid_elem(key, grid, x, y);
- GWN_vertbuf_attr_set(buffers->vert_buf, g_vbo_id.pos, vbo_index, CCG_elem_co(key, elem));
+ GPU_vertbuf_attr_set(buffers->vert_buf, g_vbo_id.pos, vbo_index, CCG_elem_co(key, elem));
if (buffers->smooth) {
short no_short[3];
normal_float_to_short_v3(no_short, CCG_elem_no(key, elem));
- GWN_vertbuf_attr_set(buffers->vert_buf, g_vbo_id.nor, vbo_index, no_short);
+ GPU_vertbuf_attr_set(buffers->vert_buf, g_vbo_id.nor, vbo_index, no_short);
if (has_mask) {
uchar color_ub[3];
@@ -454,7 +454,7 @@ void GPU_pbvh_grid_buffers_update(
else {
unit_float_to_uchar_clamp_v3(color_ub, diffuse_color);
}
- GWN_vertbuf_attr_set(buffers->vert_buf, g_vbo_id.col, vbo_index, color_ub);
+ GPU_vertbuf_attr_set(buffers->vert_buf, g_vbo_id.col, vbo_index, color_ub);
}
}
vbo_index += 1;
@@ -481,7 +481,7 @@ void GPU_pbvh_grid_buffers_update(
vbo_index = vbo_index_offset + ((j + 1) * key->grid_size + k);
short no_short[3];
normal_float_to_short_v3(no_short, fno);
- GWN_vertbuf_attr_set(buffers->vert_buf, g_vbo_id.nor, vbo_index, no_short);
+ GPU_vertbuf_attr_set(buffers->vert_buf, g_vbo_id.nor, vbo_index, no_short);
if (has_mask) {
uchar color_ub[3];
@@ -497,7 +497,7 @@ void GPU_pbvh_grid_buffers_update(
else {
unit_float_to_uchar_clamp_v3(color_ub, diffuse_color);
}
- GWN_vertbuf_attr_set(buffers->vert_buf, g_vbo_id.col, vbo_index, color_ub);
+ GPU_vertbuf_attr_set(buffers->vert_buf, g_vbo_id.col, vbo_index, color_ub);
}
}
}
@@ -526,9 +526,9 @@ void GPU_pbvh_grid_buffers_update(
int offset = 0; \
int i, j, k; \
\
- Gwn_IndexBufBuilder elb; \
- GWN_indexbuf_init( \
- &elb, GWN_PRIM_TRIS, tot_quad_ * 2, max_vert_); \
+ GPUIndexBufBuilder elb; \
+ GPU_indexbuf_init( \
+ &elb, GPU_PRIM_TRIS, tot_quad_ * 2, max_vert_); \
\
/* Fill the buffer */ \
for (i = 0; i < totgrid; ++i) { \
@@ -544,25 +544,25 @@ void GPU_pbvh_grid_buffers_update(
{ \
continue; \
} \
- GWN_indexbuf_add_generic_vert(&elb, offset + j * gridsize + k + 1); \
- GWN_indexbuf_add_generic_vert(&elb, offset + j * gridsize + k); \
- GWN_indexbuf_add_generic_vert(&elb, offset + (j + 1) * gridsize + k); \
+ GPU_indexbuf_add_generic_vert(&elb, offset + j * gridsize + k + 1); \
+ GPU_indexbuf_add_generic_vert(&elb, offset + j * gridsize + k); \
+ GPU_indexbuf_add_generic_vert(&elb, offset + (j + 1) * gridsize + k); \
\
- GWN_indexbuf_add_generic_vert(&elb, offset + (j + 1) * gridsize + k + 1); \
- GWN_indexbuf_add_generic_vert(&elb, offset + j * gridsize + k + 1); \
- GWN_indexbuf_add_generic_vert(&elb, offset + (j + 1) * gridsize + k); \
+ GPU_indexbuf_add_generic_vert(&elb, offset + (j + 1) * gridsize + k + 1); \
+ GPU_indexbuf_add_generic_vert(&elb, offset + j * gridsize + k + 1); \
+ GPU_indexbuf_add_generic_vert(&elb, offset + (j + 1) * gridsize + k); \
} \
} \
\
offset += gridsize * gridsize; \
} \
- buffer_ = GWN_indexbuf_build(&elb); \
+ buffer_ = GPU_indexbuf_build(&elb); \
} (void)0
/* end FILL_QUAD_BUFFER */
-static Gwn_IndexBuf *gpu_get_grid_buffer(
+static GPUIndexBuf *gpu_get_grid_buffer(
int gridsize, unsigned *totquad, GridCommonGPUBuffer **grid_common_gpu_buffer,
- /* remove this arg when gawain gets base-vertex support! */
+ /* remove this arg when GPU gets base-vertex support! */
int totgrid)
{
/* used in the FILL_QUAD_BUFFER macro */
@@ -586,7 +586,7 @@ static Gwn_IndexBuf *gpu_get_grid_buffer(
}
/* we can't reuse old, delete the existing buffer */
else if (gridbuff->mres_buffer) {
- GWN_indexbuf_discard(gridbuff->mres_buffer);
+ GPU_indexbuf_discard(gridbuff->mres_buffer);
gridbuff->mres_buffer = NULL;
}
@@ -603,17 +603,17 @@ static Gwn_IndexBuf *gpu_get_grid_buffer(
#define FILL_FAST_BUFFER() \
{ \
- Gwn_IndexBufBuilder elb; \
- GWN_indexbuf_init(&elb, GWN_PRIM_TRIS, 6 * totgrid, INT_MAX); \
+ GPUIndexBufBuilder elb; \
+ GPU_indexbuf_init(&elb, GPU_PRIM_TRIS, 6 * totgrid, INT_MAX); \
for (int i = 0; i < totgrid; i++) { \
- GWN_indexbuf_add_generic_vert(&elb, i * gridsize * gridsize + gridsize - 1); \
- GWN_indexbuf_add_generic_vert(&elb, i * gridsize * gridsize); \
- GWN_indexbuf_add_generic_vert(&elb, (i + 1) * gridsize * gridsize - gridsize); \
- GWN_indexbuf_add_generic_vert(&elb, (i + 1) * gridsize * gridsize - 1); \
- GWN_indexbuf_add_generic_vert(&elb, i * gridsize * gridsize + gridsize - 1); \
- GWN_indexbuf_add_generic_vert(&elb, (i + 1) * gridsize * gridsize - gridsize); \
+ GPU_indexbuf_add_generic_vert(&elb, i * gridsize * gridsize + gridsize - 1); \
+ GPU_indexbuf_add_generic_vert(&elb, i * gridsize * gridsize); \
+ GPU_indexbuf_add_generic_vert(&elb, (i + 1) * gridsize * gridsize - gridsize); \
+ GPU_indexbuf_add_generic_vert(&elb, (i + 1) * gridsize * gridsize - 1); \
+ GPU_indexbuf_add_generic_vert(&elb, i * gridsize * gridsize + gridsize - 1); \
+ GPU_indexbuf_add_generic_vert(&elb, (i + 1) * gridsize * gridsize - gridsize); \
} \
- buffers->index_buf_fast = GWN_indexbuf_build(&elb); \
+ buffers->index_buf_fast = GPU_indexbuf_build(&elb); \
} (void)0
GPU_PBVH_Buffers *GPU_pbvh_grid_buffers_build(
@@ -684,7 +684,7 @@ GPU_PBVH_Buffers *GPU_pbvh_grid_buffers_build(
*/
static void gpu_bmesh_vert_to_buffer_copy__gwn(
BMVert *v,
- Gwn_VertBuf *vert_buf,
+ GPUVertBuf *vert_buf,
int *v_index,
const float fno[3],
const float *fmask,
@@ -695,12 +695,12 @@ static void gpu_bmesh_vert_to_buffer_copy__gwn(
if (!BM_elem_flag_test(v, BM_ELEM_HIDDEN)) {
/* Set coord, normal, and mask */
- GWN_vertbuf_attr_set(vert_buf, g_vbo_id.pos, *v_index, v->co);
+ GPU_vertbuf_attr_set(vert_buf, g_vbo_id.pos, *v_index, v->co);
{
short no_short[3];
normal_float_to_short_v3(no_short, fno ? fno : v->no);
- GWN_vertbuf_attr_set(vert_buf, g_vbo_id.nor, *v_index, no_short);
+ GPU_vertbuf_attr_set(vert_buf, g_vbo_id.nor, *v_index, no_short);
}
{
@@ -718,7 +718,7 @@ static void gpu_bmesh_vert_to_buffer_copy__gwn(
effective_mask,
diffuse_color,
color_ub);
- GWN_vertbuf_attr_set(vert_buf, g_vbo_id.col, *v_index, color_ub);
+ GPU_vertbuf_attr_set(vert_buf, g_vbo_id.col, *v_index, color_ub);
}
/* Assign index for use in the triangle index buffer */
@@ -792,7 +792,7 @@ void GPU_pbvh_bmesh_buffers_update(
if (buffers->smooth) {
/* Smooth needs to recreate index buffer, so we have to invalidate the batch. */
- GWN_BATCH_DISCARD_SAFE(buffers->triangles);
+ GPU_BATCH_DISCARD_SAFE(buffers->triangles);
/* Count visible vertices */
totvert = gpu_bmesh_vert_visible_count(bm_unique_verts, bm_other_verts);
}
@@ -893,8 +893,8 @@ void GPU_pbvh_bmesh_buffers_update(
if (buffers->smooth) {
/* Fill the triangle buffer */
buffers->index_buf = NULL;
- Gwn_IndexBufBuilder elb;
- GWN_indexbuf_init(&elb, GWN_PRIM_TRIS, tottri, maxvert);
+ GPUIndexBufBuilder elb;
+ GPU_indexbuf_init(&elb, GPU_PRIM_TRIS, tottri, maxvert);
/* Initialize triangle index buffer */
buffers->is_index_buf_global = false;
@@ -911,7 +911,7 @@ void GPU_pbvh_bmesh_buffers_update(
BMVert *v[3];
BM_face_as_array_vert_tri(f, v);
- GWN_indexbuf_add_tri_verts(
+ GPU_indexbuf_add_tri_verts(
&elb, BM_elem_index_get(v[0]), BM_elem_index_get(v[1]), BM_elem_index_get(v[2]));
}
}
@@ -919,16 +919,16 @@ void GPU_pbvh_bmesh_buffers_update(
buffers->tot_tri = tottri;
if (buffers->index_buf == NULL) {
- buffers->index_buf = GWN_indexbuf_build(&elb);
+ buffers->index_buf = GPU_indexbuf_build(&elb);
}
else {
- GWN_indexbuf_build_in_place(&elb, buffers->index_buf);
+ GPU_indexbuf_build_in_place(&elb, buffers->index_buf);
}
}
}
else if (buffers->index_buf) {
if (!buffers->is_index_buf_global) {
- GWN_INDEXBUF_DISCARD_SAFE(buffers->index_buf);
+ GPU_INDEXBUF_DISCARD_SAFE(buffers->index_buf);
}
buffers->index_buf = NULL;
buffers->is_index_buf_global = false;
@@ -950,7 +950,7 @@ GPU_PBVH_Buffers *GPU_pbvh_bmesh_buffers_build(bool smooth_shading)
return buffers;
}
-Gwn_Batch *GPU_pbvh_buffers_batch_get(GPU_PBVH_Buffers *buffers, bool fast)
+GPUBatch *GPU_pbvh_buffers_batch_get(GPU_PBVH_Buffers *buffers, bool fast)
{
return (fast && buffers->triangles_fast) ?
buffers->triangles_fast : buffers->triangles;
@@ -1003,13 +1003,13 @@ bool GPU_pbvh_buffers_mask_changed(GPU_PBVH_Buffers *buffers, bool show_mask)
void GPU_pbvh_buffers_free(GPU_PBVH_Buffers *buffers)
{
if (buffers) {
- GWN_BATCH_DISCARD_SAFE(buffers->triangles);
- GWN_BATCH_DISCARD_SAFE(buffers->triangles_fast);
+ GPU_BATCH_DISCARD_SAFE(buffers->triangles);
+ GPU_BATCH_DISCARD_SAFE(buffers->triangles_fast);
if (!buffers->is_index_buf_global) {
- GWN_INDEXBUF_DISCARD_SAFE(buffers->index_buf);
+ GPU_INDEXBUF_DISCARD_SAFE(buffers->index_buf);
}
- GWN_INDEXBUF_DISCARD_SAFE(buffers->index_buf_fast);
- GWN_VERTBUF_DISCARD_SAFE(buffers->vert_buf);
+ GPU_INDEXBUF_DISCARD_SAFE(buffers->index_buf_fast);
+ GPU_VERTBUF_DISCARD_SAFE(buffers->vert_buf);
#ifdef USE_BASE_ELEM
if (buffers->baseelemarray)
@@ -1029,7 +1029,7 @@ void GPU_pbvh_multires_buffers_free(GridCommonGPUBuffer **grid_common_gpu_buffer
if (gridbuff) {
if (gridbuff->mres_buffer) {
BLI_mutex_lock(&buffer_mutex);
- GWN_INDEXBUF_DISCARD_SAFE(gridbuff->mres_buffer);
+ GPU_INDEXBUF_DISCARD_SAFE(gridbuff->mres_buffer);
BLI_mutex_unlock(&buffer_mutex);
}
MEM_freeN(gridbuff);
@@ -1049,7 +1049,7 @@ void GPU_pbvh_BB_draw(float min[3], float max[3], bool leaf, unsigned int pos)
* could keep a static batch & index buffer, change the VBO contents per draw
*/
- immBegin(GWN_PRIM_LINES, 24);
+ immBegin(GPU_PRIM_LINES, 24);
/* top */
immVertex3f(pos, min[0], min[1], max[2]);
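
For reference, a minimal sketch of the renamed immediate-mode calls that GPU_pbvh_BB_draw relies on above; `pos` is assumed to be a valid 3D float attribute id and a bound GL context is assumed (hypothetical helper, not part of this commit):

static void draw_bb_edge(uint pos, const float min[3], const float max[3])
{
	immBegin(GPU_PRIM_LINES, 2);  /* was GWN_PRIM_LINES before this commit */
	immVertex3f(pos, min[0], min[1], min[2]);
	immVertex3f(pos, max[0], max[1], max[2]);
	immEnd();
}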
diff --git a/source/blender/gpu/intern/gpu_codegen.c b/source/blender/gpu/intern/gpu_codegen.c
index a450b551d4a..51f21d01a9f 100644
--- a/source/blender/gpu/intern/gpu_codegen.c
+++ b/source/blender/gpu/intern/gpu_codegen.c
@@ -917,7 +917,7 @@ static const char *attrib_prefix_get(CustomDataType type)
case CD_TANGENT: return "t";
case CD_MCOL: return "c";
case CD_AUTO_FROM_NAME: return "a";
- default: BLI_assert(false && "Gwn_VertAttr Prefix type not found : This should not happen!"); return "";
+ default: BLI_assert(false && "GPUVertAttr Prefix type not found : This should not happen!"); return "";
}
}
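
The prefix table above maps a CustomData layer type to a one-letter GLSL attribute prefix. A hedged sketch of how such a prefix could be combined with a layer name inside this file (the helper and buffer size are hypothetical, only attrib_prefix_get() is from the patch):

#include <stdio.h>

static void attrib_glsl_name(char out[64], CustomDataType type, const char *layer)
{
	/* e.g. CD_TANGENT + "UVMap" -> "tUVMap" */
	snprintf(out, 64, "%s%s", attrib_prefix_get(type), layer);
}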
diff --git a/source/blender/gpu/intern/gpu_element.c b/source/blender/gpu/intern/gpu_element.c
index 596530a6ff4..1b5a08ac35c 100644
--- a/source/blender/gpu/intern/gpu_element.c
+++ b/source/blender/gpu/intern/gpu_element.c
@@ -23,10 +23,10 @@
* ***** END GPL LICENSE BLOCK *****
*/
-/** \file blender/gpu/intern/gwn_element.c
+/** \file blender/gpu/intern/gpu_element.c
* \ingroup gpu
*
- * Gawain element list (AKA index buffer)
+ * GPU element list (AKA index buffer)
*/
#include "GPU_element.h"
@@ -36,23 +36,23 @@
#define KEEP_SINGLE_COPY 1
-static GLenum convert_index_type_to_gl(Gwn_IndexBufType type)
+static GLenum convert_index_type_to_gl(GPUIndexBufType type)
{
static const GLenum table[] = {
- [GWN_INDEX_U8] = GL_UNSIGNED_BYTE, /* GL has this, Vulkan does not */
- [GWN_INDEX_U16] = GL_UNSIGNED_SHORT,
- [GWN_INDEX_U32] = GL_UNSIGNED_INT
+ [GPU_INDEX_U8] = GL_UNSIGNED_BYTE, /* GL has this, Vulkan does not */
+ [GPU_INDEX_U16] = GL_UNSIGNED_SHORT,
+ [GPU_INDEX_U32] = GL_UNSIGNED_INT
};
return table[type];
}
-uint GWN_indexbuf_size_get(const Gwn_IndexBuf* elem)
+uint GPU_indexbuf_size_get(const GPUIndexBuf* elem)
{
-#if GWN_TRACK_INDEX_RANGE
+#if GPU_TRACK_INDEX_RANGE
static const uint table[] = {
- [GWN_INDEX_U8] = sizeof(GLubyte), /* GL has this, Vulkan does not */
- [GWN_INDEX_U16] = sizeof(GLushort),
- [GWN_INDEX_U32] = sizeof(GLuint)
+ [GPU_INDEX_U8] = sizeof(GLubyte), /* GL has this, Vulkan does not */
+ [GPU_INDEX_U16] = sizeof(GLushort),
+ [GPU_INDEX_U32] = sizeof(GLuint)
};
return elem->index_len * table[elem->index_type];
#else
@@ -60,8 +60,8 @@ uint GWN_indexbuf_size_get(const Gwn_IndexBuf* elem)
#endif
}
-void GWN_indexbuf_init_ex(
- Gwn_IndexBufBuilder* builder, Gwn_PrimType prim_type,
+void GPU_indexbuf_init_ex(
+ GPUIndexBufBuilder* builder, GPUPrimType prim_type,
uint index_len, uint vertex_len, bool use_prim_restart)
{
builder->use_prim_restart = use_prim_restart;
@@ -72,20 +72,20 @@ void GWN_indexbuf_init_ex(
builder->data = calloc(builder->max_index_len, sizeof(uint));
}
-void GWN_indexbuf_init(Gwn_IndexBufBuilder* builder, Gwn_PrimType prim_type, uint prim_len, uint vertex_len)
+void GPU_indexbuf_init(GPUIndexBufBuilder* builder, GPUPrimType prim_type, uint prim_len, uint vertex_len)
{
uint verts_per_prim = 0;
switch (prim_type) {
- case GWN_PRIM_POINTS:
+ case GPU_PRIM_POINTS:
verts_per_prim = 1;
break;
- case GWN_PRIM_LINES:
+ case GPU_PRIM_LINES:
verts_per_prim = 2;
break;
- case GWN_PRIM_TRIS:
+ case GPU_PRIM_TRIS:
verts_per_prim = 3;
break;
- case GWN_PRIM_LINES_ADJ:
+ case GPU_PRIM_LINES_ADJ:
verts_per_prim = 4;
break;
default:
@@ -95,10 +95,10 @@ void GWN_indexbuf_init(Gwn_IndexBufBuilder* builder, Gwn_PrimType prim_type, uin
return;
}
- GWN_indexbuf_init_ex(builder, prim_type, prim_len * verts_per_prim, vertex_len, false);
+ GPU_indexbuf_init_ex(builder, prim_type, prim_len * verts_per_prim, vertex_len, false);
}
-void GWN_indexbuf_add_generic_vert(Gwn_IndexBufBuilder* builder, uint v)
+void GPU_indexbuf_add_generic_vert(GPUIndexBufBuilder* builder, uint v)
{
#if TRUST_NO_ONE
assert(builder->data != NULL);
@@ -108,58 +108,58 @@ void GWN_indexbuf_add_generic_vert(Gwn_IndexBufBuilder* builder, uint v)
builder->data[builder->index_len++] = v;
}
-void GWN_indexbuf_add_primitive_restart(Gwn_IndexBufBuilder* builder)
+void GPU_indexbuf_add_primitive_restart(GPUIndexBufBuilder* builder)
{
#if TRUST_NO_ONE
assert(builder->data != NULL);
assert(builder->index_len < builder->max_index_len);
assert(builder->use_prim_restart);
#endif
- builder->data[builder->index_len++] = GWN_PRIM_RESTART;
+ builder->data[builder->index_len++] = GPU_PRIM_RESTART;
}
-void GWN_indexbuf_add_point_vert(Gwn_IndexBufBuilder* builder, uint v)
+void GPU_indexbuf_add_point_vert(GPUIndexBufBuilder* builder, uint v)
{
#if TRUST_NO_ONE
- assert(builder->prim_type == GWN_PRIM_POINTS);
+ assert(builder->prim_type == GPU_PRIM_POINTS);
#endif
- GWN_indexbuf_add_generic_vert(builder, v);
+ GPU_indexbuf_add_generic_vert(builder, v);
}
-void GWN_indexbuf_add_line_verts(Gwn_IndexBufBuilder* builder, uint v1, uint v2)
+void GPU_indexbuf_add_line_verts(GPUIndexBufBuilder* builder, uint v1, uint v2)
{
#if TRUST_NO_ONE
- assert(builder->prim_type == GWN_PRIM_LINES);
+ assert(builder->prim_type == GPU_PRIM_LINES);
assert(v1 != v2);
#endif
- GWN_indexbuf_add_generic_vert(builder, v1);
- GWN_indexbuf_add_generic_vert(builder, v2);
+ GPU_indexbuf_add_generic_vert(builder, v1);
+ GPU_indexbuf_add_generic_vert(builder, v2);
}
-void GWN_indexbuf_add_tri_verts(Gwn_IndexBufBuilder* builder, uint v1, uint v2, uint v3)
+void GPU_indexbuf_add_tri_verts(GPUIndexBufBuilder* builder, uint v1, uint v2, uint v3)
{
#if TRUST_NO_ONE
- assert(builder->prim_type == GWN_PRIM_TRIS);
+ assert(builder->prim_type == GPU_PRIM_TRIS);
assert(v1 != v2 && v2 != v3 && v3 != v1);
#endif
- GWN_indexbuf_add_generic_vert(builder, v1);
- GWN_indexbuf_add_generic_vert(builder, v2);
- GWN_indexbuf_add_generic_vert(builder, v3);
+ GPU_indexbuf_add_generic_vert(builder, v1);
+ GPU_indexbuf_add_generic_vert(builder, v2);
+ GPU_indexbuf_add_generic_vert(builder, v3);
}
-void GWN_indexbuf_add_line_adj_verts(Gwn_IndexBufBuilder* builder, uint v1, uint v2, uint v3, uint v4)
+void GPU_indexbuf_add_line_adj_verts(GPUIndexBufBuilder* builder, uint v1, uint v2, uint v3, uint v4)
{
#if TRUST_NO_ONE
- assert(builder->prim_type == GWN_PRIM_LINES_ADJ);
+ assert(builder->prim_type == GPU_PRIM_LINES_ADJ);
	assert(v2 != v3); /* only the line needs differing indices */
#endif
- GWN_indexbuf_add_generic_vert(builder, v1);
- GWN_indexbuf_add_generic_vert(builder, v2);
- GWN_indexbuf_add_generic_vert(builder, v3);
- GWN_indexbuf_add_generic_vert(builder, v4);
+ GPU_indexbuf_add_generic_vert(builder, v1);
+ GPU_indexbuf_add_generic_vert(builder, v2);
+ GPU_indexbuf_add_generic_vert(builder, v3);
+ GPU_indexbuf_add_generic_vert(builder, v4);
}
-#if GWN_TRACK_INDEX_RANGE
+#if GPU_TRACK_INDEX_RANGE
/* Everything remains 32 bit while building to keep things simple.
* Find min/max after, then convert to smallest index type possible. */
@@ -174,7 +174,7 @@ static uint index_range(const uint values[], uint value_len, uint* min_out, uint
uint max_value = values[0];
for (uint i = 1; i < value_len; ++i) {
const uint value = values[i];
- if (value == GWN_PRIM_RESTART)
+ if (value == GPU_PRIM_RESTART)
continue;
else if (value < min_value)
min_value = value;
@@ -186,7 +186,7 @@ static uint index_range(const uint values[], uint value_len, uint* min_out, uint
return max_value - min_value;
}
-static void squeeze_indices_byte(Gwn_IndexBufBuilder *builder, Gwn_IndexBuf* elem)
+static void squeeze_indices_byte(GPUIndexBufBuilder *builder, GPUIndexBuf* elem)
{
const uint *values = builder->data;
const uint index_len = elem->index_len;
@@ -201,7 +201,7 @@ static void squeeze_indices_byte(Gwn_IndexBufBuilder *builder, Gwn_IndexBuf* ele
elem->min_index = 0;
elem->max_index -= base;
for (uint i = 0; i < index_len; ++i) {
- data[i] = (values[i] == GWN_PRIM_RESTART) ? 0xFF : (GLubyte)(values[i] - base);
+ data[i] = (values[i] == GPU_PRIM_RESTART) ? 0xFF : (GLubyte)(values[i] - base);
}
}
else {
@@ -212,7 +212,7 @@ static void squeeze_indices_byte(Gwn_IndexBufBuilder *builder, Gwn_IndexBuf* ele
}
}
-static void squeeze_indices_short(Gwn_IndexBufBuilder *builder, Gwn_IndexBuf* elem)
+static void squeeze_indices_short(GPUIndexBufBuilder *builder, GPUIndexBuf* elem)
{
const uint *values = builder->data;
const uint index_len = elem->index_len;
@@ -227,7 +227,7 @@ static void squeeze_indices_short(Gwn_IndexBufBuilder *builder, Gwn_IndexBuf* el
elem->min_index = 0;
elem->max_index -= base;
for (uint i = 0; i < index_len; ++i) {
- data[i] = (values[i] == GWN_PRIM_RESTART) ? 0xFFFF : (GLushort)(values[i] - base);
+ data[i] = (values[i] == GPU_PRIM_RESTART) ? 0xFFFF : (GLushort)(values[i] - base);
}
}
else {
@@ -238,16 +238,16 @@ static void squeeze_indices_short(Gwn_IndexBufBuilder *builder, Gwn_IndexBuf* el
}
}
-#endif /* GWN_TRACK_INDEX_RANGE */
+#endif /* GPU_TRACK_INDEX_RANGE */
-Gwn_IndexBuf* GWN_indexbuf_build(Gwn_IndexBufBuilder* builder)
+GPUIndexBuf* GPU_indexbuf_build(GPUIndexBufBuilder* builder)
{
- Gwn_IndexBuf* elem = calloc(1, sizeof(Gwn_IndexBuf));
- GWN_indexbuf_build_in_place(builder, elem);
+ GPUIndexBuf* elem = calloc(1, sizeof(GPUIndexBuf));
+ GPU_indexbuf_build_in_place(builder, elem);
return elem;
}
-void GWN_indexbuf_build_in_place(Gwn_IndexBufBuilder* builder, Gwn_IndexBuf* elem)
+void GPU_indexbuf_build_in_place(GPUIndexBufBuilder* builder, GPUIndexBuf* elem)
{
#if TRUST_NO_ONE
assert(builder->data != NULL);
@@ -255,7 +255,7 @@ void GWN_indexbuf_build_in_place(Gwn_IndexBufBuilder* builder, Gwn_IndexBuf* ele
elem->index_len = builder->index_len;
elem->use_prim_restart = builder->use_prim_restart;
-#if GWN_TRACK_INDEX_RANGE
+#if GPU_TRACK_INDEX_RANGE
uint range = index_range(builder->data, builder->index_len, &elem->min_index, &elem->max_index);
/* count the primitive restart index. */
@@ -264,29 +264,29 @@ void GWN_indexbuf_build_in_place(Gwn_IndexBufBuilder* builder, Gwn_IndexBuf* ele
}
if (range <= 0xFF) {
- elem->index_type = GWN_INDEX_U8;
+ elem->index_type = GPU_INDEX_U8;
squeeze_indices_byte(builder, elem);
}
else if (range <= 0xFFFF) {
- elem->index_type = GWN_INDEX_U16;
+ elem->index_type = GPU_INDEX_U16;
squeeze_indices_short(builder, elem);
}
else {
- elem->index_type = GWN_INDEX_U32;
+ elem->index_type = GPU_INDEX_U32;
elem->base_index = 0;
}
elem->gl_index_type = convert_index_type_to_gl(elem->index_type);
#endif
if (elem->vbo_id == 0) {
- elem->vbo_id = GWN_buf_id_alloc();
+ elem->vbo_id = GPU_buf_id_alloc();
}
/* send data to GPU */
/* GL_ELEMENT_ARRAY_BUFFER changes the state of the last VAO bound,
* so we use the GL_ARRAY_BUFFER here to create a buffer without
* interfering in the VAO state. */
glBindBuffer(GL_ARRAY_BUFFER, elem->vbo_id);
- glBufferData(GL_ARRAY_BUFFER, GWN_indexbuf_size_get(elem), builder->data, GL_STATIC_DRAW);
+ glBufferData(GL_ARRAY_BUFFER, GPU_indexbuf_size_get(elem), builder->data, GL_STATIC_DRAW);
/* discard builder (one-time use) */
free(builder->data);
@@ -294,15 +294,15 @@ void GWN_indexbuf_build_in_place(Gwn_IndexBufBuilder* builder, Gwn_IndexBuf* ele
/* other fields are safe to leave */
}
-void GWN_indexbuf_use(Gwn_IndexBuf* elem)
+void GPU_indexbuf_use(GPUIndexBuf* elem)
{
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, elem->vbo_id);
}
-void GWN_indexbuf_discard(Gwn_IndexBuf* elem)
+void GPU_indexbuf_discard(GPUIndexBuf* elem)
{
if (elem->vbo_id) {
- GWN_buf_id_free(elem->vbo_id);
+ GPU_buf_id_free(elem->vbo_id);
}
free(elem);
}
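
Taken together, the renamed element-list API builds an index buffer in two phases: record 32-bit indices into a one-time-use builder, then GPU_indexbuf_build() squeezes them to the smallest index type (when GPU_TRACK_INDEX_RANGE is on) and uploads. A minimal sketch, assuming a quad made of two triangles (indices hypothetical):

#include "GPU_element.h"

static GPUIndexBuf *build_quad_ibo(void)
{
	GPUIndexBufBuilder elb;
	/* 2 triangles drawn from at most 4 vertices */
	GPU_indexbuf_init(&elb, GPU_PRIM_TRIS, 2, 4);
	GPU_indexbuf_add_tri_verts(&elb, 0, 1, 2);
	GPU_indexbuf_add_tri_verts(&elb, 2, 1, 3);
	/* uploads to the GPU and frees the builder's CPU-side data */
	return GPU_indexbuf_build(&elb);
}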
diff --git a/source/blender/gpu/intern/gpu_immediate.c b/source/blender/gpu/intern/gpu_immediate.c
index 661594faf39..a320935919a 100644
--- a/source/blender/gpu/intern/gpu_immediate.c
+++ b/source/blender/gpu/intern/gpu_immediate.c
@@ -23,10 +23,10 @@
* ***** END GPL LICENSE BLOCK *****
*/
-/** \file blender/gpu/intern/gwn_immediate.c
+/** \file blender/gpu/intern/gpu_immediate.c
* \ingroup gpu
*
- * Gawain immediate mode work-alike
+ * GPU immediate mode work-alike
*/
#include "UI_resources.h"
@@ -45,14 +45,14 @@
#include <stdlib.h>
/* necessary functions from matrix API */
-extern void GPU_matrix_bind(const Gwn_ShaderInterface*);
+extern void GPU_matrix_bind(const GPUShaderInterface*);
extern bool GPU_matrix_dirty_get(void);
typedef struct {
/* TODO: organize this struct by frequency of change (run-time) */
- Gwn_Batch* batch;
- Gwn_Context* context;
+ GPUBatch* batch;
+ GPUContext* context;
/* current draw call */
GLubyte* buffer_data;
@@ -60,9 +60,9 @@ typedef struct {
uint buffer_bytes_mapped;
uint vertex_len;
bool strict_vertex_len;
- Gwn_PrimType prim_type;
+ GPUPrimType prim_type;
- Gwn_VertFormat vertex_format;
+ GPUVertFormat vertex_format;
/* current vertex */
uint vertex_idx;
@@ -73,8 +73,8 @@ typedef struct {
GLuint vao_id;
GLuint bound_program;
- const Gwn_ShaderInterface* shader_interface;
- Gwn_AttrBinding attrib_binding;
+ const GPUShaderInterface* shader_interface;
+ GPUAttrBinding attrib_binding;
uint16_t prev_enabled_attrib_bits; /* <-- only affects this VAO, so we're ok */
} Immediate;
@@ -91,11 +91,11 @@ void immInit(void)
#endif
memset(&imm, 0, sizeof(Immediate));
- imm.vbo_id = GWN_buf_id_alloc();
+ imm.vbo_id = GPU_buf_id_alloc();
glBindBuffer(GL_ARRAY_BUFFER, imm.vbo_id);
glBufferData(GL_ARRAY_BUFFER, IMM_BUFFER_SIZE, NULL, GL_DYNAMIC_DRAW);
- imm.prim_type = GWN_PRIM_NONE;
+ imm.prim_type = GPU_PRIM_NONE;
imm.strict_vertex_len = true;
glBindBuffer(GL_ARRAY_BUFFER, 0);
@@ -106,38 +106,38 @@ void immActivate(void)
{
#if TRUST_NO_ONE
assert(initialized);
- assert(imm.prim_type == GWN_PRIM_NONE); /* make sure we're not between a Begin/End pair */
+ assert(imm.prim_type == GPU_PRIM_NONE); /* make sure we're not between a Begin/End pair */
assert(imm.vao_id == 0);
#endif
- imm.vao_id = GWN_vao_alloc();
- imm.context = GWN_context_active_get();
+ imm.vao_id = GPU_vao_alloc();
+ imm.context = GPU_context_active_get();
}
void immDeactivate(void)
{
#if TRUST_NO_ONE
assert(initialized);
- assert(imm.prim_type == GWN_PRIM_NONE); /* make sure we're not between a Begin/End pair */
+ assert(imm.prim_type == GPU_PRIM_NONE); /* make sure we're not between a Begin/End pair */
assert(imm.vao_id != 0);
#endif
- GWN_vao_free(imm.vao_id, imm.context);
+ GPU_vao_free(imm.vao_id, imm.context);
imm.vao_id = 0;
imm.prev_enabled_attrib_bits = 0;
}
void immDestroy(void)
{
- GWN_buf_id_free(imm.vbo_id);
+ GPU_buf_id_free(imm.vbo_id);
initialized = false;
}
-Gwn_VertFormat* immVertexFormat(void)
+GPUVertFormat* immVertexFormat(void)
{
- GWN_vertformat_clear(&imm.vertex_format);
+ GPU_vertformat_clear(&imm.vertex_format);
return &imm.vertex_format;
}
-void immBindProgram(GLuint program, const Gwn_ShaderInterface* shaderface)
+void immBindProgram(GLuint program, const GPUShaderInterface* shaderface)
{
#if TRUST_NO_ONE
assert(imm.bound_program == 0);
@@ -173,7 +173,7 @@ void immUnbindProgram(void)
}
#if TRUST_NO_ONE
-static bool vertex_count_makes_sense_for_primitive(uint vertex_len, Gwn_PrimType prim_type)
+static bool vertex_count_makes_sense_for_primitive(uint vertex_len, GPUPrimType prim_type)
{
/* does vertex_len make sense for this primitive type? */
if (vertex_len == 0) {
@@ -181,19 +181,19 @@ static bool vertex_count_makes_sense_for_primitive(uint vertex_len, Gwn_PrimType
}
switch (prim_type) {
- case GWN_PRIM_POINTS:
+ case GPU_PRIM_POINTS:
return true;
- case GWN_PRIM_LINES:
+ case GPU_PRIM_LINES:
return vertex_len % 2 == 0;
- case GWN_PRIM_LINE_STRIP:
- case GWN_PRIM_LINE_LOOP:
+ case GPU_PRIM_LINE_STRIP:
+ case GPU_PRIM_LINE_LOOP:
return vertex_len >= 2;
- case GWN_PRIM_LINE_STRIP_ADJ:
+ case GPU_PRIM_LINE_STRIP_ADJ:
return vertex_len >= 4;
- case GWN_PRIM_TRIS:
+ case GPU_PRIM_TRIS:
return vertex_len % 3 == 0;
- case GWN_PRIM_TRI_STRIP:
- case GWN_PRIM_TRI_FAN:
+ case GPU_PRIM_TRI_STRIP:
+ case GPU_PRIM_TRI_FAN:
return vertex_len >= 3;
default:
return false;
@@ -201,11 +201,11 @@ static bool vertex_count_makes_sense_for_primitive(uint vertex_len, Gwn_PrimType
}
#endif
-void immBegin(Gwn_PrimType prim_type, uint vertex_len)
+void immBegin(GPUPrimType prim_type, uint vertex_len)
{
#if TRUST_NO_ONE
assert(initialized);
- assert(imm.prim_type == GWN_PRIM_NONE); /* make sure we haven't already begun */
+ assert(imm.prim_type == GPU_PRIM_NONE); /* make sure we haven't already begun */
assert(vertex_count_makes_sense_for_primitive(vertex_len, prim_type));
#endif
imm.prim_type = prim_type;
@@ -250,7 +250,7 @@ void immBegin(Gwn_PrimType prim_type, uint vertex_len)
imm.vertex_data = imm.buffer_data;
}
-void immBeginAtMost(Gwn_PrimType prim_type, uint vertex_len)
+void immBeginAtMost(GPUPrimType prim_type, uint vertex_len)
{
#if TRUST_NO_ONE
assert(vertex_len > 0);
@@ -261,11 +261,11 @@ void immBeginAtMost(Gwn_PrimType prim_type, uint vertex_len)
}
-Gwn_Batch* immBeginBatch(Gwn_PrimType prim_type, uint vertex_len)
+GPUBatch* immBeginBatch(GPUPrimType prim_type, uint vertex_len)
{
#if TRUST_NO_ONE
assert(initialized);
- assert(imm.prim_type == GWN_PRIM_NONE); /* make sure we haven't already begun */
+ assert(imm.prim_type == GPU_PRIM_NONE); /* make sure we haven't already begun */
assert(vertex_count_makes_sense_for_primitive(vertex_len, prim_type));
#endif
imm.prim_type = prim_type;
@@ -273,19 +273,19 @@ Gwn_Batch* immBeginBatch(Gwn_PrimType prim_type, uint vertex_len)
imm.vertex_idx = 0;
imm.unassigned_attrib_bits = imm.attrib_binding.enabled_bits;
- Gwn_VertBuf* verts = GWN_vertbuf_create_with_format(&imm.vertex_format);
- GWN_vertbuf_data_alloc(verts, vertex_len);
+ GPUVertBuf* verts = GPU_vertbuf_create_with_format(&imm.vertex_format);
+ GPU_vertbuf_data_alloc(verts, vertex_len);
- imm.buffer_bytes_mapped = GWN_vertbuf_size_get(verts);
+ imm.buffer_bytes_mapped = GPU_vertbuf_size_get(verts);
imm.vertex_data = verts->data;
- imm.batch = GWN_batch_create_ex(prim_type, verts, NULL, GWN_BATCH_OWNS_VBO);
- imm.batch->phase = GWN_BATCH_BUILDING;
+ imm.batch = GPU_batch_create_ex(prim_type, verts, NULL, GPU_BATCH_OWNS_VBO);
+ imm.batch->phase = GPU_BATCH_BUILDING;
return imm.batch;
}
-Gwn_Batch* immBeginBatchAtMost(Gwn_PrimType prim_type, uint vertex_len)
+GPUBatch* immBeginBatchAtMost(GPUPrimType prim_type, uint vertex_len)
{
imm.strict_vertex_len = false;
return immBeginBatch(prim_type, vertex_len);
@@ -298,7 +298,7 @@ static void immDrawSetup(void)
/* enable/disable vertex attribs as needed */
if (imm.attrib_binding.enabled_bits != imm.prev_enabled_attrib_bits) {
- for (uint loc = 0; loc < GWN_VERT_ATTR_MAX_LEN; ++loc) {
+ for (uint loc = 0; loc < GPU_VERT_ATTR_MAX_LEN; ++loc) {
bool is_enabled = imm.attrib_binding.enabled_bits & (1 << loc);
bool was_enabled = imm.prev_enabled_attrib_bits & (1 << loc);
@@ -316,7 +316,7 @@ static void immDrawSetup(void)
const uint stride = imm.vertex_format.stride;
for (uint a_idx = 0; a_idx < imm.vertex_format.attr_len; ++a_idx) {
- const Gwn_VertAttr* a = imm.vertex_format.attribs + a_idx;
+ const GPUVertAttr* a = imm.vertex_format.attribs + a_idx;
const uint offset = imm.buffer_offset + a->offset;
const GLvoid* pointer = (const GLubyte*)0 + offset;
@@ -324,14 +324,14 @@ static void immDrawSetup(void)
const uint loc = read_attrib_location(&imm.attrib_binding, a_idx);
switch (a->fetch_mode) {
- case GWN_FETCH_FLOAT:
- case GWN_FETCH_INT_TO_FLOAT:
+ case GPU_FETCH_FLOAT:
+ case GPU_FETCH_INT_TO_FLOAT:
glVertexAttribPointer(loc, a->comp_len, a->gl_comp_type, GL_FALSE, stride, pointer);
break;
- case GWN_FETCH_INT_TO_FLOAT_UNIT:
+ case GPU_FETCH_INT_TO_FLOAT_UNIT:
glVertexAttribPointer(loc, a->comp_len, a->gl_comp_type, GL_TRUE, stride, pointer);
break;
- case GWN_FETCH_INT:
+ case GPU_FETCH_INT:
glVertexAttribIPointer(loc, a->comp_len, a->gl_comp_type, stride, pointer);
}
}
@@ -344,7 +344,7 @@ static void immDrawSetup(void)
void immEnd(void)
{
#if TRUST_NO_ONE
- assert(imm.prim_type != GWN_PRIM_NONE); /* make sure we're between a Begin/End pair */
+ assert(imm.prim_type != GPU_PRIM_NONE); /* make sure we're between a Begin/End pair */
#endif
uint buffer_bytes_used;
@@ -375,11 +375,11 @@ void immEnd(void)
if (imm.batch) {
if (buffer_bytes_used != imm.buffer_bytes_mapped) {
- GWN_vertbuf_data_resize(imm.batch->verts[0], imm.vertex_len);
+ GPU_vertbuf_data_resize(imm.batch->verts[0], imm.vertex_len);
/* TODO: resize only if vertex count is much smaller */
}
- GWN_batch_program_set(imm.batch, imm.bound_program, imm.shader_interface);
- imm.batch->phase = GWN_BATCH_READY_TO_DRAW;
+ GPU_batch_program_set(imm.batch, imm.bound_program, imm.shader_interface);
+ imm.batch->phase = GPU_BATCH_READY_TO_DRAW;
imm.batch = NULL; /* don't free, batch belongs to caller */
}
else {
@@ -395,7 +395,7 @@ void immEnd(void)
}
/* prep for next immBegin */
- imm.prim_type = GWN_PRIM_NONE;
+ imm.prim_type = GPU_PRIM_NONE;
imm.strict_vertex_len = true;
}
@@ -413,13 +413,13 @@ static void setAttribValueBit(uint attrib_id)
void immAttrib1f(uint attrib_id, float x)
{
- Gwn_VertAttr* attrib = imm.vertex_format.attribs + attrib_id;
+ GPUVertAttr* attrib = imm.vertex_format.attribs + attrib_id;
#if TRUST_NO_ONE
assert(attrib_id < imm.vertex_format.attr_len);
- assert(attrib->comp_type == GWN_COMP_F32);
+ assert(attrib->comp_type == GPU_COMP_F32);
assert(attrib->comp_len == 1);
assert(imm.vertex_idx < imm.vertex_len);
- assert(imm.prim_type != GWN_PRIM_NONE); /* make sure we're between a Begin/End pair */
+ assert(imm.prim_type != GPU_PRIM_NONE); /* make sure we're between a Begin/End pair */
#endif
setAttribValueBit(attrib_id);
@@ -431,13 +431,13 @@ void immAttrib1f(uint attrib_id, float x)
void immAttrib2f(uint attrib_id, float x, float y)
{
- Gwn_VertAttr* attrib = imm.vertex_format.attribs + attrib_id;
+ GPUVertAttr* attrib = imm.vertex_format.attribs + attrib_id;
#if TRUST_NO_ONE
assert(attrib_id < imm.vertex_format.attr_len);
- assert(attrib->comp_type == GWN_COMP_F32);
+ assert(attrib->comp_type == GPU_COMP_F32);
assert(attrib->comp_len == 2);
assert(imm.vertex_idx < imm.vertex_len);
- assert(imm.prim_type != GWN_PRIM_NONE); /* make sure we're between a Begin/End pair */
+ assert(imm.prim_type != GPU_PRIM_NONE); /* make sure we're between a Begin/End pair */
#endif
setAttribValueBit(attrib_id);
@@ -450,13 +450,13 @@ void immAttrib2f(uint attrib_id, float x, float y)
void immAttrib3f(uint attrib_id, float x, float y, float z)
{
- Gwn_VertAttr* attrib = imm.vertex_format.attribs + attrib_id;
+ GPUVertAttr* attrib = imm.vertex_format.attribs + attrib_id;
#if TRUST_NO_ONE
assert(attrib_id < imm.vertex_format.attr_len);
- assert(attrib->comp_type == GWN_COMP_F32);
+ assert(attrib->comp_type == GPU_COMP_F32);
assert(attrib->comp_len == 3);
assert(imm.vertex_idx < imm.vertex_len);
- assert(imm.prim_type != GWN_PRIM_NONE); /* make sure we're between a Begin/End pair */
+ assert(imm.prim_type != GPU_PRIM_NONE); /* make sure we're between a Begin/End pair */
#endif
setAttribValueBit(attrib_id);
@@ -470,13 +470,13 @@ void immAttrib3f(uint attrib_id, float x, float y, float z)
void immAttrib4f(uint attrib_id, float x, float y, float z, float w)
{
- Gwn_VertAttr* attrib = imm.vertex_format.attribs + attrib_id;
+ GPUVertAttr* attrib = imm.vertex_format.attribs + attrib_id;
#if TRUST_NO_ONE
assert(attrib_id < imm.vertex_format.attr_len);
- assert(attrib->comp_type == GWN_COMP_F32);
+ assert(attrib->comp_type == GPU_COMP_F32);
assert(attrib->comp_len == 4);
assert(imm.vertex_idx < imm.vertex_len);
- assert(imm.prim_type != GWN_PRIM_NONE); /* make sure we're between a Begin/End pair */
+ assert(imm.prim_type != GPU_PRIM_NONE); /* make sure we're between a Begin/End pair */
#endif
setAttribValueBit(attrib_id);
@@ -491,13 +491,13 @@ void immAttrib4f(uint attrib_id, float x, float y, float z, float w)
void immAttrib1u(uint attrib_id, uint x)
{
- Gwn_VertAttr* attrib = imm.vertex_format.attribs + attrib_id;
+ GPUVertAttr* attrib = imm.vertex_format.attribs + attrib_id;
#if TRUST_NO_ONE
assert(attrib_id < imm.vertex_format.attr_len);
- assert(attrib->comp_type == GWN_COMP_U32);
+ assert(attrib->comp_type == GPU_COMP_U32);
assert(attrib->comp_len == 1);
assert(imm.vertex_idx < imm.vertex_len);
- assert(imm.prim_type != GWN_PRIM_NONE); /* make sure we're between a Begin/End pair */
+ assert(imm.prim_type != GPU_PRIM_NONE); /* make sure we're between a Begin/End pair */
#endif
setAttribValueBit(attrib_id);
@@ -508,13 +508,13 @@ void immAttrib1u(uint attrib_id, uint x)
void immAttrib2i(uint attrib_id, int x, int y)
{
- Gwn_VertAttr* attrib = imm.vertex_format.attribs + attrib_id;
+ GPUVertAttr* attrib = imm.vertex_format.attribs + attrib_id;
#if TRUST_NO_ONE
assert(attrib_id < imm.vertex_format.attr_len);
- assert(attrib->comp_type == GWN_COMP_I32);
+ assert(attrib->comp_type == GPU_COMP_I32);
assert(attrib->comp_len == 2);
assert(imm.vertex_idx < imm.vertex_len);
- assert(imm.prim_type != GWN_PRIM_NONE); /* make sure we're between a Begin/End pair */
+ assert(imm.prim_type != GPU_PRIM_NONE); /* make sure we're between a Begin/End pair */
#endif
setAttribValueBit(attrib_id);
@@ -526,13 +526,13 @@ void immAttrib2i(uint attrib_id, int x, int y)
void immAttrib2s(uint attrib_id, short x, short y)
{
- Gwn_VertAttr* attrib = imm.vertex_format.attribs + attrib_id;
+ GPUVertAttr* attrib = imm.vertex_format.attribs + attrib_id;
#if TRUST_NO_ONE
assert(attrib_id < imm.vertex_format.attr_len);
- assert(attrib->comp_type == GWN_COMP_I16);
+ assert(attrib->comp_type == GPU_COMP_I16);
assert(attrib->comp_len == 2);
assert(imm.vertex_idx < imm.vertex_len);
- assert(imm.prim_type != GWN_PRIM_NONE); /* make sure we're between a Begin/End pair */
+ assert(imm.prim_type != GPU_PRIM_NONE); /* make sure we're between a Begin/End pair */
#endif
setAttribValueBit(attrib_id);
@@ -559,13 +559,13 @@ void immAttrib4fv(uint attrib_id, const float data[4])
void immAttrib3ub(uint attrib_id, unsigned char r, unsigned char g, unsigned char b)
{
- Gwn_VertAttr* attrib = imm.vertex_format.attribs + attrib_id;
+ GPUVertAttr* attrib = imm.vertex_format.attribs + attrib_id;
#if TRUST_NO_ONE
assert(attrib_id < imm.vertex_format.attr_len);
- assert(attrib->comp_type == GWN_COMP_U8);
+ assert(attrib->comp_type == GPU_COMP_U8);
assert(attrib->comp_len == 3);
assert(imm.vertex_idx < imm.vertex_len);
- assert(imm.prim_type != GWN_PRIM_NONE); /* make sure we're between a Begin/End pair */
+ assert(imm.prim_type != GPU_PRIM_NONE); /* make sure we're between a Begin/End pair */
#endif
setAttribValueBit(attrib_id);
@@ -579,13 +579,13 @@ void immAttrib3ub(uint attrib_id, unsigned char r, unsigned char g, unsigned cha
void immAttrib4ub(uint attrib_id, unsigned char r, unsigned char g, unsigned char b, unsigned char a)
{
- Gwn_VertAttr* attrib = imm.vertex_format.attribs + attrib_id;
+ GPUVertAttr* attrib = imm.vertex_format.attribs + attrib_id;
#if TRUST_NO_ONE
assert(attrib_id < imm.vertex_format.attr_len);
- assert(attrib->comp_type == GWN_COMP_U8);
+ assert(attrib->comp_type == GPU_COMP_U8);
assert(attrib->comp_len == 4);
assert(imm.vertex_idx < imm.vertex_len);
- assert(imm.prim_type != GWN_PRIM_NONE); /* make sure we're between a Begin/End pair */
+ assert(imm.prim_type != GPU_PRIM_NONE); /* make sure we're between a Begin/End pair */
#endif
setAttribValueBit(attrib_id);
@@ -613,7 +613,7 @@ void immSkipAttrib(uint attrib_id)
#if TRUST_NO_ONE
assert(attrib_id < imm.vertex_format.attr_len);
assert(imm.vertex_idx < imm.vertex_len);
- assert(imm.prim_type != GWN_PRIM_NONE); /* make sure we're between a Begin/End pair */
+ assert(imm.prim_type != GPU_PRIM_NONE); /* make sure we're between a Begin/End pair */
#endif
setAttribValueBit(attrib_id);
}
@@ -621,7 +621,7 @@ void immSkipAttrib(uint attrib_id)
static void immEndVertex(void) /* and move on to the next vertex */
{
#if TRUST_NO_ONE
- assert(imm.prim_type != GWN_PRIM_NONE); /* make sure we're between a Begin/End pair */
+ assert(imm.prim_type != GPU_PRIM_NONE); /* make sure we're between a Begin/End pair */
assert(imm.vertex_idx < imm.vertex_len);
#endif
@@ -633,7 +633,7 @@ static void immEndVertex(void) /* and move on to the next vertex */
#endif
for (uint a_idx = 0; a_idx < imm.vertex_format.attr_len; ++a_idx) {
if ((imm.unassigned_attrib_bits >> a_idx) & 1) {
- const Gwn_VertAttr* a = imm.vertex_format.attribs + a_idx;
+ const GPUVertAttr* a = imm.vertex_format.attribs + a_idx;
/* printf("copying %s from vertex %u to %u\n", a->name, imm.vertex_idx - 1, imm.vertex_idx); */
@@ -702,16 +702,16 @@ void immVertex2iv(uint attrib_id, const int data[2])
#if 0
#if TRUST_NO_ONE
- #define GET_UNIFORM const Gwn_ShaderInput* uniform = GWN_shaderinterface_uniform(imm.shader_interface, name); assert(uniform);
+ #define GET_UNIFORM const GPUShaderInput* uniform = GPU_shaderinterface_uniform(imm.shader_interface, name); assert(uniform);
#else
- #define GET_UNIFORM const Gwn_ShaderInput* uniform = GWN_shaderinterface_uniform(imm.shader_interface, name);
+ #define GET_UNIFORM const GPUShaderInput* uniform = GPU_shaderinterface_uniform(imm.shader_interface, name);
#endif
#else
/* NOTE: It is possible to have a uniform fully optimized out of the shader.
* In this case we can't assert failure or allow NULL-pointer dereference.
* TODO(sergey): How can we detect existing-but-optimized-out uniform but still
* catch typos in uniform names passed to immUniform*() functions? */
- #define GET_UNIFORM const Gwn_ShaderInput* uniform = GWN_shaderinterface_uniform(imm.shader_interface, name); if (uniform == NULL) return;
+ #define GET_UNIFORM const GPUShaderInput* uniform = GPU_shaderinterface_uniform(imm.shader_interface, name); if (uniform == NULL) return;
#endif
void immUniform1f(const char* name, float x)
@@ -817,7 +817,7 @@ void immUniform4iv(const char* name, const int data[4])
void immUniformColor4f(float r, float g, float b, float a)
{
- const Gwn_ShaderInput* uniform = GWN_shaderinterface_uniform_builtin(imm.shader_interface, GWN_UNIFORM_COLOR);
+ const GPUShaderInput* uniform = GPU_shaderinterface_uniform_builtin(imm.shader_interface, GPU_UNIFORM_COLOR);
#if TRUST_NO_ONE
assert(uniform != NULL);
#endif
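
A sketch of one complete immediate-mode draw after the rename, combining the calls touched in this file; it assumes a valid GL context and the built-in 2D uniform-color shader (shape and color are hypothetical):

#include "GPU_immediate.h"

static void draw_filled_rect(float x1, float y1, float x2, float y2)
{
	GPUVertFormat *format = immVertexFormat();
	uint pos = GPU_vertformat_attr_add(format, "pos", GPU_COMP_F32, 2, GPU_FETCH_FLOAT);

	immBindBuiltinProgram(GPU_SHADER_2D_UNIFORM_COLOR);
	immUniformColor4f(1.0f, 0.0f, 0.0f, 1.0f);

	immBegin(GPU_PRIM_TRI_FAN, 4);
	immVertex2f(pos, x1, y1);
	immVertex2f(pos, x2, y1);
	immVertex2f(pos, x2, y2);
	immVertex2f(pos, x1, y2);
	immEnd();

	immUnbindProgram();
}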
diff --git a/source/blender/gpu/intern/gpu_immediate_util.c b/source/blender/gpu/intern/gpu_immediate_util.c
index 4b2fb1b8e8a..b794048087a 100644
--- a/source/blender/gpu/intern/gpu_immediate_util.c
+++ b/source/blender/gpu/intern/gpu_immediate_util.c
@@ -18,10 +18,10 @@
* ***** END GPL LICENSE BLOCK *****
*/
-/** \file blender/gpu/intern/gwn_imm_util.c
+/** \file blender/gpu/intern/gpu_imm_util.c
* \ingroup gpu
*
- * Gawain immediate mode drawing utilities
+ * GPU immediate mode drawing utilities
*/
#include <stdio.h>
@@ -70,7 +70,7 @@ static const int cube_line_index[12][2] = {
void immRectf(uint pos, float x1, float y1, float x2, float y2)
{
- immBegin(GWN_PRIM_TRI_FAN, 4);
+ immBegin(GPU_PRIM_TRI_FAN, 4);
immVertex2f(pos, x1, y1);
immVertex2f(pos, x2, y1);
immVertex2f(pos, x2, y2);
@@ -80,7 +80,7 @@ void immRectf(uint pos, float x1, float y1, float x2, float y2)
void immRecti(uint pos, int x1, int y1, int x2, int y2)
{
- immBegin(GWN_PRIM_TRI_FAN, 4);
+ immBegin(GPU_PRIM_TRI_FAN, 4);
immVertex2i(pos, x1, y1);
immVertex2i(pos, x2, y1);
immVertex2i(pos, x2, y2);
@@ -125,8 +125,8 @@ void immRecti_fast_with_color(uint pos, uint col, int x1, int y1, int x2, int y2
#if 0 /* more complete version in case we want that */
void immRecti_complete(int x1, int y1, int x2, int y2, const float color[4])
{
- Gwn_VertFormat *format = immVertexFormat();
- uint pos = add_attrib(format, "pos", GWN_COMP_I32, 2, GWN_FETCH_INT_TO_FLOAT);
+ GPUVertFormat *format = immVertexFormat();
+ uint pos = add_attrib(format, "pos", GPU_COMP_I32, 2, GPU_FETCH_INT_TO_FLOAT);
immBindBuiltinProgram(GPU_SHADER_2D_UNIFORM_COLOR);
immUniformColor4fv(color);
immRecti(pos, x1, y1, x2, y2);
@@ -153,7 +153,7 @@ void imm_cpack(unsigned int x)
}
static void imm_draw_circle(
- Gwn_PrimType prim_type, const uint shdr_pos, float x, float y, float rad_x, float rad_y, int nsegments)
+ GPUPrimType prim_type, const uint shdr_pos, float x, float y, float rad_x, float rad_y, int nsegments)
{
immBegin(prim_type, nsegments);
for (int i = 0; i < nsegments; ++i) {
@@ -175,7 +175,7 @@ static void imm_draw_circle(
*/
void imm_draw_circle_wire_2d(uint shdr_pos, float x, float y, float rad, int nsegments)
{
- imm_draw_circle(GWN_PRIM_LINE_LOOP, shdr_pos, x, y, rad, rad, nsegments);
+ imm_draw_circle(GPU_PRIM_LINE_LOOP, shdr_pos, x, y, rad, rad, nsegments);
}
/**
@@ -190,23 +190,23 @@ void imm_draw_circle_wire_2d(uint shdr_pos, float x, float y, float rad, int nse
*/
void imm_draw_circle_fill_2d(uint shdr_pos, float x, float y, float rad, int nsegments)
{
- imm_draw_circle(GWN_PRIM_TRI_FAN, shdr_pos, x, y, rad, rad, nsegments);
+ imm_draw_circle(GPU_PRIM_TRI_FAN, shdr_pos, x, y, rad, rad, nsegments);
}
void imm_draw_circle_wire_aspect_2d(uint shdr_pos, float x, float y, float rad_x, float rad_y, int nsegments)
{
- imm_draw_circle(GWN_PRIM_LINE_LOOP, shdr_pos, x, y, rad_x, rad_y, nsegments);
+ imm_draw_circle(GPU_PRIM_LINE_LOOP, shdr_pos, x, y, rad_x, rad_y, nsegments);
}
void imm_draw_circle_fill_aspect_2d(uint shdr_pos, float x, float y, float rad_x, float rad_y, int nsegments)
{
- imm_draw_circle(GWN_PRIM_TRI_FAN, shdr_pos, x, y, rad_x, rad_y, nsegments);
+ imm_draw_circle(GPU_PRIM_TRI_FAN, shdr_pos, x, y, rad_x, rad_y, nsegments);
}
/**
* \note We could have `imm_draw_lined_disk_partial` but currently there is no need.
*/
static void imm_draw_disk_partial(
- Gwn_PrimType prim_type, unsigned pos, float x, float y,
+ GPUPrimType prim_type, unsigned pos, float x, float y,
float rad_inner, float rad_outer, int nsegments, float start, float sweep)
{
/* shift & reverse angle, increase 'nsegments' to match gluPartialDisk */
@@ -243,11 +243,11 @@ void imm_draw_disk_partial_fill_2d(
unsigned pos, float x, float y,
float rad_inner, float rad_outer, int nsegments, float start, float sweep)
{
- imm_draw_disk_partial(GWN_PRIM_TRI_STRIP, pos, x, y, rad_inner, rad_outer, nsegments, start, sweep);
+ imm_draw_disk_partial(GPU_PRIM_TRI_STRIP, pos, x, y, rad_inner, rad_outer, nsegments, start, sweep);
}
static void imm_draw_circle_3D(
- Gwn_PrimType prim_type, unsigned pos, float x, float y,
+ GPUPrimType prim_type, unsigned pos, float x, float y,
float rad, int nsegments)
{
immBegin(prim_type, nsegments);
@@ -260,12 +260,12 @@ static void imm_draw_circle_3D(
void imm_draw_circle_wire_3d(unsigned pos, float x, float y, float rad, int nsegments)
{
- imm_draw_circle_3D(GWN_PRIM_LINE_LOOP, pos, x, y, rad, nsegments);
+ imm_draw_circle_3D(GPU_PRIM_LINE_LOOP, pos, x, y, rad, nsegments);
}
void imm_draw_circle_fill_3d(unsigned pos, float x, float y, float rad, int nsegments)
{
- imm_draw_circle_3D(GWN_PRIM_TRI_FAN, pos, x, y, rad, nsegments);
+ imm_draw_circle_3D(GPU_PRIM_TRI_FAN, pos, x, y, rad, nsegments);
}
/**
@@ -279,7 +279,7 @@ void imm_draw_circle_fill_3d(unsigned pos, float x, float y, float rad, int nseg
*/
void imm_draw_box_wire_2d(unsigned pos, float x1, float y1, float x2, float y2)
{
- immBegin(GWN_PRIM_LINE_LOOP, 4);
+ immBegin(GPU_PRIM_LINE_LOOP, 4);
immVertex2f(pos, x1, y1);
immVertex2f(pos, x1, y2);
immVertex2f(pos, x2, y2);
@@ -289,8 +289,8 @@ void imm_draw_box_wire_2d(unsigned pos, float x1, float y1, float x2, float y2)
void imm_draw_box_wire_3d(unsigned pos, float x1, float y1, float x2, float y2)
{
- /* use this version when Gwn_VertFormat has a vec3 position */
- immBegin(GWN_PRIM_LINE_LOOP, 4);
+ /* use this version when GPUVertFormat has a vec3 position */
+ immBegin(GPU_PRIM_LINE_LOOP, 4);
immVertex3f(pos, x1, y1, 0.0f);
immVertex3f(pos, x1, y2, 0.0f);
immVertex3f(pos, x2, y2, 0.0f);
@@ -303,7 +303,7 @@ void imm_draw_box_wire_3d(unsigned pos, float x1, float y1, float x2, float y2)
*/
void imm_draw_box_checker_2d(float x1, float y1, float x2, float y2)
{
- uint pos = GWN_vertformat_attr_add(immVertexFormat(), "pos", GWN_COMP_F32, 2, GWN_FETCH_FLOAT);
+ uint pos = GPU_vertformat_attr_add(immVertexFormat(), "pos", GPU_COMP_F32, 2, GPU_FETCH_FLOAT);
immBindBuiltinProgram(GPU_SHADER_2D_CHECKER);
immUniform4f("color1", 0.15f, 0.15f, 0.15f, 1.0f);
@@ -323,7 +323,7 @@ void imm_draw_cube_fill_3d(uint pos, const float co[3], const float aspect[3])
madd_v3_v3v3v3(coords[i], co, cube_coords[i], aspect);
}
- immBegin(GWN_PRIM_TRIS, ARRAY_SIZE(cube_quad_index) * 3 * 2);
+ immBegin(GPU_PRIM_TRIS, ARRAY_SIZE(cube_quad_index) * 3 * 2);
for (int i = 0; i < ARRAY_SIZE(cube_quad_index); i++) {
immVertex3fv(pos, coords[cube_quad_index[i][0]]);
immVertex3fv(pos, coords[cube_quad_index[i][1]]);
@@ -344,7 +344,7 @@ void imm_draw_cube_wire_3d(uint pos, const float co[3], const float aspect[3])
madd_v3_v3v3v3(coords[i], co, cube_coords[i], aspect);
}
- immBegin(GWN_PRIM_LINES, ARRAY_SIZE(cube_line_index) * 2);
+ immBegin(GPU_PRIM_LINES, ARRAY_SIZE(cube_line_index) * 2);
for (int i = 0; i < ARRAY_SIZE(cube_line_index); i++) {
immVertex3fv(pos, coords[cube_line_index[i][0]]);
immVertex3fv(pos, coords[cube_line_index[i][1]]);
@@ -367,7 +367,7 @@ void imm_draw_cube_wire_3d(uint pos, const float co[3], const float aspect[3])
void imm_draw_cylinder_fill_normal_3d(
unsigned int pos, unsigned int nor, float base, float top, float height, int slices, int stacks)
{
- immBegin(GWN_PRIM_TRIS, 6 * slices * stacks);
+ immBegin(GPU_PRIM_TRIS, 6 * slices * stacks);
for (int i = 0; i < slices; ++i) {
const float angle1 = (float)(2 * M_PI) * ((float)i / (float)slices);
const float angle2 = (float)(2 * M_PI) * ((float)(i + 1) / (float)slices);
@@ -418,7 +418,7 @@ void imm_draw_cylinder_fill_normal_3d(
void imm_draw_cylinder_wire_3d(unsigned int pos, float base, float top, float height, int slices, int stacks)
{
- immBegin(GWN_PRIM_LINES, 6 * slices * stacks);
+ immBegin(GPU_PRIM_LINES, 6 * slices * stacks);
for (int i = 0; i < slices; ++i) {
const float angle1 = (float)(2 * M_PI) * ((float)i / (float)slices);
const float angle2 = (float)(2 * M_PI) * ((float)(i + 1) / (float)slices);
@@ -455,7 +455,7 @@ void imm_draw_cylinder_wire_3d(unsigned int pos, float base, float top, float he
void imm_draw_cylinder_fill_3d(unsigned int pos, float base, float top, float height, int slices, int stacks)
{
- immBegin(GWN_PRIM_TRIS, 6 * slices * stacks);
+ immBegin(GPU_PRIM_TRIS, 6 * slices * stacks);
for (int i = 0; i < slices; ++i) {
const float angle1 = (float)(2 * M_PI) * ((float)i / (float)slices);
const float angle2 = (float)(2 * M_PI) * ((float)(i + 1) / (float)slices);
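
The utility wrappers in this file reduce to choosing a primitive type per shape. A short sketch, assuming `pos` is a 2D float attribute id and that start/sweep are in degrees as with gluPartialDisk (which the partial-disk code deliberately matches):

static void draw_gauge(uint pos, float cx, float cy)
{
	/* outline: 32-segment line loop */
	imm_draw_circle_wire_2d(pos, cx, cy, 1.0f, 32);
	/* filled ring covering 270 degrees, starting at angle 0 */
	imm_draw_disk_partial_fill_2d(pos, cx, cy, 0.8f, 1.0f, 32, 0.0f, 270.0f);
}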
diff --git a/source/blender/gpu/intern/gpu_matrix.c b/source/blender/gpu/intern/gpu_matrix.c
index 13c6fbea1c6..87fcc5f25a3 100644
--- a/source/blender/gpu/intern/gpu_matrix.c
+++ b/source/blender/gpu/intern/gpu_matrix.c
@@ -558,20 +558,20 @@ const float (*GPU_matrix_normal_inverse_get(float m[3][3]))[3]
return m;
}
-void GPU_matrix_bind(const Gwn_ShaderInterface *shaderface)
+void GPU_matrix_bind(const GPUShaderInterface *shaderface)
{
/* set uniform values to matrix stack values
* call this before a draw call if desired matrices are dirty
* call glUseProgram before this, as glUniform expects program to be bound
*/
- const Gwn_ShaderInput *MV = GWN_shaderinterface_uniform_builtin(shaderface, GWN_UNIFORM_MODELVIEW);
- const Gwn_ShaderInput *P = GWN_shaderinterface_uniform_builtin(shaderface, GWN_UNIFORM_PROJECTION);
- const Gwn_ShaderInput *MVP = GWN_shaderinterface_uniform_builtin(shaderface, GWN_UNIFORM_MVP);
+ const GPUShaderInput *MV = GPU_shaderinterface_uniform_builtin(shaderface, GPU_UNIFORM_MODELVIEW);
+ const GPUShaderInput *P = GPU_shaderinterface_uniform_builtin(shaderface, GPU_UNIFORM_PROJECTION);
+ const GPUShaderInput *MVP = GPU_shaderinterface_uniform_builtin(shaderface, GPU_UNIFORM_MVP);
- const Gwn_ShaderInput *N = GWN_shaderinterface_uniform_builtin(shaderface, GWN_UNIFORM_NORMAL);
- const Gwn_ShaderInput *MV_inv = GWN_shaderinterface_uniform_builtin(shaderface, GWN_UNIFORM_MODELVIEW_INV);
- const Gwn_ShaderInput *P_inv = GWN_shaderinterface_uniform_builtin(shaderface, GWN_UNIFORM_PROJECTION_INV);
+ const GPUShaderInput *N = GPU_shaderinterface_uniform_builtin(shaderface, GPU_UNIFORM_NORMAL);
+ const GPUShaderInput *MV_inv = GPU_shaderinterface_uniform_builtin(shaderface, GPU_UNIFORM_MODELVIEW_INV);
+ const GPUShaderInput *P_inv = GPU_shaderinterface_uniform_builtin(shaderface, GPU_UNIFORM_PROJECTION_INV);
if (MV) {
#if DEBUG_MATRIX_BIND
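
Per the comment in GPU_matrix_bind(), the expected call order is glUseProgram() first, then the bind once the matrix stack is dirty. A hedged sketch (the field accesses are assumed from the gpu_shader.c hunks below):

static void ensure_matrix_uniforms(GPUShader *shader)
{
	glUseProgram(shader->program);  /* glUniform expects the program to be bound */
	if (GPU_matrix_dirty_get()) {
		GPU_matrix_bind(shader->interface);
	}
}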
diff --git a/source/blender/gpu/intern/gpu_primitive.c b/source/blender/gpu/intern/gpu_primitive.c
index 0f0c28c05dc..189d17f2dd2 100644
--- a/source/blender/gpu/intern/gpu_primitive.c
+++ b/source/blender/gpu/intern/gpu_primitive.c
@@ -23,61 +23,61 @@
* ***** END GPL LICENSE BLOCK *****
*/
-/** \file blender/gpu/intern/gwn_primitive.c
+/** \file blender/gpu/intern/gpu_primitive.c
* \ingroup gpu
*
- * Gawain geometric primitives
+ * GPU geometric primitives
*/
#include "GPU_primitive.h"
#include "gpu_primitive_private.h"
-Gwn_PrimClass GWN_primtype_class(Gwn_PrimType prim_type)
+GPUPrimClass GPU_primtype_class(GPUPrimType prim_type)
{
- static const Gwn_PrimClass classes[] = {
- [GWN_PRIM_POINTS] = GWN_PRIM_CLASS_POINT,
- [GWN_PRIM_LINES] = GWN_PRIM_CLASS_LINE,
- [GWN_PRIM_LINE_STRIP] = GWN_PRIM_CLASS_LINE,
- [GWN_PRIM_LINE_LOOP] = GWN_PRIM_CLASS_LINE,
- [GWN_PRIM_TRIS] = GWN_PRIM_CLASS_SURFACE,
- [GWN_PRIM_TRI_STRIP] = GWN_PRIM_CLASS_SURFACE,
- [GWN_PRIM_TRI_FAN] = GWN_PRIM_CLASS_SURFACE,
+ static const GPUPrimClass classes[] = {
+ [GPU_PRIM_POINTS] = GPU_PRIM_CLASS_POINT,
+ [GPU_PRIM_LINES] = GPU_PRIM_CLASS_LINE,
+ [GPU_PRIM_LINE_STRIP] = GPU_PRIM_CLASS_LINE,
+ [GPU_PRIM_LINE_LOOP] = GPU_PRIM_CLASS_LINE,
+ [GPU_PRIM_TRIS] = GPU_PRIM_CLASS_SURFACE,
+ [GPU_PRIM_TRI_STRIP] = GPU_PRIM_CLASS_SURFACE,
+ [GPU_PRIM_TRI_FAN] = GPU_PRIM_CLASS_SURFACE,
- [GWN_PRIM_LINES_ADJ] = GWN_PRIM_CLASS_LINE,
- [GWN_PRIM_LINE_STRIP_ADJ] = GWN_PRIM_CLASS_LINE,
- [GWN_PRIM_TRIS_ADJ] = GWN_PRIM_CLASS_SURFACE,
+ [GPU_PRIM_LINES_ADJ] = GPU_PRIM_CLASS_LINE,
+ [GPU_PRIM_LINE_STRIP_ADJ] = GPU_PRIM_CLASS_LINE,
+ [GPU_PRIM_TRIS_ADJ] = GPU_PRIM_CLASS_SURFACE,
- [GWN_PRIM_NONE] = GWN_PRIM_CLASS_NONE
+ [GPU_PRIM_NONE] = GPU_PRIM_CLASS_NONE
};
return classes[prim_type];
}
-bool GWN_primtype_belongs_to_class(Gwn_PrimType prim_type, Gwn_PrimClass prim_class)
+bool GPU_primtype_belongs_to_class(GPUPrimType prim_type, GPUPrimClass prim_class)
{
- if (prim_class == GWN_PRIM_CLASS_NONE && prim_type == GWN_PRIM_NONE) {
+ if (prim_class == GPU_PRIM_CLASS_NONE && prim_type == GPU_PRIM_NONE) {
return true;
}
- return prim_class & GWN_primtype_class(prim_type);
+ return prim_class & GPU_primtype_class(prim_type);
}
-GLenum convert_prim_type_to_gl(Gwn_PrimType prim_type)
+GLenum convert_prim_type_to_gl(GPUPrimType prim_type)
{
#if TRUST_NO_ONE
- assert(prim_type != GWN_PRIM_NONE);
+ assert(prim_type != GPU_PRIM_NONE);
#endif
static const GLenum table[] = {
- [GWN_PRIM_POINTS] = GL_POINTS,
- [GWN_PRIM_LINES] = GL_LINES,
- [GWN_PRIM_LINE_STRIP] = GL_LINE_STRIP,
- [GWN_PRIM_LINE_LOOP] = GL_LINE_LOOP,
- [GWN_PRIM_TRIS] = GL_TRIANGLES,
- [GWN_PRIM_TRI_STRIP] = GL_TRIANGLE_STRIP,
- [GWN_PRIM_TRI_FAN] = GL_TRIANGLE_FAN,
+ [GPU_PRIM_POINTS] = GL_POINTS,
+ [GPU_PRIM_LINES] = GL_LINES,
+ [GPU_PRIM_LINE_STRIP] = GL_LINE_STRIP,
+ [GPU_PRIM_LINE_LOOP] = GL_LINE_LOOP,
+ [GPU_PRIM_TRIS] = GL_TRIANGLES,
+ [GPU_PRIM_TRI_STRIP] = GL_TRIANGLE_STRIP,
+ [GPU_PRIM_TRI_FAN] = GL_TRIANGLE_FAN,
- [GWN_PRIM_LINES_ADJ] = GL_LINES_ADJACENCY,
- [GWN_PRIM_LINE_STRIP_ADJ] = GL_LINE_STRIP_ADJACENCY,
- [GWN_PRIM_TRIS_ADJ] = GL_TRIANGLES_ADJACENCY,
+ [GPU_PRIM_LINES_ADJ] = GL_LINES_ADJACENCY,
+ [GPU_PRIM_LINE_STRIP_ADJ] = GL_LINE_STRIP_ADJACENCY,
+ [GPU_PRIM_TRIS_ADJ] = GL_TRIANGLES_ADJACENCY,
};
return table[prim_type];
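
The two designated-initializer tables above make primitive classification and GL conversion O(1) lookups. A small sketch of the class query:

static bool primtype_is_surface(GPUPrimType prim_type)
{
	/* true for GPU_PRIM_TRIS / _TRI_STRIP / _TRI_FAN / _TRIS_ADJ */
	return GPU_primtype_belongs_to_class(prim_type, GPU_PRIM_CLASS_SURFACE);
}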
diff --git a/source/blender/gpu/intern/gpu_primitive_private.h b/source/blender/gpu/intern/gpu_primitive_private.h
index 6d3f1e20da7..d057f29fdc5 100644
--- a/source/blender/gpu/intern/gpu_primitive_private.h
+++ b/source/blender/gpu/intern/gpu_primitive_private.h
@@ -23,15 +23,15 @@
* ***** END GPL LICENSE BLOCK *****
*/
-/** \file blender/gpu/gwn_primitive_private.h
+/** \file blender/gpu/intern/gpu_primitive_private.h
* \ingroup gpu
*
- * Gawain geometric primitives
+ * GPU geometric primitives
*/
-#ifndef __GWN_PRIMITIVE_PRIVATE_H__
-#define __GWN_PRIMITIVE_PRIVATE_H__
+#ifndef __GPU_PRIMITIVE_PRIVATE_H__
+#define __GPU_PRIMITIVE_PRIVATE_H__
-GLenum convert_prim_type_to_gl(Gwn_PrimType);
+GLenum convert_prim_type_to_gl(GPUPrimType);
-#endif /* __GWN_PRIMITIVE_PRIVATE_H__ */
+#endif /* __GPU_PRIMITIVE_PRIVATE_H__ */
diff --git a/source/blender/gpu/intern/gpu_shader.c b/source/blender/gpu/intern/gpu_shader.c
index 99baaa1164f..c5325b6ff21 100644
--- a/source/blender/gpu/intern/gpu_shader.c
+++ b/source/blender/gpu/intern/gpu_shader.c
@@ -496,24 +496,24 @@ GPUShader *GPU_shader_create_ex(const char *vertexcode,
return NULL;
}
- shader->interface = GWN_shaderinterface_create(shader->program);
+ shader->interface = GPU_shaderinterface_create(shader->program);
#ifdef WITH_OPENSUBDIV
/* TODO(sergey): Find a better place for this. */
if (use_opensubdiv) {
if (GLEW_VERSION_4_1) {
glProgramUniform1i(shader->program,
- GWN_shaderinterface_uniform(shader->interface, "FVarDataOffsetBuffer")->location,
+ GPU_shaderinterface_uniform(shader->interface, "FVarDataOffsetBuffer")->location,
30); /* GL_TEXTURE30 */
glProgramUniform1i(shader->program,
- GWN_shaderinterface_uniform(shader->interface, "FVarDataBuffer")->location,
+ GPU_shaderinterface_uniform(shader->interface, "FVarDataBuffer")->location,
31); /* GL_TEXTURE31 */
}
else {
glUseProgram(shader->program);
- glUniform1i(GWN_shaderinterface_uniform(shader->interface, "FVarDataOffsetBuffer")->location, 30);
- glUniform1i(GWN_shaderinterface_uniform(shader->interface, "FVarDataBuffer")->location, 31);
+ glUniform1i(GPU_shaderinterface_uniform(shader->interface, "FVarDataOffsetBuffer")->location, 30);
+ glUniform1i(GPU_shaderinterface_uniform(shader->interface, "FVarDataBuffer")->location, 31);
glUseProgram(0);
}
}
@@ -575,7 +575,7 @@ void GPU_shader_free(GPUShader *shader)
glDeleteProgram(shader->program);
if (shader->interface)
- GWN_shaderinterface_discard(shader->interface);
+ GPU_shaderinterface_discard(shader->interface);
MEM_freeN(shader);
}
@@ -583,14 +583,14 @@ void GPU_shader_free(GPUShader *shader)
int GPU_shader_get_uniform(GPUShader *shader, const char *name)
{
BLI_assert(shader && shader->program);
- const Gwn_ShaderInput *uniform = GWN_shaderinterface_uniform(shader->interface, name);
+ const GPUShaderInput *uniform = GPU_shaderinterface_uniform(shader->interface, name);
return uniform ? uniform->location : -1;
}
int GPU_shader_get_builtin_uniform(GPUShader *shader, int builtin)
{
BLI_assert(shader && shader->program);
- const Gwn_ShaderInput *uniform = GWN_shaderinterface_uniform_builtin(shader->interface, builtin);
+ const GPUShaderInput *uniform = GPU_shaderinterface_uniform_builtin(shader->interface, builtin);
return uniform ? uniform->location : -1;
}
@@ -598,7 +598,7 @@ int GPU_shader_get_uniform_block(GPUShader *shader, const char *name)
{
BLI_assert(shader && shader->program);
- const Gwn_ShaderInput *ubo = GWN_shaderinterface_ubo(shader->interface, name);
+ const GPUShaderInput *ubo = GPU_shaderinterface_ubo(shader->interface, name);
return ubo ? ubo->location : -1;
}
@@ -675,7 +675,7 @@ void GPU_shader_uniform_texture(GPUShader *UNUSED(shader), int location, GPUText
int GPU_shader_get_attribute(GPUShader *shader, const char *name)
{
BLI_assert(shader && shader->program);
- const Gwn_ShaderInput *attrib = GWN_shaderinterface_attr(shader->interface, name);
+ const GPUShaderInput *attrib = GPU_shaderinterface_attr(shader->interface, name);
return attrib ? attrib->location : -1;
}
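
After the rename, uniform lookups still return -1 for names that are missing or optimized out, so callers should guard the location. A minimal sketch using plain GL (the "color" name and setter choice are hypothetical):

static void set_color_uniform(GPUShader *shader, const float color[4])
{
	glUseProgram(shader->program);
	const int loc = GPU_shader_get_uniform(shader, "color");
	if (loc != -1) {
		glUniform4fv(loc, 1, color);
	}
}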
diff --git a/source/blender/gpu/intern/gpu_shader_interface.c b/source/blender/gpu/intern/gpu_shader_interface.c
index 56b25726a84..ec2f52a2a2d 100644
--- a/source/blender/gpu/intern/gpu_shader_interface.c
+++ b/source/blender/gpu/intern/gpu_shader_interface.c
@@ -23,10 +23,10 @@
* ***** END GPL LICENSE BLOCK *****
*/
-/** \file blender/gpu/intern/gwn_shader_interface.c
+/** \file blender/gpu/intern/gpu_shader_interface.c
* \ingroup gpu
*
- * Gawain shader interface (C --> GLSL)
+ * GPU shader interface (C --> GLSL)
*/
#include "gpu_batch_private.h"
@@ -42,46 +42,46 @@
#include <stdio.h>
#endif
-static const char* BuiltinUniform_name(Gwn_UniformBuiltin u)
+static const char* BuiltinUniform_name(GPUUniformBuiltin u)
{
static const char* names[] = {
- [GWN_UNIFORM_NONE] = NULL,
-
- [GWN_UNIFORM_MODEL] = "ModelMatrix",
- [GWN_UNIFORM_VIEW] = "ViewMatrix",
- [GWN_UNIFORM_MODELVIEW] = "ModelViewMatrix",
- [GWN_UNIFORM_PROJECTION] = "ProjectionMatrix",
- [GWN_UNIFORM_VIEWPROJECTION] = "ViewProjectionMatrix",
- [GWN_UNIFORM_MVP] = "ModelViewProjectionMatrix",
-
- [GWN_UNIFORM_MODEL_INV] = "ModelMatrixInverse",
- [GWN_UNIFORM_VIEW_INV] = "ViewMatrixInverse",
- [GWN_UNIFORM_MODELVIEW_INV] = "ModelViewMatrixInverse",
- [GWN_UNIFORM_PROJECTION_INV] = "ProjectionMatrixInverse",
- [GWN_UNIFORM_VIEWPROJECTION_INV] = "ViewProjectionMatrixInverse",
-
- [GWN_UNIFORM_NORMAL] = "NormalMatrix",
- [GWN_UNIFORM_WORLDNORMAL] = "WorldNormalMatrix",
- [GWN_UNIFORM_CAMERATEXCO] = "CameraTexCoFactors",
- [GWN_UNIFORM_ORCO] = "OrcoTexCoFactors",
-
- [GWN_UNIFORM_COLOR] = "color",
- [GWN_UNIFORM_EYE] = "eye",
- [GWN_UNIFORM_CALLID] = "callId",
-
- [GWN_UNIFORM_CUSTOM] = NULL,
- [GWN_NUM_UNIFORMS] = NULL,
+ [GPU_UNIFORM_NONE] = NULL,
+
+ [GPU_UNIFORM_MODEL] = "ModelMatrix",
+ [GPU_UNIFORM_VIEW] = "ViewMatrix",
+ [GPU_UNIFORM_MODELVIEW] = "ModelViewMatrix",
+ [GPU_UNIFORM_PROJECTION] = "ProjectionMatrix",
+ [GPU_UNIFORM_VIEWPROJECTION] = "ViewProjectionMatrix",
+ [GPU_UNIFORM_MVP] = "ModelViewProjectionMatrix",
+
+ [GPU_UNIFORM_MODEL_INV] = "ModelMatrixInverse",
+ [GPU_UNIFORM_VIEW_INV] = "ViewMatrixInverse",
+ [GPU_UNIFORM_MODELVIEW_INV] = "ModelViewMatrixInverse",
+ [GPU_UNIFORM_PROJECTION_INV] = "ProjectionMatrixInverse",
+ [GPU_UNIFORM_VIEWPROJECTION_INV] = "ViewProjectionMatrixInverse",
+
+ [GPU_UNIFORM_NORMAL] = "NormalMatrix",
+ [GPU_UNIFORM_WORLDNORMAL] = "WorldNormalMatrix",
+ [GPU_UNIFORM_CAMERATEXCO] = "CameraTexCoFactors",
+ [GPU_UNIFORM_ORCO] = "OrcoTexCoFactors",
+
+ [GPU_UNIFORM_COLOR] = "color",
+ [GPU_UNIFORM_EYE] = "eye",
+ [GPU_UNIFORM_CALLID] = "callId",
+
+ [GPU_UNIFORM_CUSTOM] = NULL,
+ [GPU_NUM_UNIFORMS] = NULL,
};
return names[u];
}
-GWN_INLINE bool match(const char* a, const char* b)
+GPU_INLINE bool match(const char* a, const char* b)
{
return strcmp(a, b) == 0;
}
-GWN_INLINE uint hash_string(const char *str)
+GPU_INLINE uint hash_string(const char *str)
{
uint i = 0, c;
while ((c = *str++)) {
@@ -90,7 +90,7 @@ GWN_INLINE uint hash_string(const char *str)
return i;
}
-GWN_INLINE void set_input_name(Gwn_ShaderInterface* shaderface, Gwn_ShaderInput* input,
+GPU_INLINE void set_input_name(GPUShaderInterface* shaderface, GPUShaderInput* input,
const char* name, uint32_t name_len)
{
input->name_offset = shaderface->name_buffer_offset;
@@ -98,20 +98,20 @@ GWN_INLINE void set_input_name(Gwn_ShaderInterface* shaderface, Gwn_ShaderInput*
shaderface->name_buffer_offset += name_len + 1; /* include NULL terminator */
}
-GWN_INLINE void shader_input_to_bucket(Gwn_ShaderInput* input,
- Gwn_ShaderInput* buckets[GWN_NUM_SHADERINTERFACE_BUCKETS])
+GPU_INLINE void shader_input_to_bucket(GPUShaderInput* input,
+ GPUShaderInput* buckets[GPU_NUM_SHADERINTERFACE_BUCKETS])
{
- const uint bucket_index = input->name_hash % GWN_NUM_SHADERINTERFACE_BUCKETS;
+ const uint bucket_index = input->name_hash % GPU_NUM_SHADERINTERFACE_BUCKETS;
input->next = buckets[bucket_index];
buckets[bucket_index] = input;
}
-GWN_INLINE const Gwn_ShaderInput* buckets_lookup(Gwn_ShaderInput* const buckets[GWN_NUM_SHADERINTERFACE_BUCKETS],
+GPU_INLINE const GPUShaderInput* buckets_lookup(GPUShaderInput* const buckets[GPU_NUM_SHADERINTERFACE_BUCKETS],
const char *name_buffer, const char *name)
{
const uint name_hash = hash_string(name);
- const uint bucket_index = name_hash % GWN_NUM_SHADERINTERFACE_BUCKETS;
- const Gwn_ShaderInput* input = buckets[bucket_index];
+ const uint bucket_index = name_hash % GPU_NUM_SHADERINTERFACE_BUCKETS;
+ const GPUShaderInput* input = buckets[bucket_index];
if (input == NULL) {
/* Requested uniform is not found at all. */
return NULL;
@@ -129,7 +129,7 @@ GWN_INLINE const Gwn_ShaderInput* buckets_lookup(Gwn_ShaderInput* const buckets[
return NULL;
}
/* Work through possible collisions. */
- const Gwn_ShaderInput* next = input;
+ const GPUShaderInput* next = input;
while (next != NULL) {
input = next;
next = input->next;
@@ -143,37 +143,37 @@ GWN_INLINE const Gwn_ShaderInput* buckets_lookup(Gwn_ShaderInput* const buckets[
return NULL; /* not found */
}
-GWN_INLINE void buckets_free(Gwn_ShaderInput* buckets[GWN_NUM_SHADERINTERFACE_BUCKETS])
+GPU_INLINE void buckets_free(GPUShaderInput* buckets[GPU_NUM_SHADERINTERFACE_BUCKETS])
{
- for (uint bucket_index = 0; bucket_index < GWN_NUM_SHADERINTERFACE_BUCKETS; ++bucket_index) {
- Gwn_ShaderInput *input = buckets[bucket_index];
+ for (uint bucket_index = 0; bucket_index < GPU_NUM_SHADERINTERFACE_BUCKETS; ++bucket_index) {
+ GPUShaderInput *input = buckets[bucket_index];
while (input != NULL) {
- Gwn_ShaderInput *input_next = input->next;
+ GPUShaderInput *input_next = input->next;
free(input);
input = input_next;
}
}
}
-static bool setup_builtin_uniform(Gwn_ShaderInput* input, const char* name)
+static bool setup_builtin_uniform(GPUShaderInput* input, const char* name)
{
/* TODO: reject DOUBLE, IMAGE, ATOMIC_COUNTER gl_types */
/* detect built-in uniforms (name must match) */
- for (Gwn_UniformBuiltin u = GWN_UNIFORM_NONE + 1; u < GWN_UNIFORM_CUSTOM; ++u) {
+ for (GPUUniformBuiltin u = GPU_UNIFORM_NONE + 1; u < GPU_UNIFORM_CUSTOM; ++u) {
const char* builtin_name = BuiltinUniform_name(u);
if (match(name, builtin_name)) {
input->builtin_type = u;
return true;
}
}
- input->builtin_type = GWN_UNIFORM_CUSTOM;
+ input->builtin_type = GPU_UNIFORM_CUSTOM;
return false;
}
-static const Gwn_ShaderInput* add_uniform(Gwn_ShaderInterface* shaderface, const char* name)
+static const GPUShaderInput* add_uniform(GPUShaderInterface* shaderface, const char* name)
{
- Gwn_ShaderInput* input = malloc(sizeof(Gwn_ShaderInput));
+ GPUShaderInput* input = malloc(sizeof(GPUShaderInput));
input->location = glGetUniformLocation(shaderface->program, name);
@@ -186,13 +186,13 @@ static const Gwn_ShaderInput* add_uniform(Gwn_ShaderInterface* shaderface, const
setup_builtin_uniform(input, name);
shader_input_to_bucket(input, shaderface->uniform_buckets);
- if (input->builtin_type != GWN_UNIFORM_NONE &&
- input->builtin_type != GWN_UNIFORM_CUSTOM)
+ if (input->builtin_type != GPU_UNIFORM_NONE &&
+ input->builtin_type != GPU_UNIFORM_CUSTOM)
{
shaderface->builtin_uniforms[input->builtin_type] = input;
}
#if DEBUG_SHADER_INTERFACE
- printf("Gwn_ShaderInterface %p, program %d, uniform[] '%s' at location %d\n", shaderface,
+ printf("GPUShaderInterface %p, program %d, uniform[] '%s' at location %d\n", shaderface,
shaderface->program,
name,
input->location);
@@ -200,14 +200,14 @@ static const Gwn_ShaderInput* add_uniform(Gwn_ShaderInterface* shaderface, const
return input;
}
-Gwn_ShaderInterface* GWN_shaderinterface_create(int32_t program)
+GPUShaderInterface* GPU_shaderinterface_create(int32_t program)
{
- Gwn_ShaderInterface* shaderface = calloc(1, sizeof(Gwn_ShaderInterface));
+ GPUShaderInterface* shaderface = calloc(1, sizeof(GPUShaderInterface));
shaderface->program = program;
#if DEBUG_SHADER_INTERFACE
printf("%s {\n", __func__); /* enter function */
- printf("Gwn_ShaderInterface %p, program %d\n", shaderface, program);
+ printf("GPUShaderInterface %p, program %d\n", shaderface, program);
#endif
GLint max_attrib_name_len, attr_len;
@@ -223,7 +223,7 @@ Gwn_ShaderInterface* GWN_shaderinterface_create(int32_t program)
/* Attributes */
for (uint32_t i = 0; i < attr_len; ++i) {
- Gwn_ShaderInput* input = malloc(sizeof(Gwn_ShaderInput));
+ GPUShaderInput* input = malloc(sizeof(GPUShaderInput));
GLsizei remaining_buffer = name_buffer_len - shaderface->name_buffer_offset;
char* name = shaderface->name_buffer + shaderface->name_buffer_offset;
GLsizei name_len = 0;
@@ -250,7 +250,7 @@ Gwn_ShaderInterface* GWN_shaderinterface_create(int32_t program)
}
/* Uniform Blocks */
for (uint32_t i = 0; i < ubo_len; ++i) {
- Gwn_ShaderInput* input = malloc(sizeof(Gwn_ShaderInput));
+ GPUShaderInput* input = malloc(sizeof(GPUShaderInput));
GLsizei remaining_buffer = name_buffer_len - shaderface->name_buffer_offset;
char* name = shaderface->name_buffer + shaderface->name_buffer_offset;
GLsizei name_len = 0;
@@ -268,20 +268,20 @@ Gwn_ShaderInterface* GWN_shaderinterface_create(int32_t program)
#endif
}
/* Builtin Uniforms */
- for (Gwn_UniformBuiltin u = GWN_UNIFORM_NONE + 1; u < GWN_UNIFORM_CUSTOM; ++u) {
+ for (GPUUniformBuiltin u = GPU_UNIFORM_NONE + 1; u < GPU_UNIFORM_CUSTOM; ++u) {
const char* builtin_name = BuiltinUniform_name(u);
if (glGetUniformLocation(program, builtin_name) != -1) {
- add_uniform((Gwn_ShaderInterface*)shaderface, builtin_name);
+ add_uniform((GPUShaderInterface*)shaderface, builtin_name);
}
}
/* Batches ref buffer */
- shaderface->batches_len = GWN_SHADERINTERFACE_REF_ALLOC_COUNT;
- shaderface->batches = calloc(shaderface->batches_len, sizeof(Gwn_Batch*));
+ shaderface->batches_len = GPU_SHADERINTERFACE_REF_ALLOC_COUNT;
+ shaderface->batches = calloc(shaderface->batches_len, sizeof(GPUBatch*));
return shaderface;
}
-void GWN_shaderinterface_discard(Gwn_ShaderInterface* shaderface)
+void GPU_shaderinterface_discard(GPUShaderInterface* shaderface)
{
/* Free memory used by buckets and hash entries. */
buckets_free(shaderface->uniform_buckets);
@@ -292,7 +292,7 @@ void GWN_shaderinterface_discard(Gwn_ShaderInterface* shaderface)
/* Remove this interface from all linked Batches vao cache. */
for (int i = 0; i < shaderface->batches_len; ++i) {
if (shaderface->batches[i] != NULL) {
- gwn_batch_remove_interface_ref(shaderface->batches[i], shaderface);
+ gpu_batch_remove_interface_ref(shaderface->batches[i], shaderface);
}
}
free(shaderface->batches);
@@ -300,39 +300,39 @@ void GWN_shaderinterface_discard(Gwn_ShaderInterface* shaderface)
free(shaderface);
}
-const Gwn_ShaderInput* GWN_shaderinterface_uniform(const Gwn_ShaderInterface* shaderface, const char* name)
+const GPUShaderInput* GPU_shaderinterface_uniform(const GPUShaderInterface* shaderface, const char* name)
{
/* TODO: Warn if we find a matching builtin, since these can be looked up much more quickly. */
- const Gwn_ShaderInput* input = buckets_lookup(shaderface->uniform_buckets, shaderface->name_buffer, name);
+ const GPUShaderInput* input = buckets_lookup(shaderface->uniform_buckets, shaderface->name_buffer, name);
/* If input is not found, add it so it's found next time. */
if (input == NULL) {
- input = add_uniform((Gwn_ShaderInterface*)shaderface, name);
+ input = add_uniform((GPUShaderInterface*)shaderface, name);
}
return (input->location != -1) ? input : NULL;
}
-const Gwn_ShaderInput* GWN_shaderinterface_uniform_builtin(
- const Gwn_ShaderInterface* shaderface, Gwn_UniformBuiltin builtin)
+const GPUShaderInput* GPU_shaderinterface_uniform_builtin(
+ const GPUShaderInterface* shaderface, GPUUniformBuiltin builtin)
{
#if TRUST_NO_ONE
- assert(builtin != GWN_UNIFORM_NONE);
- assert(builtin != GWN_UNIFORM_CUSTOM);
- assert(builtin != GWN_NUM_UNIFORMS);
+ assert(builtin != GPU_UNIFORM_NONE);
+ assert(builtin != GPU_UNIFORM_CUSTOM);
+ assert(builtin != GPU_NUM_UNIFORMS);
#endif
return shaderface->builtin_uniforms[builtin];
}
-const Gwn_ShaderInput* GWN_shaderinterface_ubo(const Gwn_ShaderInterface* shaderface, const char* name)
+const GPUShaderInput* GPU_shaderinterface_ubo(const GPUShaderInterface* shaderface, const char* name)
{
return buckets_lookup(shaderface->ubo_buckets, shaderface->name_buffer, name);
}
-const Gwn_ShaderInput* GWN_shaderinterface_attr(const Gwn_ShaderInterface* shaderface, const char* name)
+const GPUShaderInput* GPU_shaderinterface_attr(const GPUShaderInterface* shaderface, const char* name)
{
return buckets_lookup(shaderface->attrib_buckets, shaderface->name_buffer, name);
}
-void GWN_shaderinterface_add_batch_ref(Gwn_ShaderInterface* shaderface, Gwn_Batch* batch)
+void GPU_shaderinterface_add_batch_ref(GPUShaderInterface* shaderface, GPUBatch* batch)
{
int i; /* find first unused slot */
for (i = 0; i < shaderface->batches_len; ++i) {
@@ -343,14 +343,14 @@ void GWN_shaderinterface_add_batch_ref(Gwn_ShaderInterface* shaderface, Gwn_Batc
if (i == shaderface->batches_len) {
/* Not enough space, realloc the array. */
i = shaderface->batches_len;
- shaderface->batches_len += GWN_SHADERINTERFACE_REF_ALLOC_COUNT;
- shaderface->batches = realloc(shaderface->batches, sizeof(Gwn_Batch*) * shaderface->batches_len);
- memset(shaderface->batches + i, 0, sizeof(Gwn_Batch*) * GWN_SHADERINTERFACE_REF_ALLOC_COUNT);
+ shaderface->batches_len += GPU_SHADERINTERFACE_REF_ALLOC_COUNT;
+ shaderface->batches = realloc(shaderface->batches, sizeof(GPUBatch*) * shaderface->batches_len);
+ memset(shaderface->batches + i, 0, sizeof(GPUBatch*) * GPU_SHADERINTERFACE_REF_ALLOC_COUNT);
}
shaderface->batches[i] = batch;
}
-void GWN_shaderinterface_remove_batch_ref(Gwn_ShaderInterface* shaderface, Gwn_Batch* batch)
+void GPU_shaderinterface_remove_batch_ref(GPUShaderInterface* shaderface, GPUBatch* batch)
{
for (int i = 0; i < shaderface->batches_len; ++i) {
if (shaderface->batches[i] == batch) {
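Editor's sketch (not part of the commit): how the renamed shader-interface API fits together. The header name and the "image" uniform are illustrative assumptions; every function and field used here appears in the diff above.

#include "GPU_shader_interface.h" /* assumed header name */

static void sketch_query_uniforms(int32_t program)
{
	GPUShaderInterface *iface = GPU_shaderinterface_create(program);
	/* A name lookup misses once, falls through to add_uniform(), and hits
	 * the bucket cache on every later call. */
	const GPUShaderInput *image = GPU_shaderinterface_uniform(iface, "image");
	if (image != NULL) {
		glUniform1i(image->location, 0);
	}
	/* Builtins skip the name hash entirely via the precomputed table. */
	const GPUShaderInput *color = GPU_shaderinterface_uniform_builtin(iface, GPU_UNIFORM_COLOR);
	if (color != NULL) {
		glUniform4f(color->location, 1.0f, 1.0f, 1.0f, 1.0f);
	}
	GPU_shaderinterface_discard(iface);
}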
diff --git a/source/blender/gpu/intern/gpu_shader_private.h b/source/blender/gpu/intern/gpu_shader_private.h
index bf54d269fb5..d16aae79aae 100644
--- a/source/blender/gpu/intern/gpu_shader_private.h
+++ b/source/blender/gpu/intern/gpu_shader_private.h
@@ -35,7 +35,7 @@ struct GPUShader {
GLuint geometry; /* handle for geometry shader */
GLuint fragment; /* handle for fragment shader */
- Gwn_ShaderInterface *interface; /* cached uniform & attrib interface for shader */
+ GPUShaderInterface *interface; /* cached uniform & attrib interface for shader */
int feedback_transform_type;
};
diff --git a/source/blender/gpu/intern/gpu_texture.c b/source/blender/gpu/intern/gpu_texture.c
index d9248e06dfb..52afe17d885 100644
--- a/source/blender/gpu/intern/gpu_texture.c
+++ b/source/blender/gpu/intern/gpu_texture.c
@@ -911,28 +911,28 @@ GPUTexture *GPU_texture_create_cube(
tex_format, GPU_DATA_FLOAT, err_out);
}
-GPUTexture *GPU_texture_create_from_vertbuf(Gwn_VertBuf *vert)
+GPUTexture *GPU_texture_create_from_vertbuf(GPUVertBuf *vert)
{
- Gwn_VertFormat *format = &vert->format;
- Gwn_VertAttr *attr = &format->attribs[0];
+ GPUVertFormat *format = &vert->format;
+ GPUVertAttr *attr = &format->attribs[0];
/* Detect incompatible cases (not supported by texture buffers) */
BLI_assert(format->attr_len == 1 && vert->vbo_id != 0);
BLI_assert(attr->comp_len != 3); /* Not until OGL 4.0 */
- BLI_assert(attr->comp_type != GWN_COMP_I10);
- BLI_assert(attr->fetch_mode != GWN_FETCH_INT_TO_FLOAT);
+ BLI_assert(attr->comp_type != GPU_COMP_I10);
+ BLI_assert(attr->fetch_mode != GPU_FETCH_INT_TO_FLOAT);
unsigned int byte_per_comp = attr->sz / attr->comp_len;
- bool is_uint = ELEM(attr->comp_type, GWN_COMP_U8, GWN_COMP_U16, GWN_COMP_U32);
+ bool is_uint = ELEM(attr->comp_type, GPU_COMP_U8, GPU_COMP_U16, GPU_COMP_U32);
/* Cannot fetch signed int or 32bit ints as normalized float. */
- if (attr->fetch_mode == GWN_FETCH_INT_TO_FLOAT_UNIT) {
+ if (attr->fetch_mode == GPU_FETCH_INT_TO_FLOAT_UNIT) {
BLI_assert(is_uint || byte_per_comp <= 2);
}
GPUTextureFormat data_type;
switch (attr->fetch_mode) {
- case GWN_FETCH_FLOAT:
+ case GPU_FETCH_FLOAT:
switch (attr->comp_len) {
case 1: data_type = GPU_R32F; break;
case 2: data_type = GPU_RG32F; break;
@@ -940,7 +940,7 @@ GPUTexture *GPU_texture_create_from_vertbuf(Gwn_VertBuf *vert)
default: data_type = GPU_RGBA32F; break;
}
break;
- case GWN_FETCH_INT:
+ case GPU_FETCH_INT:
switch (attr->comp_len) {
case 1:
switch (byte_per_comp) {
@@ -965,7 +965,7 @@ GPUTexture *GPU_texture_create_from_vertbuf(Gwn_VertBuf *vert)
break;
}
break;
- case GWN_FETCH_INT_TO_FLOAT_UNIT:
+ case GPU_FETCH_INT_TO_FLOAT_UNIT:
switch (attr->comp_len) {
case 1: data_type = (byte_per_comp == 1) ? GPU_R8 : GPU_R16; break;
case 2: data_type = (byte_per_comp == 1) ? GPU_RG8 : GPU_RG16; break;
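Editor's note: the switch above derives the texel format from the attribute's fetch mode, component count, and byte size. A minimal sketch, assuming a hypothetical vbo that already holds a single two-component GPU_COMP_F32 attribute with GPU_FETCH_FLOAT:

GPUTexture *tex = GPU_texture_create_from_vertbuf(vbo);
/* GPU_FETCH_FLOAT with comp_len == 2 selects the GPU_RG32F case above. */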
diff --git a/source/blender/gpu/intern/gpu_vertex_array_id.cpp b/source/blender/gpu/intern/gpu_vertex_array_id.cpp
index de5be15ec19..2f29bbfbc33 100644
--- a/source/blender/gpu/intern/gpu_vertex_array_id.cpp
+++ b/source/blender/gpu/intern/gpu_vertex_array_id.cpp
@@ -23,7 +23,7 @@
* ***** END GPL LICENSE BLOCK *****
*/
-/** \file blender/gpu/gwn_vertex_array_id.cpp
+/** \file blender/gpu/gpu_vertex_array_id.cpp
* \ingroup gpu
*
* Manage GL vertex array IDs in a thread-safe way
@@ -56,16 +56,16 @@ static bool thread_is_main() {
#endif
#endif
-struct Gwn_Context {
+struct GPUContext {
GLuint default_vao;
- std::unordered_set<Gwn_Batch*> batches; /* Batches that have VAOs from this context */
+ std::unordered_set<GPUBatch*> batches; /* Batches that have VAOs from this context */
std::vector<GLuint> orphaned_vertarray_ids;
std::mutex orphans_mutex; /* todo: try spinlock instead */
#if TRUST_NO_ONE
pthread_t thread; /* Thread on which this context is active. */
bool thread_is_used;
- Gwn_Context() {
+ GPUContext() {
thread_is_used = false;
}
#endif
@@ -73,12 +73,12 @@ struct Gwn_Context {
#if defined(_MSC_VER) && (_MSC_VER == 1800)
#define thread_local __declspec(thread)
-thread_local Gwn_Context* active_ctx = NULL;
+thread_local GPUContext* active_ctx = NULL;
#else
-static thread_local Gwn_Context* active_ctx = NULL;
+static thread_local GPUContext* active_ctx = NULL;
#endif
-static void clear_orphans(Gwn_Context* ctx)
+static void clear_orphans(GPUContext* ctx)
{
ctx->orphans_mutex.lock();
if (!ctx->orphaned_vertarray_ids.empty()) {
@@ -89,19 +89,19 @@ static void clear_orphans(Gwn_Context* ctx)
ctx->orphans_mutex.unlock();
}
-Gwn_Context* GWN_context_create(void)
+GPUContext* GPU_context_create(void)
{
#if TRUST_NO_ONE
/* assert(thread_is_main()); */
#endif
- Gwn_Context* ctx = new Gwn_Context;
+ GPUContext* ctx = new GPUContext;
glGenVertexArrays(1, &ctx->default_vao);
- GWN_context_active_set(ctx);
+ GPU_context_active_set(ctx);
return ctx;
}
-/* to be called after GWN_context_active_set(ctx_to_destroy) */
-void GWN_context_discard(Gwn_Context* ctx)
+/* to be called after GPU_context_active_set(ctx_to_destroy) */
+void GPU_context_discard(GPUContext* ctx)
{
#if TRUST_NO_ONE
/* Make sure no other thread has locked it. */
@@ -112,7 +112,7 @@ void GWN_context_discard(Gwn_Context* ctx)
/* delete remaining vaos */
while (!ctx->batches.empty()) {
/* this removes the array entry */
- gwn_batch_vao_cache_clear(*ctx->batches.begin());
+ GPU_batch_vao_cache_clear(*ctx->batches.begin());
}
glDeleteVertexArrays(1, &ctx->default_vao);
delete ctx;
@@ -120,7 +120,7 @@ void GWN_context_discard(Gwn_Context* ctx)
}
/* ctx can be NULL */
-void GWN_context_active_set(Gwn_Context* ctx)
+void GPU_context_active_set(GPUContext* ctx)
{
#if TRUST_NO_ONE
if (active_ctx) {
@@ -140,12 +140,12 @@ void GWN_context_active_set(Gwn_Context* ctx)
active_ctx = ctx;
}
-Gwn_Context* GWN_context_active_get(void)
+GPUContext* GPU_context_active_get(void)
{
return active_ctx;
}
-GLuint GWN_vao_default(void)
+GLuint GPU_vao_default(void)
{
#if TRUST_NO_ONE
assert(active_ctx); /* need at least an active context */
@@ -154,7 +154,7 @@ GLuint GWN_vao_default(void)
return active_ctx->default_vao;
}
-GLuint GWN_vao_alloc(void)
+GLuint GPU_vao_alloc(void)
{
#if TRUST_NO_ONE
assert(active_ctx); /* need at least an active context */
@@ -168,7 +168,7 @@ GLuint GWN_vao_alloc(void)
}
/* this can be called from multiple threads */
-void GWN_vao_free(GLuint vao_id, Gwn_Context* ctx)
+void GPU_vao_free(GLuint vao_id, GPUContext* ctx)
{
#if TRUST_NO_ONE
assert(ctx);
@@ -183,12 +183,12 @@ void GWN_vao_free(GLuint vao_id, Gwn_Context* ctx)
}
}
-void gwn_context_add_batch(Gwn_Context* ctx, Gwn_Batch* batch)
+void gpu_context_add_batch(GPUContext* ctx, GPUBatch* batch)
{
ctx->batches.emplace(batch);
}
-void gwn_context_remove_batch(Gwn_Context* ctx, Gwn_Batch* batch)
+void gpu_context_remove_batch(GPUContext* ctx, GPUBatch* batch)
{
ctx->orphans_mutex.lock();
ctx->batches.erase(batch);
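Editor's sketch (not in the commit) of the renamed context lifecycle. GPU_vao_free() is the thread-safe entry point: the owning thread can delete the VAO at once, while any other thread only queues the id in orphaned_vertarray_ids for the owner's next clear_orphans() pass.

GPUContext *ctx = GPU_context_create(); /* creates the default VAO, makes ctx active */
GLuint vao = GPU_vao_alloc();           /* VAO ids belong to the active context */
/* ... bind vao, set attribute pointers, draw ... */
GPU_vao_free(vao, ctx);                 /* safe from any thread, see above */
GPU_context_active_set(ctx);            /* ctx must be active before teardown */
GPU_context_discard(ctx);               /* clears batch VAOs and the default VAO */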
diff --git a/source/blender/gpu/intern/gpu_vertex_buffer.c b/source/blender/gpu/intern/gpu_vertex_buffer.c
index 32f3d494015..5b29913800d 100644
--- a/source/blender/gpu/intern/gpu_vertex_buffer.c
+++ b/source/blender/gpu/intern/gpu_vertex_buffer.c
@@ -23,10 +23,10 @@
* ***** END GPL LICENSE BLOCK *****
*/
-/** \file blender/gpu/intern/gwn_vertex_buffer.c
+/** \file blender/gpu/intern/gpu_vertex_buffer.c
* \ingroup gpu
*
- * Gawain vertex buffer
+ * GPU vertex buffer
*/
#include "GPU_vertex_buffer.h"
@@ -39,27 +39,27 @@
static uint vbo_memory_usage;
-static GLenum convert_usage_type_to_gl(Gwn_UsageType type)
+static GLenum convert_usage_type_to_gl(GPUUsageType type)
{
static const GLenum table[] = {
- [GWN_USAGE_STREAM] = GL_STREAM_DRAW,
- [GWN_USAGE_STATIC] = GL_STATIC_DRAW,
- [GWN_USAGE_DYNAMIC] = GL_DYNAMIC_DRAW
+ [GPU_USAGE_STREAM] = GL_STREAM_DRAW,
+ [GPU_USAGE_STATIC] = GL_STATIC_DRAW,
+ [GPU_USAGE_DYNAMIC] = GL_DYNAMIC_DRAW
};
return table[type];
}
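Editor's note: these conversion tables rely on C99 designated initializers, so an enum value indexes its GL counterpart directly; there is no bounds check, so callers must pass a valid GPUUsageType. Conceptually (the function is file-static, so this line is illustrative only):

GLenum gl_usage = convert_usage_type_to_gl(GPU_USAGE_DYNAMIC); /* GL_DYNAMIC_DRAW */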
-Gwn_VertBuf* GWN_vertbuf_create(Gwn_UsageType usage)
+GPUVertBuf* GPU_vertbuf_create(GPUUsageType usage)
{
- Gwn_VertBuf* verts = malloc(sizeof(Gwn_VertBuf));
- GWN_vertbuf_init(verts, usage);
+ GPUVertBuf* verts = malloc(sizeof(GPUVertBuf));
+ GPU_vertbuf_init(verts, usage);
return verts;
}
-Gwn_VertBuf* GWN_vertbuf_create_with_format_ex(const Gwn_VertFormat* format, Gwn_UsageType usage)
+GPUVertBuf* GPU_vertbuf_create_with_format_ex(const GPUVertFormat* format, GPUUsageType usage)
{
- Gwn_VertBuf* verts = GWN_vertbuf_create(usage);
- GWN_vertformat_copy(&verts->format, format);
+ GPUVertBuf* verts = GPU_vertbuf_create(usage);
+ GPU_vertformat_copy(&verts->format, format);
if (!format->packed) {
VertexFormat_pack(&verts->format);
}
@@ -69,28 +69,28 @@ Gwn_VertBuf* GWN_vertbuf_create_with_format_ex(const Gwn_VertFormat* format, Gwn
/* TODO: implement those memory savings */
}
-void GWN_vertbuf_init(Gwn_VertBuf* verts, Gwn_UsageType usage)
+void GPU_vertbuf_init(GPUVertBuf* verts, GPUUsageType usage)
{
- memset(verts, 0, sizeof(Gwn_VertBuf));
+ memset(verts, 0, sizeof(GPUVertBuf));
verts->usage = usage;
verts->dirty = true;
}
-void GWN_vertbuf_init_with_format_ex(Gwn_VertBuf* verts, const Gwn_VertFormat* format, Gwn_UsageType usage)
+void GPU_vertbuf_init_with_format_ex(GPUVertBuf* verts, const GPUVertFormat* format, GPUUsageType usage)
{
- GWN_vertbuf_init(verts, usage);
- GWN_vertformat_copy(&verts->format, format);
+ GPU_vertbuf_init(verts, usage);
+ GPU_vertformat_copy(&verts->format, format);
if (!format->packed) {
VertexFormat_pack(&verts->format);
}
}
-void GWN_vertbuf_discard(Gwn_VertBuf* verts)
+void GPU_vertbuf_discard(GPUVertBuf* verts)
{
if (verts->vbo_id) {
- GWN_buf_id_free(verts->vbo_id);
+ GPU_buf_id_free(verts->vbo_id);
#if VRAM_USAGE
- vbo_memory_usage -= GWN_vertbuf_size_get(verts);
+ vbo_memory_usage -= GPU_vertbuf_size_get(verts);
#endif
}
if (verts->data) {
@@ -99,15 +99,15 @@ void GWN_vertbuf_discard(Gwn_VertBuf* verts)
free(verts);
}
-uint GWN_vertbuf_size_get(const Gwn_VertBuf* verts)
+uint GPU_vertbuf_size_get(const GPUVertBuf* verts)
{
return vertex_buffer_size(&verts->format, verts->vertex_len);
}
/* create a new allocation, discarding any existing data */
-void GWN_vertbuf_data_alloc(Gwn_VertBuf* verts, uint v_len)
+void GPU_vertbuf_data_alloc(GPUVertBuf* verts, uint v_len)
{
- Gwn_VertFormat* format = &verts->format;
+ GPUVertFormat* format = &verts->format;
if (!format->packed) {
VertexFormat_pack(format);
}
@@ -117,7 +117,7 @@ void GWN_vertbuf_data_alloc(Gwn_VertBuf* verts, uint v_len)
#endif
/* only create the buffer the 1st time */
if (verts->vbo_id == 0) {
- verts->vbo_id = GWN_buf_id_alloc();
+ verts->vbo_id = GPU_buf_id_alloc();
}
/* discard previous data if any */
if (verts->data) {
@@ -125,15 +125,15 @@ void GWN_vertbuf_data_alloc(Gwn_VertBuf* verts, uint v_len)
}
#if VRAM_USAGE
uint new_size = vertex_buffer_size(&verts->format, v_len);
- vbo_memory_usage += new_size - GWN_vertbuf_size_get(verts);
+ vbo_memory_usage += new_size - GPU_vertbuf_size_get(verts);
#endif
verts->dirty = true;
verts->vertex_len = verts->vertex_alloc = v_len;
- verts->data = malloc(sizeof(GLubyte) * GWN_vertbuf_size_get(verts));
+ verts->data = malloc(sizeof(GLubyte) * GPU_vertbuf_size_get(verts));
}
/* resize buffer keeping existing data */
-void GWN_vertbuf_data_resize(Gwn_VertBuf* verts, uint v_len)
+void GPU_vertbuf_data_resize(GPUVertBuf* verts, uint v_len)
{
#if TRUST_NO_ONE
assert(verts->data != NULL);
@@ -142,17 +142,17 @@ void GWN_vertbuf_data_resize(Gwn_VertBuf* verts, uint v_len)
#if VRAM_USAGE
uint new_size = vertex_buffer_size(&verts->format, v_len);
- vbo_memory_usage += new_size - GWN_vertbuf_size_get(verts);
+ vbo_memory_usage += new_size - GPU_vertbuf_size_get(verts);
#endif
verts->dirty = true;
verts->vertex_len = verts->vertex_alloc = v_len;
- verts->data = realloc(verts->data, sizeof(GLubyte) * GWN_vertbuf_size_get(verts));
+ verts->data = realloc(verts->data, sizeof(GLubyte) * GPU_vertbuf_size_get(verts));
}
/* Sets the vertex count but does not change the allocation.
 * Only this many verts will be uploaded to the GPU and rendered.
 * This is useful for streaming data. */
-void GWN_vertbuf_vertex_count_set(Gwn_VertBuf* verts, uint v_len)
+void GPU_vertbuf_vertex_count_set(GPUVertBuf* verts, uint v_len)
{
#if TRUST_NO_ONE
assert(verts->data != NULL); /* only for dynamic data */
@@ -161,15 +161,15 @@ void GWN_vertbuf_vertex_count_set(Gwn_VertBuf* verts, uint v_len)
#if VRAM_USAGE
uint new_size = vertex_buffer_size(&verts->format, v_len);
- vbo_memory_usage += new_size - GWN_vertbuf_size_get(verts);
+ vbo_memory_usage += new_size - GPU_vertbuf_size_get(verts);
#endif
verts->vertex_len = v_len;
}
-void GWN_vertbuf_attr_set(Gwn_VertBuf* verts, uint a_idx, uint v_idx, const void* data)
+void GPU_vertbuf_attr_set(GPUVertBuf* verts, uint a_idx, uint v_idx, const void* data)
{
- const Gwn_VertFormat* format = &verts->format;
- const Gwn_VertAttr* a = format->attribs + a_idx;
+ const GPUVertFormat* format = &verts->format;
+ const GPUVertAttr* a = format->attribs + a_idx;
#if TRUST_NO_ONE
assert(a_idx < format->attr_len);
@@ -180,23 +180,23 @@ void GWN_vertbuf_attr_set(Gwn_VertBuf* verts, uint a_idx, uint v_idx, const void
memcpy((GLubyte*)verts->data + a->offset + v_idx * format->stride, data, a->sz);
}
-void GWN_vertbuf_attr_fill(Gwn_VertBuf* verts, uint a_idx, const void* data)
+void GPU_vertbuf_attr_fill(GPUVertBuf* verts, uint a_idx, const void* data)
{
- const Gwn_VertFormat* format = &verts->format;
- const Gwn_VertAttr* a = format->attribs + a_idx;
+ const GPUVertFormat* format = &verts->format;
+ const GPUVertAttr* a = format->attribs + a_idx;
#if TRUST_NO_ONE
assert(a_idx < format->attr_len);
#endif
const uint stride = a->sz; /* tightly packed input data */
- GWN_vertbuf_attr_fill_stride(verts, a_idx, stride, data);
+ GPU_vertbuf_attr_fill_stride(verts, a_idx, stride, data);
}
-void GWN_vertbuf_attr_fill_stride(Gwn_VertBuf* verts, uint a_idx, uint stride, const void* data)
+void GPU_vertbuf_attr_fill_stride(GPUVertBuf* verts, uint a_idx, uint stride, const void* data)
{
- const Gwn_VertFormat* format = &verts->format;
- const Gwn_VertAttr* a = format->attribs + a_idx;
+ const GPUVertFormat* format = &verts->format;
+ const GPUVertAttr* a = format->attribs + a_idx;
#if TRUST_NO_ONE
assert(a_idx < format->attr_len);
@@ -217,10 +217,10 @@ void GWN_vertbuf_attr_fill_stride(Gwn_VertBuf* verts, uint a_idx, uint stride, c
}
}
-void GWN_vertbuf_attr_get_raw_data(Gwn_VertBuf* verts, uint a_idx, Gwn_VertBufRaw *access)
+void GPU_vertbuf_attr_get_raw_data(GPUVertBuf* verts, uint a_idx, GPUVertBufRaw *access)
{
- const Gwn_VertFormat* format = &verts->format;
- const Gwn_VertAttr* a = format->attribs + a_idx;
+ const GPUVertFormat* format = &verts->format;
+ const GPUVertAttr* a = format->attribs + a_idx;
#if TRUST_NO_ONE
assert(a_idx < format->attr_len);
@@ -238,23 +238,23 @@ void GWN_vertbuf_attr_get_raw_data(Gwn_VertBuf* verts, uint a_idx, Gwn_VertBufRa
#endif
}
-static void VertBuffer_upload_data(Gwn_VertBuf* verts)
+static void VertBuffer_upload_data(GPUVertBuf* verts)
{
- uint buffer_sz = GWN_vertbuf_size_get(verts);
+ uint buffer_sz = GPU_vertbuf_size_get(verts);
/* orphan the vbo to avoid sync */
glBufferData(GL_ARRAY_BUFFER, buffer_sz, NULL, convert_usage_type_to_gl(verts->usage));
/* upload data */
glBufferSubData(GL_ARRAY_BUFFER, 0, buffer_sz, verts->data);
- if (verts->usage == GWN_USAGE_STATIC) {
+ if (verts->usage == GPU_USAGE_STATIC) {
free(verts->data);
verts->data = NULL;
}
verts->dirty = false;
}
-void GWN_vertbuf_use(Gwn_VertBuf* verts)
+void GPU_vertbuf_use(GPUVertBuf* verts)
{
glBindBuffer(GL_ARRAY_BUFFER, verts->vbo_id);
if (verts->dirty) {
@@ -262,7 +262,7 @@ void GWN_vertbuf_use(Gwn_VertBuf* verts)
}
}
-uint GWN_vertbuf_get_memory_usage(void)
+uint GPU_vertbuf_get_memory_usage(void)
{
return vbo_memory_usage;
}
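Editor's sketch (not in the commit) of the renamed vertex-buffer workflow end to end; the attribute name "pos" is a hypothetical example.

GPUVertFormat format;
GPU_vertformat_clear(&format);
uint pos = GPU_vertformat_attr_add(&format, "pos", GPU_COMP_F32, 3, GPU_FETCH_FLOAT);

GPUVertBuf *verts = GPU_vertbuf_create_with_format_ex(&format, GPU_USAGE_STATIC);
GPU_vertbuf_data_alloc(verts, 3); /* CPU-side staging for 3 vertices */

const float tri[3][3] = {{-1.0f, -1.0f, 0.0f}, {1.0f, -1.0f, 0.0f}, {0.0f, 1.0f, 0.0f}};
for (uint v = 0; v < 3; ++v) {
	GPU_vertbuf_attr_set(verts, pos, v, tri[v]);
}

GPU_vertbuf_use(verts);     /* binds the VBO and uploads dirty data; GPU_USAGE_STATIC
                             * frees the CPU copy after upload (see above) */
GPU_vertbuf_discard(verts); /* releases the VBO id and any remaining CPU data */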
diff --git a/source/blender/gpu/intern/gpu_vertex_format.c b/source/blender/gpu/intern/gpu_vertex_format.c
index bd9f9250564..d0a907bcd0d 100644
--- a/source/blender/gpu/intern/gpu_vertex_format.c
+++ b/source/blender/gpu/intern/gpu_vertex_format.c
@@ -23,10 +23,10 @@
* ***** END GPL LICENSE BLOCK *****
*/
-/** \file blender/gpu/intern/gwn_vertex_format.c
+/** \file blender/gpu/intern/gpu_vertex_format.c
* \ingroup gpu
*
- * Gawain vertex format
+ * GPU vertex format
*/
#include "GPU_vertex_format.h"
@@ -40,26 +40,26 @@
#include <stdio.h>
#endif
-void GWN_vertformat_clear(Gwn_VertFormat* format)
+void GPU_vertformat_clear(GPUVertFormat* format)
{
#if TRUST_NO_ONE
- memset(format, 0, sizeof(Gwn_VertFormat));
+ memset(format, 0, sizeof(GPUVertFormat));
#else
format->attr_len = 0;
format->packed = false;
format->name_offset = 0;
format->name_len = 0;
- for (unsigned i = 0; i < GWN_VERT_ATTR_MAX_LEN; i++) {
+ for (unsigned i = 0; i < GPU_VERT_ATTR_MAX_LEN; i++) {
format->attribs[i].name_len = 0;
}
#endif
}
-void GWN_vertformat_copy(Gwn_VertFormat* dest, const Gwn_VertFormat* src)
+void GPU_vertformat_copy(GPUVertFormat* dest, const GPUVertFormat* src)
{
/* copy regular struct fields */
- memcpy(dest, src, sizeof(Gwn_VertFormat));
+ memcpy(dest, src, sizeof(GPUVertFormat));
for (unsigned i = 0; i < dest->attr_len; i++) {
for (unsigned j = 0; j < dest->attribs[i].name_len; j++) {
@@ -68,43 +68,43 @@ void GWN_vertformat_copy(Gwn_VertFormat* dest, const Gwn_VertFormat* src)
}
}
-static GLenum convert_comp_type_to_gl(Gwn_VertCompType type)
+static GLenum convert_comp_type_to_gl(GPUVertCompType type)
{
static const GLenum table[] = {
- [GWN_COMP_I8] = GL_BYTE,
- [GWN_COMP_U8] = GL_UNSIGNED_BYTE,
- [GWN_COMP_I16] = GL_SHORT,
- [GWN_COMP_U16] = GL_UNSIGNED_SHORT,
- [GWN_COMP_I32] = GL_INT,
- [GWN_COMP_U32] = GL_UNSIGNED_INT,
+ [GPU_COMP_I8] = GL_BYTE,
+ [GPU_COMP_U8] = GL_UNSIGNED_BYTE,
+ [GPU_COMP_I16] = GL_SHORT,
+ [GPU_COMP_U16] = GL_UNSIGNED_SHORT,
+ [GPU_COMP_I32] = GL_INT,
+ [GPU_COMP_U32] = GL_UNSIGNED_INT,
- [GWN_COMP_F32] = GL_FLOAT,
+ [GPU_COMP_F32] = GL_FLOAT,
- [GWN_COMP_I10] = GL_INT_2_10_10_10_REV
+ [GPU_COMP_I10] = GL_INT_2_10_10_10_REV
};
return table[type];
}
-static unsigned comp_sz(Gwn_VertCompType type)
+static unsigned comp_sz(GPUVertCompType type)
{
#if TRUST_NO_ONE
- assert(type <= GWN_COMP_F32); /* other types have irregular sizes (not bytes) */
+ assert(type <= GPU_COMP_F32); /* other types have irregular sizes (not bytes) */
#endif
const GLubyte sizes[] = {1,1,2,2,4,4,4};
return sizes[type];
}
-static unsigned attrib_sz(const Gwn_VertAttr *a)
+static unsigned attrib_sz(const GPUVertAttr *a)
{
- if (a->comp_type == GWN_COMP_I10) {
+ if (a->comp_type == GPU_COMP_I10) {
return 4; /* always packed as 10_10_10_2 */
}
return a->comp_len * comp_sz(a->comp_type);
}
-static unsigned attrib_align(const Gwn_VertAttr *a)
+static unsigned attrib_align(const GPUVertAttr *a)
{
- if (a->comp_type == GWN_COMP_I10) {
+ if (a->comp_type == GPU_COMP_I10) {
return 4; /* always packed as 10_10_10_2 */
}
unsigned c = comp_sz(a->comp_type);
@@ -116,7 +116,7 @@ static unsigned attrib_align(const Gwn_VertAttr *a)
}
}
-unsigned vertex_buffer_size(const Gwn_VertFormat* format, unsigned vertex_len)
+unsigned vertex_buffer_size(const GPUVertFormat* format, unsigned vertex_len)
{
#if TRUST_NO_ONE
assert(format->packed && format->stride > 0);
@@ -124,11 +124,11 @@ unsigned vertex_buffer_size(const Gwn_VertFormat* format, unsigned vertex_len)
return format->stride * vertex_len;
}
-static const char* copy_attrib_name(Gwn_VertFormat* format, const char* name)
+static const char* copy_attrib_name(GPUVertFormat* format, const char* name)
{
/* strncpy does 110% of what we need; let's do exactly 100% */
char* name_copy = format->names + format->name_offset;
- unsigned available = GWN_VERT_ATTR_NAMES_BUF_LEN - format->name_offset;
+ unsigned available = GPU_VERT_ATTR_NAMES_BUF_LEN - format->name_offset;
bool terminated = false;
for (unsigned i = 0; i < available; ++i) {
@@ -142,35 +142,35 @@ static const char* copy_attrib_name(Gwn_VertFormat* format, const char* name)
}
#if TRUST_NO_ONE
assert(terminated);
- assert(format->name_offset <= GWN_VERT_ATTR_NAMES_BUF_LEN);
+ assert(format->name_offset <= GPU_VERT_ATTR_NAMES_BUF_LEN);
#else
(void)terminated;
#endif
return name_copy;
}
-unsigned GWN_vertformat_attr_add(Gwn_VertFormat* format, const char* name, Gwn_VertCompType comp_type, unsigned comp_len, Gwn_VertFetchMode fetch_mode)
+unsigned GPU_vertformat_attr_add(GPUVertFormat* format, const char* name, GPUVertCompType comp_type, unsigned comp_len, GPUVertFetchMode fetch_mode)
{
#if TRUST_NO_ONE
- assert(format->name_len < GWN_VERT_ATTR_MAX_LEN); /* there's room for more */
- assert(format->attr_len < GWN_VERT_ATTR_MAX_LEN); /* there's room for more */
+ assert(format->name_len < GPU_VERT_ATTR_MAX_LEN); /* there's room for more */
+ assert(format->attr_len < GPU_VERT_ATTR_MAX_LEN); /* there's room for more */
assert(!format->packed); /* packed means frozen/locked */
assert((comp_len >= 1 && comp_len <= 4) || comp_len == 8 || comp_len == 12 || comp_len == 16);
switch (comp_type) {
- case GWN_COMP_F32:
+ case GPU_COMP_F32:
/* float type can only be kept as float */
- assert(fetch_mode == GWN_FETCH_FLOAT);
+ assert(fetch_mode == GPU_FETCH_FLOAT);
break;
- case GWN_COMP_I10:
+ case GPU_COMP_I10:
/* 10_10_10 format intended for normals (xyz) or colors (rgb)
* extra component packed.w can be manually set to { -2, -1, 0, 1 } */
assert(comp_len == 3 || comp_len == 4);
- assert(fetch_mode == GWN_FETCH_INT_TO_FLOAT_UNIT); /* not strictly required, may relax later */
+ assert(fetch_mode == GPU_FETCH_INT_TO_FLOAT_UNIT); /* not strictly required, may relax later */
break;
default:
/* integer types can be kept as int or converted/normalized to float */
- assert(fetch_mode != GWN_FETCH_FLOAT);
+ assert(fetch_mode != GPU_FETCH_FLOAT);
/* only support float matrices (see Batch_update_program_bindings) */
assert(comp_len != 8 && comp_len != 12 && comp_len != 16);
}
@@ -178,12 +178,12 @@ unsigned GWN_vertformat_attr_add(Gwn_VertFormat* format, const char* name, Gwn_V
format->name_len++; /* multiname support */
const unsigned attrib_id = format->attr_len++;
- Gwn_VertAttr* attrib = format->attribs + attrib_id;
+ GPUVertAttr* attrib = format->attribs + attrib_id;
attrib->name[attrib->name_len++] = copy_attrib_name(format, name);
attrib->comp_type = comp_type;
attrib->gl_comp_type = convert_comp_type_to_gl(comp_type);
- attrib->comp_len = (comp_type == GWN_COMP_I10) ? 4 : comp_len; /* system needs 10_10_10_2 to be 4 or BGRA */
+ attrib->comp_len = (comp_type == GPU_COMP_I10) ? 4 : comp_len; /* system needs 10_10_10_2 to be 4 or BGRA */
attrib->sz = attrib_sz(attrib);
attrib->offset = 0; /* offsets & stride are calculated later (during pack) */
attrib->fetch_mode = fetch_mode;
@@ -191,12 +191,12 @@ unsigned GWN_vertformat_attr_add(Gwn_VertFormat* format, const char* name, Gwn_V
return attrib_id;
}
-void GWN_vertformat_alias_add(Gwn_VertFormat* format, const char* alias)
+void GPU_vertformat_alias_add(GPUVertFormat* format, const char* alias)
{
- Gwn_VertAttr* attrib = format->attribs + (format->attr_len - 1);
+ GPUVertAttr* attrib = format->attribs + (format->attr_len - 1);
#if TRUST_NO_ONE
- assert(format->name_len < GWN_VERT_ATTR_MAX_LEN); /* there's room for more */
- assert(attrib->name_len < GWN_VERT_ATTR_MAX_NAMES);
+ assert(format->name_len < GPU_VERT_ATTR_MAX_LEN); /* there's room for more */
+ assert(attrib->name_len < GPU_VERT_ATTR_MAX_NAMES);
#endif
format->name_len++; /* multiname support */
attrib->name[attrib->name_len++] = copy_attrib_name(format, alias);
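Editor's sketch: "multiname support" means an alias appends a second name to the most recently added attribute, so a shader may bind the same data under either name. The names here are illustrative, and format is assumed to be an already-cleared GPUVertFormat.

GPU_vertformat_attr_add(&format, "nor", GPU_COMP_I10, 3, GPU_FETCH_INT_TO_FLOAT_UNIT);
GPU_vertformat_alias_add(&format, "normal"); /* same attribute, second shader name */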
@@ -221,7 +221,7 @@ static void show_pack(unsigned a_idx, unsigned sz, unsigned pad)
}
#endif
-void VertexFormat_pack(Gwn_VertFormat* format)
+void VertexFormat_pack(GPUVertFormat* format)
{
/* For now, attributes are packed in the order they were added,
* making sure each attrib is naturally aligned (add padding where necessary)
@@ -231,7 +231,7 @@ void VertexFormat_pack(Gwn_VertFormat* format)
/* TODO: realloc just enough to hold the final combo string. And just enough to
* hold used attribs, not all 16. */
- Gwn_VertAttr* a0 = format->attribs + 0;
+ GPUVertAttr* a0 = format->attribs + 0;
a0->offset = 0;
unsigned offset = a0->sz;
@@ -240,7 +240,7 @@ void VertexFormat_pack(Gwn_VertFormat* format)
#endif
for (unsigned a_idx = 1; a_idx < format->attr_len; ++a_idx) {
- Gwn_VertAttr* a = format->attribs + a_idx;
+ GPUVertAttr* a = format->attribs + a_idx;
unsigned mid_padding = padding(offset, attrib_align(a));
offset += mid_padding;
a->offset = offset;
@@ -263,7 +263,7 @@ void VertexFormat_pack(Gwn_VertFormat* format)
/* OpenGL ES packs in a different order than desktop GL, but component conversion is the same.
- * Of the code here, only struct Gwn_PackedNormal needs to change. */
+ * Of the code here, only struct GPUPackedNormal needs to change. */
#define SIGNED_INT_10_MAX 511
#define SIGNED_INT_10_MIN -512
@@ -297,14 +297,14 @@ static int convert_i16(short x)
return x >> 6;
}
-Gwn_PackedNormal GWN_normal_convert_i10_v3(const float data[3])
+GPUPackedNormal GPU_normal_convert_i10_v3(const float data[3])
{
- Gwn_PackedNormal n = { .x = quantize(data[0]), .y = quantize(data[1]), .z = quantize(data[2]) };
+ GPUPackedNormal n = { .x = quantize(data[0]), .y = quantize(data[1]), .z = quantize(data[2]) };
return n;
}
-Gwn_PackedNormal GWN_normal_convert_i10_s3(const short data[3])
+GPUPackedNormal GPU_normal_convert_i10_s3(const short data[3])
{
- Gwn_PackedNormal n = { .x = convert_i16(data[0]), .y = convert_i16(data[1]), .z = convert_i16(data[2]) };
+ GPUPackedNormal n = { .x = convert_i16(data[0]), .y = convert_i16(data[1]), .z = convert_i16(data[2]) };
return n;
}
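Editor's sketch of the 10_10_10_2 conversions above. The constants suggest quantize() scales [-1.0, 1.0] floats by SIGNED_INT_10_MAX, and convert_i16() keeps the top 10 bits of a short; the exact behavior of quantize() is an assumption, since its body falls outside this hunk.

const float up[3] = {0.0f, 0.0f, 1.0f};
GPUPackedNormal n = GPU_normal_convert_i10_v3(up); /* n.z presumably 511 */

const short s[3] = {0, 0, 32767};
GPUPackedNormal m = GPU_normal_convert_i10_s3(s);  /* 32767 >> 6 == 511 */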
diff --git a/source/blender/gpu/intern/gpu_vertex_format_private.h b/source/blender/gpu/intern/gpu_vertex_format_private.h
index 3cae9969fd8..4ce3ebba714 100644
--- a/source/blender/gpu/intern/gpu_vertex_format_private.h
+++ b/source/blender/gpu/intern/gpu_vertex_format_private.h
@@ -23,17 +23,17 @@
* ***** END GPL LICENSE BLOCK *****
*/
-/** \file blender/gpu/gwn_vertex_format_private.h
+/** \file blender/gpu/intern/gpu_vertex_format_private.h
* \ingroup gpu
*
- * Gawain vertex format
+ * GPU vertex format
*/
-#ifndef __GWN_VERTEX_FORMAT_PRIVATE_H__
-#define __GWN_VERTEX_FORMAT_PRIVATE_H__
+#ifndef __GPU_VERTEX_FORMAT_PRIVATE_H__
+#define __GPU_VERTEX_FORMAT_PRIVATE_H__
-void VertexFormat_pack(Gwn_VertFormat*);
+void VertexFormat_pack(GPUVertFormat*);
uint padding(uint offset, uint alignment);
-uint vertex_buffer_size(const Gwn_VertFormat*, uint vertex_len);
+uint vertex_buffer_size(const GPUVertFormat*, uint vertex_len);
-#endif /* __GWN_VERTEX_FORMAT_PRIVATE_H__ */
+#endif /* __GPU_VERTEX_FORMAT_PRIVATE_H__ */
diff --git a/source/blender/gpu/intern/gpu_viewport.c b/source/blender/gpu/intern/gpu_viewport.c
index 0bf215f31a8..5d495779ba1 100644
--- a/source/blender/gpu/intern/gpu_viewport.c
+++ b/source/blender/gpu/intern/gpu_viewport.c
@@ -539,9 +539,9 @@ void GPU_viewport_draw_to_screen(GPUViewport *viewport, const rcti *rect)
glUniform1i(GPU_shader_get_uniform(shader, "image"), 0);
glUniform4f(GPU_shader_get_uniform(shader, "rect_icon"), halfx, halfy, 1.0f + halfx, 1.0f + halfy);
glUniform4f(GPU_shader_get_uniform(shader, "rect_geom"), x1, y1, x2, y2);
- glUniform4f(GPU_shader_get_builtin_uniform(shader, GWN_UNIFORM_COLOR), 1.0f, 1.0f, 1.0f, 1.0f);
+ glUniform4f(GPU_shader_get_builtin_uniform(shader, GPU_UNIFORM_COLOR), 1.0f, 1.0f, 1.0f, 1.0f);
- GWN_draw_primitive(GWN_PRIM_TRI_STRIP, 4);
+ GPU_draw_primitive(GPU_PRIM_TRI_STRIP, 4);
GPU_texture_unbind(color);
}