git.blender.org/blender.git
Diffstat (limited to 'source/blender/gpu/intern/gpu_batch.c')
-rw-r--r--  source/blender/gpu/intern/gpu_batch.c  240
1 file changed, 120 insertions(+), 120 deletions(-)
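
This commit is part of the Gawain-to-GPU module rename: every Gwn_*/GWN_* type, function, and enum prefix in gpu_batch.c becomes GPU*/GPU_*, call for call, with no behavioral change. A minimal caller sketch across the rename, using only functions that appear in this diff; `verts` (a GPUVertBuf) and `elem` (a GPUIndexBuf) are assumed to have been built elsewhere, and GPU_SHADER_3D_UNIFORM_COLOR plus the "color" uniform are illustrative assumptions, not part of this diff:

    /* Before (Gawain prefixes). Buffer setup omitted; shader id and
     * uniform name are illustrative assumptions. */
    Gwn_Batch *batch = GWN_batch_create_ex(GWN_PRIM_POINTS, verts, elem, 0);
    GWN_batch_program_set_builtin(batch, GPU_SHADER_3D_UNIFORM_COLOR);
    GWN_batch_uniform_4f(batch, "color", 1.0f, 0.5f, 0.0f, 1.0f);
    GWN_batch_draw(batch);
    GWN_batch_discard(batch);

    /* After (GPU prefixes), the same sequence renamed: */
    GPUBatch *batch = GPU_batch_create_ex(GPU_PRIM_POINTS, verts, elem, 0);
    GPU_batch_program_set_builtin(batch, GPU_SHADER_3D_UNIFORM_COLOR);
    GPU_batch_uniform_4f(batch, "color", 1.0f, 0.5f, 0.0f, 1.0f);
    GPU_batch_draw(batch);
    GPU_batch_discard(batch);

Passing 0 for owns_flag keeps buffer ownership with the caller, so the discard call frees only the batch itself (see GPU_batch_discard below).
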
diff --git a/source/blender/gpu/intern/gpu_batch.c b/source/blender/gpu/intern/gpu_batch.c
index 90f30930884..4d455f6f464 100644
--- a/source/blender/gpu/intern/gpu_batch.c
+++ b/source/blender/gpu/intern/gpu_batch.c
@@ -23,10 +23,10 @@
* ***** END GPL LICENSE BLOCK *****
*/
-/** \file blender/gpu/intern/gwn_batch.c
+/** \file blender/gpu/intern/gpu_batch.c
* \ingroup gpu
*
- * Gawain geometry batch
+ * GPU geometry batch
* Contains VAOs + VBOs + Shader representing a drawable entity.
*/
@@ -44,9 +44,9 @@
#include <stdlib.h>
#include <string.h>
-static void batch_update_program_bindings(Gwn_Batch* batch, uint v_first);
+static void batch_update_program_bindings(GPUBatch* batch, uint v_first);
-void gwn_batch_vao_cache_clear(Gwn_Batch* batch)
+void GPU_batch_vao_cache_clear(GPUBatch* batch)
{
if (batch->context == NULL) {
return;
@@ -54,45 +54,45 @@ void gwn_batch_vao_cache_clear(Gwn_Batch* batch)
if (batch->is_dynamic_vao_count) {
for (int i = 0; i < batch->dynamic_vaos.count; ++i) {
if (batch->dynamic_vaos.vao_ids[i]) {
- GWN_vao_free(batch->dynamic_vaos.vao_ids[i], batch->context);
+ GPU_vao_free(batch->dynamic_vaos.vao_ids[i], batch->context);
}
if (batch->dynamic_vaos.interfaces[i]) {
- GWN_shaderinterface_remove_batch_ref((Gwn_ShaderInterface *)batch->dynamic_vaos.interfaces[i], batch);
+ GPU_shaderinterface_remove_batch_ref((GPUShaderInterface *)batch->dynamic_vaos.interfaces[i], batch);
}
}
free(batch->dynamic_vaos.interfaces);
free(batch->dynamic_vaos.vao_ids);
}
else {
- for (int i = 0; i < GWN_BATCH_VAO_STATIC_LEN; ++i) {
+ for (int i = 0; i < GPU_BATCH_VAO_STATIC_LEN; ++i) {
if (batch->static_vaos.vao_ids[i]) {
- GWN_vao_free(batch->static_vaos.vao_ids[i], batch->context);
+ GPU_vao_free(batch->static_vaos.vao_ids[i], batch->context);
}
if (batch->static_vaos.interfaces[i]) {
- GWN_shaderinterface_remove_batch_ref((Gwn_ShaderInterface *)batch->static_vaos.interfaces[i], batch);
+ GPU_shaderinterface_remove_batch_ref((GPUShaderInterface *)batch->static_vaos.interfaces[i], batch);
}
}
}
batch->is_dynamic_vao_count = false;
- for (int i = 0; i < GWN_BATCH_VAO_STATIC_LEN; ++i) {
+ for (int i = 0; i < GPU_BATCH_VAO_STATIC_LEN; ++i) {
batch->static_vaos.vao_ids[i] = 0;
batch->static_vaos.interfaces[i] = NULL;
}
- gwn_context_remove_batch(batch->context, batch);
+ gpu_context_remove_batch(batch->context, batch);
batch->context = NULL;
}
-Gwn_Batch* GWN_batch_create_ex(
- Gwn_PrimType prim_type, Gwn_VertBuf* verts, Gwn_IndexBuf* elem,
+GPUBatch* GPU_batch_create_ex(
+ GPUPrimType prim_type, GPUVertBuf* verts, GPUIndexBuf* elem,
uint owns_flag)
{
- Gwn_Batch* batch = calloc(1, sizeof(Gwn_Batch));
- GWN_batch_init_ex(batch, prim_type, verts, elem, owns_flag);
+ GPUBatch* batch = calloc(1, sizeof(GPUBatch));
+ GPU_batch_init_ex(batch, prim_type, verts, elem, owns_flag);
return batch;
}
-void GWN_batch_init_ex(
- Gwn_Batch* batch, Gwn_PrimType prim_type, Gwn_VertBuf* verts, Gwn_IndexBuf* elem,
+void GPU_batch_init_ex(
+ GPUBatch* batch, GPUPrimType prim_type, GPUVertBuf* verts, GPUIndexBuf* elem,
uint owns_flag)
{
#if TRUST_NO_ONE
@@ -100,49 +100,49 @@ void GWN_batch_init_ex(
#endif
batch->verts[0] = verts;
- for (int v = 1; v < GWN_BATCH_VBO_MAX_LEN; ++v) {
+ for (int v = 1; v < GPU_BATCH_VBO_MAX_LEN; ++v) {
batch->verts[v] = NULL;
}
batch->inst = NULL;
batch->elem = elem;
batch->gl_prim_type = convert_prim_type_to_gl(prim_type);
- batch->phase = GWN_BATCH_READY_TO_DRAW;
+ batch->phase = GPU_BATCH_READY_TO_DRAW;
batch->is_dynamic_vao_count = false;
batch->owns_flag = owns_flag;
batch->free_callback = NULL;
}
/* This will share the VBOs with the new batch. */
-Gwn_Batch* GWN_batch_duplicate(Gwn_Batch* batch_src)
+GPUBatch* GPU_batch_duplicate(GPUBatch* batch_src)
{
- Gwn_Batch* batch = GWN_batch_create_ex(GWN_PRIM_POINTS, batch_src->verts[0], batch_src->elem, 0);
+ GPUBatch* batch = GPU_batch_create_ex(GPU_PRIM_POINTS, batch_src->verts[0], batch_src->elem, 0);
batch->gl_prim_type = batch_src->gl_prim_type;
- for (int v = 1; v < GWN_BATCH_VBO_MAX_LEN; ++v) {
+ for (int v = 1; v < GPU_BATCH_VBO_MAX_LEN; ++v) {
batch->verts[v] = batch_src->verts[v];
}
return batch;
}
-void GWN_batch_discard(Gwn_Batch* batch)
+void GPU_batch_discard(GPUBatch* batch)
{
- if (batch->owns_flag & GWN_BATCH_OWNS_INDEX) {
- GWN_indexbuf_discard(batch->elem);
+ if (batch->owns_flag & GPU_BATCH_OWNS_INDEX) {
+ GPU_indexbuf_discard(batch->elem);
}
- if (batch->owns_flag & GWN_BATCH_OWNS_INSTANCES) {
- GWN_vertbuf_discard(batch->inst);
+ if (batch->owns_flag & GPU_BATCH_OWNS_INSTANCES) {
+ GPU_vertbuf_discard(batch->inst);
}
- if ((batch->owns_flag & ~GWN_BATCH_OWNS_INDEX) != 0) {
- for (int v = 0; v < GWN_BATCH_VBO_MAX_LEN; ++v) {
+ if ((batch->owns_flag & ~GPU_BATCH_OWNS_INDEX) != 0) {
+ for (int v = 0; v < GPU_BATCH_VBO_MAX_LEN; ++v) {
if (batch->verts[v] == NULL) {
break;
}
if (batch->owns_flag & (1 << v)) {
- GWN_vertbuf_discard(batch->verts[v]);
+ GPU_vertbuf_discard(batch->verts[v]);
}
}
}
- gwn_batch_vao_cache_clear(batch);
+ GPU_batch_vao_cache_clear(batch);
if (batch->free_callback) {
batch->free_callback(batch, batch->callback_data);
@@ -150,42 +150,42 @@ void GWN_batch_discard(Gwn_Batch* batch)
free(batch);
}
-void GWN_batch_callback_free_set(Gwn_Batch* batch, void (*callback)(Gwn_Batch*, void*), void* user_data)
+void GPU_batch_callback_free_set(GPUBatch* batch, void (*callback)(GPUBatch*, void*), void* user_data)
{
batch->free_callback = callback;
batch->callback_data = user_data;
}
-void GWN_batch_instbuf_set(Gwn_Batch* batch, Gwn_VertBuf* inst, bool own_vbo)
+void GPU_batch_instbuf_set(GPUBatch* batch, GPUVertBuf* inst, bool own_vbo)
{
#if TRUST_NO_ONE
assert(inst != NULL);
#endif
/* redo the bindings */
- gwn_batch_vao_cache_clear(batch);
+ GPU_batch_vao_cache_clear(batch);
- if (batch->inst != NULL && (batch->owns_flag & GWN_BATCH_OWNS_INSTANCES)) {
- GWN_vertbuf_discard(batch->inst);
+ if (batch->inst != NULL && (batch->owns_flag & GPU_BATCH_OWNS_INSTANCES)) {
+ GPU_vertbuf_discard(batch->inst);
}
batch->inst = inst;
if (own_vbo) {
- batch->owns_flag |= GWN_BATCH_OWNS_INSTANCES;
+ batch->owns_flag |= GPU_BATCH_OWNS_INSTANCES;
}
else {
- batch->owns_flag &= ~GWN_BATCH_OWNS_INSTANCES;
+ batch->owns_flag &= ~GPU_BATCH_OWNS_INSTANCES;
}
}
/* Returns the index of verts in the batch. */
-int GWN_batch_vertbuf_add_ex(
- Gwn_Batch* batch, Gwn_VertBuf* verts,
+int GPU_batch_vertbuf_add_ex(
+ GPUBatch* batch, GPUVertBuf* verts,
bool own_vbo)
{
/* redo the bindings */
- gwn_batch_vao_cache_clear(batch);
+ GPU_batch_vao_cache_clear(batch);
- for (uint v = 0; v < GWN_BATCH_VBO_MAX_LEN; ++v) {
+ for (uint v = 0; v < GPU_BATCH_VBO_MAX_LEN; ++v) {
if (batch->verts[v] == NULL) {
#if TRUST_NO_ONE
/* for now all VertexBuffers must have same vertex_len */
@@ -199,14 +199,14 @@ int GWN_batch_vertbuf_add_ex(
}
}
- /* we only make it this far if there is no room for another Gwn_VertBuf */
+ /* we only make it this far if there is no room for another GPUVertBuf */
#if TRUST_NO_ONE
assert(false);
#endif
return -1;
}
-static GLuint batch_vao_get(Gwn_Batch *batch)
+static GLuint batch_vao_get(GPUBatch *batch)
{
/* Search through cache */
if (batch->is_dynamic_vao_count) {
@@ -215,22 +215,22 @@ static GLuint batch_vao_get(Gwn_Batch *batch)
return batch->dynamic_vaos.vao_ids[i];
}
else {
- for (int i = 0; i < GWN_BATCH_VAO_STATIC_LEN; ++i)
+ for (int i = 0; i < GPU_BATCH_VAO_STATIC_LEN; ++i)
if (batch->static_vaos.interfaces[i] == batch->interface)
return batch->static_vaos.vao_ids[i];
}
/* Set context of this batch.
- * It will be bound to it until gwn_batch_vao_cache_clear is called.
+ * It will be bound to it until GPU_batch_vao_cache_clear is called.
* Until then it can only be drawn with this context. */
if (batch->context == NULL) {
- batch->context = GWN_context_active_get();
- gwn_context_add_batch(batch->context, batch);
+ batch->context = GPU_context_active_get();
+ gpu_context_add_batch(batch->context, batch);
}
#if TRUST_NO_ONE
else {
/* Make sure you are not trying to draw this batch in another context. */
- assert(batch->context == GWN_context_active_get());
+ assert(batch->context == GPU_context_active_get());
}
#endif
@@ -238,25 +238,25 @@ static GLuint batch_vao_get(Gwn_Batch *batch)
GLuint new_vao = 0;
if (!batch->is_dynamic_vao_count) {
int i; /* find first unused slot */
- for (i = 0; i < GWN_BATCH_VAO_STATIC_LEN; ++i)
+ for (i = 0; i < GPU_BATCH_VAO_STATIC_LEN; ++i)
if (batch->static_vaos.vao_ids[i] == 0)
break;
- if (i < GWN_BATCH_VAO_STATIC_LEN) {
+ if (i < GPU_BATCH_VAO_STATIC_LEN) {
batch->static_vaos.interfaces[i] = batch->interface;
- batch->static_vaos.vao_ids[i] = new_vao = GWN_vao_alloc();
+ batch->static_vaos.vao_ids[i] = new_vao = GPU_vao_alloc();
}
else {
/* Not enough space, switch to dynamic. */
batch->is_dynamic_vao_count = true;
/* Erase previous entries, they will be added back if drawn again. */
- for (int j = 0; j < GWN_BATCH_VAO_STATIC_LEN; ++j) {
- GWN_shaderinterface_remove_batch_ref((Gwn_ShaderInterface*)batch->static_vaos.interfaces[j], batch);
- GWN_vao_free(batch->static_vaos.vao_ids[j], batch->context);
+ for (int j = 0; j < GPU_BATCH_VAO_STATIC_LEN; ++j) {
+ GPU_shaderinterface_remove_batch_ref((GPUShaderInterface*)batch->static_vaos.interfaces[j], batch);
+ GPU_vao_free(batch->static_vaos.vao_ids[j], batch->context);
}
/* Init dynamic arrays and let the branch below set the values. */
- batch->dynamic_vaos.count = GWN_BATCH_VAO_DYN_ALLOC_COUNT;
- batch->dynamic_vaos.interfaces = calloc(batch->dynamic_vaos.count, sizeof(Gwn_ShaderInterface*));
+ batch->dynamic_vaos.count = GPU_BATCH_VAO_DYN_ALLOC_COUNT;
+ batch->dynamic_vaos.interfaces = calloc(batch->dynamic_vaos.count, sizeof(GPUShaderInterface*));
batch->dynamic_vaos.vao_ids = calloc(batch->dynamic_vaos.count, sizeof(GLuint));
}
}
@@ -270,17 +270,17 @@ static GLuint batch_vao_get(Gwn_Batch *batch)
if (i == batch->dynamic_vaos.count) {
/* Not enough space, realloc the array. */
i = batch->dynamic_vaos.count;
- batch->dynamic_vaos.count += GWN_BATCH_VAO_DYN_ALLOC_COUNT;
- batch->dynamic_vaos.interfaces = realloc(batch->dynamic_vaos.interfaces, sizeof(Gwn_ShaderInterface*) * batch->dynamic_vaos.count);
+ batch->dynamic_vaos.count += GPU_BATCH_VAO_DYN_ALLOC_COUNT;
+ batch->dynamic_vaos.interfaces = realloc(batch->dynamic_vaos.interfaces, sizeof(GPUShaderInterface*) * batch->dynamic_vaos.count);
batch->dynamic_vaos.vao_ids = realloc(batch->dynamic_vaos.vao_ids, sizeof(GLuint) * batch->dynamic_vaos.count);
- memset(batch->dynamic_vaos.interfaces + i, 0, sizeof(Gwn_ShaderInterface*) * GWN_BATCH_VAO_DYN_ALLOC_COUNT);
- memset(batch->dynamic_vaos.vao_ids + i, 0, sizeof(GLuint) * GWN_BATCH_VAO_DYN_ALLOC_COUNT);
+ memset(batch->dynamic_vaos.interfaces + i, 0, sizeof(GPUShaderInterface*) * GPU_BATCH_VAO_DYN_ALLOC_COUNT);
+ memset(batch->dynamic_vaos.vao_ids + i, 0, sizeof(GLuint) * GPU_BATCH_VAO_DYN_ALLOC_COUNT);
}
batch->dynamic_vaos.interfaces[i] = batch->interface;
- batch->dynamic_vaos.vao_ids[i] = new_vao = GWN_vao_alloc();
+ batch->dynamic_vaos.vao_ids[i] = new_vao = GPU_vao_alloc();
}
- GWN_shaderinterface_add_batch_ref((Gwn_ShaderInterface*)batch->interface, batch);
+ GPU_shaderinterface_add_batch_ref((GPUShaderInterface*)batch->interface, batch);
#if TRUST_NO_ONE
assert(new_vao != 0);
@@ -294,7 +294,7 @@ static GLuint batch_vao_get(Gwn_Batch *batch)
return new_vao;
}
-void GWN_batch_program_set_no_use(Gwn_Batch* batch, uint32_t program, const Gwn_ShaderInterface* shaderface)
+void GPU_batch_program_set_no_use(GPUBatch* batch, uint32_t program, const GPUShaderInterface* shaderface)
{
#if TRUST_NO_ONE
assert(glIsProgram(shaderface->program));
@@ -305,18 +305,18 @@ void GWN_batch_program_set_no_use(Gwn_Batch* batch, uint32_t program, const Gwn_
batch->vao_id = batch_vao_get(batch);
}
-void GWN_batch_program_set(Gwn_Batch* batch, uint32_t program, const Gwn_ShaderInterface* shaderface)
+void GPU_batch_program_set(GPUBatch* batch, uint32_t program, const GPUShaderInterface* shaderface)
{
- GWN_batch_program_set_no_use(batch, program, shaderface);
- GWN_batch_program_use_begin(batch); /* hack! to make Batch_Uniform* simpler */
+ GPU_batch_program_set_no_use(batch, program, shaderface);
+ GPU_batch_program_use_begin(batch); /* hack! to make Batch_Uniform* simpler */
}
-void gwn_batch_remove_interface_ref(Gwn_Batch* batch, const Gwn_ShaderInterface* interface)
+void gpu_batch_remove_interface_ref(GPUBatch* batch, const GPUShaderInterface* interface)
{
if (batch->is_dynamic_vao_count) {
for (int i = 0; i < batch->dynamic_vaos.count; ++i) {
if (batch->dynamic_vaos.interfaces[i] == interface) {
- GWN_vao_free(batch->dynamic_vaos.vao_ids[i], batch->context);
+ GPU_vao_free(batch->dynamic_vaos.vao_ids[i], batch->context);
batch->dynamic_vaos.vao_ids[i] = 0;
batch->dynamic_vaos.interfaces[i] = NULL;
break; /* cannot have duplicates */
@@ -325,9 +325,9 @@ void gwn_batch_remove_interface_ref(Gwn_Batch* batch, const Gwn_ShaderInterface*
}
else {
int i;
- for (i = 0; i < GWN_BATCH_VAO_STATIC_LEN; ++i) {
+ for (i = 0; i < GPU_BATCH_VAO_STATIC_LEN; ++i) {
if (batch->static_vaos.interfaces[i] == interface) {
- GWN_vao_free(batch->static_vaos.vao_ids[i], batch->context);
+ GPU_vao_free(batch->static_vaos.vao_ids[i], batch->context);
batch->static_vaos.vao_ids[i] = 0;
batch->static_vaos.interfaces[i] = NULL;
break; /* cannot have duplicates */
@@ -337,28 +337,28 @@ void gwn_batch_remove_interface_ref(Gwn_Batch* batch, const Gwn_ShaderInterface*
}
static void create_bindings(
- Gwn_VertBuf* verts, const Gwn_ShaderInterface* interface,
+ GPUVertBuf* verts, const GPUShaderInterface* interface,
uint v_first, const bool use_instancing)
{
- const Gwn_VertFormat* format = &verts->format;
+ const GPUVertFormat* format = &verts->format;
const uint attr_len = format->attr_len;
const uint stride = format->stride;
- GWN_vertbuf_use(verts);
+ GPU_vertbuf_use(verts);
for (uint a_idx = 0; a_idx < attr_len; ++a_idx) {
- const Gwn_VertAttr* a = format->attribs + a_idx;
+ const GPUVertAttr* a = format->attribs + a_idx;
const GLvoid* pointer = (const GLubyte*)0 + a->offset + v_first * stride;
for (uint n_idx = 0; n_idx < a->name_len; ++n_idx) {
- const Gwn_ShaderInput* input = GWN_shaderinterface_attr(interface, a->name[n_idx]);
+ const GPUShaderInput* input = GPU_shaderinterface_attr(interface, a->name[n_idx]);
if (input == NULL) continue;
if (a->comp_len == 16 || a->comp_len == 12 || a->comp_len == 8) {
#if TRUST_NO_ONE
- assert(a->fetch_mode == GWN_FETCH_FLOAT);
+ assert(a->fetch_mode == GPU_FETCH_FLOAT);
assert(a->gl_comp_type == GL_FLOAT);
#endif
for (int i = 0; i < a->comp_len / 4; ++i) {
@@ -374,14 +374,14 @@ static void create_bindings(
glVertexAttribDivisor(input->location, (use_instancing) ? 1 : 0);
switch (a->fetch_mode) {
- case GWN_FETCH_FLOAT:
- case GWN_FETCH_INT_TO_FLOAT:
+ case GPU_FETCH_FLOAT:
+ case GPU_FETCH_INT_TO_FLOAT:
glVertexAttribPointer(input->location, a->comp_len, a->gl_comp_type, GL_FALSE, stride, pointer);
break;
- case GWN_FETCH_INT_TO_FLOAT_UNIT:
+ case GPU_FETCH_INT_TO_FLOAT_UNIT:
glVertexAttribPointer(input->location, a->comp_len, a->gl_comp_type, GL_TRUE, stride, pointer);
break;
- case GWN_FETCH_INT:
+ case GPU_FETCH_INT:
glVertexAttribIPointer(input->location, a->comp_len, a->gl_comp_type, stride, pointer);
break;
}
@@ -390,20 +390,20 @@ static void create_bindings(
}
}
-static void batch_update_program_bindings(Gwn_Batch* batch, uint v_first)
+static void batch_update_program_bindings(GPUBatch* batch, uint v_first)
{
- for (int v = 0; v < GWN_BATCH_VBO_MAX_LEN && batch->verts[v] != NULL; ++v) {
+ for (int v = 0; v < GPU_BATCH_VBO_MAX_LEN && batch->verts[v] != NULL; ++v) {
create_bindings(batch->verts[v], batch->interface, (batch->inst) ? 0 : v_first, false);
}
if (batch->inst) {
create_bindings(batch->inst, batch->interface, v_first, true);
}
if (batch->elem) {
- GWN_indexbuf_use(batch->elem);
+ GPU_indexbuf_use(batch->elem);
}
}
-void GWN_batch_program_use_begin(Gwn_Batch* batch)
+void GPU_batch_program_use_begin(GPUBatch* batch)
{
/* NOTE: use_program & done_using_program are fragile, depend on staying in sync with
* the GL context's active program. use_program doesn't mark other programs as "not used". */
@@ -415,7 +415,7 @@ void GWN_batch_program_use_begin(Gwn_Batch* batch)
}
}
-void GWN_batch_program_use_end(Gwn_Batch* batch)
+void GPU_batch_program_use_end(GPUBatch* batch)
{
if (batch->program_in_use) {
#if PROGRAM_NO_OPTI
@@ -426,99 +426,99 @@ void GWN_batch_program_use_end(Gwn_Batch* batch)
}
#if TRUST_NO_ONE
- #define GET_UNIFORM const Gwn_ShaderInput* uniform = GWN_shaderinterface_uniform(batch->interface, name); assert(uniform);
+ #define GET_UNIFORM const GPUShaderInput* uniform = GPU_shaderinterface_uniform(batch->interface, name); assert(uniform);
#else
- #define GET_UNIFORM const Gwn_ShaderInput* uniform = GWN_shaderinterface_uniform(batch->interface, name);
+ #define GET_UNIFORM const GPUShaderInput* uniform = GPU_shaderinterface_uniform(batch->interface, name);
#endif
-void GWN_batch_uniform_1ui(Gwn_Batch* batch, const char* name, int value)
+void GPU_batch_uniform_1ui(GPUBatch* batch, const char* name, int value)
{
GET_UNIFORM
glUniform1ui(uniform->location, value);
}
-void GWN_batch_uniform_1i(Gwn_Batch* batch, const char* name, int value)
+void GPU_batch_uniform_1i(GPUBatch* batch, const char* name, int value)
{
GET_UNIFORM
glUniform1i(uniform->location, value);
}
-void GWN_batch_uniform_1b(Gwn_Batch* batch, const char* name, bool value)
+void GPU_batch_uniform_1b(GPUBatch* batch, const char* name, bool value)
{
GET_UNIFORM
glUniform1i(uniform->location, value ? GL_TRUE : GL_FALSE);
}
-void GWN_batch_uniform_2f(Gwn_Batch* batch, const char* name, float x, float y)
+void GPU_batch_uniform_2f(GPUBatch* batch, const char* name, float x, float y)
{
GET_UNIFORM
glUniform2f(uniform->location, x, y);
}
-void GWN_batch_uniform_3f(Gwn_Batch* batch, const char* name, float x, float y, float z)
+void GPU_batch_uniform_3f(GPUBatch* batch, const char* name, float x, float y, float z)
{
GET_UNIFORM
glUniform3f(uniform->location, x, y, z);
}
-void GWN_batch_uniform_4f(Gwn_Batch* batch, const char* name, float x, float y, float z, float w)
+void GPU_batch_uniform_4f(GPUBatch* batch, const char* name, float x, float y, float z, float w)
{
GET_UNIFORM
glUniform4f(uniform->location, x, y, z, w);
}
-void GWN_batch_uniform_1f(Gwn_Batch* batch, const char* name, float x)
+void GPU_batch_uniform_1f(GPUBatch* batch, const char* name, float x)
{
GET_UNIFORM
glUniform1f(uniform->location, x);
}
-void GWN_batch_uniform_2fv(Gwn_Batch* batch, const char* name, const float data[2])
+void GPU_batch_uniform_2fv(GPUBatch* batch, const char* name, const float data[2])
{
GET_UNIFORM
glUniform2fv(uniform->location, 1, data);
}
-void GWN_batch_uniform_3fv(Gwn_Batch* batch, const char* name, const float data[3])
+void GPU_batch_uniform_3fv(GPUBatch* batch, const char* name, const float data[3])
{
GET_UNIFORM
glUniform3fv(uniform->location, 1, data);
}
-void GWN_batch_uniform_4fv(Gwn_Batch* batch, const char* name, const float data[4])
+void GPU_batch_uniform_4fv(GPUBatch* batch, const char* name, const float data[4])
{
GET_UNIFORM
glUniform4fv(uniform->location, 1, data);
}
-void GWN_batch_uniform_2fv_array(Gwn_Batch* batch, const char* name, const int len, const float *data)
+void GPU_batch_uniform_2fv_array(GPUBatch* batch, const char* name, const int len, const float *data)
{
GET_UNIFORM
glUniform2fv(uniform->location, len, data);
}
-void GWN_batch_uniform_4fv_array(Gwn_Batch* batch, const char* name, const int len, const float *data)
+void GPU_batch_uniform_4fv_array(GPUBatch* batch, const char* name, const int len, const float *data)
{
GET_UNIFORM
glUniform4fv(uniform->location, len, data);
}
-void GWN_batch_uniform_mat4(Gwn_Batch* batch, const char* name, const float data[4][4])
+void GPU_batch_uniform_mat4(GPUBatch* batch, const char* name, const float data[4][4])
{
GET_UNIFORM
glUniformMatrix4fv(uniform->location, 1, GL_FALSE, (const float *)data);
}
-static void primitive_restart_enable(const Gwn_IndexBuf *el)
+static void primitive_restart_enable(const GPUIndexBuf *el)
{
// TODO(fclem) Replace by GL_PRIMITIVE_RESTART_FIXED_INDEX when we have ogl 4.3
glEnable(GL_PRIMITIVE_RESTART);
GLuint restart_index = (GLuint)0xFFFFFFFF;
-#if GWN_TRACK_INDEX_RANGE
- if (el->index_type == GWN_INDEX_U8)
+#if GPU_TRACK_INDEX_RANGE
+ if (el->index_type == GPU_INDEX_U8)
restart_index = (GLuint)0xFF;
- else if (el->index_type == GWN_INDEX_U16)
+ else if (el->index_type == GPU_INDEX_U16)
restart_index = (GLuint)0xFFFF;
#endif
@@ -530,21 +530,21 @@ static void primitive_restart_disable(void)
glDisable(GL_PRIMITIVE_RESTART);
}
-void GWN_batch_draw(Gwn_Batch* batch)
+void GPU_batch_draw(GPUBatch* batch)
{
#if TRUST_NO_ONE
- assert(batch->phase == GWN_BATCH_READY_TO_DRAW);
+ assert(batch->phase == GPU_BATCH_READY_TO_DRAW);
assert(batch->verts[0]->vbo_id != 0);
#endif
- GWN_batch_program_use_begin(batch);
+ GPU_batch_program_use_begin(batch);
GPU_matrix_bind(batch->interface); // external call.
- GWN_batch_draw_range_ex(batch, 0, 0, false);
+ GPU_batch_draw_range_ex(batch, 0, 0, false);
- GWN_batch_program_use_end(batch);
+ GPU_batch_program_use_end(batch);
}
-void GWN_batch_draw_range_ex(Gwn_Batch* batch, int v_first, int v_count, bool force_instance)
+void GPU_batch_draw_range_ex(GPUBatch* batch, int v_first, int v_count, bool force_instance)
{
#if TRUST_NO_ONE
assert(!(force_instance && (batch->inst == NULL)) || v_count > 0); // we cannot infer length if force_instance
@@ -553,7 +553,7 @@ void GWN_batch_draw_range_ex(Gwn_Batch* batch, int v_first, int v_count, bool fo
// If using offset drawing, use the default VAO and redo bindings.
if (v_first != 0 && (do_instance || batch->elem)) {
- glBindVertexArray(GWN_vao_default());
+ glBindVertexArray(GPU_vao_default());
batch_update_program_bindings(batch, v_first);
}
else {
@@ -567,12 +567,12 @@ void GWN_batch_draw_range_ex(Gwn_Batch* batch, int v_first, int v_count, bool fo
}
if (batch->elem) {
- const Gwn_IndexBuf* el = batch->elem;
+ const GPUIndexBuf* el = batch->elem;
if (el->use_prim_restart) {
primitive_restart_enable(el);
}
-#if GWN_TRACK_INDEX_RANGE
+#if GPU_TRACK_INDEX_RANGE
glDrawElementsInstancedBaseVertex(batch->gl_prim_type,
el->index_len,
el->gl_index_type,
@@ -597,13 +597,13 @@ void GWN_batch_draw_range_ex(Gwn_Batch* batch, int v_first, int v_count, bool fo
}
if (batch->elem) {
- const Gwn_IndexBuf* el = batch->elem;
+ const GPUIndexBuf* el = batch->elem;
if (el->use_prim_restart) {
primitive_restart_enable(el);
}
-#if GWN_TRACK_INDEX_RANGE
+#if GPU_TRACK_INDEX_RANGE
if (el->base_index) {
glDrawRangeElementsBaseVertex(batch->gl_prim_type,
el->min_index,
@@ -634,10 +634,10 @@ void GWN_batch_draw_range_ex(Gwn_Batch* batch, int v_first, int v_count, bool fo
}
/* just draw some vertices and let shader place them where we want. */
-void GWN_draw_primitive(Gwn_PrimType prim_type, int v_count)
+void GPU_draw_primitive(GPUPrimType prim_type, int v_count)
{
/* we cannot draw without vao ... annoying ... */
- glBindVertexArray(GWN_vao_default());
+ glBindVertexArray(GPU_vao_default());
GLenum type = convert_prim_type_to_gl(prim_type);
glDrawArrays(type, 0, v_count);
@@ -652,10 +652,10 @@ void GWN_draw_primitive(Gwn_PrimType prim_type, int v_count)
/** \name Utilities
* \{ */
-void GWN_batch_program_set_builtin(Gwn_Batch *batch, GPUBuiltinShader shader_id)
+void GPU_batch_program_set_builtin(GPUBatch *batch, GPUBuiltinShader shader_id)
{
GPUShader *shader = GPU_shader_get_builtin_shader(shader_id);
- GWN_batch_program_set(batch, shader->program, shader->interface);
+ GPU_batch_program_set(batch, shader->program, shader->interface);
}
/** \} */
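
As the GPU_batch_discard logic earlier in the diff shows, ownership is tracked per buffer in owns_flag: one low bit per VBO slot (1 << v), plus GPU_BATCH_OWNS_INDEX and GPU_BATCH_OWNS_INSTANCES for the index and instance buffers. A hedged sketch of transferring ownership with the renamed API, using only functions from this file; `verts`, `elem`, and `inst_vbo` are assumed to exist:

    /* The batch frees `elem` on discard; `verts` stays caller-owned
     * because its per-VBO ownership bit (1 << 0) is not set. */
    GPUBatch *batch = GPU_batch_create_ex(
            GPU_PRIM_POINTS, verts, elem, GPU_BATCH_OWNS_INDEX);

    /* Attach an instancing VBO and hand over its ownership; per
     * GPU_batch_instbuf_set this sets GPU_BATCH_OWNS_INSTANCES and
     * clears the VAO cache so bindings are rebuilt on the next draw. */
    GPU_batch_instbuf_set(batch, inst_vbo, true);

    /* Frees `elem` and `inst_vbo` (owned), releases cached VAOs and
     * their shader-interface references, then frees the batch itself. */
    GPU_batch_discard(batch);
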