git.blender.org/blender.git
author    Clément Foucault <foucault.clem@gmail.com>  2018-07-18 01:12:21 +0300
committer Clément Foucault <foucault.clem@gmail.com>  2018-07-18 12:49:15 +0300
commit    8cd7828792419fb4eac9a2a477968535b4c71535 (patch)
tree      8fc733149fe07b7d9edd4b8b1e709519b4481887 /source/blender/gpu/GPU_batch.h
parent    247ad2034de2c33a6d9cb7d3b6f1ef7ffa5b859d (diff)
GWN: Port to GPU module: Replace GWN prefix by GPU
Diffstat (limited to 'source/blender/gpu/GPU_batch.h')
-rw-r--r--  source/blender/gpu/GPU_batch.h | 158
1 file changed, 79 insertions(+), 79 deletions(-)
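
Before the diff itself, a minimal sketch of what this rename means for a typical call site. This example is not part of the commit: `verts` is assumed to be a vertex buffer built elsewhere, and `GPU_PRIM_TRIS` / `GPU_SHADER_3D_UNIFORM_COLOR` are declared in sibling GPU headers, not in this file.

/* Before this commit (Gawain-prefixed API): */
Gwn_Batch *batch = GWN_batch_create(GWN_PRIM_TRIS, verts, NULL);
GWN_batch_program_set_builtin(batch, GPU_SHADER_3D_UNIFORM_COLOR);
GWN_batch_uniform_4f(batch, "color", 1.0f, 0.0f, 0.0f, 1.0f);
GWN_batch_draw(batch);
GWN_batch_discard(batch);

/* After this commit (GPU-prefixed API, same semantics): */
GPUBatch *batch = GPU_batch_create(GPU_PRIM_TRIS, verts, NULL);
GPU_batch_program_set_builtin(batch, GPU_SHADER_3D_UNIFORM_COLOR);
GPU_batch_uniform_4f(batch, "color", 1.0f, 0.0f, 0.0f, 1.0f);
GPU_batch_draw(batch);
GPU_batch_discard(batch);

Only the prefixes and type names change; the argument lists, ownership semantics, and draw flow carry over unchanged, which is why the diff below is a pure one-for-one substitution.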
diff --git a/source/blender/gpu/GPU_batch.h b/source/blender/gpu/GPU_batch.h
index 4c98eb8f537..bd0e3b43e6c 100644
--- a/source/blender/gpu/GPU_batch.h
+++ b/source/blender/gpu/GPU_batch.h
@@ -23,15 +23,15 @@
* ***** END GPL LICENSE BLOCK *****
*/
-/** \file blender/gpu/gwn_batch.h
+/** \file blender/gpu/GPU_batch.h
* \ingroup gpu
*
- * Gawain geometry batch
+ * GPU geometry batch
* Contains VAOs + VBOs + Shader representing a drawable entity.
*/
-#ifndef __GWN_BATCH_H__
-#define __GWN_BATCH_H__
+#ifndef __GPU_BATCH_H__
+#define __GPU_BATCH_H__
#include "GPU_vertex_buffer.h"
#include "GPU_element.h"
@@ -39,32 +39,32 @@
#include "GPU_shader.h"
typedef enum {
- GWN_BATCH_READY_TO_FORMAT,
- GWN_BATCH_READY_TO_BUILD,
- GWN_BATCH_BUILDING,
- GWN_BATCH_READY_TO_DRAW
-} Gwn_BatchPhase;
+ GPU_BATCH_READY_TO_FORMAT,
+ GPU_BATCH_READY_TO_BUILD,
+ GPU_BATCH_BUILDING,
+ GPU_BATCH_READY_TO_DRAW
+} GPUBatchPhase;
-#define GWN_BATCH_VBO_MAX_LEN 3
-#define GWN_BATCH_VAO_STATIC_LEN 3
-#define GWN_BATCH_VAO_DYN_ALLOC_COUNT 16
+#define GPU_BATCH_VBO_MAX_LEN 3
+#define GPU_BATCH_VAO_STATIC_LEN 3
+#define GPU_BATCH_VAO_DYN_ALLOC_COUNT 16
-typedef struct Gwn_Batch {
+typedef struct GPUBatch {
/* geometry */
- Gwn_VertBuf* verts[GWN_BATCH_VBO_MAX_LEN]; /* verts[0] is required, others can be NULL */
- Gwn_VertBuf* inst; /* instance attribs */
- Gwn_IndexBuf* elem; /* NULL if element list not needed */
+ GPUVertBuf* verts[GPU_BATCH_VBO_MAX_LEN]; /* verts[0] is required, others can be NULL */
+ GPUVertBuf* inst; /* instance attribs */
+ GPUIndexBuf* elem; /* NULL if element list not needed */
uint32_t gl_prim_type;
/* cached values (avoid dereferencing later) */
uint32_t vao_id;
uint32_t program;
- const struct Gwn_ShaderInterface* interface;
+ const struct GPUShaderInterface* interface;
/* book-keeping */
uint owns_flag;
- struct Gwn_Context *context; /* used to free all vaos. this implies all vaos were created under the same context. */
- Gwn_BatchPhase phase;
+ struct GPUContext *context; /* used to free all vaos. this implies all vaos were created under the same context. */
+ GPUBatchPhase phase;
bool program_in_use;
/* Vao management: remembers all geometry state (vertex attrib bindings & element buffer)
@@ -74,113 +74,113 @@ typedef struct Gwn_Batch {
union {
/* Static handle count */
struct {
- const struct Gwn_ShaderInterface* interfaces[GWN_BATCH_VAO_STATIC_LEN];
- uint32_t vao_ids[GWN_BATCH_VAO_STATIC_LEN];
+ const struct GPUShaderInterface* interfaces[GPU_BATCH_VAO_STATIC_LEN];
+ uint32_t vao_ids[GPU_BATCH_VAO_STATIC_LEN];
} static_vaos;
/* Dynamic handle count */
struct {
uint count;
- const struct Gwn_ShaderInterface** interfaces;
+ const struct GPUShaderInterface** interfaces;
uint32_t* vao_ids;
} dynamic_vaos;
};
/* XXX This is the only solution if we want to have some data structure using
* batches as key to identify nodes. We must destroy these nodes with this callback. */
- void (*free_callback)(struct Gwn_Batch*, void*);
+ void (*free_callback)(struct GPUBatch*, void*);
void* callback_data;
-} Gwn_Batch;
+} GPUBatch;
enum {
- GWN_BATCH_OWNS_VBO = (1 << 0),
+ GPU_BATCH_OWNS_VBO = (1 << 0),
/* each vbo index gets bit-shifted */
- GWN_BATCH_OWNS_INSTANCES = (1 << 30),
- GWN_BATCH_OWNS_INDEX = (1 << 31),
+ GPU_BATCH_OWNS_INSTANCES = (1 << 30),
+ GPU_BATCH_OWNS_INDEX = (1 << 31),
};
-Gwn_Batch* GWN_batch_create_ex(Gwn_PrimType, Gwn_VertBuf*, Gwn_IndexBuf*, uint owns_flag);
-void GWN_batch_init_ex(Gwn_Batch*, Gwn_PrimType, Gwn_VertBuf*, Gwn_IndexBuf*, uint owns_flag);
-Gwn_Batch* GWN_batch_duplicate(Gwn_Batch* batch_src);
+GPUBatch* GPU_batch_create_ex(GPUPrimType, GPUVertBuf*, GPUIndexBuf*, uint owns_flag);
+void GPU_batch_init_ex(GPUBatch*, GPUPrimType, GPUVertBuf*, GPUIndexBuf*, uint owns_flag);
+GPUBatch* GPU_batch_duplicate(GPUBatch* batch_src);
-#define GWN_batch_create(prim, verts, elem) \
- GWN_batch_create_ex(prim, verts, elem, 0)
-#define GWN_batch_init(batch, prim, verts, elem) \
- GWN_batch_init_ex(batch, prim, verts, elem, 0)
+#define GPU_batch_create(prim, verts, elem) \
+ GPU_batch_create_ex(prim, verts, elem, 0)
+#define GPU_batch_init(batch, prim, verts, elem) \
+ GPU_batch_init_ex(batch, prim, verts, elem, 0)
-void GWN_batch_discard(Gwn_Batch*); /* verts & elem are not discarded */
+void GPU_batch_discard(GPUBatch*); /* verts & elem are not discarded */
-void gwn_batch_vao_cache_clear(Gwn_Batch*);
+void GPU_batch_vao_cache_clear(GPUBatch*);
-void GWN_batch_callback_free_set(Gwn_Batch*, void (*callback)(Gwn_Batch*, void*), void*);
+void GPU_batch_callback_free_set(GPUBatch*, void (*callback)(GPUBatch*, void*), void*);
-void GWN_batch_instbuf_set(Gwn_Batch*, Gwn_VertBuf*, bool own_vbo); /* Instancing */
+void GPU_batch_instbuf_set(GPUBatch*, GPUVertBuf*, bool own_vbo); /* Instancing */
-int GWN_batch_vertbuf_add_ex(Gwn_Batch*, Gwn_VertBuf*, bool own_vbo);
+int GPU_batch_vertbuf_add_ex(GPUBatch*, GPUVertBuf*, bool own_vbo);
-#define GWN_batch_vertbuf_add(batch, verts) \
- GWN_batch_vertbuf_add_ex(batch, verts, false)
+#define GPU_batch_vertbuf_add(batch, verts) \
+ GPU_batch_vertbuf_add_ex(batch, verts, false)
-void GWN_batch_program_set_no_use(Gwn_Batch*, uint32_t program, const Gwn_ShaderInterface*);
-void GWN_batch_program_set(Gwn_Batch*, uint32_t program, const Gwn_ShaderInterface*);
-void GWN_batch_program_set_builtin(Gwn_Batch *batch, GPUBuiltinShader shader_id);
+void GPU_batch_program_set_no_use(GPUBatch*, uint32_t program, const GPUShaderInterface*);
+void GPU_batch_program_set(GPUBatch*, uint32_t program, const GPUShaderInterface*);
+void GPU_batch_program_set_builtin(GPUBatch *batch, GPUBuiltinShader shader_id);
/* Entire batch draws with one shader program, but can be redrawn later with another program. */
/* Vertex shader's inputs must be compatible with the batch's vertex format. */
-void GWN_batch_program_use_begin(Gwn_Batch*); /* call before Batch_Uniform (temp hack?) */
-void GWN_batch_program_use_end(Gwn_Batch*);
-
-void GWN_batch_uniform_1ui(Gwn_Batch*, const char* name, int value);
-void GWN_batch_uniform_1i(Gwn_Batch*, const char* name, int value);
-void GWN_batch_uniform_1b(Gwn_Batch*, const char* name, bool value);
-void GWN_batch_uniform_1f(Gwn_Batch*, const char* name, float value);
-void GWN_batch_uniform_2f(Gwn_Batch*, const char* name, float x, float y);
-void GWN_batch_uniform_3f(Gwn_Batch*, const char* name, float x, float y, float z);
-void GWN_batch_uniform_4f(Gwn_Batch*, const char* name, float x, float y, float z, float w);
-void GWN_batch_uniform_2fv(Gwn_Batch*, const char* name, const float data[2]);
-void GWN_batch_uniform_3fv(Gwn_Batch*, const char* name, const float data[3]);
-void GWN_batch_uniform_4fv(Gwn_Batch*, const char* name, const float data[4]);
-void GWN_batch_uniform_2fv_array(Gwn_Batch*, const char* name, int len, const float *data);
-void GWN_batch_uniform_4fv_array(Gwn_Batch*, const char* name, int len, const float *data);
-void GWN_batch_uniform_mat4(Gwn_Batch*, const char* name, const float data[4][4]);
-
-void GWN_batch_draw(Gwn_Batch*);
+void GPU_batch_program_use_begin(GPUBatch*); /* call before Batch_Uniform (temp hack?) */
+void GPU_batch_program_use_end(GPUBatch*);
+
+void GPU_batch_uniform_1ui(GPUBatch*, const char* name, int value);
+void GPU_batch_uniform_1i(GPUBatch*, const char* name, int value);
+void GPU_batch_uniform_1b(GPUBatch*, const char* name, bool value);
+void GPU_batch_uniform_1f(GPUBatch*, const char* name, float value);
+void GPU_batch_uniform_2f(GPUBatch*, const char* name, float x, float y);
+void GPU_batch_uniform_3f(GPUBatch*, const char* name, float x, float y, float z);
+void GPU_batch_uniform_4f(GPUBatch*, const char* name, float x, float y, float z, float w);
+void GPU_batch_uniform_2fv(GPUBatch*, const char* name, const float data[2]);
+void GPU_batch_uniform_3fv(GPUBatch*, const char* name, const float data[3]);
+void GPU_batch_uniform_4fv(GPUBatch*, const char* name, const float data[4]);
+void GPU_batch_uniform_2fv_array(GPUBatch*, const char* name, int len, const float *data);
+void GPU_batch_uniform_4fv_array(GPUBatch*, const char* name, int len, const float *data);
+void GPU_batch_uniform_mat4(GPUBatch*, const char* name, const float data[4][4]);
+
+void GPU_batch_draw(GPUBatch*);
/* This does not bind/unbind shader and does not call GPU_matrix_bind() */
-void GWN_batch_draw_range_ex(Gwn_Batch*, int v_first, int v_count, bool force_instance);
+void GPU_batch_draw_range_ex(GPUBatch*, int v_first, int v_count, bool force_instance);
/* Does not even need batch */
-void GWN_draw_primitive(Gwn_PrimType, int v_count);
+void GPU_draw_primitive(GPUPrimType, int v_count);
#if 0 /* future plans */
-/* Can multiple batches share a Gwn_VertBuf? Use ref count? */
+/* Can multiple batches share a GPUVertBuf? Use ref count? */
/* We often need a batch with its own data, to be created and discarded together. */
/* WithOwn variants reduce number of system allocations. */
typedef struct BatchWithOwnVertexBuffer {
- Gwn_Batch batch;
- Gwn_VertBuf verts; /* link batch.verts to this */
+ GPUBatch batch;
+ GPUVertBuf verts; /* link batch.verts to this */
} BatchWithOwnVertexBuffer;
typedef struct BatchWithOwnElementList {
- Gwn_Batch batch;
- Gwn_IndexBuf elem; /* link batch.elem to this */
+ GPUBatch batch;
+ GPUIndexBuf elem; /* link batch.elem to this */
} BatchWithOwnElementList;
typedef struct BatchWithOwnVertexBufferAndElementList {
- Gwn_Batch batch;
- Gwn_IndexBuf elem; /* link batch.elem to this */
- Gwn_VertBuf verts; /* link batch.verts to this */
+ GPUBatch batch;
+ GPUIndexBuf elem; /* link batch.elem to this */
+ GPUVertBuf verts; /* link batch.verts to this */
} BatchWithOwnVertexBufferAndElementList;
-Gwn_Batch* create_BatchWithOwnVertexBuffer(Gwn_PrimType, Gwn_VertFormat*, uint v_len, Gwn_IndexBuf*);
-Gwn_Batch* create_BatchWithOwnElementList(Gwn_PrimType, Gwn_VertBuf*, uint prim_len);
-Gwn_Batch* create_BatchWithOwnVertexBufferAndElementList(Gwn_PrimType, Gwn_VertFormat*, uint v_len, uint prim_len);
+GPUBatch* create_BatchWithOwnVertexBuffer(GPUPrimType, GPUVertFormat*, uint v_len, GPUIndexBuf*);
+GPUBatch* create_BatchWithOwnElementList(GPUPrimType, GPUVertBuf*, uint prim_len);
+GPUBatch* create_BatchWithOwnVertexBufferAndElementList(GPUPrimType, GPUVertFormat*, uint v_len, uint prim_len);
/* verts: shared, own */
/* elem: none, shared, own */
-Gwn_Batch* create_BatchInGeneral(Gwn_PrimType, VertexBufferStuff, ElementListStuff);
+GPUBatch* create_BatchInGeneral(GPUPrimType, VertexBufferStuff, ElementListStuff);
#endif /* future plans */
@@ -189,11 +189,11 @@ void gpu_batch_exit(void);
/* Macros */
-#define GWN_BATCH_DISCARD_SAFE(batch) do { \
+#define GPU_BATCH_DISCARD_SAFE(batch) do { \
if (batch != NULL) { \
- GWN_batch_discard(batch); \
+ GPU_batch_discard(batch); \
batch = NULL; \
} \
} while (0)
-#endif /* __GWN_BATCH_H__ */
+#endif /* __GPU_BATCH_H__ */
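
A second sketch, again not taken from the commit, showing the ownership flags and the NULL-safe discard macro declared in this header working together. `verts` and `elem` are assumed to have been built elsewhere via the vertex-buffer and element-list APIs.

/* The batch takes ownership of both buffers, so discarding the
 * batch also frees the vertex and index buffers. */
GPUBatch *batch = GPU_batch_create_ex(
        GPU_PRIM_TRIS, verts, elem,
        GPU_BATCH_OWNS_VBO | GPU_BATCH_OWNS_INDEX);

GPU_batch_program_set_builtin(batch, GPU_SHADER_3D_UNIFORM_COLOR);
GPU_batch_draw(batch);

/* NULL-safe cleanup: the macro checks the pointer, discards the
 * batch, then resets the pointer to NULL. */
GPU_BATCH_DISCARD_SAFE(batch);

Per the enum above, GPU_BATCH_OWNS_VBO is bit-shifted per vbo index, while instance and index buffers get the fixed high bits (1 << 30) and (1 << 31).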