git.blender.org/blender.git
author    Campbell Barton <ideasman42@gmail.com>  2019-04-17 07:17:24 +0300
committer Campbell Barton <ideasman42@gmail.com>  2019-04-17 07:21:24 +0300
commit    e12c08e8d170b7ca40f204a5b0423c23a9fbc2c1 (patch)
tree      8cf3453d12edb177a218ef8009357518ec6cab6a /source/blender/gpu/GPU_batch.h
parent    b3dabc200a4b0399ec6b81f2ff2730d07b44fcaa (diff)
ClangFormat: apply to source, most of intern
Apply clang format as proposed in T53211. For details on usage and instructions for migrating branches without conflicts, see: https://wiki.blender.org/wiki/Tools/ClangFormat
Diffstat (limited to 'source/blender/gpu/GPU_batch.h')
-rw-r--r--  source/blender/gpu/GPU_batch.h | 187
1 file changed, 94 insertions(+), 93 deletions(-)
diff --git a/source/blender/gpu/GPU_batch.h b/source/blender/gpu/GPU_batch.h
index 9f6c9f50109..b9397a61ea4 100644
--- a/source/blender/gpu/GPU_batch.h
+++ b/source/blender/gpu/GPU_batch.h
@@ -33,10 +33,10 @@
#include "GPU_shader.h"
typedef enum {
- GPU_BATCH_READY_TO_FORMAT,
- GPU_BATCH_READY_TO_BUILD,
- GPU_BATCH_BUILDING,
- GPU_BATCH_READY_TO_DRAW,
+ GPU_BATCH_READY_TO_FORMAT,
+ GPU_BATCH_READY_TO_BUILD,
+ GPU_BATCH_BUILDING,
+ GPU_BATCH_READY_TO_DRAW,
} GPUBatchPhase;
#define GPU_BATCH_VBO_MAX_LEN 4
@@ -44,69 +44,68 @@ typedef enum {
#define GPU_BATCH_VAO_DYN_ALLOC_COUNT 16
typedef struct GPUBatch {
- /* geometry */
-
- /** verts[0] is required, others can be NULL */
- GPUVertBuf *verts[GPU_BATCH_VBO_MAX_LEN];
- /** Instance attributes. */
- GPUVertBuf *inst;
- /** NULL if element list not needed */
- GPUIndexBuf *elem;
- uint32_t gl_prim_type;
-
- /* cached values (avoid dereferencing later) */
- uint32_t vao_id;
- uint32_t program;
- const struct GPUShaderInterface *interface;
-
- /* book-keeping */
- uint owns_flag;
- /** used to free all vaos. this implies all vaos were created under the same context. */
- struct GPUContext *context;
- GPUBatchPhase phase;
- bool program_in_use;
-
- /* Vao management: remembers all geometry state (vertex attribute bindings & element buffer)
- * for each shader interface. Start with a static number of vaos and fallback to dynamic count
- * if necessary. Once a batch goes dynamic it does not go back. */
- bool is_dynamic_vao_count;
- union {
- /** Static handle count */
- struct {
- const struct GPUShaderInterface *interfaces[GPU_BATCH_VAO_STATIC_LEN];
- uint32_t vao_ids[GPU_BATCH_VAO_STATIC_LEN];
- } static_vaos;
- /** Dynamic handle count */
- struct {
- uint count;
- const struct GPUShaderInterface **interfaces;
- uint32_t *vao_ids;
- } dynamic_vaos;
- };
-
- /* XXX This is the only solution if we want to have some data structure using
- * batches as key to identify nodes. We must destroy these nodes with this callback. */
- void (*free_callback)(struct GPUBatch *, void *);
- void *callback_data;
+ /* geometry */
+
+ /** verts[0] is required, others can be NULL */
+ GPUVertBuf *verts[GPU_BATCH_VBO_MAX_LEN];
+ /** Instance attributes. */
+ GPUVertBuf *inst;
+ /** NULL if element list not needed */
+ GPUIndexBuf *elem;
+ uint32_t gl_prim_type;
+
+ /* cached values (avoid dereferencing later) */
+ uint32_t vao_id;
+ uint32_t program;
+ const struct GPUShaderInterface *interface;
+
+ /* book-keeping */
+ uint owns_flag;
+ /** used to free all vaos. this implies all vaos were created under the same context. */
+ struct GPUContext *context;
+ GPUBatchPhase phase;
+ bool program_in_use;
+
+ /* Vao management: remembers all geometry state (vertex attribute bindings & element buffer)
+ * for each shader interface. Start with a static number of vaos and fallback to dynamic count
+ * if necessary. Once a batch goes dynamic it does not go back. */
+ bool is_dynamic_vao_count;
+ union {
+ /** Static handle count */
+ struct {
+ const struct GPUShaderInterface *interfaces[GPU_BATCH_VAO_STATIC_LEN];
+ uint32_t vao_ids[GPU_BATCH_VAO_STATIC_LEN];
+ } static_vaos;
+ /** Dynamic handle count */
+ struct {
+ uint count;
+ const struct GPUShaderInterface **interfaces;
+ uint32_t *vao_ids;
+ } dynamic_vaos;
+ };
+
+ /* XXX This is the only solution if we want to have some data structure using
+ * batches as key to identify nodes. We must destroy these nodes with this callback. */
+ void (*free_callback)(struct GPUBatch *, void *);
+ void *callback_data;
} GPUBatch;
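/* Illustrative sketch only (not part of this header): one way the static/dynamic
 * VAO cache described in the struct comments above could be searched for a given
 * shader interface. The function name is hypothetical; the fields match GPUBatch. */
static uint32_t batch_vao_cache_lookup(GPUBatch *batch, const struct GPUShaderInterface *interface)
{
  if (!batch->is_dynamic_vao_count) {
    for (int i = 0; i < GPU_BATCH_VAO_STATIC_LEN; i++) {
      if (batch->static_vaos.interfaces[i] == interface) {
        return batch->static_vaos.vao_ids[i];
      }
    }
  }
  else {
    for (uint i = 0; i < batch->dynamic_vaos.count; i++) {
      if (batch->dynamic_vaos.interfaces[i] == interface) {
        return batch->dynamic_vaos.vao_ids[i];
      }
    }
  }
  return 0; /* Not cached: the caller would create a new VAO and store it. */
}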
enum {
- GPU_BATCH_OWNS_VBO = (1 << 0),
- /* each vbo index gets bit-shifted */
- GPU_BATCH_OWNS_INSTANCES = (1 << 30),
- GPU_BATCH_OWNS_INDEX = (1u << 31u),
+ GPU_BATCH_OWNS_VBO = (1 << 0),
+ /* each vbo index gets bit-shifted */
+ GPU_BATCH_OWNS_INSTANCES = (1 << 30),
+ GPU_BATCH_OWNS_INDEX = (1u << 31u),
};
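/* Illustrative sketch (not part of this header): "each vbo index gets bit-shifted"
 * means the ownership bit for verts[i] is GPU_BATCH_OWNS_VBO shifted left by i,
 * so the four possible VBOs use bits 0..3 while instance and index ownership keep
 * the two high bits. The function name is hypothetical. */
static void batch_mark_vbo_owned(GPUBatch *batch, int vbo_index)
{
  batch->owns_flag |= (GPU_BATCH_OWNS_VBO << vbo_index);
}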
GPUBatch *GPU_batch_create_ex(GPUPrimType, GPUVertBuf *, GPUIndexBuf *, uint owns_flag);
void GPU_batch_init_ex(GPUBatch *, GPUPrimType, GPUVertBuf *, GPUIndexBuf *, uint owns_flag);
void GPU_batch_copy(GPUBatch *batch_dst, GPUBatch *batch_src);
-#define GPU_batch_create(prim, verts, elem) \
- GPU_batch_create_ex(prim, verts, elem, 0)
-#define GPU_batch_init(batch, prim, verts, elem) \
- GPU_batch_init_ex(batch, prim, verts, elem, 0)
+#define GPU_batch_create(prim, verts, elem) GPU_batch_create_ex(prim, verts, elem, 0)
+#define GPU_batch_init(batch, prim, verts, elem) GPU_batch_init_ex(batch, prim, verts, elem, 0)
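/* Hypothetical usage sketch: build a triangle batch from an already filled vertex
 * buffer. GPU_PRIM_TRIS is assumed to come from GPU_primitive.h and `vbo` from
 * GPU_vertex_buffer.h setup; GPU_BATCH_OWNS_VBO makes the batch free the buffer
 * when it is discarded. */
GPUBatch *batch = GPU_batch_create_ex(GPU_PRIM_TRIS, vbo, NULL, GPU_BATCH_OWNS_VBO);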
-void GPU_batch_clear(GPUBatch *); /* Same as discard but does not free. (does not call free callback) */
+void GPU_batch_clear(
+    GPUBatch *); /* Same as discard but does not free. (does not call free callback) */
void GPU_batch_discard(GPUBatch *); /* verts & elem are not discarded */
void GPU_batch_vao_cache_clear(GPUBatch *);
@@ -117,16 +116,15 @@ void GPU_batch_instbuf_set(GPUBatch *, GPUVertBuf *, bool own_vbo); /* Instancin
int GPU_batch_vertbuf_add_ex(GPUBatch *, GPUVertBuf *, bool own_vbo);
-#define GPU_batch_vertbuf_add(batch, verts) \
- GPU_batch_vertbuf_add_ex(batch, verts, false)
+#define GPU_batch_vertbuf_add(batch, verts) GPU_batch_vertbuf_add_ex(batch, verts, false)
void GPU_batch_program_set_no_use(GPUBatch *, uint32_t program, const GPUShaderInterface *);
void GPU_batch_program_set(GPUBatch *, uint32_t program, const GPUShaderInterface *);
void GPU_batch_program_set_shader(GPUBatch *, GPUShader *shader);
-void GPU_batch_program_set_builtin(
- GPUBatch *batch, eGPUBuiltinShader shader_id);
-void GPU_batch_program_set_builtin_with_config(
- GPUBatch *batch, eGPUBuiltinShader shader_id, eGPUShaderConfig sh_cfg);
+void GPU_batch_program_set_builtin(GPUBatch *batch, eGPUBuiltinShader shader_id);
+void GPU_batch_program_set_builtin_with_config(GPUBatch *batch,
+ eGPUBuiltinShader shader_id,
+ eGPUShaderConfig sh_cfg);
/* Entire batch draws with one shader program, but can be redrawn later with another program. */
/* Vertex shader's inputs must be compatible with the batch's vertex format. */
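/* Hypothetical usage sketch: bind a builtin shader to the batch before drawing.
 * GPU_SHADER_3D_UNIFORM_COLOR is assumed to be a valid eGPUBuiltinShader value
 * declared in GPU_shader.h. */
GPU_batch_program_set_builtin(batch, GPU_SHADER_3D_UNIFORM_COLOR);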
@@ -164,19 +162,19 @@ void GPU_draw_primitive(GPUPrimType, int v_count);
/* WithOwn variants reduce number of system allocations. */
typedef struct BatchWithOwnVertexBuffer {
- GPUBatch batch;
- GPUVertBuf verts; /* link batch.verts to this */
+ GPUBatch batch;
+ GPUVertBuf verts; /* link batch.verts to this */
} BatchWithOwnVertexBuffer;
typedef struct BatchWithOwnElementList {
- GPUBatch batch;
- GPUIndexBuf elem; /* link batch.elem to this */
+ GPUBatch batch;
+ GPUIndexBuf elem; /* link batch.elem to this */
} BatchWithOwnElementList;
typedef struct BatchWithOwnVertexBufferAndElementList {
- GPUBatch batch;
- GPUIndexBuf elem; /* link batch.elem to this */
- GPUVertBuf verts; /* link batch.verts to this */
+ GPUBatch batch;
+ GPUIndexBuf elem; /* link batch.elem to this */
+ GPUVertBuf verts; /* link batch.verts to this */
} BatchWithOwnVertexBufferAndElementList;
GPUBatch *create_BatchWithOwnVertexBuffer(GPUPrimType, GPUVertFormat *, uint v_len, GPUIndexBuf *);
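/* Hypothetical usage sketch: the WithOwn structs above embed the vertex/index
 * buffers in the same allocation as the batch, so one allocation (and one free)
 * covers both. `format` is assumed to be a configured GPUVertFormat. */
GPUBatch *line_batch = create_BatchWithOwnVertexBuffer(GPU_PRIM_LINES, &format, 64, NULL);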
@@ -193,28 +191,31 @@ void gpu_batch_exit(void);
/* Macros */
-#define GPU_BATCH_DISCARD_SAFE(batch) do { \
- if (batch != NULL) { \
- GPU_batch_discard(batch); \
- batch = NULL; \
- } \
-} while (0)
-
-#define GPU_BATCH_CLEAR_SAFE(batch) do { \
- if (batch != NULL) { \
- GPU_batch_clear(batch); \
- memset(batch, 0, sizeof(*(batch))); \
- } \
-} while (0)
-
-#define GPU_BATCH_DISCARD_ARRAY_SAFE(_batch_array, _len) do { \
- if (_batch_array != NULL) { \
- BLI_assert(_len > 0); \
- for (int _i = 0; _i < _len; _i++) { \
- GPU_BATCH_DISCARD_SAFE(_batch_array[_i]); \
- } \
- MEM_freeN(_batch_array); \
- } \
-} while (0)
+#define GPU_BATCH_DISCARD_SAFE(batch) \
+ do { \
+ if (batch != NULL) { \
+ GPU_batch_discard(batch); \
+ batch = NULL; \
+ } \
+ } while (0)
+
+#define GPU_BATCH_CLEAR_SAFE(batch) \
+ do { \
+ if (batch != NULL) { \
+ GPU_batch_clear(batch); \
+ memset(batch, 0, sizeof(*(batch))); \
+ } \
+ } while (0)
+
+#define GPU_BATCH_DISCARD_ARRAY_SAFE(_batch_array, _len) \
+ do { \
+ if (_batch_array != NULL) { \
+ BLI_assert(_len > 0); \
+ for (int _i = 0; _i < _len; _i++) { \
+ GPU_BATCH_DISCARD_SAFE(_batch_array[_i]); \
+ } \
+ MEM_freeN(_batch_array); \
+ } \
+ } while (0)
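/* Hypothetical usage sketch: releasing a cached batch held by some runtime struct.
 * The macro is a no-op when the pointer is already NULL and resets it to NULL
 * afterwards; `cache` is an assumed struct holding a GPUBatch *. */
GPU_BATCH_DISCARD_SAFE(cache->batch);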
#endif /* __GPU_BATCH_H__ */