Welcome to mirror list, hosted at ThFree Co, Russian Federation.

GPU_batch.h « gpu « blender « source - git.blender.org/blender.git - Unnamed repository; edit this file 'description' to name the repository.
summaryrefslogtreecommitdiff
blob: 855214c279ca9c371d5a0442a1059ea9d0aef9a1 (plain)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
/*
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 *
 * The Original Code is Copyright (C) 2016 by Mike Erwin.
 * All rights reserved.
 */

/** \file
 * \ingroup gpu
 *
 * GPU geometry batch
 * Contains VAOs + VBOs + Shader representing a drawable entity.
 */

#pragma once

#include "GPU_element.h"
#include "GPU_shader.h"
#include "GPU_shader_interface.h"
#include "GPU_vertex_buffer.h"

#ifdef __cplusplus
extern "C" {
#endif

/** Lifecycle state of a #GPUBatch, from unused through drawable. */
typedef enum {
  GPU_BATCH_UNUSED,
  GPU_BATCH_READY_TO_FORMAT,
  GPU_BATCH_READY_TO_BUILD,
  GPU_BATCH_BUILDING,
  GPU_BATCH_READY_TO_DRAW,
} GPUBatchPhase;

/** Maximum number of vertex buffers a batch can reference (see GPUBatch.verts). */
#define GPU_BATCH_VBO_MAX_LEN 6
/** Maximum number of per-instance attribute buffers (see GPUBatch.inst). */
#define GPU_BATCH_INST_VBO_MAX_LEN 2
/** Number of VAO slots stored inline before falling back to a heap-allocated list. */
#define GPU_BATCH_VAO_STATIC_LEN 3
/** Allocation granularity for the dynamic VAO list (see GPUBatch.dynamic_vaos). */
#define GPU_BATCH_VAO_DYN_ALLOC_COUNT 16

/**
 * A drawable entity: vertex buffers, optional instancing buffers and index
 * buffer, plus cached VAO state per shader interface.
 */
typedef struct GPUBatch {
  /* geometry */

  /** verts[0] is required, others can be NULL */
  GPUVertBuf *verts[GPU_BATCH_VBO_MAX_LEN];
  /** Instance attributes. */
  GPUVertBuf *inst[GPU_BATCH_INST_VBO_MAX_LEN];
  /** NULL if element list not needed */
  GPUIndexBuf *elem;
  /** GL primitive type, stored as a raw uint32_t (presumably to avoid pulling
   * GL headers into this public header — TODO confirm). */
  uint32_t gl_prim_type;

  /* cached values (avoid dereferencing later) */
  uint32_t vao_id;
  uint32_t program;
  const struct GPUShaderInterface *interface;

  /* book-keeping */
  /** Bitmask of GPU_BATCH_OWNS_* flags: which attached buffers this batch frees. */
  uint owns_flag;
  /** used to free all vaos. this implies all vaos were created under the same context. */
  struct GPUContext *context;
  GPUBatchPhase phase;
  /** Presumably toggled by GPU_batch_program_use_begin/end — TODO confirm. */
  bool program_in_use;

  /* Vao management: remembers all geometry state (vertex attribute bindings & element buffer)
   * for each shader interface. Start with a static number of vaos and fallback to dynamic count
   * if necessary. Once a batch goes dynamic it does not go back. */
  bool is_dynamic_vao_count;
  union {
    /** Static handle count */
    struct {
      const struct GPUShaderInterface *interfaces[GPU_BATCH_VAO_STATIC_LEN];
      uint32_t vao_ids[GPU_BATCH_VAO_STATIC_LEN];
    } static_vaos;
    /** Dynamic handle count */
    struct {
      uint count;
      const struct GPUShaderInterface **interfaces;
      uint32_t *vao_ids;
    } dynamic_vaos;
  };

  /* XXX This is the only solution if we want to have some data structure using
   * batches as key to identify nodes. We must destroy these nodes with this callback. */
  void (*free_callback)(struct GPUBatch *, void *);
  void *callback_data;
} GPUBatch;

/* Ownership flags for the `owns_flag` argument of GPU_batch_create_ex() /
 * GPU_batch_init_ex(): when set, the batch frees the corresponding buffer(s)
 * on discard.
 *
 * Defined as macros rather than enumerators because (1u << 31u) is not
 * representable as an `int`, which C requires of enumeration constants
 * (C11 6.7.2.2) — as an enum this only compiled as a vendor extension. */
/** Batch owns verts[0]; each further vbo index gets this bit-shifted. */
#define GPU_BATCH_OWNS_VBO (1 << 0)
/** Batch owns its instancing vbos. */
#define GPU_BATCH_OWNS_INSTANCES (1 << 30)
/** Batch owns its index buffer (elem). */
#define GPU_BATCH_OWNS_INDEX (1u << 31u)

/** Allocate `count` zeroed batches. Caller owns the returned memory. */
GPUBatch *GPU_batch_calloc(uint count);
/** Create a batch; `owns_flag` is a mask of GPU_BATCH_OWNS_* bits. */
GPUBatch *GPU_batch_create_ex(GPUPrimType, GPUVertBuf *, GPUIndexBuf *, uint owns_flag);
/** Initialize an already-allocated batch in place; same arguments as create_ex. */
void GPU_batch_init_ex(GPUBatch *, GPUPrimType, GPUVertBuf *, GPUIndexBuf *, uint owns_flag);
/** Copy `batch_src` into `batch_dst`. NOTE(review): buffer-ownership semantics
 * of the copy are not visible in this header — check the implementation. */
void GPU_batch_copy(GPUBatch *batch_dst, GPUBatch *batch_src);

/* Convenience wrappers: no ownership transfer (owns_flag == 0). */
#define GPU_batch_create(prim, verts, elem) GPU_batch_create_ex(prim, verts, elem, 0)
#define GPU_batch_init(batch, prim, verts, elem) GPU_batch_init_ex(batch, prim, verts, elem, 0)

/* Same as discard but does not free. (does not call free callback). */
void GPU_batch_clear(GPUBatch *);

void GPU_batch_discard(GPUBatch *); /* verts & elem are not discarded */

/** Free every cached VAO of this batch. */
void GPU_batch_vao_cache_clear(GPUBatch *);

/** Register `callback` (with user data) to be run when the batch is freed
 * (see GPUBatch.free_callback). */
void GPU_batch_callback_free_set(GPUBatch *, void (*callback)(GPUBatch *, void *), void *);

/** Replace the instancing / element buffer; `own_*` transfers ownership. */
void GPU_batch_instbuf_set(GPUBatch *, GPUVertBuf *, bool own_vbo); /* Instancing */
void GPU_batch_elembuf_set(GPUBatch *batch, GPUIndexBuf *elem, bool own_ibo);

/** Append a buffer; returns an int (presumably the slot index used — TODO confirm). */
int GPU_batch_instbuf_add_ex(GPUBatch *, GPUVertBuf *, bool own_vbo);
int GPU_batch_vertbuf_add_ex(GPUBatch *, GPUVertBuf *, bool own_vbo);

#define GPU_batch_vertbuf_add(batch, verts) GPU_batch_vertbuf_add_ex(batch, verts, false)

/* Shader selection. The `_no_bind` variant presumably skips binding the
 * program immediately — TODO confirm against the implementation. */
void GPU_batch_set_shader(GPUBatch *batch, GPUShader *shader);
void GPU_batch_set_shader_no_bind(GPUBatch *batch, GPUShader *shader);
/** Use the immediate-mode (imm) shader for this batch. */
void GPU_batch_program_set_imm_shader(GPUBatch *batch);
/** Use one of the builtin shaders (optionally with a specific config). */
void GPU_batch_program_set_builtin(GPUBatch *batch, eGPUBuiltinShader shader_id);
void GPU_batch_program_set_builtin_with_config(GPUBatch *batch,
                                               eGPUBuiltinShader shader_id,
                                               eGPUShaderConfig sh_cfg);
/* Entire batch draws with one shader program, but can be redrawn later with another program. */
/* Vertex shader's inputs must be compatible with the batch's vertex format. */

void GPU_batch_program_use_begin(GPUBatch *); /* call before Batch_Uniform (temp hack?) */
void GPU_batch_program_use_end(GPUBatch *);

/* Uniform setters: look up `name` on the batch's shader interface and upload
 * the value. Presumably require the program to be in use (see
 * GPU_batch_program_use_begin above) — TODO confirm. */
void GPU_batch_uniform_1ui(GPUBatch *, const char *name, uint value);
void GPU_batch_uniform_1i(GPUBatch *, const char *name, int value);
void GPU_batch_uniform_1b(GPUBatch *, const char *name, bool value);
void GPU_batch_uniform_1f(GPUBatch *, const char *name, float value);
void GPU_batch_uniform_2f(GPUBatch *, const char *name, float x, float y);
void GPU_batch_uniform_3f(GPUBatch *, const char *name, float x, float y, float z);
void GPU_batch_uniform_4f(GPUBatch *, const char *name, float x, float y, float z, float w);
void GPU_batch_uniform_2fv(GPUBatch *, const char *name, const float data[2]);
void GPU_batch_uniform_3fv(GPUBatch *, const char *name, const float data[3]);
void GPU_batch_uniform_4fv(GPUBatch *, const char *name, const float data[4]);
void GPU_batch_uniform_2fv_array(GPUBatch *, const char *name, const int len, const float *data);
void GPU_batch_uniform_4fv_array(GPUBatch *, const char *name, const int len, const float *data);
void GPU_batch_uniform_mat4(GPUBatch *, const char *name, const float data[4][4]);

/** Draw the entire batch with its current program. */
void GPU_batch_draw(GPUBatch *);

/* Needs to be called before GPU_batch_draw_advanced. */
void GPU_batch_bind(GPUBatch *);
/* This does not bind/unbind shader and does not call GPU_matrix_bind() */
/** Draw a sub-range: `v_first`/`v_count` select vertices, `i_first`/`i_count`
 * select instances. */
void GPU_batch_draw_advanced(GPUBatch *, int v_first, int v_count, int i_first, int i_count);

/* Does not even need batch */
void GPU_draw_primitive(GPUPrimType, int v_count);

#if 0 /* future plans */

/* NOTE(review): everything in this `#if 0` section is excluded from
 * compilation — it is a design sketch only. `VertexBufferStuff` and
 * `ElementListStuff` below are placeholder names with no declaration. */

/* Can multiple batches share a GPUVertBuf? Use ref count? */

/* We often need a batch with its own data, to be created and discarded together. */
/* WithOwn variants reduce number of system allocations. */

typedef struct BatchWithOwnVertexBuffer {
  GPUBatch batch;
  GPUVertBuf verts; /* link batch.verts to this */
} BatchWithOwnVertexBuffer;

typedef struct BatchWithOwnElementList {
  GPUBatch batch;
  GPUIndexBuf elem; /* link batch.elem to this */
} BatchWithOwnElementList;

typedef struct BatchWithOwnVertexBufferAndElementList {
  GPUBatch batch;
  GPUIndexBuf elem; /* link batch.elem to this */
  GPUVertBuf verts; /* link batch.verts to this */
} BatchWithOwnVertexBufferAndElementList;

GPUBatch *create_BatchWithOwnVertexBuffer(GPUPrimType, GPUVertFormat *, uint v_len, GPUIndexBuf *);
GPUBatch *create_BatchWithOwnElementList(GPUPrimType, GPUVertBuf *, uint prim_len);
GPUBatch *create_BatchWithOwnVertexBufferAndElementList(GPUPrimType,
                                                        GPUVertFormat *,
                                                        uint v_len,
                                                        uint prim_len);
/* verts: shared, own */
/* elem: none, shared, own */
GPUBatch *create_BatchInGeneral(GPUPrimType, VertexBufferStuff, ElementListStuff);

#endif /* future plans */

/**
 * #GPUDrawList is an API to do lots of similar draw-calls very fast using multi-draw-indirect.
 * There is a fallback if the feature is not supported.
 */
typedef struct GPUDrawList GPUDrawList; /* opaque */

/** Create a list able to hold `length` commands; free with GPU_draw_list_discard. */
GPUDrawList *GPU_draw_list_create(int length);
void GPU_draw_list_discard(GPUDrawList *list);
/** Associate `batch` with the list; queued commands draw from it. */
void GPU_draw_list_init(GPUDrawList *list, GPUBatch *batch);
/** Queue one draw: vertex range + instance range (as in GPU_batch_draw_advanced). */
void GPU_draw_list_command_add(
    GPUDrawList *list, int v_first, int v_count, int i_first, int i_count);
/** Issue all queued commands. */
void GPU_draw_list_submit(GPUDrawList *list);

/* Module-level init/exit (lower-case prefix: internal, not per-batch API). */
void gpu_batch_init(void);
void gpu_batch_exit(void);

/* Macros */

/**
 * Discard `batch` and set the pointer to NULL afterwards, so a repeated
 * invocation (or stale use) is harmless. No-op when the pointer is NULL.
 *
 * Fixed: every use of the argument is now parenthesized, so passing a
 * non-trivial lvalue expression expands with the intended precedence.
 */
#define GPU_BATCH_DISCARD_SAFE(batch) \
  do { \
    if ((batch) != NULL) { \
      GPU_batch_discard(batch); \
      (batch) = NULL; \
    } \
  } while (0)

/**
 * Clear `batch` (GPU_batch_clear: does not free, does not run the free
 * callback) and zero the struct for reuse. No-op when the pointer is NULL.
 *
 * NOTE: the expansion calls memset(), so the including file needs <string.h>.
 * Fixed: the argument is parenthesized at every use for macro hygiene.
 */
#define GPU_BATCH_CLEAR_SAFE(batch) \
  do { \
    if ((batch) != NULL) { \
      GPU_batch_clear(batch); \
      memset((batch), 0, sizeof(*(batch))); \
    } \
  } while (0)

/**
 * Discard every batch in a MEM_*-allocated array of GPUBatch pointers
 * (NULL entries are skipped by GPU_BATCH_DISCARD_SAFE), then free the array
 * itself with MEM_freeN(). No-op when the array pointer is NULL.
 *
 * Fixed: `_batch_array` and `_len` are parenthesized at every use, so
 * expressions like `count + 1` expand with the intended precedence.
 */
#define GPU_BATCH_DISCARD_ARRAY_SAFE(_batch_array, _len) \
  do { \
    if ((_batch_array) != NULL) { \
      BLI_assert((_len) > 0); \
      for (int _i = 0; _i < (_len); _i++) { \
        GPU_BATCH_DISCARD_SAFE((_batch_array)[_i]); \
      } \
      MEM_freeN(_batch_array); \
    } \
  } while (0)

#ifdef __cplusplus
}
#endif