1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
|
/*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software Foundation,
* Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*
* Copyright 2020, Blender Foundation.
* All rights reserved.
*/
/** \file
* \ingroup gpu
*
* GPU geometry batch
* Contains VAOs + VBOs + Shader representing a drawable entity.
*/
#pragma once
#include "MEM_guardedalloc.h"
#include "gpu_batch_private.hh"
#include "gl_index_buffer.hh"
#include "gl_vertex_buffer.hh"
#include "glew-mx.h"
namespace blender {
namespace gpu {
class GLContext;
class GLShaderInterface;
#define GPU_VAO_STATIC_LEN 3
/* Vao management: remembers all geometry state (vertex attribute bindings & element buffer)
* for each shader interface. Start with a static number of vaos and fallback to dynamic count
* if necessary. Once a batch goes dynamic it does not go back. */
class GLVaoCache {
 private:
  /** Context for which the vao_cache_ was generated. */
  GLContext *context_ = nullptr;
  /** Last interface this batch was drawn with. */
  GLShaderInterface *interface_ = nullptr;
  /** Cached VAO for the last interface. */
  GLuint vao_id_ = 0;
  /** Used when arb_base_instance is not supported. */
  GLuint vao_base_instance_ = 0;
  int base_instance_ = 0;

  /** True once the cache grew past #GPU_VAO_STATIC_LEN entries; never reset (see class doc). */
  bool is_dynamic_vao_count = false;
  union {
    /** Static handle count. */
    struct {
      const GLShaderInterface *interfaces[GPU_VAO_STATIC_LEN];
      GLuint vao_ids[GPU_VAO_STATIC_LEN];
    } static_vaos;
    /** Dynamic handle count. */
    struct {
      uint count;
      const GLShaderInterface **interfaces;
      GLuint *vao_ids;
    } dynamic_vaos;
  };

 public:
  GLVaoCache();
  ~GLVaoCache();

  /** Get (or lazily create) the VAO matching the batch's current shader interface. */
  GLuint vao_get(GPUBatch *batch);
  /** Variant used when arb_base_instance is unavailable; binds with an instance offset. */
  GLuint base_instance_vao_get(GPUBatch *batch, int i_first);

  /** Search the cache for \a interface. NOTE(review): presumably returns 0 on miss — confirm in the definition. */
  GLuint lookup(const GLShaderInterface *interface);
  void insert(const GLShaderInterface *interface, GLuint vao_id);
  void remove(const GLShaderInterface *interface);
  void clear();

 private:
  void init();
  /** Ensure the cache still matches the active context (see #context_). */
  void context_check();
};
class GLBatch : public Batch {
 public:
  /** All VAOs corresponding to all the GPUShaderInterface this batch was drawn with. */
  GLVaoCache vao_cache_;

 public:
  void draw(int v_first, int v_count, int i_first, int i_count) override;
  /** Bind the VAO (and base-instance state) for drawing starting at instance \a i_first. */
  void bind(int i_first);

  /* Convenience getters: expose the generic GPU buffers as their GL backend types. */

  /** The element (index) buffer, cast to the GL implementation type. */
  GLIndexBuf *elem_() const
  {
    return static_cast<GLIndexBuf *>(unwrap(elem));
  }
  /** Vertex buffer at \a index, cast to the GL implementation type. */
  GLVertBuf *verts_(const int index) const
  {
    return static_cast<GLVertBuf *>(unwrap(verts[index]));
  }
  /** Instance-rate buffer at \a index, cast to the GL implementation type. */
  GLVertBuf *inst_(const int index) const
  {
    return static_cast<GLVertBuf *>(unwrap(inst[index]));
  }

  MEM_CXX_CLASS_ALLOC_FUNCS("GLBatch");
};
} // namespace gpu
} // namespace blender
|