git.blender.org/blender.git
author    Nicholas Bishop <nicholasbishop@gmail.com>  2012-03-09 13:30:03 +0400
committer Nicholas Bishop <nicholasbishop@gmail.com>  2012-03-09 13:30:03 +0400
commit    0641f1723b5a0290d4d67a52d7db66df1be79f61 (patch)
tree      499b1f887b7eb02519fcb8c356e1f4845971989b /source
parent    dfdfa3d51b48a6bffd90779a1778dcf0e9a751e4 (diff)
Optimize index buffers for multires drawing in sculpt mode.
All multires grids have exactly the same ordering, so rather than allocate a new index buffer for each PBVH node, just allocate one that can be reused for every grid. This requires more draw calls (one per grid rather than one per PBVH node), but less graphics memory.
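The following is a minimal sketch of that idea, not the committed Blender code itself: build the quad indices for a single (gridsize x gridsize) grid once, then issue one draw call per grid while offsetting the vertex pointer into the shared coord VBO. Function names such as build_grid_quads() and draw_all_grids(), and the vert_stride parameter, are illustrative assumptions; the sketch also ignores the normal pointer and the unsigned-short/unsigned-int index-type switch that the real patch handles.

    /* Sketch only: shared index buffer for one grid, reused for every grid. */
    #include <GL/glew.h>
    #include <stddef.h>

    /* Fill 'indices' with (gridsize-1)^2 quads for a single grid. */
    static void build_grid_quads(unsigned short *indices, int gridsize)
    {
    	int i, j;
    	for(i = 0; i < gridsize - 1; ++i) {
    		for(j = 0; j < gridsize - 1; ++j) {
    			*(indices++) = i * gridsize + j + 1;
    			*(indices++) = i * gridsize + j;
    			*(indices++) = (i + 1) * gridsize + j;
    			*(indices++) = (i + 1) * gridsize + j + 1;
    		}
    	}
    }

    /* One glDrawElements call per grid, all grids sharing the same indices. */
    static void draw_all_grids(GLuint vert_buf, GLuint index_buf,
                               int totgrid, int gridsize, GLsizei vert_stride)
    {
    	const int totquad = (gridsize - 1) * (gridsize - 1);
    	size_t offset = 0;
    	int i;

    	glBindBufferARB(GL_ARRAY_BUFFER_ARB, vert_buf);
    	glBindBufferARB(GL_ELEMENT_ARRAY_BUFFER_ARB, index_buf);
    	glEnableClientState(GL_VERTEX_ARRAY);

    	for(i = 0; i < totgrid; i++) {
    		/* Point at this grid's vertices; the indices stay the same. */
    		glVertexPointer(3, GL_FLOAT, vert_stride, (const GLvoid *)offset);
    		glDrawElements(GL_QUADS, totquad * 4, GL_UNSIGNED_SHORT, 0);
    		offset += (size_t)gridsize * (size_t)gridsize * (size_t)vert_stride;
    	}
    }

The trade-off is the one stated above: the index buffer shrinks from totgrid*(gridsize-1)^2 quads to (gridsize-1)^2 quads, at the cost of totgrid draw calls instead of one per PBVH node.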
Diffstat (limited to 'source')
-rw-r--r--  source/blender/gpu/intern/gpu_buffers.c | 102
1 file changed, 65 insertions(+), 37 deletions(-)
diff --git a/source/blender/gpu/intern/gpu_buffers.c b/source/blender/gpu/intern/gpu_buffers.c
index 770b8fc0adf..23bc79e4187 100644
--- a/source/blender/gpu/intern/gpu_buffers.c
+++ b/source/blender/gpu/intern/gpu_buffers.c
@@ -1453,73 +1453,95 @@ void GPU_update_grid_buffers(GPU_Buffers *buffers, DMGridData **grids,
#define FILL_QUAD_BUFFER(type_) \
{ \
type_ *quad_data; \
- int offset = 0; \
- int i, j, k; \
+ int i, j; \
\
glBufferDataARB(GL_ELEMENT_ARRAY_BUFFER_ARB, \
- sizeof(type_) * totquad * 4, NULL, \
+ sizeof(type_) * (*totquad) * 4, NULL, \
GL_STATIC_DRAW_ARB); \
\
/* Fill the quad buffer */ \
quad_data = glMapBufferARB(GL_ELEMENT_ARRAY_BUFFER_ARB, \
GL_WRITE_ONLY_ARB); \
if(quad_data) { \
- for(i = 0; i < totgrid; ++i) { \
+ for(i = 0; i < gridsize-1; ++i) { \
for(j = 0; j < gridsize-1; ++j) { \
- for(k = 0; k < gridsize-1; ++k) { \
- *(quad_data++)= offset + j*gridsize + k+1; \
- *(quad_data++)= offset + j*gridsize + k; \
- *(quad_data++)= offset + (j+1)*gridsize + k; \
- *(quad_data++)= offset + (j+1)*gridsize + k+1; \
- } \
+ *(quad_data++)= i*gridsize + j+1; \
+ *(quad_data++)= i*gridsize + j; \
+ *(quad_data++)= (i+1)*gridsize + j; \
+ *(quad_data++)= (i+1)*gridsize + j+1; \
} \
- \
- offset += gridsize*gridsize; \
} \
glUnmapBufferARB(GL_ELEMENT_ARRAY_BUFFER_ARB); \
} \
else { \
- glDeleteBuffersARB(1, &buffers->index_buf); \
- buffers->index_buf = 0; \
+ glDeleteBuffersARB(1, &buffer); \
+ buffer = 0; \
} \
}
/* end FILL_QUAD_BUFFER */
-GPU_Buffers *GPU_build_grid_buffers(int totgrid, int gridsize)
+static GLuint gpu_get_grid_buffer(int gridsize, GLenum *index_type, unsigned *totquad)
{
- GPU_Buffers *buffers;
- int totquad;
-
- buffers = MEM_callocN(sizeof(GPU_Buffers), "GPU_Buffers");
+ static int prev_gridsize = -1;
+ static GLenum prev_index_type = 0;
+ static GLuint buffer = 0;
+ static unsigned prev_totquad;
+
+ /* VBO is disabled; delete the previous buffer (if it exists) and
+ return an invalid handle */
+ if(!GLEW_ARB_vertex_buffer_object || (U.gameflags & USER_DISABLE_VBO)) {
+ if(buffer)
+ glDeleteBuffersARB(1, &buffer);
+ return 0;
+ }
- /* Count the number of quads */
- totquad= (gridsize-1)*(gridsize-1)*totgrid;
+ /* VBO is already built */
+ if(buffer && prev_gridsize == gridsize) {
+ *index_type = prev_index_type;
+ *totquad = prev_totquad;
+ return buffer;
+ }
- /* Generate index buffer object */
- if(GLEW_ARB_vertex_buffer_object && !(U.gameflags & USER_DISABLE_VBO))
- glGenBuffersARB(1, &buffers->index_buf);
+ /* Build new VBO */
+ glGenBuffersARB(1, &buffer);
+ if(buffer) {
+ *totquad= (gridsize-1)*(gridsize-1);
- if(buffers->index_buf) {
- glBindBufferARB(GL_ELEMENT_ARRAY_BUFFER_ARB, buffers->index_buf);
+ glBindBufferARB(GL_ELEMENT_ARRAY_BUFFER_ARB, buffer);
- if(totquad < USHRT_MAX) {
- buffers->index_type = GL_UNSIGNED_SHORT;
+ if(*totquad < USHRT_MAX) {
+ *index_type = GL_UNSIGNED_SHORT;
FILL_QUAD_BUFFER(unsigned short);
}
else {
- buffers->index_type = GL_UNSIGNED_INT;
+ *index_type = GL_UNSIGNED_INT;
FILL_QUAD_BUFFER(unsigned int);
}
glBindBufferARB(GL_ELEMENT_ARRAY_BUFFER_ARB, 0);
}
- /* Build VBO */
+ prev_gridsize = gridsize;
+ prev_index_type = *index_type;
+ prev_totquad = *totquad;
+ return buffer;
+}
+
+GPU_Buffers *GPU_build_grid_buffers(int totgrid, int gridsize)
+{
+ GPU_Buffers *buffers;
+
+ buffers = MEM_callocN(sizeof(GPU_Buffers), "GPU_Buffers");
+
+ /* Build index VBO */
+ buffers->index_buf = gpu_get_grid_buffer(gridsize, &buffers->index_type, &buffers->tot_quad);
+ buffers->totgrid = totgrid;
+ buffers->gridsize = gridsize;
+
+ /* Build coord/normal VBO */
if(buffers->index_buf)
glGenBuffersARB(1, &buffers->vert_buf);
- buffers->tot_quad = totquad;
-
return buffers;
}
@@ -1642,10 +1664,16 @@ void GPU_draw_buffers(GPU_Buffers *buffers, DMSetMaterial setMaterial)
glBindBufferARB(GL_ELEMENT_ARRAY_BUFFER_ARB, buffers->index_buf);
if(buffers->tot_quad) {
- glVertexPointer(3, GL_FLOAT, sizeof(DMGridData), (void*)offsetof(DMGridData, co));
- glNormalPointer(GL_FLOAT, sizeof(DMGridData), (void*)offsetof(DMGridData, no));
-
- glDrawElements(GL_QUADS, buffers->tot_quad * 4, buffers->index_type, 0);
+ unsigned offset = 0;
+ int i;
+ for(i = 0; i < buffers->totgrid; i++) {
+ glVertexPointer(3, GL_FLOAT, sizeof(DMGridData), offset + (char*)offsetof(DMGridData, co));
+ glNormalPointer(GL_FLOAT, sizeof(DMGridData), offset + (char*)offsetof(DMGridData, no));
+
+ glDrawElements(GL_QUADS, buffers->tot_quad * 4, buffers->index_type, 0);
+
+ offset += buffers->gridsize * buffers->gridsize * sizeof(DMGridData);
+ }
}
else {
glVertexPointer(3, GL_FLOAT, sizeof(VertexBufferFormat), (void*)offsetof(VertexBufferFormat, co));
@@ -1674,7 +1702,7 @@ void GPU_free_buffers(GPU_Buffers *buffers)
if(buffers) {
if(buffers->vert_buf)
glDeleteBuffersARB(1, &buffers->vert_buf);
- if(buffers->index_buf)
+ if(buffers->index_buf && buffers->tot_tri)
glDeleteBuffersARB(1, &buffers->index_buf);
MEM_freeN(buffers);