Welcome to mirror list, hosted at ThFree Co, Russian Federation.

git.blender.org/blender.git - Unnamed repository; edit this file 'description' to name the repository.
summary | refs | log | tree | commit | diff
diff options
context:
space:
mode:
-rw-r--r--  source/blender/blenkernel/intern/pbvh.c          |   3
-rw-r--r--  source/blender/blenkernel/intern/pbvh_intern.h   |   5
-rw-r--r--  source/blender/gpu/GPU_buffers.h                 |  10
-rw-r--r--  source/blender/gpu/intern/gpu_buffers.c          | 124
-rw-r--r--  source/blender/gpu/intern/gpu_init_exit.c        |   1
5 files changed, 83 insertions, 60 deletions
diff --git a/source/blender/blenkernel/intern/pbvh.c b/source/blender/blenkernel/intern/pbvh.c
index d73f087a3fe..58ec75dc706 100644
--- a/source/blender/blenkernel/intern/pbvh.c
+++ b/source/blender/blenkernel/intern/pbvh.c
@@ -637,6 +637,7 @@ void BKE_pbvh_free(PBVH *bvh)
BLI_gset_free(node->bm_other_verts, NULL);
}
}
+ GPU_free_pbvh_buffer_multires(&bvh->grid_common_gpu_buffer);
if (bvh->deformed) {
if (bvh->verts) {
@@ -1100,7 +1101,7 @@ static void pbvh_update_draw_buffers(PBVH *bvh, PBVHNode **nodes, int totnode)
node->totprim,
bvh->grid_hidden,
bvh->gridkey.grid_size,
- &bvh->gridkey);
+ &bvh->gridkey, &bvh->grid_common_gpu_buffer);
break;
case PBVH_FACES:
node->draw_buffers =
diff --git a/source/blender/blenkernel/intern/pbvh_intern.h b/source/blender/blenkernel/intern/pbvh_intern.h
index bae323dedef..4d2307c3e12 100644
--- a/source/blender/blenkernel/intern/pbvh_intern.h
+++ b/source/blender/blenkernel/intern/pbvh_intern.h
@@ -145,6 +145,11 @@ struct PBVH {
const DMFlagMat *grid_flag_mats;
int totgrid;
BLI_bitmap **grid_hidden;
+ /* index_buf of GPU_PBVH_Buffers can be the same for all 'fully drawn' nodes (same size).
+ * Previously was stored in a static var in gpu_buffer.c, but this breaks in case we handle several different
+ * objects in sculpt mode with different sizes at the same time, so now storing that common gpu buffer
+ * in an opaque pointer per pbvh. See T47637. */
+ struct GridCommonGPUBuffer *grid_common_gpu_buffer;
/* Only used during BVH build and update,
* don't need to remain valid after */
diff --git a/source/blender/gpu/GPU_buffers.h b/source/blender/gpu/GPU_buffers.h
index ee7abe08aba..aefaf1a0f54 100644
--- a/source/blender/gpu/GPU_buffers.h
+++ b/source/blender/gpu/GPU_buffers.h
@@ -49,6 +49,7 @@ struct DerivedMesh;
struct GSet;
struct GPUVertPointLink;
struct GPUDrawObject;
+struct GridCommonGPUBuffer;
struct PBVH;
struct MVert;
@@ -160,9 +161,6 @@ void GPU_buffer_free(GPUBuffer *buffer);
void GPU_drawobject_free(struct DerivedMesh *dm);
-/* free special global multires grid buffer */
-void GPU_buffer_multires_free(bool force);
-
/* flag that controls data type to fill buffer with, a modifier will prepare. */
typedef enum {
GPU_BUFFER_VERTEX = 0,
@@ -231,8 +229,9 @@ GPU_PBVH_Buffers *GPU_build_mesh_pbvh_buffers(
const int *face_indices,
const int face_indices_len);
-GPU_PBVH_Buffers *GPU_build_grid_pbvh_buffers(int *grid_indices, int totgrid,
- unsigned int **grid_hidden, int gridsize, const struct CCGKey *key);
+GPU_PBVH_Buffers *GPU_build_grid_pbvh_buffers(
+ int *grid_indices, int totgrid,unsigned int **grid_hidden, int gridsize, const struct CCGKey *key,
+ struct GridCommonGPUBuffer **grid_common_gpu_buffer);
GPU_PBVH_Buffers *GPU_build_bmesh_pbvh_buffers(bool smooth_shading);
@@ -267,5 +266,6 @@ void GPU_init_draw_pbvh_BB(void);
bool GPU_pbvh_buffers_diffuse_changed(GPU_PBVH_Buffers *buffers, struct GSet *bm_faces, bool show_diffuse_color);
void GPU_free_pbvh_buffers(GPU_PBVH_Buffers *buffers);
+void GPU_free_pbvh_buffer_multires(struct GridCommonGPUBuffer **grid_common_gpu_buffer);
#endif
diff --git a/source/blender/gpu/intern/gpu_buffers.c b/source/blender/gpu/intern/gpu_buffers.c
index f80ce3c1fab..2c6f204d9d0 100644
--- a/source/blender/gpu/intern/gpu_buffers.c
+++ b/source/blender/gpu/intern/gpu_buffers.c
@@ -107,10 +107,12 @@ static GPUAttrib attribData[MAX_GPU_ATTRIB_DATA] = { { -1, 0, 0 } };
static ThreadMutex buffer_mutex = BLI_MUTEX_INITIALIZER;
/* multires global buffer, can be used for many grids having the same grid size */
-static GPUBuffer *mres_glob_buffer = NULL;
-static int mres_prev_gridsize = -1;
-static GLenum mres_prev_index_type = 0;
-static unsigned mres_prev_totquad = 0;
+typedef struct GridCommonGPUBuffer {
+ GPUBuffer *mres_buffer;
+ int mres_prev_gridsize;
+ GLenum mres_prev_index_type;
+ unsigned mres_prev_totquad;
+} GridCommonGPUBuffer;
void GPU_buffer_material_finalize(GPUDrawObject *gdo, GPUBufferMaterial *matinfo, int totmat)
{
@@ -407,33 +409,6 @@ void GPU_buffer_free(GPUBuffer *buffer)
BLI_mutex_unlock(&buffer_mutex);
}
-void GPU_buffer_multires_free(bool force)
-{
- if (!mres_glob_buffer) {
- /* Early output, no need to lock in this case, */
- return;
- }
-
- if (force && BLI_thread_is_main()) {
- if (mres_glob_buffer) {
- if (mres_glob_buffer->id)
- glDeleteBuffers(1, &mres_glob_buffer->id);
- MEM_freeN(mres_glob_buffer);
- }
- }
- else {
- BLI_mutex_lock(&buffer_mutex);
- gpu_buffer_free_intern(mres_glob_buffer);
- BLI_mutex_unlock(&buffer_mutex);
- }
-
- mres_glob_buffer = NULL;
- mres_prev_gridsize = -1;
- mres_prev_index_type = 0;
- mres_prev_totquad = 0;
-}
-
-
void GPU_drawobject_free(DerivedMesh *dm)
{
GPUDrawObject *gdo;
@@ -1009,6 +984,7 @@ struct GPU_PBVH_Buffers {
const int *grid_indices;
int totgrid;
bool has_hidden;
+ bool is_index_buf_global; /* Means index_buf uses global bvh's grid_common_gpu_buffer, **DO NOT** free it! */
bool use_bmesh;
@@ -1226,8 +1202,10 @@ GPU_PBVH_Buffers *GPU_build_mesh_pbvh_buffers(
/* An element index buffer is used for smooth shading, but flat
* shading requires separate vertex normals so an index buffer is
* can't be used there. */
- if (buffers->smooth)
+ if (buffers->smooth) {
buffers->index_buf = GPU_buffer_alloc(sizeof(unsigned short) * tottri * 3);
+ buffers->is_index_buf_global = false;
+ }
if (buffers->index_buf) {
/* Fill the triangle buffer */
@@ -1248,8 +1226,11 @@ GPU_PBVH_Buffers *GPU_build_mesh_pbvh_buffers(
GPU_buffer_unlock(buffers->index_buf, GPU_BINDING_INDEX);
}
else {
- GPU_buffer_free(buffers->index_buf);
+ if (!buffers->is_index_buf_global) {
+ GPU_buffer_free(buffers->index_buf);
+ }
buffers->index_buf = NULL;
+ buffers->is_index_buf_global = false;
}
}
@@ -1416,22 +1397,33 @@ void GPU_update_grid_pbvh_buffers(GPU_PBVH_Buffers *buffers, CCGElem **grids,
} (void)0
/* end FILL_QUAD_BUFFER */
-static GPUBuffer *gpu_get_grid_buffer(int gridsize, GLenum *index_type, unsigned *totquad)
+static GPUBuffer *gpu_get_grid_buffer(
+ int gridsize, GLenum *index_type, unsigned *totquad, GridCommonGPUBuffer **grid_common_gpu_buffer)
{
/* used in the FILL_QUAD_BUFFER macro */
BLI_bitmap * const *grid_hidden = NULL;
const int *grid_indices = NULL;
int totgrid = 1;
+ GridCommonGPUBuffer *gridbuff = *grid_common_gpu_buffer;
+
+ if (gridbuff == NULL) {
+ *grid_common_gpu_buffer = gridbuff = MEM_mallocN(sizeof(GridCommonGPUBuffer), __func__);
+ gridbuff->mres_buffer = NULL;
+ gridbuff->mres_prev_gridsize = -1;
+ gridbuff->mres_prev_index_type = 0;
+ gridbuff->mres_prev_totquad = 0;
+ }
+
/* VBO is already built */
- if (mres_glob_buffer && mres_prev_gridsize == gridsize) {
- *index_type = mres_prev_index_type;
- *totquad = mres_prev_totquad;
- return mres_glob_buffer;
+ if (gridbuff->mres_buffer && gridbuff->mres_prev_gridsize == gridsize) {
+ *index_type = gridbuff->mres_prev_index_type;
+ *totquad = gridbuff->mres_prev_totquad;
+ return gridbuff->mres_buffer;
}
/* we can't reuse old, delete the existing buffer */
- else if (mres_glob_buffer) {
- GPU_buffer_free(mres_glob_buffer);
+ else if (gridbuff->mres_buffer) {
+ GPU_buffer_free(gridbuff->mres_buffer);
}
/* Build new VBO */
@@ -1439,17 +1431,17 @@ static GPUBuffer *gpu_get_grid_buffer(int gridsize, GLenum *index_type, unsigned
if (gridsize * gridsize < USHRT_MAX) {
*index_type = GL_UNSIGNED_SHORT;
- FILL_QUAD_BUFFER(unsigned short, *totquad, mres_glob_buffer);
+ FILL_QUAD_BUFFER(unsigned short, *totquad, gridbuff->mres_buffer);
}
else {
*index_type = GL_UNSIGNED_INT;
- FILL_QUAD_BUFFER(unsigned int, *totquad, mres_glob_buffer);
+ FILL_QUAD_BUFFER(unsigned int, *totquad, gridbuff->mres_buffer);
}
- mres_prev_gridsize = gridsize;
- mres_prev_index_type = *index_type;
- mres_prev_totquad = *totquad;
- return mres_glob_buffer;
+ gridbuff->mres_prev_gridsize = gridsize;
+ gridbuff->mres_prev_index_type = *index_type;
+ gridbuff->mres_prev_totquad = *totquad;
+ return gridbuff->mres_buffer;
}
#define FILL_FAST_BUFFER(type_) \
@@ -1476,8 +1468,9 @@ static GPUBuffer *gpu_get_grid_buffer(int gridsize, GLenum *index_type, unsigned
} \
} (void)0
-GPU_PBVH_Buffers *GPU_build_grid_pbvh_buffers(int *grid_indices, int totgrid,
- BLI_bitmap **grid_hidden, int gridsize, const CCGKey *key)
+GPU_PBVH_Buffers *GPU_build_grid_pbvh_buffers(
+ int *grid_indices, int totgrid, BLI_bitmap **grid_hidden, int gridsize, const CCGKey *key,
+ GridCommonGPUBuffer **grid_common_gpu_buffer)
{
GPU_PBVH_Buffers *buffers;
int totquad;
@@ -1506,8 +1499,10 @@ GPU_PBVH_Buffers *GPU_build_grid_pbvh_buffers(int *grid_indices, int totgrid,
}
if (totquad == fully_visible_totquad) {
- buffers->index_buf = gpu_get_grid_buffer(gridsize, &buffers->index_type, &buffers->tot_quad);
+ buffers->index_buf = gpu_get_grid_buffer(
+ gridsize, &buffers->index_type, &buffers->tot_quad, grid_common_gpu_buffer);
buffers->has_hidden = false;
+ buffers->is_index_buf_global = true;
}
else {
buffers->tot_quad = totquad;
@@ -1522,6 +1517,7 @@ GPU_PBVH_Buffers *GPU_build_grid_pbvh_buffers(int *grid_indices, int totgrid,
}
buffers->has_hidden = true;
+ buffers->is_index_buf_global = false;
}
/* Build coord/normal VBO */
@@ -1746,8 +1742,9 @@ void GPU_update_bmesh_pbvh_buffers(GPU_PBVH_Buffers *buffers,
const int use_short = (maxvert < USHRT_MAX);
/* Initialize triangle index buffer */
- if (buffers->index_buf)
+ if (buffers->index_buf && !buffers->is_index_buf_global)
GPU_buffer_free(buffers->index_buf);
+ buffers->is_index_buf_global = false;
buffers->index_buf = GPU_buffer_alloc((use_short ?
sizeof(unsigned short) :
sizeof(unsigned int)) * 3 * tottri);
@@ -1792,12 +1789,19 @@ void GPU_update_bmesh_pbvh_buffers(GPU_PBVH_Buffers *buffers,
}
else {
/* Memory map failed */
- GPU_buffer_free(buffers->index_buf);
+ if (!buffers->is_index_buf_global) {
+ GPU_buffer_free(buffers->index_buf);
+ }
buffers->index_buf = NULL;
+ buffers->is_index_buf_global = false;
}
}
else if (buffers->index_buf) {
- GPU_buffer_free(buffers->index_buf);
+ if (!buffers->is_index_buf_global) {
+ GPU_buffer_free(buffers->index_buf);
+ }
+ buffers->index_buf = NULL;
+ buffers->is_index_buf_global = false;
}
}
@@ -1991,7 +1995,7 @@ void GPU_free_pbvh_buffers(GPU_PBVH_Buffers *buffers)
if (buffers) {
if (buffers->vert_buf)
GPU_buffer_free(buffers->vert_buf);
- if (buffers->index_buf && (buffers->tot_tri || buffers->has_hidden))
+ if (buffers->index_buf && !buffers->is_index_buf_global)
GPU_buffer_free(buffers->index_buf);
if (buffers->index_buf_fast)
GPU_buffer_free(buffers->index_buf_fast);
@@ -2004,6 +2008,20 @@ void GPU_free_pbvh_buffers(GPU_PBVH_Buffers *buffers)
}
}
+void GPU_free_pbvh_buffer_multires(GridCommonGPUBuffer **grid_common_gpu_buffer)
+{
+ GridCommonGPUBuffer *gridbuff = *grid_common_gpu_buffer;
+
+ if (gridbuff) {
+ if (gridbuff->mres_buffer) {
+ BLI_mutex_lock(&buffer_mutex);
+ gpu_buffer_free_intern(gridbuff->mres_buffer);
+ BLI_mutex_unlock(&buffer_mutex);
+ }
+ MEM_freeN(gridbuff);
+ *grid_common_gpu_buffer = NULL;
+ }
+}
/* debug function, draws the pbvh BB */
void GPU_draw_pbvh_BB(float min[3], float max[3], bool leaf)
diff --git a/source/blender/gpu/intern/gpu_init_exit.c b/source/blender/gpu/intern/gpu_init_exit.c
index da4dd65d2e1..8fed6a9ee80 100644
--- a/source/blender/gpu/intern/gpu_init_exit.c
+++ b/source/blender/gpu/intern/gpu_init_exit.c
@@ -73,7 +73,6 @@ void GPU_exit(void)
gpu_codegen_exit();
gpu_extensions_exit(); /* must come last */
- GPU_buffer_multires_free(true);
initialized = false;
}