From 0ffb96f701176fe5b3eb03bd90414ed7fd4502cf Mon Sep 17 00:00:00 2001
From: Brecht Van Lommel
Date: Sat, 28 Sep 2019 19:15:06 +0200
Subject: Sculpt: minor optimizations for GPU draw buffer filling

Ref T70295
---
 source/blender/blenkernel/intern/pbvh.c | 28 ++++++++-------
 source/blender/gpu/intern/gpu_buffers.c | 61 +++++++++++++++++++--------------
 2 files changed, 50 insertions(+), 39 deletions(-)

(limited to 'source')

diff --git a/source/blender/blenkernel/intern/pbvh.c b/source/blender/blenkernel/intern/pbvh.c
index 781e7b712d6..c920b80d818 100644
--- a/source/blender/blenkernel/intern/pbvh.c
+++ b/source/blender/blenkernel/intern/pbvh.c
@@ -2260,19 +2260,21 @@ void BKE_pbvh_update_normals(PBVH *bvh, struct SubdivCCG *subdiv_ccg)
   BKE_pbvh_search_gather(
       bvh, update_search_cb, POINTER_FROM_INT(PBVH_UpdateNormals), &nodes, &totnode);
 
-  if (bvh->type == PBVH_BMESH) {
-    pbvh_bmesh_normals_update(nodes, totnode);
-  }
-  else if (bvh->type == PBVH_FACES) {
-    pbvh_faces_update_normals(bvh, nodes, totnode);
-  }
-  else if (bvh->type == PBVH_GRIDS) {
-    struct CCGFace **faces;
-    int num_faces;
-    BKE_pbvh_get_grid_updates(bvh, true, (void ***)&faces, &num_faces);
-    if (num_faces > 0) {
-      BKE_subdiv_ccg_update_normals(subdiv_ccg, faces, num_faces);
-      MEM_freeN(faces);
+  if (totnode > 0) {
+    if (bvh->type == PBVH_BMESH) {
+      pbvh_bmesh_normals_update(nodes, totnode);
+    }
+    else if (bvh->type == PBVH_FACES) {
+      pbvh_faces_update_normals(bvh, nodes, totnode);
+    }
+    else if (bvh->type == PBVH_GRIDS) {
+      struct CCGFace **faces;
+      int num_faces;
+      BKE_pbvh_get_grid_updates(bvh, true, (void ***)&faces, &num_faces);
+      if (num_faces > 0) {
+        BKE_subdiv_ccg_update_normals(subdiv_ccg, faces, num_faces);
+        MEM_freeN(faces);
+      }
     }
   }
 
diff --git a/source/blender/gpu/intern/gpu_buffers.c b/source/blender/gpu/intern/gpu_buffers.c
index cf9a8142530..ce88b57420e 100644
--- a/source/blender/gpu/intern/gpu_buffers.c
+++ b/source/blender/gpu/intern/gpu_buffers.c
@@ -194,8 +194,8 @@ void GPU_pbvh_mesh_buffers_update(GPU_PBVH_Buffers *buffers,
                                   const int (*face_vert_indices)[3],
                                   const int update_flags)
 {
-  const bool show_mask = (update_flags & GPU_PBVH_BUFFERS_SHOW_MASK) != 0;
-  const bool show_vcol = (update_flags & GPU_PBVH_BUFFERS_SHOW_VCOL) != 0;
+  const bool show_mask = vmask && (update_flags & GPU_PBVH_BUFFERS_SHOW_MASK) != 0;
+  const bool show_vcol = vcol && (update_flags & GPU_PBVH_BUFFERS_SHOW_VCOL) != 0;
   bool empty_mask = true;
 
   {
@@ -203,30 +203,38 @@ void GPU_pbvh_mesh_buffers_update(GPU_PBVH_Buffers *buffers,
 
     /* Build VBO */
     if (gpu_pbvh_vert_buf_data_set(buffers, totelem)) {
+      GPUVertBufRaw pos_step = {0};
+      GPUVertBufRaw nor_step = {0};
+      GPUVertBufRaw msk_step = {0};
+      GPUVertBufRaw col_step = {0};
+
+      GPU_vertbuf_attr_get_raw_data(buffers->vert_buf, g_vbo_id.pos, &pos_step);
+      GPU_vertbuf_attr_get_raw_data(buffers->vert_buf, g_vbo_id.nor, &nor_step);
+      if (show_mask) {
+        GPU_vertbuf_attr_get_raw_data(buffers->vert_buf, g_vbo_id.msk, &msk_step);
+      }
+      if (show_vcol) {
+        GPU_vertbuf_attr_get_raw_data(buffers->vert_buf, g_vbo_id.col, &col_step);
+      }
+
       /* Vertex data is shared if smooth-shaded, but separate
        * copies are made for flat shading because normals
        * shouldn't be shared. */
       if (buffers->smooth) {
         for (uint i = 0; i < totvert; i++) {
-          const MVert *v = &mvert[vert_indices[i]];
-          GPU_vertbuf_attr_set(buffers->vert_buf, g_vbo_id.pos, i, v->co);
-          GPU_vertbuf_attr_set(buffers->vert_buf, g_vbo_id.nor, i, v->no);
-        }
-
-        if (vmask && show_mask) {
-          for (uint i = 0; i < buffers->face_indices_len; i++) {
-            const MLoopTri *lt = &buffers->looptri[buffers->face_indices[i]];
-            for (uint j = 0; j < 3; j++) {
-              int vidx = face_vert_indices[i][j];
-              int v_index = buffers->mloop[lt->tri[j]].v;
-              float fmask = vmask[v_index];
-              GPU_vertbuf_attr_set(buffers->vert_buf, g_vbo_id.msk, vidx, &fmask);
-              empty_mask = empty_mask && (fmask == 0.0f);
-            }
+          const int vidx = vert_indices[i];
+          const MVert *v = &mvert[vidx];
+          copy_v3_v3(GPU_vertbuf_raw_step(&pos_step), v->co);
+          copy_v3_v3_short(GPU_vertbuf_raw_step(&nor_step), v->no);
+
+          if (show_mask) {
+            float mask = vmask[vidx];
+            *(float *)GPU_vertbuf_raw_step(&msk_step) = mask;
+            empty_mask = empty_mask && (mask == 0.0f);
           }
         }
 
-        if (vcol && show_vcol) {
+        if (show_vcol) {
           for (uint i = 0; i < buffers->face_indices_len; i++) {
             const MLoopTri *lt = &buffers->looptri[buffers->face_indices[i]];
             for (int j = 0; j < 3; j++) {
@@ -266,27 +274,28 @@ void GPU_pbvh_mesh_buffers_update(GPU_PBVH_Buffers *buffers,
           }
 
           float fmask = 0.0f;
-          if (vmask && show_mask) {
+          if (show_mask) {
             fmask = (vmask[vtri[0]] + vmask[vtri[1]] + vmask[vtri[2]]) / 3.0f;
           }
 
           for (uint j = 0; j < 3; j++) {
             const MVert *v = &mvert[vtri[j]];
-            GPU_vertbuf_attr_set(buffers->vert_buf, g_vbo_id.pos, vbo_index, v->co);
-            GPU_vertbuf_attr_set(buffers->vert_buf, g_vbo_id.nor, vbo_index, no);
-            GPU_vertbuf_attr_set(buffers->vert_buf, g_vbo_id.msk, vbo_index, &fmask);
+            copy_v3_v3(GPU_vertbuf_raw_step(&pos_step), v->co);
+            copy_v3_v3_short(GPU_vertbuf_raw_step(&nor_step), v->no);
+            if (show_mask) {
+              *(float *)GPU_vertbuf_raw_step(&msk_step) = fmask;
+              empty_mask = empty_mask && (fmask == 0.0f);
+            }
 
-            if (vcol && show_vcol) {
+            if (show_vcol) {
               const uint loop_index = lt->tri[j];
               const uchar *elem = &vcol[loop_index].r;
-              GPU_vertbuf_attr_set(buffers->vert_buf, g_vbo_id.col, vbo_index, elem);
+              memcpy(GPU_vertbuf_raw_step(&col_step), elem, sizeof(uchar) * 4);
             }
 
             vbo_index++;
           }
-
-          empty_mask = empty_mask && (fmask == 0.0f);
         }
       }
--
cgit v1.2.3