git.blender.org/blender.git
Diffstat (limited to 'source/blender/gpu/intern')
 source/blender/gpu/intern/gpu_batch.c       |  15
 source/blender/gpu/intern/gpu_batch_utils.c |   1
 source/blender/gpu/intern/gpu_buffers.c     | 303
 source/blender/gpu/intern/gpu_draw.c        |   1
 source/blender/gpu/intern/gpu_element.c     |   2
 source/blender/gpu/intern/gpu_extensions.c  |  90
 source/blender/gpu/intern/gpu_init_exit.c   |   5
 source/blender/gpu/intern/gpu_platform.c    | 229
 source/blender/gpu/intern/gpu_private.h     |   4
 source/blender/gpu/intern/gpu_shader.c      |   1
 source/blender/gpu/intern/gpu_texture.c     |   1
 11 files changed, 410 insertions(+), 242 deletions(-)
diff --git a/source/blender/gpu/intern/gpu_batch.c b/source/blender/gpu/intern/gpu_batch.c
index 2620ba49799..fc578b4466c 100644
--- a/source/blender/gpu/intern/gpu_batch.c
+++ b/source/blender/gpu/intern/gpu_batch.c
@@ -29,6 +29,7 @@
#include "GPU_batch.h"
#include "GPU_batch_presets.h"
#include "GPU_extensions.h"
+#include "GPU_platform.h"
#include "GPU_matrix.h"
#include "GPU_shader.h"
@@ -718,8 +719,11 @@ void GPU_draw_primitive(GPUPrimType prim_type, int v_count)
#if 0
# define USE_MULTI_DRAW_INDIRECT 0
#else
+/* TODO: partial workaround for an NVIDIA driver bug on recent GTX/RTX cards
+ * that breaks instancing when using an indirect draw-call (see T70011). */
# define USE_MULTI_DRAW_INDIRECT \
- (GL_ARB_multi_draw_indirect && GPU_arb_base_instance_is_supported())
+ (GL_ARB_multi_draw_indirect && GPU_arb_base_instance_is_supported() && \
+ !GPU_type_matches(GPU_DEVICE_NVIDIA, GPU_OS_ANY, GPU_DRIVER_OFFICIAL))
#endif
typedef struct GPUDrawCommand {
@@ -852,16 +856,19 @@ void GPU_draw_list_submit(GPUDrawList *list)
uintptr_t offset = list->cmd_offset;
uint cmd_len = list->cmd_len;
size_t bytes_used = cmd_len * sizeof(GPUDrawCommandIndexed);
- list->cmd_offset += bytes_used;
list->cmd_len = 0; /* Avoid reuse. */
- if (USE_MULTI_DRAW_INDIRECT) {
+ /* Only do multi-draw indirect when there are more than 2 draw-calls.
+ * This avoids the overhead of buffer mapping if the scene is
+ * not very instance friendly. */
+ if (USE_MULTI_DRAW_INDIRECT && cmd_len > 2) {
GLenum prim = batch->gl_prim_type;
glBindBuffer(GL_DRAW_INDIRECT_BUFFER, list->buffer_id);
glFlushMappedBufferRange(GL_DRAW_INDIRECT_BUFFER, 0, bytes_used);
glUnmapBuffer(GL_DRAW_INDIRECT_BUFFER);
list->commands = NULL; /* Unmapped */
+ list->cmd_offset += bytes_used;
if (batch->elem) {
glMultiDrawElementsIndirect(prim, INDEX_TYPE(batch->elem), (void *)offset, cmd_len, 0);
@@ -875,6 +882,8 @@ void GPU_draw_list_submit(GPUDrawList *list)
if (batch->elem) {
GPUDrawCommandIndexed *cmd = list->commands_indexed;
for (int i = 0; i < cmd_len; i++, cmd++) {
+ /* Index start was added by the draw manager. Avoid counting it twice. */
+ cmd->v_first -= batch->elem->index_start;
GPU_batch_draw_advanced(batch, cmd->v_first, cmd->v_count, cmd->i_first, cmd->i_count);
}
}
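The change above does two things to GPU_draw_list_submit(): it restricts multi-draw indirect to cases where batching actually pays off (more than 2 commands), and it compensates for the index start in the per-command fallback. A condensed sketch of the resulting control flow, assuming the types and macros from gpu_batch.c and with the buffer mapping/unmapping details omitted:

/* Condensed sketch of the submit logic after this change (not the full function). */
static void draw_list_submit_sketch(GPUDrawList *list, GPUBatch *batch)
{
  uintptr_t offset = list->cmd_offset;
  uint cmd_len = list->cmd_len;
  list->cmd_len = 0; /* Avoid reuse. */

  if (USE_MULTI_DRAW_INDIRECT && cmd_len > 2) {
    /* Batched path: one indirect call for all commands. The command offset
     * only advances here, so the fallback below can rewrite the still-mapped
     * commands in place. */
    list->cmd_offset += cmd_len * sizeof(GPUDrawCommandIndexed);
    glMultiDrawElementsIndirect(batch->gl_prim_type, INDEX_TYPE(batch->elem),
                                (void *)offset, cmd_len, 0);
  }
  else {
    /* Fallback path: emit each command separately. v_first already contains
     * the element buffer's index_start (added by the draw manager), so it is
     * subtracted once here to avoid counting it twice. */
    GPUDrawCommandIndexed *cmd = list->commands_indexed;
    for (uint i = 0; i < cmd_len; i++, cmd++) {
      cmd->v_first -= batch->elem->index_start;
      GPU_batch_draw_advanced(batch, cmd->v_first, cmd->v_count, cmd->i_first, cmd->i_count);
    }
  }
}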
diff --git a/source/blender/gpu/intern/gpu_batch_utils.c b/source/blender/gpu/intern/gpu_batch_utils.c
index 3a8b392ef1d..825f72e175b 100644
--- a/source/blender/gpu/intern/gpu_batch_utils.c
+++ b/source/blender/gpu/intern/gpu_batch_utils.c
@@ -164,7 +164,6 @@ GPUBatch *GPU_batch_wire_from_poly_2d_encoded(const uchar *polys_flat,
BLI_assert(polys_step_len >= 2);
for (uint i_prev = polys_step_len - 1, i = 0; i < polys_step_len; i_prev = i++) {
union {
- uint8_t as_u8[4];
uint16_t as_u16[2];
uint32_t as_u32;
} data;
diff --git a/source/blender/gpu/intern/gpu_buffers.c b/source/blender/gpu/intern/gpu_buffers.c
index ed606ccb8c6..2c74afd2d8e 100644
--- a/source/blender/gpu/intern/gpu_buffers.c
+++ b/source/blender/gpu/intern/gpu_buffers.c
@@ -316,10 +316,12 @@ GPU_PBVH_Buffers *GPU_pbvh_mesh_buffers_build(const int (*face_vert_indices)[3],
const MLoopTri *looptri,
const MVert *mvert,
const int *face_indices,
- const int face_indices_len)
+ const int face_indices_len,
+ const struct Mesh *mesh)
{
GPU_PBVH_Buffers *buffers;
int i, tottri;
+ int tot_real_edges = 0;
buffers = MEM_callocN(sizeof(GPU_PBVH_Buffers), "GPU_Buffers");
@@ -332,6 +334,13 @@ GPU_PBVH_Buffers *GPU_pbvh_mesh_buffers_build(const int (*face_vert_indices)[3],
for (i = 0, tottri = 0; i < face_indices_len; i++) {
const MLoopTri *lt = &looptri[face_indices[i]];
if (!paint_is_face_hidden(lt, mvert, mloop)) {
+ int r_edges[3];
+ BKE_mesh_looptri_get_real_edges(mesh, lt, r_edges);
+ for (int j = 0; j < 3; j++) {
+ if (r_edges[j] != -1) {
+ tot_real_edges++;
+ }
+ }
tottri++;
}
}
@@ -355,7 +364,7 @@ GPU_PBVH_Buffers *GPU_pbvh_mesh_buffers_build(const int (*face_vert_indices)[3],
/* Fill the triangle and line buffers. */
GPUIndexBufBuilder elb, elb_lines;
GPU_indexbuf_init(&elb, GPU_PRIM_TRIS, tottri, INT_MAX);
- GPU_indexbuf_init(&elb_lines, GPU_PRIM_LINES, tottri * 3, INT_MAX);
+ GPU_indexbuf_init(&elb_lines, GPU_PRIM_LINES, tot_real_edges, INT_MAX);
for (i = 0; i < face_indices_len; i++) {
const MLoopTri *lt = &looptri[face_indices[i]];
@@ -366,11 +375,18 @@ GPU_PBVH_Buffers *GPU_pbvh_mesh_buffers_build(const int (*face_vert_indices)[3],
}
GPU_indexbuf_add_tri_verts(&elb, UNPACK3(face_vert_indices[i]));
+ int r_edges[3];
+ BKE_mesh_looptri_get_real_edges(mesh, lt, r_edges);
- /* TODO skip "non-real" edges. */
- GPU_indexbuf_add_line_verts(&elb_lines, face_vert_indices[i][0], face_vert_indices[i][1]);
- GPU_indexbuf_add_line_verts(&elb_lines, face_vert_indices[i][1], face_vert_indices[i][2]);
- GPU_indexbuf_add_line_verts(&elb_lines, face_vert_indices[i][2], face_vert_indices[i][0]);
+ if (r_edges[0] != -1) {
+ GPU_indexbuf_add_line_verts(&elb_lines, face_vert_indices[i][0], face_vert_indices[i][1]);
+ }
+ if (r_edges[1] != -1) {
+ GPU_indexbuf_add_line_verts(&elb_lines, face_vert_indices[i][1], face_vert_indices[i][2]);
+ }
+ if (r_edges[2] != -1) {
+ GPU_indexbuf_add_line_verts(&elb_lines, face_vert_indices[i][2], face_vert_indices[i][0]);
+ }
}
buffers->index_buf = GPU_indexbuf_build(&elb);
buffers->index_lines_buf = GPU_indexbuf_build(&elb_lines);
@@ -378,7 +394,7 @@ GPU_PBVH_Buffers *GPU_pbvh_mesh_buffers_build(const int (*face_vert_indices)[3],
else {
/* Fill only the line buffer. */
GPUIndexBufBuilder elb_lines;
- GPU_indexbuf_init(&elb_lines, GPU_PRIM_LINES, tottri * 3, INT_MAX);
+ GPU_indexbuf_init(&elb_lines, GPU_PRIM_LINES, tot_real_edges, INT_MAX);
int vert_idx = 0;
for (i = 0; i < face_indices_len; i++) {
@@ -389,10 +405,18 @@ GPU_PBVH_Buffers *GPU_pbvh_mesh_buffers_build(const int (*face_vert_indices)[3],
continue;
}
- /* TODO skip "non-real" edges. */
- GPU_indexbuf_add_line_verts(&elb_lines, vert_idx * 3 + 0, vert_idx * 3 + 1);
- GPU_indexbuf_add_line_verts(&elb_lines, vert_idx * 3 + 1, vert_idx * 3 + 2);
- GPU_indexbuf_add_line_verts(&elb_lines, vert_idx * 3 + 2, vert_idx * 3 + 0);
+ int r_edges[3];
+ BKE_mesh_looptri_get_real_edges(mesh, lt, r_edges);
+ if (r_edges[0] != -1) {
+ GPU_indexbuf_add_line_verts(&elb_lines, vert_idx * 3 + 0, vert_idx * 3 + 1);
+ }
+ if (r_edges[1] != -1) {
+ GPU_indexbuf_add_line_verts(&elb_lines, vert_idx * 3 + 1, vert_idx * 3 + 2);
+ }
+ if (r_edges[2] != -1) {
+ GPU_indexbuf_add_line_verts(&elb_lines, vert_idx * 3 + 2, vert_idx * 3 + 0);
+ }
+
vert_idx++;
}
buffers->index_lines_buf = GPU_indexbuf_build(&elb_lines);
@@ -729,47 +753,36 @@ GPU_PBVH_Buffers *GPU_pbvh_grid_buffers_build(int totgrid, BLI_bitmap **grid_hid
/** \name BMesh PBVH
* \{ */
-/* Output a BMVert into a VertexBufferFormat array
- *
- * The vertex is skipped if hidden, otherwise the output goes into
- * index '*v_index' in the 'vert_data' array and '*v_index' is
- * incremented.
- */
-static void gpu_bmesh_vert_to_buffer_copy__gwn(BMVert *v,
- GPUVertBuf *vert_buf,
- int *v_index,
- const float fno[3],
- const float *fmask,
- const int cd_vert_mask_offset,
- const bool show_mask,
- const bool show_vcol,
- bool *empty_mask)
+/* Output a BMVert into a VertexBufferFormat array at v_index. */
+static void gpu_bmesh_vert_to_buffer_copy(BMVert *v,
+ GPUVertBuf *vert_buf,
+ int v_index,
+ const float fno[3],
+ const float *fmask,
+ const int cd_vert_mask_offset,
+ const bool show_mask,
+ const bool show_vcol,
+ bool *empty_mask)
{
- if (!BM_elem_flag_test(v, BM_ELEM_HIDDEN)) {
-
- /* Set coord, normal, and mask */
- GPU_vertbuf_attr_set(vert_buf, g_vbo_id.pos, *v_index, v->co);
-
- short no_short[3];
- normal_float_to_short_v3(no_short, fno ? fno : v->no);
- GPU_vertbuf_attr_set(vert_buf, g_vbo_id.nor, *v_index, no_short);
+ /* Vertex should always be visible if it's used by a visible face. */
+ BLI_assert(!BM_elem_flag_test(v, BM_ELEM_HIDDEN));
- if (show_mask) {
- float effective_mask = fmask ? *fmask : BM_ELEM_CD_GET_FLOAT(v, cd_vert_mask_offset);
- GPU_vertbuf_attr_set(vert_buf, g_vbo_id.msk, *v_index, &effective_mask);
- *empty_mask = *empty_mask && (effective_mask == 0.0f);
- }
+ /* Set coord, normal, and mask */
+ GPU_vertbuf_attr_set(vert_buf, g_vbo_id.pos, v_index, v->co);
- if (show_vcol) {
- static char vcol[4] = {255, 255, 255, 255};
- GPU_vertbuf_attr_set(vert_buf, g_vbo_id.col, *v_index, &vcol);
- }
+ short no_short[3];
+ normal_float_to_short_v3(no_short, fno ? fno : v->no);
+ GPU_vertbuf_attr_set(vert_buf, g_vbo_id.nor, v_index, no_short);
- /* Assign index for use in the triangle index buffer */
- /* note: caller must set: bm->elem_index_dirty |= BM_VERT; */
- BM_elem_index_set(v, (*v_index)); /* set_dirty! */
+ if (show_mask) {
+ float effective_mask = fmask ? *fmask : BM_ELEM_CD_GET_FLOAT(v, cd_vert_mask_offset);
+ GPU_vertbuf_attr_set(vert_buf, g_vbo_id.msk, v_index, &effective_mask);
+ *empty_mask = *empty_mask && (effective_mask == 0.0f);
+ }
- (*v_index)++;
+ if (show_vcol) {
+ static char vcol[4] = {255, 255, 255, 255};
+ GPU_vertbuf_attr_set(vert_buf, g_vbo_id.col, v_index, &vcol);
}
}
@@ -839,7 +852,7 @@ void GPU_pbvh_bmesh_buffers_update(GPU_PBVH_Buffers *buffers,
{
const bool show_mask = (update_flags & GPU_PBVH_BUFFERS_SHOW_MASK) != 0;
const bool show_vcol = (update_flags & GPU_PBVH_BUFFERS_SHOW_VCOL) != 0;
- int tottri, totvert, maxvert = 0;
+ int tottri, totvert;
bool empty_mask = true;
BMFace *f = NULL;
@@ -869,135 +882,118 @@ void GPU_pbvh_bmesh_buffers_update(GPU_PBVH_Buffers *buffers,
const int cd_vert_mask_offset = CustomData_get_offset(&bm->vdata, CD_PAINT_MASK);
/* Fill vertex buffer */
- if (gpu_pbvh_vert_buf_data_set(buffers, totvert)) {
- int v_index = 0;
-
- if (buffers->smooth) {
- GSetIterator gs_iter;
-
- /* Vertices get an index assigned for use in the triangle
- * index buffer */
- bm->elem_index_dirty |= BM_VERT;
-
- GSET_ITER (gs_iter, bm_unique_verts) {
- gpu_bmesh_vert_to_buffer_copy__gwn(BLI_gsetIterator_getKey(&gs_iter),
- buffers->vert_buf,
- &v_index,
- NULL,
- NULL,
- cd_vert_mask_offset,
- show_mask,
- show_vcol,
- &empty_mask);
- }
-
- GSET_ITER (gs_iter, bm_other_verts) {
- gpu_bmesh_vert_to_buffer_copy__gwn(BLI_gsetIterator_getKey(&gs_iter),
- buffers->vert_buf,
- &v_index,
- NULL,
- NULL,
- cd_vert_mask_offset,
- show_mask,
- show_vcol,
- &empty_mask);
- }
+ if (!gpu_pbvh_vert_buf_data_set(buffers, totvert)) {
+ /* Memory map failed */
+ return;
+ }
- maxvert = v_index;
- }
- else {
- GSetIterator gs_iter;
+ int v_index = 0;
- GPUIndexBufBuilder elb_lines;
- GPU_indexbuf_init(&elb_lines, GPU_PRIM_LINES, tottri * 3, totvert);
+ if (buffers->smooth) {
+ /* Fill the vertex and triangle buffer in one pass over faces. */
+ GPUIndexBufBuilder elb, elb_lines;
+ GPU_indexbuf_init(&elb, GPU_PRIM_TRIS, tottri, totvert);
+ GPU_indexbuf_init(&elb_lines, GPU_PRIM_LINES, tottri * 3, totvert);
- GSET_ITER (gs_iter, bm_faces) {
- f = BLI_gsetIterator_getKey(&gs_iter);
+ GHash *bm_vert_to_index = BLI_ghash_int_new_ex("bm_vert_to_index", totvert);
- BLI_assert(f->len == 3);
+ GSetIterator gs_iter;
+ GSET_ITER (gs_iter, bm_faces) {
+ f = BLI_gsetIterator_getKey(&gs_iter);
- if (!BM_elem_flag_test(f, BM_ELEM_HIDDEN)) {
- BMVert *v[3];
- float fmask = 0.0f;
- int i;
+ if (!BM_elem_flag_test(f, BM_ELEM_HIDDEN)) {
+ BMVert *v[3];
+ BM_face_as_array_vert_tri(f, v);
- BM_face_as_array_vert_tri(f, v);
+ uint idx[3];
+ for (int i = 0; i < 3; i++) {
+ void **idx_p;
+ if (!BLI_ghash_ensure_p(bm_vert_to_index, v[i], &idx_p)) {
+ /* Add vertex to the vertex buffer each time a new one is encountered */
+ *idx_p = POINTER_FROM_UINT(v_index);
- /* Average mask value */
- for (i = 0; i < 3; i++) {
- fmask += BM_ELEM_CD_GET_FLOAT(v[i], cd_vert_mask_offset);
+ gpu_bmesh_vert_to_buffer_copy(v[i],
+ buffers->vert_buf,
+ v_index,
+ NULL,
+ NULL,
+ cd_vert_mask_offset,
+ show_mask,
+ show_vcol,
+ &empty_mask);
+
+ idx[i] = v_index;
+ v_index++;
}
- fmask /= 3.0f;
-
- GPU_indexbuf_add_line_verts(&elb_lines, v_index + 0, v_index + 1);
- GPU_indexbuf_add_line_verts(&elb_lines, v_index + 1, v_index + 2);
- GPU_indexbuf_add_line_verts(&elb_lines, v_index + 2, v_index + 0);
-
- for (i = 0; i < 3; i++) {
- gpu_bmesh_vert_to_buffer_copy__gwn(v[i],
- buffers->vert_buf,
- &v_index,
- f->no,
- &fmask,
- cd_vert_mask_offset,
- show_mask,
- show_vcol,
- &empty_mask);
+ else {
+ /* Vertex already in the vertex buffer, just get the index. */
+ idx[i] = POINTER_AS_UINT(*idx_p);
}
}
- }
- buffers->index_lines_buf = GPU_indexbuf_build(&elb_lines);
- buffers->tot_tri = tottri;
+ GPU_indexbuf_add_tri_verts(&elb, idx[0], idx[1], idx[2]);
+
+ GPU_indexbuf_add_line_verts(&elb_lines, idx[0], idx[1]);
+ GPU_indexbuf_add_line_verts(&elb_lines, idx[1], idx[2]);
+ GPU_indexbuf_add_line_verts(&elb_lines, idx[2], idx[0]);
+ }
}
- /* gpu_bmesh_vert_to_buffer_copy sets dirty index values */
- bm->elem_index_dirty |= BM_VERT;
+ BLI_ghash_free(bm_vert_to_index, NULL, NULL);
+
+ buffers->tot_tri = tottri;
+ if (buffers->index_buf == NULL) {
+ buffers->index_buf = GPU_indexbuf_build(&elb);
+ }
+ else {
+ GPU_indexbuf_build_in_place(&elb, buffers->index_buf);
+ }
+ buffers->index_lines_buf = GPU_indexbuf_build(&elb_lines);
}
else {
- /* Memory map failed */
- return;
- }
+ GSetIterator gs_iter;
- if (buffers->smooth) {
- /* Fill the triangle buffer */
- GPUIndexBufBuilder elb, elb_lines;
- GPU_indexbuf_init(&elb, GPU_PRIM_TRIS, tottri, maxvert);
- GPU_indexbuf_init(&elb_lines, GPU_PRIM_LINES, tottri * 3, maxvert);
-
- /* Fill triangle index buffer */
- {
- GSetIterator gs_iter;
+ GPUIndexBufBuilder elb_lines;
+ GPU_indexbuf_init(&elb_lines, GPU_PRIM_LINES, tottri * 3, tottri * 3);
- GSET_ITER (gs_iter, bm_faces) {
- f = BLI_gsetIterator_getKey(&gs_iter);
+ GSET_ITER (gs_iter, bm_faces) {
+ f = BLI_gsetIterator_getKey(&gs_iter);
- if (!BM_elem_flag_test(f, BM_ELEM_HIDDEN)) {
- BMVert *v[3];
+ BLI_assert(f->len == 3);
- BM_face_as_array_vert_tri(f, v);
+ if (!BM_elem_flag_test(f, BM_ELEM_HIDDEN)) {
+ BMVert *v[3];
+ float fmask = 0.0f;
+ int i;
- const uint idx[3] = {
- BM_elem_index_get(v[0]), BM_elem_index_get(v[1]), BM_elem_index_get(v[2])};
- GPU_indexbuf_add_tri_verts(&elb, idx[0], idx[1], idx[2]);
+ BM_face_as_array_vert_tri(f, v);
- GPU_indexbuf_add_line_verts(&elb_lines, idx[0], idx[1]);
- GPU_indexbuf_add_line_verts(&elb_lines, idx[1], idx[2]);
- GPU_indexbuf_add_line_verts(&elb_lines, idx[2], idx[0]);
+ /* Average mask value */
+ for (i = 0; i < 3; i++) {
+ fmask += BM_ELEM_CD_GET_FLOAT(v[i], cd_vert_mask_offset);
+ }
+ fmask /= 3.0f;
+
+ GPU_indexbuf_add_line_verts(&elb_lines, v_index + 0, v_index + 1);
+ GPU_indexbuf_add_line_verts(&elb_lines, v_index + 1, v_index + 2);
+ GPU_indexbuf_add_line_verts(&elb_lines, v_index + 2, v_index + 0);
+
+ for (i = 0; i < 3; i++) {
+ gpu_bmesh_vert_to_buffer_copy(v[i],
+ buffers->vert_buf,
+ v_index++,
+ f->no,
+ &fmask,
+ cd_vert_mask_offset,
+ show_mask,
+ show_vcol,
+ &empty_mask);
}
}
-
- buffers->tot_tri = tottri;
-
- if (buffers->index_buf == NULL) {
- buffers->index_buf = GPU_indexbuf_build(&elb);
- }
- else {
- GPU_indexbuf_build_in_place(&elb, buffers->index_buf);
- }
-
- buffers->index_lines_buf = GPU_indexbuf_build(&elb_lines);
}
+
+ buffers->index_lines_buf = GPU_indexbuf_build(&elb_lines);
+ buffers->tot_tri = tottri;
}
/* Get material index from the last face we iterated on. */
@@ -1054,6 +1050,7 @@ void GPU_pbvh_buffers_update_flush(GPU_PBVH_Buffers *buffers)
GPU_BATCH_DISCARD_SAFE(buffers->triangles);
GPU_INDEXBUF_DISCARD_SAFE(buffers->index_buf);
GPU_VERTBUF_DISCARD_SAFE(buffers->vert_buf);
+ buffers->clear_bmesh_on_flush = false;
}
/* Force flushing to the GPU. */
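The BMesh smooth path above now builds the vertex buffer and both index buffers in a single pass over bm_faces, deduplicating vertices through a GHash instead of stamping indices into the BMVert elements (which required dirtying bm->elem_index_dirty). A minimal sketch of the BLI_ghash_ensure_p() idiom used there, assuming the BLI_ghash API as called in the hunk:

/* BLI_ghash_ensure_p() returns true if the key was already present and gives a
 * pointer to the value slot either way, so each BMVert is copied into the
 * vertex buffer exactly once and later faces simply reuse its index. */
static uint vert_index_get_or_add(GHash *bm_vert_to_index, BMVert *v, int *v_index)
{
  void **idx_p;
  if (!BLI_ghash_ensure_p(bm_vert_to_index, v, &idx_p)) {
    /* First time this vertex is seen: append it and remember its slot. */
    *idx_p = POINTER_FROM_UINT(*v_index);
    /* gpu_bmesh_vert_to_buffer_copy(v, vert_buf, *v_index, ...); */
    return (uint)(*v_index)++;
  }
  /* Already in the buffer: reuse the stored index. */
  return POINTER_AS_UINT(*idx_p);
}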
diff --git a/source/blender/gpu/intern/gpu_draw.c b/source/blender/gpu/intern/gpu_draw.c
index c9ae6c60293..7fa2eb6424c 100644
--- a/source/blender/gpu/intern/gpu_draw.c
+++ b/source/blender/gpu/intern/gpu_draw.c
@@ -66,6 +66,7 @@
#include "GPU_draw.h"
#include "GPU_extensions.h"
#include "GPU_glew.h"
+#include "GPU_platform.h"
#include "GPU_texture.h"
#include "PIL_time.h"
diff --git a/source/blender/gpu/intern/gpu_element.c b/source/blender/gpu/intern/gpu_element.c
index 166a6236893..518829d1c78 100644
--- a/source/blender/gpu/intern/gpu_element.c
+++ b/source/blender/gpu/intern/gpu_element.c
@@ -243,7 +243,7 @@ GPUIndexBuf *GPU_indexbuf_create_subrange(GPUIndexBuf *elem_src, uint start, uin
{
GPUIndexBuf *elem = MEM_callocN(sizeof(GPUIndexBuf), "GPUIndexBuf");
BLI_assert(elem_src && !elem_src->is_subrange);
- BLI_assert(start + length <= elem_src->index_len);
+ BLI_assert((length == 0) || (start + length <= elem_src->index_len));
#if GPU_TRACK_INDEX_RANGE
elem->index_type = elem_src->index_type;
elem->gl_index_type = elem_src->gl_index_type;
diff --git a/source/blender/gpu/intern/gpu_extensions.c b/source/blender/gpu/intern/gpu_extensions.c
index c6425854ee4..33f918559f7 100644
--- a/source/blender/gpu/intern/gpu_extensions.c
+++ b/source/blender/gpu/intern/gpu_extensions.c
@@ -35,6 +35,7 @@
#include "GPU_framebuffer.h"
#include "GPU_glew.h"
#include "GPU_texture.h"
+#include "GPU_platform.h"
#include "intern/gpu_private.h"
@@ -68,9 +69,6 @@ static struct GPUGlobal {
GLint maxubosize;
GLint maxubobinds;
int samples_color_texture_max;
- eGPUDeviceType device;
- eGPUOSType os;
- eGPUDriverType driver;
float line_width_range[2];
/* workaround for different calculation of dfdy factors on GPUs. Some GPUs/drivers
* calculate dfdy in shader differently when drawing to an offscreen buffer. First
@@ -126,13 +124,6 @@ static void gpu_detect_mip_render_workaround(void)
GPU_texture_free(tex);
}
-/* GPU Types */
-
-bool GPU_type_matches(eGPUDeviceType device, eGPUOSType os, eGPUDriverType driver)
-{
- return (GG.device & device) && (GG.os & os) && (GG.driver & driver);
-}
-
/* GPU Extensions */
int GPU_max_texture_size(void)
@@ -266,11 +257,7 @@ void gpu_extensions_init(void)
const char *renderer = (const char *)glGetString(GL_RENDERER);
const char *version = (const char *)glGetString(GL_VERSION);
- if (strstr(vendor, "ATI") || strstr(vendor, "AMD")) {
- GG.device = GPU_DEVICE_ATI;
- GG.driver = GPU_DRIVER_OFFICIAL;
-
-#ifdef _WIN32
+ if (GPU_type_matches(GPU_DEVICE_ATI, GPU_OS_WIN, GPU_DRIVER_OFFICIAL)) {
if (strstr(version, "4.5.13399") || strstr(version, "4.5.13417") ||
strstr(version, "4.5.13422")) {
/* The renderers include:
@@ -282,75 +269,14 @@ void gpu_extensions_init(void)
GG.unused_fb_slot_workaround = true;
}
-#endif
+ }
-#if defined(__APPLE__)
+ if (GPU_type_matches(GPU_DEVICE_ATI, GPU_OS_MAC, GPU_DRIVER_OFFICIAL)) {
if (strstr(renderer, "AMD Radeon Pro") || strstr(renderer, "AMD Radeon R9") ||
strstr(renderer, "AMD Radeon RX")) {
GG.depth_blitting_workaround = true;
}
-#endif
- }
- else if (strstr(vendor, "NVIDIA")) {
- GG.device = GPU_DEVICE_NVIDIA;
- GG.driver = GPU_DRIVER_OFFICIAL;
- }
- else if (strstr(vendor, "Intel") ||
- /* src/mesa/drivers/dri/intel/intel_context.c */
- strstr(renderer, "Mesa DRI Intel") || strstr(renderer, "Mesa DRI Mobile Intel")) {
- GG.device = GPU_DEVICE_INTEL;
- GG.driver = GPU_DRIVER_OFFICIAL;
-
- if (strstr(renderer, "UHD Graphics") ||
- /* Not UHD but affected by the same bugs. */
- strstr(renderer, "HD Graphics 530") || strstr(renderer, "Kaby Lake GT2")) {
- GG.device |= GPU_DEVICE_INTEL_UHD;
- }
}
- else if ((strstr(renderer, "Mesa DRI R")) ||
- (strstr(renderer, "Radeon") && strstr(vendor, "X.Org")) ||
- (strstr(renderer, "AMD") && strstr(vendor, "X.Org")) ||
- (strstr(renderer, "Gallium ") && strstr(renderer, " on ATI ")) ||
- (strstr(renderer, "Gallium ") && strstr(renderer, " on AMD "))) {
- GG.device = GPU_DEVICE_ATI;
- GG.driver = GPU_DRIVER_OPENSOURCE;
- }
- else if (strstr(renderer, "Nouveau") || strstr(vendor, "nouveau")) {
- GG.device = GPU_DEVICE_NVIDIA;
- GG.driver = GPU_DRIVER_OPENSOURCE;
- }
- else if (strstr(vendor, "Mesa")) {
- GG.device = GPU_DEVICE_SOFTWARE;
- GG.driver = GPU_DRIVER_SOFTWARE;
- }
- else if (strstr(vendor, "Microsoft")) {
- GG.device = GPU_DEVICE_SOFTWARE;
- GG.driver = GPU_DRIVER_SOFTWARE;
- }
- else if (strstr(renderer, "Apple Software Renderer")) {
- GG.device = GPU_DEVICE_SOFTWARE;
- GG.driver = GPU_DRIVER_SOFTWARE;
- }
- else if (strstr(renderer, "llvmpipe")) {
- GG.device = GPU_DEVICE_SOFTWARE;
- GG.driver = GPU_DRIVER_SOFTWARE;
- }
- else {
- printf("Warning: Could not find a matching GPU name. Things may not behave as expected.\n");
- printf("Detected OpenGL configuration:\n");
- printf("Vendor: %s\n", vendor);
- printf("Renderer: %s\n", renderer);
- GG.device = GPU_DEVICE_ANY;
- GG.driver = GPU_DRIVER_ANY;
- }
-
-#ifdef _WIN32
- GG.os = GPU_OS_WIN;
-#elif defined(__APPLE__)
- GG.os = GPU_OS_MAC;
-#else
- GG.os = GPU_OS_UNIX;
-#endif
GG.glew_arb_base_instance_is_supported = GLEW_ARB_base_instance;
gpu_detect_mip_render_workaround();
@@ -372,11 +298,12 @@ void gpu_extensions_init(void)
GG.dfdyfactors[0] = 1.0;
GG.dfdyfactors[1] = 1.0;
- if ((strstr(vendor, "ATI") && strstr(version, "3.3.10750"))) {
+ if (GPU_type_matches(GPU_DEVICE_ATI, GPU_OS_ANY, GPU_DRIVER_ANY) &&
+ strstr(version, "3.3.10750")) {
GG.dfdyfactors[0] = 1.0;
GG.dfdyfactors[1] = -1.0;
}
- else if ((GG.device == GPU_DEVICE_INTEL) && (GG.os == GPU_OS_WIN)) {
+ else if (GPU_type_matches(GPU_DEVICE_INTEL, GPU_OS_WIN, GPU_DRIVER_ANY)) {
if (strstr(version, "4.0.0 - Build 10.18.10.3308") ||
strstr(version, "4.0.0 - Build 9.18.10.3186") ||
strstr(version, "4.0.0 - Build 9.18.10.3165") ||
@@ -401,8 +328,7 @@ void gpu_extensions_init(void)
GG.context_local_shaders_workaround = true;
}
}
- else if ((GG.device == GPU_DEVICE_ATI) && (GG.os == GPU_OS_UNIX) &&
- (GG.driver == GPU_DRIVER_OPENSOURCE)) {
+ else if (GPU_type_matches(GPU_DEVICE_ATI, GPU_OS_UNIX, GPU_DRIVER_OPENSOURCE)) {
/* See T70187: merging vertices fails. This has been tested from 18.2.2 to 19.3.0~dev of the
* Mesa driver */
GG.unused_fb_slot_workaround = true;
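With device/OS/driver detection moved out of this file, vendor workarounds in gpu_extensions.c are expressed purely through GPU_type_matches() instead of string-matching GL_VENDOR locally. A hypothetical example of how a new workaround check would read after this refactor (the flag name is made up for illustration and is not an existing field of GG):

/* Hypothetical workaround check after the refactor. */
if (GPU_type_matches(GPU_DEVICE_INTEL_UHD, GPU_OS_WIN, GPU_DRIVER_OFFICIAL)) {
  GG.some_workaround = true; /* illustrative flag only */
}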
diff --git a/source/blender/gpu/intern/gpu_init_exit.c b/source/blender/gpu/intern/gpu_init_exit.c
index 0009e7d8c47..7b6016e11cb 100644
--- a/source/blender/gpu/intern/gpu_init_exit.c
+++ b/source/blender/gpu/intern/gpu_init_exit.c
@@ -46,7 +46,7 @@ void GPU_init(void)
}
initialized = true;
-
+ gpu_platform_init();
gpu_extensions_init(); /* must come first */
gpu_codegen_init();
@@ -82,7 +82,8 @@ void GPU_exit(void)
gpu_framebuffer_module_exit();
gpu_codegen_exit();
- gpu_extensions_exit(); /* must come last */
+ gpu_extensions_exit();
+ gpu_platform_exit(); /* must come last */
initialized = false;
}
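The resulting ordering, sketched below: platform detection has to run before gpu_extensions_init(), which now calls GPU_type_matches(), and it is torn down last so the detected strings stay valid for the whole lifetime of the GPU module.

void GPU_init(void)
{
  gpu_platform_init();   /* Detect device/OS/driver first. */
  gpu_extensions_init(); /* May query GPU_type_matches(). */
  /* ... remaining module init ... */
}

void GPU_exit(void)
{
  /* ... remaining module exit ... */
  gpu_extensions_exit();
  gpu_platform_exit(); /* Must come last. */
}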
diff --git a/source/blender/gpu/intern/gpu_platform.c b/source/blender/gpu/intern/gpu_platform.c
new file mode 100644
index 00000000000..871052bb070
--- /dev/null
+++ b/source/blender/gpu/intern/gpu_platform.c
@@ -0,0 +1,229 @@
+/*
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * The Original Code is Copyright (C) 2005 Blender Foundation.
+ * All rights reserved.
+ */
+
+/** \file
+ * \ingroup gpu
+ *
+ * Wrap OpenGL features such as textures, shaders and GLSL
+ * with checks for drivers and GPU support.
+ */
+#include "GPU_platform.h"
+#include "GPU_glew.h"
+#include "gpu_private.h"
+
+#include <string.h>
+
+#include "BLI_dynstr.h"
+#include "BLI_string.h"
+
+#include "MEM_guardedalloc.h"
+
+static struct GPUPlatformGlobal {
+ bool initialized;
+ eGPUDeviceType device;
+ eGPUOSType os;
+ eGPUDriverType driver;
+ eGPUSupportLevel support_level;
+ char *support_key;
+ char *gpu_name;
+} GPG = {false};
+
+typedef struct GPUPlatformSupportTest {
+ eGPUSupportLevel support_level;
+ eGPUDeviceType device;
+ eGPUOSType os;
+ eGPUDriverType driver;
+ const char *vendor;
+ const char *renderer;
+ const char *version;
+} GPUPlatformSupportTest;
+
+eGPUSupportLevel GPU_platform_support_level(void)
+{
+ return GPG.support_level;
+}
+
+const char *GPU_platform_support_level_key(void)
+{
+ return GPG.support_key;
+}
+
+const char *GPU_platform_gpu_name(void)
+{
+ return GPG.gpu_name;
+}
+
+/* GPU Types */
+bool GPU_type_matches(eGPUDeviceType device, eGPUOSType os, eGPUDriverType driver)
+{
+ return (GPG.device & device) && (GPG.os & os) && (GPG.driver & driver);
+}
+
+static char *gpu_platform_create_key(eGPUSupportLevel support_level,
+ const char *vendor,
+ const char *renderer,
+ const char *version)
+{
+ DynStr *ds = BLI_dynstr_new();
+ BLI_dynstr_append(ds, "{");
+ BLI_dynstr_append(ds, vendor);
+ BLI_dynstr_append(ds, "/");
+ BLI_dynstr_append(ds, renderer);
+ BLI_dynstr_append(ds, "/");
+ BLI_dynstr_append(ds, version);
+ BLI_dynstr_append(ds, "}");
+ BLI_dynstr_append(ds, "=");
+ if (support_level == GPU_SUPPORT_LEVEL_SUPPORTED) {
+ BLI_dynstr_append(ds, "SUPPORTED");
+ }
+ else if (support_level == GPU_SUPPORT_LEVEL_LIMITED) {
+ BLI_dynstr_append(ds, "LIMITED");
+ }
+ else {
+ BLI_dynstr_append(ds, "UNSUPPORTED");
+ }
+
+ char *support_key = BLI_dynstr_get_cstring(ds);
+ BLI_dynstr_free(ds);
+ BLI_str_replace_char(support_key, '\n', ' ');
+ BLI_str_replace_char(support_key, '\r', ' ');
+ return support_key;
+}
+
+static char *gpu_platform_create_gpu_name(const char *vendor,
+ const char *renderer,
+ const char *version)
+{
+ DynStr *ds = BLI_dynstr_new();
+ BLI_dynstr_append(ds, vendor);
+ BLI_dynstr_append(ds, " ");
+ BLI_dynstr_append(ds, renderer);
+ BLI_dynstr_append(ds, " ");
+ BLI_dynstr_append(ds, version);
+
+ char *gpu_name = BLI_dynstr_get_cstring(ds);
+ BLI_dynstr_free(ds);
+ BLI_str_replace_char(gpu_name, '\n', ' ');
+ BLI_str_replace_char(gpu_name, '\r', ' ');
+ return gpu_name;
+}
+
+void gpu_platform_init(void)
+{
+ if (GPG.initialized) {
+ return;
+ }
+
+#ifdef _WIN32
+ GPG.os = GPU_OS_WIN;
+#elif defined(__APPLE__)
+ GPG.os = GPU_OS_MAC;
+#else
+ GPG.os = GPU_OS_UNIX;
+#endif
+
+ const char *vendor = (const char *)glGetString(GL_VENDOR);
+ const char *renderer = (const char *)glGetString(GL_RENDERER);
+ const char *version = (const char *)glGetString(GL_VERSION);
+
+ if (strstr(vendor, "ATI") || strstr(vendor, "AMD")) {
+ GPG.device = GPU_DEVICE_ATI;
+ GPG.driver = GPU_DRIVER_OFFICIAL;
+ }
+ else if (strstr(vendor, "NVIDIA")) {
+ GPG.device = GPU_DEVICE_NVIDIA;
+ GPG.driver = GPU_DRIVER_OFFICIAL;
+ }
+ else if (strstr(vendor, "Intel") ||
+ /* src/mesa/drivers/dri/intel/intel_context.c */
+ strstr(renderer, "Mesa DRI Intel") || strstr(renderer, "Mesa DRI Mobile Intel")) {
+ GPG.device = GPU_DEVICE_INTEL;
+ GPG.driver = GPU_DRIVER_OFFICIAL;
+
+ if (strstr(renderer, "UHD Graphics") ||
+ /* Not UHD but affected by the same bugs. */
+ strstr(renderer, "HD Graphics 530") || strstr(renderer, "Kaby Lake GT2")) {
+ GPG.device |= GPU_DEVICE_INTEL_UHD;
+ }
+ }
+ else if ((strstr(renderer, "Mesa DRI R")) ||
+ (strstr(renderer, "Radeon") && strstr(vendor, "X.Org")) ||
+ (strstr(renderer, "AMD") && strstr(vendor, "X.Org")) ||
+ (strstr(renderer, "Gallium ") && strstr(renderer, " on ATI ")) ||
+ (strstr(renderer, "Gallium ") && strstr(renderer, " on AMD "))) {
+ GPG.device = GPU_DEVICE_ATI;
+ GPG.driver = GPU_DRIVER_OPENSOURCE;
+ }
+ else if (strstr(renderer, "Nouveau") || strstr(vendor, "nouveau")) {
+ GPG.device = GPU_DEVICE_NVIDIA;
+ GPG.driver = GPU_DRIVER_OPENSOURCE;
+ }
+ else if (strstr(vendor, "Mesa")) {
+ GPG.device = GPU_DEVICE_SOFTWARE;
+ GPG.driver = GPU_DRIVER_SOFTWARE;
+ }
+ else if (strstr(vendor, "Microsoft")) {
+ GPG.device = GPU_DEVICE_SOFTWARE;
+ GPG.driver = GPU_DRIVER_SOFTWARE;
+ }
+ else if (strstr(renderer, "Apple Software Renderer")) {
+ GPG.device = GPU_DEVICE_SOFTWARE;
+ GPG.driver = GPU_DRIVER_SOFTWARE;
+ }
+ else if (strstr(renderer, "llvmpipe") || strstr(renderer, "softpipe")) {
+ GPG.device = GPU_DEVICE_SOFTWARE;
+ GPG.driver = GPU_DRIVER_SOFTWARE;
+ }
+ else {
+ printf("Warning: Could not find a matching GPU name. Things may not behave as expected.\n");
+ printf("Detected OpenGL configuration:\n");
+ printf("Vendor: %s\n", vendor);
+ printf("Renderer: %s\n", renderer);
+ GPG.device = GPU_DEVICE_ANY;
+ GPG.driver = GPU_DRIVER_ANY;
+ }
+
+ /* Detect support level */
+ if (!GLEW_VERSION_3_3) {
+ GPG.support_level = GPU_SUPPORT_LEVEL_UNSUPPORTED;
+ }
+ else {
+ if (GPU_type_matches(GPU_DEVICE_INTEL, GPU_OS_WIN, GPU_DRIVER_ANY)) {
+ /* Old Intel drivers with known bugs that cause material properties to crash.
+ * Version Build 10.18.14.5067 is the latest available and appears to be working
+ * ok with our workarounds, so excluded from this list. */
+ if (strstr(version, "Build 7.14") || strstr(version, "Build 7.15") ||
+ strstr(version, "Build 8.15") || strstr(version, "Build 9.17") ||
+ strstr(version, "Build 9.18") || strstr(version, "Build 10.18.10.3") ||
+ strstr(version, "Build 10.18.10.4") || strstr(version, "Build 10.18.10.5") ||
+ strstr(version, "Build 10.18.14.4")) {
+ GPG.support_level = GPU_SUPPORT_LEVEL_LIMITED;
+ }
+ }
+ }
+ GPG.support_key = gpu_platform_create_key(GPG.support_level, vendor, renderer, version);
+ GPG.gpu_name = gpu_platform_create_gpu_name(vendor, renderer, version);
+ GPG.initialized = true;
+}
+
+void gpu_platform_exit(void)
+{
+ MEM_SAFE_FREE(GPG.support_key);
+ MEM_SAFE_FREE(GPG.gpu_name);
+}
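A usage sketch for the public API added by this file, assuming the declarations in GPU_platform.h (the eGPUSupportLevel values and the GPU_DEVICE_* / GPU_OS_* / GPU_DRIVER_* flags used above). Because device, OS and driver are bitflags, the *_ANY values match every configuration and GPU_type_matches() tests all three fields at once:

#include <stdio.h>
#include "GPU_platform.h"

static void report_platform(void)
{
  printf("GPU: %s\n", GPU_platform_gpu_name());
  printf("Support key: %s\n", GPU_platform_support_level_key());

  if (GPU_platform_support_level() == GPU_SUPPORT_LEVEL_UNSUPPORTED) {
    /* e.g. OpenGL < 3.3: the caller can warn the user or fall back. */
    return;
  }
  if (GPU_type_matches(GPU_DEVICE_ATI, GPU_OS_ANY, GPU_DRIVER_OPENSOURCE)) {
    /* Any AMD GPU on an open-source (Mesa) driver, on any OS. */
  }
}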
diff --git a/source/blender/gpu/intern/gpu_private.h b/source/blender/gpu/intern/gpu_private.h
index b9af8f1b38c..7846bff87f4 100644
--- a/source/blender/gpu/intern/gpu_private.h
+++ b/source/blender/gpu/intern/gpu_private.h
@@ -22,6 +22,10 @@
#define __GPU_PRIVATE_H__
/* call this before running any of the functions below */
+void gpu_platform_init(void);
+void gpu_platform_exit(void);
+
+/* call this before running any of the functions below */
void gpu_extensions_init(void);
void gpu_extensions_exit(void);
diff --git a/source/blender/gpu/intern/gpu_shader.c b/source/blender/gpu/intern/gpu_shader.c
index 015df078228..5df73d1a0c6 100644
--- a/source/blender/gpu/intern/gpu_shader.c
+++ b/source/blender/gpu/intern/gpu_shader.c
@@ -36,6 +36,7 @@
#include "DNA_space_types.h"
#include "GPU_extensions.h"
+#include "GPU_platform.h"
#include "GPU_matrix.h"
#include "GPU_shader.h"
#include "GPU_texture.h"
diff --git a/source/blender/gpu/intern/gpu_texture.c b/source/blender/gpu/intern/gpu_texture.c
index a54d90f37f5..497fc13a2c8 100644
--- a/source/blender/gpu/intern/gpu_texture.c
+++ b/source/blender/gpu/intern/gpu_texture.c
@@ -38,6 +38,7 @@
#include "GPU_extensions.h"
#include "GPU_glew.h"
#include "GPU_framebuffer.h"
+#include "GPU_platform.h"
#include "GPU_texture.h"
#include "gpu_context_private.h"