From b4d053efc75424fca4b413ac1bc7a7e826fac629 Mon Sep 17 00:00:00 2001 From: Campbell Barton Date: Mon, 19 Jun 2017 20:18:04 +1000 Subject: Gawain API naming refactor Use consistent prefix for gawain API names as well as some abbreviations to avoid over-long names, see: D2678 --- source/blender/gpu/intern/gpu_batch.c | 60 ++++----- source/blender/gpu/intern/gpu_buffers.c | 180 ++++++++++++------------- source/blender/gpu/intern/gpu_codegen.c | 2 +- source/blender/gpu/intern/gpu_compositing.c | 84 ++++++------ source/blender/gpu/intern/gpu_draw.c | 100 +++++++------- source/blender/gpu/intern/gpu_framebuffer.c | 36 ++--- source/blender/gpu/intern/gpu_immediate_util.c | 30 ++--- source/blender/gpu/intern/gpu_matrix.c | 14 +- source/blender/gpu/intern/gpu_shader.c | 16 +-- source/blender/gpu/intern/gpu_shader_private.h | 2 +- source/blender/gpu/intern/gpu_viewport.c | 16 +-- 11 files changed, 270 insertions(+), 270 deletions(-) (limited to 'source/blender/gpu/intern') diff --git a/source/blender/gpu/intern/gpu_batch.c b/source/blender/gpu/intern/gpu_batch.c index e98f07cb0e9..92bab13dcb5 100644 --- a/source/blender/gpu/intern/gpu_batch.c +++ b/source/blender/gpu/intern/gpu_batch.c @@ -31,20 +31,20 @@ #include "GPU_batch.h" #include "gpu_shader_private.h" -void Batch_set_builtin_program(Batch *batch, GPUBuiltinShader shader_id) +void Batch_set_builtin_program(Gwn_Batch *batch, GPUBuiltinShader shader_id) { GPUShader *shader = GPU_shader_get_builtin_shader(shader_id); - Batch_set_program(batch, shader->program, shader->interface); + GWN_batch_program_set(batch, shader->program, shader->interface); } -static Batch *sphere_high = NULL; -static Batch *sphere_med = NULL; -static Batch *sphere_low = NULL; -static Batch *sphere_wire_low = NULL; -static Batch *sphere_wire_med = NULL; +static Gwn_Batch *sphere_high = NULL; +static Gwn_Batch *sphere_med = NULL; +static Gwn_Batch *sphere_low = NULL; +static Gwn_Batch *sphere_wire_low = NULL; +static Gwn_Batch *sphere_wire_med = NULL; -static VertexBuffer *vbo; -static VertexFormat format = {0}; +static Gwn_VertBuf *vbo; +static Gwn_VertFormat format = {0}; static unsigned int pos_id, nor_id; static unsigned int vert; @@ -55,24 +55,24 @@ static void batch_sphere_lat_lon_vert(float lat, float lon) pos[1] = cosf(lat); pos[2] = sinf(lat) * sinf(lon); - VertexBuffer_set_attrib(vbo, nor_id, vert, pos); - VertexBuffer_set_attrib(vbo, pos_id, vert++, pos); + GWN_vertbuf_attr_set(vbo, nor_id, vert, pos); + GWN_vertbuf_attr_set(vbo, pos_id, vert++, pos); } /* Replacement for gluSphere */ -static Batch *batch_sphere(int lat_res, int lon_res) +static Gwn_Batch *batch_sphere(int lat_res, int lon_res) { const float lon_inc = 2 * M_PI / lon_res; const float lat_inc = M_PI / lat_res; float lon, lat; if (format.attrib_ct == 0) { - pos_id = VertexFormat_add_attrib(&format, "pos", COMP_F32, 3, KEEP_FLOAT); - nor_id = VertexFormat_add_attrib(&format, "nor", COMP_F32, 3, KEEP_FLOAT); + pos_id = GWN_vertformat_attr_add(&format, "pos", GWN_COMP_F32, 3, GWN_FETCH_FLOAT); + nor_id = GWN_vertformat_attr_add(&format, "nor", GWN_COMP_F32, 3, GWN_FETCH_FLOAT); } - vbo = VertexBuffer_create_with_format(&format); - VertexBuffer_allocate_data(vbo, (lat_res - 1) * lon_res * 6); + vbo = GWN_vertbuf_create_with_format(&format); + GWN_vertbuf_data_alloc(vbo, (lat_res - 1) * lon_res * 6); vert = 0; lon = 0.0f; @@ -93,22 +93,22 @@ static Batch *batch_sphere(int lat_res, int lon_res) } } - return Batch_create(PRIM_TRIANGLES, vbo, NULL); + return GWN_batch_create(GWN_PRIM_TRIS, vbo, NULL); } 
-static Batch *batch_sphere_wire(int lat_res, int lon_res) +static Gwn_Batch *batch_sphere_wire(int lat_res, int lon_res) { const float lon_inc = 2 * M_PI / lon_res; const float lat_inc = M_PI / lat_res; float lon, lat; if (format.attrib_ct == 0) { - pos_id = VertexFormat_add_attrib(&format, "pos", COMP_F32, 3, KEEP_FLOAT); - nor_id = VertexFormat_add_attrib(&format, "nor", COMP_F32, 3, KEEP_FLOAT); + pos_id = GWN_vertformat_attr_add(&format, "pos", GWN_COMP_F32, 3, GWN_FETCH_FLOAT); + nor_id = GWN_vertformat_attr_add(&format, "nor", GWN_COMP_F32, 3, GWN_FETCH_FLOAT); } - vbo = VertexBuffer_create_with_format(&format); - VertexBuffer_allocate_data(vbo, (lat_res * lon_res * 2) + ((lat_res - 1) * lon_res * 2)); + vbo = GWN_vertbuf_create_with_format(&format); + GWN_vertbuf_data_alloc(vbo, (lat_res * lon_res * 2) + ((lat_res - 1) * lon_res * 2)); vert = 0; lon = 0.0f; @@ -125,10 +125,10 @@ static Batch *batch_sphere_wire(int lat_res, int lon_res) } } - return Batch_create(PRIM_LINES, vbo, NULL); + return GWN_batch_create(GWN_PRIM_LINES, vbo, NULL); } -Batch *Batch_get_sphere(int lod) +Gwn_Batch *Batch_get_sphere(int lod) { BLI_assert(lod >= 0 && lod <= 2); @@ -140,7 +140,7 @@ Batch *Batch_get_sphere(int lod) return sphere_high; } -Batch *Batch_get_sphere_wire(int lod) +Gwn_Batch *Batch_get_sphere_wire(int lod) { BLI_assert(lod >= 0 && lod <= 1); @@ -163,9 +163,9 @@ void gpu_batch_init(void) void gpu_batch_exit(void) { - Batch_discard_all(sphere_low); - Batch_discard_all(sphere_med); - Batch_discard_all(sphere_high); - Batch_discard_all(sphere_wire_low); - Batch_discard_all(sphere_wire_med); + GWN_batch_discard_all(sphere_low); + GWN_batch_discard_all(sphere_med); + GWN_batch_discard_all(sphere_high); + GWN_batch_discard_all(sphere_wire_low); + GWN_batch_discard_all(sphere_wire_med); } diff --git a/source/blender/gpu/intern/gpu_buffers.c b/source/blender/gpu/intern/gpu_buffers.c index 6281d0a3b67..fb2e271f9a2 100644 --- a/source/blender/gpu/intern/gpu_buffers.c +++ b/source/blender/gpu/intern/gpu_buffers.c @@ -108,7 +108,7 @@ static ThreadMutex buffer_mutex = BLI_MUTEX_INITIALIZER; /* multires global buffer, can be used for many grids having the same grid size */ typedef struct GridCommonGPUBuffer { - ElementList *mres_buffer; + Gwn_IndexBuf *mres_buffer; int mres_prev_gridsize; unsigned mres_prev_totquad; } GridCommonGPUBuffer; @@ -945,11 +945,11 @@ void GPU_buffer_draw_elements(GPUBuffer *UNUSED(elements), unsigned int mode, in * drawing and doesn't interact at all with the buffer code above */ struct GPU_PBVH_Buffers { - ElementList *index_buf, *index_buf_fast; - VertexBuffer *vert_buf; + Gwn_IndexBuf *index_buf, *index_buf_fast; + Gwn_VertBuf *vert_buf; - Batch *triangles; - Batch *triangles_fast; + Gwn_Batch *triangles; + Gwn_Batch *triangles_fast; /* mesh pointers in case buffer allocation fails */ const MPoly *mpoly; @@ -988,30 +988,30 @@ typedef struct { uint pos, nor, col; } VertexBufferAttrID; -static void gpu_pbvh_vert_format_init__gwn(VertexFormat *format, VertexBufferAttrID *vbo_id) +static void gpu_pbvh_vert_format_init__gwn(Gwn_VertFormat *format, VertexBufferAttrID *vbo_id) { - vbo_id->pos = VertexFormat_add_attrib(format, "pos", COMP_F32, 3, KEEP_FLOAT); - vbo_id->nor = VertexFormat_add_attrib(format, "nor", COMP_I16, 3, NORMALIZE_INT_TO_FLOAT); - vbo_id->col = VertexFormat_add_attrib(format, "color", COMP_U8, 3, NORMALIZE_INT_TO_FLOAT); + vbo_id->pos = GWN_vertformat_attr_add(format, "pos", GWN_COMP_F32, 3, GWN_FETCH_FLOAT); + vbo_id->nor = GWN_vertformat_attr_add(format, 
"nor", GWN_COMP_I16, 3, GWN_FETCH_INT_TO_FLOAT_UNIT); + vbo_id->col = GWN_vertformat_attr_add(format, "color", GWN_COMP_U8, 3, GWN_FETCH_INT_TO_FLOAT_UNIT); } static void gpu_pbvh_batch_init(GPU_PBVH_Buffers *buffers) { /* force flushing to the GPU */ if (buffers->vert_buf->data) { - VertexBuffer_use(buffers->vert_buf); + GWN_vertbuf_use(buffers->vert_buf); } - BATCH_DISCARD_SAFE(buffers->triangles); - buffers->triangles = Batch_create( - PRIM_TRIANGLES, buffers->vert_buf, + GWN_BATCH_DISCARD_SAFE(buffers->triangles); + buffers->triangles = GWN_batch_create( + GWN_PRIM_TRIS, buffers->vert_buf, /* can be NULL */ buffers->index_buf); - BATCH_DISCARD_SAFE(buffers->triangles_fast); + GWN_BATCH_DISCARD_SAFE(buffers->triangles_fast); if (buffers->index_buf_fast) { - buffers->triangles_fast = Batch_create( - PRIM_TRIANGLES, buffers->vert_buf, + buffers->triangles_fast = GWN_batch_create( + GWN_PRIM_TRIS, buffers->vert_buf, /* can be NULL */ buffers->index_buf_fast); } @@ -1078,15 +1078,15 @@ void GPU_pbvh_mesh_buffers_update( rgba_float_to_uchar(diffuse_color_ub, diffuse_color); /* Build VBO */ - VERTEXBUFFER_DISCARD_SAFE(buffers->vert_buf); + GWN_VERTBUF_DISCARD_SAFE(buffers->vert_buf); /* match 'VertexBufferFormat' */ - VertexFormat format = {0}; + Gwn_VertFormat format = {0}; VertexBufferAttrID vbo_id; gpu_pbvh_vert_format_init__gwn(&format, &vbo_id); - buffers->vert_buf = VertexBuffer_create_with_format(&format); - VertexBuffer_allocate_data(buffers->vert_buf, totelem); + buffers->vert_buf = GWN_vertbuf_create_with_format(&format); + GWN_vertbuf_data_alloc(buffers->vert_buf, totelem); if (buffers->vert_buf->data) { /* Vertex data is shared if smooth-shaded, but separate @@ -1095,8 +1095,8 @@ void GPU_pbvh_mesh_buffers_update( if (buffers->smooth) { for (uint i = 0; i < totvert; ++i) { const MVert *v = &mvert[vert_indices[i]]; - VertexBuffer_set_attrib(buffers->vert_buf, vbo_id.pos, i, v->co); - VertexBuffer_set_attrib(buffers->vert_buf, vbo_id.nor, i, v->no); + GWN_vertbuf_attr_set(buffers->vert_buf, vbo_id.pos, i, v->co); + GWN_vertbuf_attr_set(buffers->vert_buf, vbo_id.nor, i, v->no); } for (uint i = 0; i < buffers->face_indices_len; i++) { @@ -1107,10 +1107,10 @@ void GPU_pbvh_mesh_buffers_update( int v_index = buffers->mloop[lt->tri[j]].v; uchar color_ub[3]; gpu_color_from_mask_copy(vmask[v_index], diffuse_color, color_ub); - VertexBuffer_set_attrib(buffers->vert_buf, vbo_id.col, vidx, color_ub); + GWN_vertbuf_attr_set(buffers->vert_buf, vbo_id.col, vidx, color_ub); } else { - VertexBuffer_set_attrib(buffers->vert_buf, vbo_id.col, vidx, diffuse_color_ub); + GWN_vertbuf_attr_set(buffers->vert_buf, vbo_id.col, vidx, diffuse_color_ub); } } } @@ -1153,9 +1153,9 @@ void GPU_pbvh_mesh_buffers_update( for (uint j = 0; j < 3; j++) { const MVert *v = &mvert[vtri[j]]; - VertexBuffer_set_attrib(buffers->vert_buf, vbo_id.pos, vbo_index, v->co); - VertexBuffer_set_attrib(buffers->vert_buf, vbo_id.nor, vbo_index, no); - VertexBuffer_set_attrib(buffers->vert_buf, vbo_id.col, vbo_index, color_ub); + GWN_vertbuf_attr_set(buffers->vert_buf, vbo_id.pos, vbo_index, v->co); + GWN_vertbuf_attr_set(buffers->vert_buf, vbo_id.nor, vbo_index, no); + GWN_vertbuf_attr_set(buffers->vert_buf, vbo_id.col, vbo_index, color_ub); vbo_index++; } @@ -1165,7 +1165,7 @@ void GPU_pbvh_mesh_buffers_update( gpu_pbvh_batch_init(buffers); } else { - VERTEXBUFFER_DISCARD_SAFE(buffers->vert_buf); + GWN_VERTBUF_DISCARD_SAFE(buffers->vert_buf); } } @@ -1220,8 +1220,8 @@ GPU_PBVH_Buffers *GPU_pbvh_mesh_buffers_build( if 
(buffers->smooth) { /* Fill the triangle buffer */ buffers->index_buf = NULL; - ElementListBuilder elb; - ElementListBuilder_init(&elb, PRIM_TRIANGLES, tottri, INT_MAX); + Gwn_IndexBufBuilder elb; + GWN_indexbuf_init(&elb, GWN_PRIM_TRIS, tottri, INT_MAX); for (i = 0; i < face_indices_len; ++i) { const MLoopTri *lt = &looptri[face_indices[i]]; @@ -1230,13 +1230,13 @@ GPU_PBVH_Buffers *GPU_pbvh_mesh_buffers_build( if (paint_is_face_hidden(lt, mvert, mloop)) continue; - add_triangle_vertices(&elb, UNPACK3(face_vert_indices[i])); + GWN_indexbuf_add_tri_verts(&elb, UNPACK3(face_vert_indices[i])); } - buffers->index_buf = ElementList_build(&elb); + buffers->index_buf = GWN_indexbuf_build(&elb); } else { if (!buffers->is_index_buf_global) { - ELEMENTLIST_DISCARD_SAFE(buffers->index_buf); + GWN_INDEXBUF_DISCARD_SAFE(buffers->index_buf); } buffers->index_buf = NULL; buffers->is_index_buf_global = false; @@ -1281,14 +1281,14 @@ void GPU_pbvh_grid_buffers_update( copy_v4_v4(buffers->diffuse_color, diffuse_color); - VertexFormat format = {0}; + Gwn_VertFormat format = {0}; VertexBufferAttrID vbo_id; gpu_pbvh_vert_format_init__gwn(&format, &vbo_id); /* Build coord/normal VBO */ - VERTEXBUFFER_DISCARD_SAFE(buffers->vert_buf); - buffers->vert_buf = VertexBuffer_create_with_format(&format); - VertexBuffer_allocate_data(buffers->vert_buf, totgrid * key->grid_area); + GWN_VERTBUF_DISCARD_SAFE(buffers->vert_buf); + buffers->vert_buf = GWN_vertbuf_create_with_format(&format); + GWN_vertbuf_data_alloc(buffers->vert_buf, totgrid * key->grid_area); uint vbo_index_offset = 0; if (buffers->vert_buf->data) { @@ -1299,18 +1299,18 @@ void GPU_pbvh_grid_buffers_update( for (y = 0; y < key->grid_size; y++) { for (x = 0; x < key->grid_size; x++) { CCGElem *elem = CCG_grid_elem(key, grid, x, y); - VertexBuffer_set_attrib(buffers->vert_buf, vbo_id.pos, vbo_index, CCG_elem_co(key, elem)); + GWN_vertbuf_attr_set(buffers->vert_buf, vbo_id.pos, vbo_index, CCG_elem_co(key, elem)); if (buffers->smooth) { short no_short[3]; normal_float_to_short_v3(no_short, CCG_elem_no(key, elem)); - VertexBuffer_set_attrib(buffers->vert_buf, vbo_id.nor, vbo_index, no_short); + GWN_vertbuf_attr_set(buffers->vert_buf, vbo_id.nor, vbo_index, no_short); if (has_mask) { uchar color_ub[3]; gpu_color_from_mask_copy(*CCG_elem_mask(key, elem), diffuse_color, color_ub); - VertexBuffer_set_attrib(buffers->vert_buf, vbo_id.col, vbo_index, color_ub); + GWN_vertbuf_attr_set(buffers->vert_buf, vbo_id.col, vbo_index, color_ub); } } vbo_index += 1; @@ -1337,7 +1337,7 @@ void GPU_pbvh_grid_buffers_update( vbo_index = vbo_index_offset + ((j + 1) * key->grid_size + k); short no_short[3]; normal_float_to_short_v3(no_short, fno); - VertexBuffer_set_attrib(buffers->vert_buf, vbo_id.nor, vbo_index, no_short); + GWN_vertbuf_attr_set(buffers->vert_buf, vbo_id.nor, vbo_index, no_short); if (has_mask) { uchar color_ub[3]; @@ -1348,7 +1348,7 @@ void GPU_pbvh_grid_buffers_update( elems[3], diffuse_color, color_ub); - VertexBuffer_set_attrib(buffers->vert_buf, vbo_id.col, vbo_index, color_ub); + GWN_vertbuf_attr_set(buffers->vert_buf, vbo_id.col, vbo_index, color_ub); } } } @@ -1360,7 +1360,7 @@ void GPU_pbvh_grid_buffers_update( gpu_pbvh_batch_init(buffers); } else { - VERTEXBUFFER_DISCARD_SAFE(buffers->vert_buf); + GWN_VERTBUF_DISCARD_SAFE(buffers->vert_buf); } } @@ -1380,9 +1380,9 @@ void GPU_pbvh_grid_buffers_update( int offset = 0; \ int i, j, k; \ \ - ElementListBuilder elb; \ - ElementListBuilder_init( \ - &elb, PRIM_TRIANGLES, tot_quad_ * 2, max_vert_); \ + 
Gwn_IndexBufBuilder elb; \ + GWN_indexbuf_init( \ + &elb, GWN_PRIM_TRIS, tot_quad_ * 2, max_vert_); \ \ /* Fill the buffer */ \ for (i = 0; i < totgrid; ++i) { \ @@ -1398,23 +1398,23 @@ void GPU_pbvh_grid_buffers_update( { \ continue; \ } \ - add_generic_vertex(&elb, offset + j * gridsize + k + 1); \ - add_generic_vertex(&elb, offset + j * gridsize + k); \ - add_generic_vertex(&elb, offset + (j + 1) * gridsize + k); \ + GWN_indexbuf_add_generic_vert(&elb, offset + j * gridsize + k + 1); \ + GWN_indexbuf_add_generic_vert(&elb, offset + j * gridsize + k); \ + GWN_indexbuf_add_generic_vert(&elb, offset + (j + 1) * gridsize + k); \ \ - add_generic_vertex(&elb, offset + (j + 1) * gridsize + k + 1); \ - add_generic_vertex(&elb, offset + j * gridsize + k + 1); \ - add_generic_vertex(&elb, offset + (j + 1) * gridsize + k); \ + GWN_indexbuf_add_generic_vert(&elb, offset + (j + 1) * gridsize + k + 1); \ + GWN_indexbuf_add_generic_vert(&elb, offset + j * gridsize + k + 1); \ + GWN_indexbuf_add_generic_vert(&elb, offset + (j + 1) * gridsize + k); \ } \ } \ \ offset += gridsize * gridsize; \ } \ - buffer_ = ElementList_build(&elb); \ + buffer_ = GWN_indexbuf_build(&elb); \ } (void)0 /* end FILL_QUAD_BUFFER */ -static ElementList *gpu_get_grid_buffer( +static Gwn_IndexBuf *gpu_get_grid_buffer( int gridsize, unsigned *totquad, GridCommonGPUBuffer **grid_common_gpu_buffer, /* remove this arg when gawain gets base-vertex support! */ int totgrid) @@ -1440,7 +1440,7 @@ static ElementList *gpu_get_grid_buffer( } /* we can't reuse old, delete the existing buffer */ else if (gridbuff->mres_buffer) { - ElementList_discard(gridbuff->mres_buffer); + GWN_indexbuf_discard(gridbuff->mres_buffer); gridbuff->mres_buffer = NULL; } @@ -1457,17 +1457,17 @@ static ElementList *gpu_get_grid_buffer( #define FILL_FAST_BUFFER() \ { \ - ElementListBuilder elb; \ - ElementListBuilder_init(&elb, PRIM_TRIANGLES, 6 * totgrid, INT_MAX); \ + Gwn_IndexBufBuilder elb; \ + GWN_indexbuf_init(&elb, GWN_PRIM_TRIS, 6 * totgrid, INT_MAX); \ for (int i = 0; i < totgrid; i++) { \ - add_generic_vertex(&elb, i * gridsize * gridsize + gridsize - 1); \ - add_generic_vertex(&elb, i * gridsize * gridsize); \ - add_generic_vertex(&elb, (i + 1) * gridsize * gridsize - gridsize); \ - add_generic_vertex(&elb, (i + 1) * gridsize * gridsize - 1); \ - add_generic_vertex(&elb, i * gridsize * gridsize + gridsize - 1); \ - add_generic_vertex(&elb, (i + 1) * gridsize * gridsize - gridsize); \ + GWN_indexbuf_add_generic_vert(&elb, i * gridsize * gridsize + gridsize - 1); \ + GWN_indexbuf_add_generic_vert(&elb, i * gridsize * gridsize); \ + GWN_indexbuf_add_generic_vert(&elb, (i + 1) * gridsize * gridsize - gridsize); \ + GWN_indexbuf_add_generic_vert(&elb, (i + 1) * gridsize * gridsize - 1); \ + GWN_indexbuf_add_generic_vert(&elb, i * gridsize * gridsize + gridsize - 1); \ + GWN_indexbuf_add_generic_vert(&elb, (i + 1) * gridsize * gridsize - gridsize); \ } \ - buffers->index_buf_fast = ElementList_build(&elb); \ + buffers->index_buf_fast = GWN_indexbuf_build(&elb); \ } (void)0 GPU_PBVH_Buffers *GPU_pbvh_grid_buffers_build( @@ -1538,7 +1538,7 @@ GPU_PBVH_Buffers *GPU_pbvh_grid_buffers_build( */ static void gpu_bmesh_vert_to_buffer_copy__gwn( BMVert *v, - VertexBuffer *vert_buf, + Gwn_VertBuf *vert_buf, const VertexBufferAttrID *vbo_id, int *v_index, const float fno[3], @@ -1549,12 +1549,12 @@ static void gpu_bmesh_vert_to_buffer_copy__gwn( if (!BM_elem_flag_test(v, BM_ELEM_HIDDEN)) { /* Set coord, normal, and mask */ - VertexBuffer_set_attrib(vert_buf, 
vbo_id->pos, *v_index, v->co); + GWN_vertbuf_attr_set(vert_buf, vbo_id->pos, *v_index, v->co); { short no_short[3]; normal_float_to_short_v3(no_short, fno ? fno : v->no); - VertexBuffer_set_attrib(vert_buf, vbo_id->nor, *v_index, no_short); + GWN_vertbuf_attr_set(vert_buf, vbo_id->nor, *v_index, no_short); } { @@ -1564,7 +1564,7 @@ static void gpu_bmesh_vert_to_buffer_copy__gwn( BM_ELEM_CD_GET_FLOAT(v, cd_vert_mask_offset), diffuse_color, color_ub); - VertexBuffer_set_attrib(vert_buf, vbo_id->col, *v_index, color_ub); + GWN_vertbuf_attr_set(vert_buf, vbo_id->col, *v_index, color_ub); } /* Assign index for use in the triangle index buffer */ @@ -1660,14 +1660,14 @@ void GPU_pbvh_bmesh_buffers_update( copy_v4_v4(buffers->diffuse_color, diffuse_color); /* Initialize vertex buffer */ - VERTEXBUFFER_DISCARD_SAFE(buffers->vert_buf); + GWN_VERTBUF_DISCARD_SAFE(buffers->vert_buf); /* match 'VertexBufferFormat' */ - VertexFormat format = {0}; + Gwn_VertFormat format = {0}; VertexBufferAttrID vbo_id; gpu_pbvh_vert_format_init__gwn(&format, &vbo_id); - buffers->vert_buf = VertexBuffer_create_with_format(&format); - VertexBuffer_allocate_data(buffers->vert_buf, totvert); + buffers->vert_buf = GWN_vertbuf_create_with_format(&format); + GWN_vertbuf_data_alloc(buffers->vert_buf, totvert); /* Fill vertex buffer */ if (buffers->vert_buf->data) { @@ -1736,7 +1736,7 @@ void GPU_pbvh_bmesh_buffers_update( bm->elem_index_dirty |= BM_VERT; } else { - VERTEXBUFFER_DISCARD_SAFE(buffers->vert_buf); + GWN_VERTBUF_DISCARD_SAFE(buffers->vert_buf); /* Memory map failed */ return; } @@ -1744,12 +1744,12 @@ void GPU_pbvh_bmesh_buffers_update( if (buffers->smooth) { /* Fill the triangle buffer */ buffers->index_buf = NULL; - ElementListBuilder elb; - ElementListBuilder_init(&elb, PRIM_TRIANGLES, tottri, maxvert); + Gwn_IndexBufBuilder elb; + GWN_indexbuf_init(&elb, GWN_PRIM_TRIS, tottri, maxvert); /* Initialize triangle index buffer */ if (buffers->triangles && !buffers->is_index_buf_global) { - BATCH_DISCARD_SAFE(buffers->triangles); + GWN_BATCH_DISCARD_SAFE(buffers->triangles); } buffers->is_index_buf_global = false; @@ -1767,19 +1767,19 @@ void GPU_pbvh_bmesh_buffers_update( l_iter = l_first = BM_FACE_FIRST_LOOP(f); do { - add_generic_vertex(&elb, BM_elem_index_get(l_iter->v)); + GWN_indexbuf_add_generic_vert(&elb, BM_elem_index_get(l_iter->v)); } while ((l_iter = l_iter->next) != l_first); } } buffers->tot_tri = tottri; - buffers->index_buf = ElementList_build(&elb); + buffers->index_buf = GWN_indexbuf_build(&elb); } } else if (buffers->index_buf) { if (!buffers->is_index_buf_global) { - ELEMENTLIST_DISCARD_SAFE(buffers->index_buf); + GWN_INDEXBUF_DISCARD_SAFE(buffers->index_buf); } buffers->index_buf = NULL; buffers->is_index_buf_global = false; @@ -1807,7 +1807,7 @@ void GPU_pbvh_buffers_draw( { UNUSED_VARS(wireframe, fast, setMaterial); bool do_fast = fast && buffers->triangles_fast; - Batch *triangles = do_fast ? buffers->triangles_fast : buffers->triangles; + Gwn_Batch *triangles = do_fast ? buffers->triangles_fast : buffers->triangles; if (triangles) { @@ -1817,7 +1817,7 @@ void GPU_pbvh_buffers_draw( buffers->smooth ? 
GPU_SHADER_SIMPLE_LIGHTING_SMOOTH_COLOR : GPU_SHADER_SIMPLE_LIGHTING_FLAT_COLOR; GPUShader *shader = GPU_shader_get_builtin_shader(shader_id); - Batch_set_program( + GWN_batch_program_set( triangles, GPU_shader_get_program(shader), GPU_shader_get_interface(shader)); @@ -1830,11 +1830,11 @@ void GPU_pbvh_buffers_draw( GPU_shader_uniform_vector(shader, GPU_shader_get_uniform(shader, "global"), 1, 1, &world_light); } - Batch_draw(triangles); + GWN_batch_draw(triangles); } } -Batch *GPU_pbvh_buffers_batch_get(GPU_PBVH_Buffers *buffers, bool fast) +Gwn_Batch *GPU_pbvh_buffers_batch_get(GPU_PBVH_Buffers *buffers, bool fast) { return (fast && buffers->triangles_fast) ? buffers->triangles_fast : buffers->triangles; @@ -1886,13 +1886,13 @@ bool GPU_pbvh_buffers_diffuse_changed(GPU_PBVH_Buffers *buffers, GSet *bm_faces, void GPU_pbvh_buffers_free(GPU_PBVH_Buffers *buffers) { if (buffers) { - BATCH_DISCARD_SAFE(buffers->triangles); - BATCH_DISCARD_SAFE(buffers->triangles_fast); + GWN_BATCH_DISCARD_SAFE(buffers->triangles); + GWN_BATCH_DISCARD_SAFE(buffers->triangles_fast); if (!buffers->is_index_buf_global) { - ELEMENTLIST_DISCARD_SAFE(buffers->index_buf); + GWN_INDEXBUF_DISCARD_SAFE(buffers->index_buf); } - ELEMENTLIST_DISCARD_SAFE(buffers->index_buf_fast); - VertexBuffer_discard(buffers->vert_buf); + GWN_INDEXBUF_DISCARD_SAFE(buffers->index_buf_fast); + GWN_vertbuf_discard(buffers->vert_buf); #ifdef USE_BASE_ELEM if (buffers->baseelemarray) @@ -1912,7 +1912,7 @@ void GPU_pbvh_multires_buffers_free(GridCommonGPUBuffer **grid_common_gpu_buffer if (gridbuff) { if (gridbuff->mres_buffer) { BLI_mutex_lock(&buffer_mutex); - ELEMENTLIST_DISCARD_SAFE(gridbuff->mres_buffer); + GWN_INDEXBUF_DISCARD_SAFE(gridbuff->mres_buffer); BLI_mutex_unlock(&buffer_mutex); } MEM_freeN(gridbuff); @@ -1932,7 +1932,7 @@ void GPU_pbvh_BB_draw(float min[3], float max[3], bool leaf, unsigned int pos) * could keep a static batch & index buffer, change the VBO contents per draw */ - immBegin(PRIM_LINES, 24); + immBegin(GWN_PRIM_LINES, 24); /* top */ immVertex3f(pos, min[0], min[1], max[2]); diff --git a/source/blender/gpu/intern/gpu_codegen.c b/source/blender/gpu/intern/gpu_codegen.c index 7e80021f8fc..1f0f5e375ce 100644 --- a/source/blender/gpu/intern/gpu_codegen.c +++ b/source/blender/gpu/intern/gpu_codegen.c @@ -783,7 +783,7 @@ static const char *attrib_prefix_get(CustomDataType type) case CD_TANGENT: return "t"; case CD_MCOL: return "c"; case CD_AUTO_FROM_NAME: return "a"; - default: BLI_assert(false && "Attrib Prefix type not found : This should not happen!"); return ""; + default: BLI_assert(false && "Gwn_VertAttr Prefix type not found : This should not happen!"); return ""; } } diff --git a/source/blender/gpu/intern/gpu_compositing.c b/source/blender/gpu/intern/gpu_compositing.c index c3fab8117b8..afd28aece12 100644 --- a/source/blender/gpu/intern/gpu_compositing.c +++ b/source/blender/gpu/intern/gpu_compositing.c @@ -195,8 +195,8 @@ struct GPUFX { /* we have a stencil, restore the previous state */ bool restore_stencil; - Batch *quad_batch; - Batch *point_batch; + Gwn_Batch *quad_batch; + Gwn_Batch *point_batch; }; #if 0 @@ -273,31 +273,31 @@ GPUFX *GPU_fx_compositor_create(void) GPUFX *fx = MEM_callocN(sizeof(GPUFX), "GPUFX compositor"); /* Quad buffer */ - static VertexFormat format = {0}; + static Gwn_VertFormat format = {0}; static unsigned int pos, uvs; if (format.attrib_ct == 0) { - pos = VertexFormat_add_attrib(&format, "pos", COMP_F32, 2, KEEP_FLOAT); - uvs = VertexFormat_add_attrib(&format, "uvs", COMP_F32, 2, 
KEEP_FLOAT); + pos = GWN_vertformat_attr_add(&format, "pos", GWN_COMP_F32, 2, GWN_FETCH_FLOAT); + uvs = GWN_vertformat_attr_add(&format, "uvs", GWN_COMP_F32, 2, GWN_FETCH_FLOAT); } - VertexBuffer *vbo = VertexBuffer_create_with_format(&format); - VertexBuffer_allocate_data(vbo, 4); + Gwn_VertBuf *vbo = GWN_vertbuf_create_with_format(&format); + GWN_vertbuf_data_alloc(vbo, 4); for (int i = 0; i < 4; ++i) { - VertexBuffer_set_attrib(vbo, pos, i, fullscreencos[i]); - VertexBuffer_set_attrib(vbo, uvs, i, fullscreenuvs[i]); + GWN_vertbuf_attr_set(vbo, pos, i, fullscreencos[i]); + GWN_vertbuf_attr_set(vbo, uvs, i, fullscreenuvs[i]); } - fx->quad_batch = Batch_create(PRIM_TRIANGLE_STRIP, vbo, NULL); + fx->quad_batch = GWN_batch_create(GWN_PRIM_TRI_STRIP, vbo, NULL); /* Point Buffer */ - static VertexFormat format_point = {0}; + static Gwn_VertFormat format_point = {0}; static unsigned int dummy_attrib; if (format_point.attrib_ct == 0) { - dummy_attrib = VertexFormat_add_attrib(&format_point, "pos", COMP_F32, 2, KEEP_FLOAT); + dummy_attrib = GWN_vertformat_attr_add(&format_point, "pos", GWN_COMP_F32, 2, GWN_FETCH_FLOAT); } float dummy[2] = {0.0f, 0.0f}; - VertexBuffer *vbo_point = VertexBuffer_create_with_format(&format_point); - VertexBuffer_allocate_data(vbo_point, 1); - VertexBuffer_set_attrib(vbo_point, dummy_attrib, 0, &dummy); - fx->point_batch = Batch_create(PRIM_POINTS, vbo_point, NULL); + Gwn_VertBuf *vbo_point = GWN_vertbuf_create_with_format(&format_point); + GWN_vertbuf_data_alloc(vbo_point, 1); + GWN_vertbuf_attr_set(vbo_point, dummy_attrib, 0, &dummy); + fx->point_batch = GWN_batch_create(GWN_PRIM_POINTS, vbo_point, NULL); return fx; } @@ -387,8 +387,8 @@ static void cleanup_fx_gl_data(GPUFX *fx, bool do_fbo) void GPU_fx_compositor_destroy(GPUFX *fx) { cleanup_fx_gl_data(fx, true); - Batch_discard_all(fx->quad_batch); - Batch_discard_all(fx->point_batch); + GWN_batch_discard_all(fx->quad_batch); + GWN_batch_discard_all(fx->point_batch); MEM_freeN(fx); } @@ -731,7 +731,7 @@ void GPU_fx_compositor_XRay_resolve(GPUFX *fx) GPUDepthResolveInterface *interface = GPU_fx_shader_get_interface(depth_resolve_shader); /* set up quad buffer */ - Batch_set_program(fx->quad_batch, GPU_shader_get_program(depth_resolve_shader), GPU_shader_get_interface(depth_resolve_shader)); + GWN_batch_program_set(fx->quad_batch, GPU_shader_get_program(depth_resolve_shader), GPU_shader_get_interface(depth_resolve_shader)); GPU_texture_bind(fx->depth_buffer_xray, 0); GPU_texture_compare_mode(fx->depth_buffer_xray, false); @@ -739,7 +739,7 @@ void GPU_fx_compositor_XRay_resolve(GPUFX *fx) GPU_shader_uniform_texture(depth_resolve_shader, interface->depth_uniform, fx->depth_buffer_xray); /* draw */ - Batch_draw(fx->quad_batch); + GWN_batch_draw(fx->quad_batch); /* disable bindings */ GPU_texture_compare_mode(fx->depth_buffer_xray, true); @@ -837,7 +837,7 @@ bool GPU_fx_do_composite_pass( GPUSSAOShaderInterface *interface = GPU_fx_shader_get_interface(ssao_shader); - Batch_set_program(fx->quad_batch, GPU_shader_get_program(ssao_shader), GPU_shader_get_interface(ssao_shader)); + GWN_batch_program_set(fx->quad_batch, GPU_shader_get_program(ssao_shader), GPU_shader_get_interface(ssao_shader)); GPU_shader_uniform_vector(ssao_shader, interface->ssao_uniform, 4, 1, ssao_params); GPU_shader_uniform_vector(ssao_shader, interface->ssao_color_uniform, 4, 1, fx_ssao->color); @@ -861,7 +861,7 @@ bool GPU_fx_do_composite_pass( /* draw */ gpu_fx_bind_render_target(&passes_left, fx, ofs, target); - Batch_draw(fx->quad_batch); + 
GWN_batch_draw(fx->quad_batch); /* disable bindings */ GPU_texture_unbind(src); @@ -930,7 +930,7 @@ bool GPU_fx_do_composite_pass( GPUDOFHQPassOneInterface *interface = GPU_fx_shader_get_interface(dof_shader_pass1); - Batch_set_program(fx->quad_batch, GPU_shader_get_program(dof_shader_pass1), GPU_shader_get_interface(dof_shader_pass1)); + GWN_batch_program_set(fx->quad_batch, GPU_shader_get_program(dof_shader_pass1), GPU_shader_get_interface(dof_shader_pass1)); GPU_shader_uniform_vector(dof_shader_pass1, interface->dof_uniform, 4, 1, dof_params); GPU_shader_uniform_vector(dof_shader_pass1, interface->invrendertargetdim_uniform, 2, 1, invrendertargetdim); @@ -957,7 +957,7 @@ bool GPU_fx_do_composite_pass( GPU_framebuffer_check_valid(fx->gbuffer, NULL); - Batch_draw(fx->quad_batch); + GWN_batch_draw(fx->quad_batch); /* disable bindings */ GPU_texture_filter_mode(src, true); @@ -982,7 +982,7 @@ bool GPU_fx_do_composite_pass( GPUDOFHQPassTwoInterface *interface = GPU_fx_shader_get_interface(dof_shader_pass2); - Batch_set_program(fx->point_batch, GPU_shader_get_program(dof_shader_pass2), GPU_shader_get_interface(dof_shader_pass2)); + GWN_batch_program_set(fx->point_batch, GPU_shader_get_program(dof_shader_pass2), GPU_shader_get_interface(dof_shader_pass2)); GPU_texture_bind(fx->dof_nearfar_coc, numslots++); GPU_texture_bind(fx->dof_half_downsampled_far, numslots++); @@ -1007,7 +1007,7 @@ bool GPU_fx_do_composite_pass( glClearColor(0.0, 0.0, 0.0, 0.0); glClear(GL_COLOR_BUFFER_BIT); /* the draw call we all waited for, draw a point per pixel, scaled per circle of confusion */ - Batch_draw_stupid_instanced(fx->point_batch, 0, fx->dof_downsampled_w * fx->dof_downsampled_h, 0, 0, NULL, NULL); + GWN_batch_draw_stupid_instanced(fx->point_batch, 0, fx->dof_downsampled_w * fx->dof_downsampled_h, 0, 0, NULL, NULL); GPU_texture_unbind(fx->dof_half_downsampled_far); GPU_framebuffer_texture_detach(fx->dof_far_blur); @@ -1023,8 +1023,8 @@ bool GPU_fx_do_composite_pass( /* have to clear the buffer unfortunately */ glClear(GL_COLOR_BUFFER_BIT); /* the draw call we all waited for, draw a point per pixel, scaled per circle of confusion */ - Batch_draw_stupid_instanced(fx->point_batch, 0, fx->dof_downsampled_w * fx->dof_downsampled_h, 0, 0, NULL, NULL); - Batch_done_using_program(fx->point_batch); + GWN_batch_draw_stupid_instanced(fx->point_batch, 0, fx->dof_downsampled_w * fx->dof_downsampled_h, 0, 0, NULL, NULL); + GWN_batch_program_use_end(fx->point_batch); /* disable bindings */ glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA); @@ -1045,7 +1045,7 @@ bool GPU_fx_do_composite_pass( GPUDOFHQPassThreeInterface *interface = GPU_fx_shader_get_interface(dof_shader_pass3); - Batch_set_program(fx->quad_batch, GPU_shader_get_program(dof_shader_pass3), GPU_shader_get_interface(dof_shader_pass3)); + GWN_batch_program_set(fx->quad_batch, GPU_shader_get_program(dof_shader_pass3), GPU_shader_get_interface(dof_shader_pass3)); GPU_shader_uniform_vector(dof_shader_pass3, interface->dof_uniform, 4, 1, dof_params); @@ -1071,7 +1071,7 @@ bool GPU_fx_do_composite_pass( /* if this is the last pass, prepare for rendering on the frambuffer */ gpu_fx_bind_render_target(&passes_left, fx, ofs, target); - Batch_draw(fx->quad_batch); + GWN_batch_draw(fx->quad_batch); /* disable bindings */ GPU_texture_unbind(fx->dof_near_blur); @@ -1124,7 +1124,7 @@ bool GPU_fx_do_composite_pass( GPUDOFPassOneInterface *interface = GPU_fx_shader_get_interface(dof_shader_pass1); - Batch_set_program(fx->quad_batch, GPU_shader_get_program(dof_shader_pass1), 
GPU_shader_get_interface(dof_shader_pass1)); + GWN_batch_program_set(fx->quad_batch, GPU_shader_get_program(dof_shader_pass1), GPU_shader_get_interface(dof_shader_pass1)); GPU_shader_uniform_vector(dof_shader_pass1, interface->dof_uniform, 4, 1, dof_params); GPU_shader_uniform_vector(dof_shader_pass1, interface->invrendertargetdim_uniform, 2, 1, invrendertargetdim); @@ -1143,7 +1143,7 @@ bool GPU_fx_do_composite_pass( /* binding takes care of setting the viewport to the downsampled size */ GPU_texture_bind_as_framebuffer(fx->dof_near_coc_buffer); - Batch_draw(fx->quad_batch); + GWN_batch_draw(fx->quad_batch); /* disable bindings */ GPU_texture_unbind(src); GPU_texture_compare_mode(fx->depth_buffer, true); @@ -1166,7 +1166,7 @@ bool GPU_fx_do_composite_pass( dof_params[2] = GPU_texture_width(fx->dof_near_coc_blurred_buffer) / (scale_camera * fx_dof->sensor); /* Blurring vertically */ - Batch_set_program(fx->quad_batch, GPU_shader_get_program(dof_shader_pass2), GPU_shader_get_interface(dof_shader_pass2)); + GWN_batch_program_set(fx->quad_batch, GPU_shader_get_program(dof_shader_pass2), GPU_shader_get_interface(dof_shader_pass2)); GPU_shader_uniform_vector(dof_shader_pass2, interface->dof_uniform, 4, 1, dof_params); GPU_shader_uniform_vector(dof_shader_pass2, interface->invrendertargetdim_uniform, 2, 1, invrendertargetdim); @@ -1184,10 +1184,10 @@ bool GPU_fx_do_composite_pass( GPU_framebuffer_texture_attach(fx->gbuffer, fx->dof_near_coc_final_buffer, 0, 0); /* Drawing quad */ - Batch_draw(fx->quad_batch); + GWN_batch_draw(fx->quad_batch); /* Rebind Shader */ - Batch_set_program(fx->quad_batch, GPU_shader_get_program(dof_shader_pass2), GPU_shader_get_interface(dof_shader_pass2)); + GWN_batch_program_set(fx->quad_batch, GPU_shader_get_program(dof_shader_pass2), GPU_shader_get_interface(dof_shader_pass2)); /* *unbind/detach */ GPU_texture_unbind(fx->dof_near_coc_buffer); @@ -1204,7 +1204,7 @@ bool GPU_fx_do_composite_pass( GPU_framebuffer_texture_attach(fx->gbuffer, fx->dof_near_coc_blurred_buffer, 0, 0); - Batch_draw(fx->quad_batch); + GWN_batch_draw(fx->quad_batch); /* *unbind/detach */ GPU_texture_compare_mode(fx->depth_buffer, true); @@ -1223,7 +1223,7 @@ bool GPU_fx_do_composite_pass( { GPUDOFPassThreeInterface *interface = GPU_fx_shader_get_interface(dof_shader_pass3); - Batch_set_program(fx->quad_batch, GPU_shader_get_program(dof_shader_pass3), GPU_shader_get_interface(dof_shader_pass3)); + GWN_batch_program_set(fx->quad_batch, GPU_shader_get_program(dof_shader_pass3), GPU_shader_get_interface(dof_shader_pass3)); GPU_texture_bind(fx->dof_near_coc_buffer, numslots++); GPU_shader_uniform_texture(dof_shader_pass3, interface->near_coc_downsampled, fx->dof_near_coc_buffer); @@ -1233,7 +1233,7 @@ bool GPU_fx_do_composite_pass( GPU_framebuffer_texture_attach(fx->gbuffer, fx->dof_near_coc_final_buffer, 0, 0); - Batch_draw(fx->quad_batch); + GWN_batch_draw(fx->quad_batch); /* disable bindings */ GPU_texture_unbind(fx->dof_near_coc_buffer); GPU_texture_unbind(fx->dof_near_coc_blurred_buffer); @@ -1251,7 +1251,7 @@ bool GPU_fx_do_composite_pass( GPUDOFPassFourInterface *interface = GPU_fx_shader_get_interface(dof_shader_pass4); - Batch_set_program(fx->quad_batch, GPU_shader_get_program(dof_shader_pass4), GPU_shader_get_interface(dof_shader_pass4)); + GWN_batch_program_set(fx->quad_batch, GPU_shader_get_program(dof_shader_pass4), GPU_shader_get_interface(dof_shader_pass4)); GPU_texture_bind(fx->dof_near_coc_final_buffer, numslots++); GPU_shader_uniform_texture(dof_shader_pass4, 
interface->near_coc_downsampled, fx->dof_near_coc_final_buffer); @@ -1259,7 +1259,7 @@ bool GPU_fx_do_composite_pass( GPU_framebuffer_texture_attach(fx->gbuffer, fx->dof_near_coc_buffer, 0, 0); - Batch_draw(fx->quad_batch); + GWN_batch_draw(fx->quad_batch); /* disable bindings */ GPU_texture_unbind(fx->dof_near_coc_final_buffer); @@ -1276,7 +1276,7 @@ bool GPU_fx_do_composite_pass( GPUDOFPassFiveInterface *interface = GPU_fx_shader_get_interface(dof_shader_pass5); - Batch_set_program(fx->quad_batch, GPU_shader_get_program(dof_shader_pass5), GPU_shader_get_interface(dof_shader_pass5)); + GWN_batch_program_set(fx->quad_batch, GPU_shader_get_program(dof_shader_pass5), GPU_shader_get_interface(dof_shader_pass5)); GPU_shader_uniform_vector(dof_shader_pass5, interface->dof_uniform, 4, 1, dof_params); GPU_shader_uniform_vector(dof_shader_pass5, interface->invrendertargetdim_uniform, 2, 1, invrendertargetdim); @@ -1299,7 +1299,7 @@ bool GPU_fx_do_composite_pass( /* if this is the last pass, prepare for rendering on the frambuffer */ gpu_fx_bind_render_target(&passes_left, fx, ofs, target); - Batch_draw(fx->quad_batch); + GWN_batch_draw(fx->quad_batch); /* disable bindings */ GPU_texture_unbind(fx->dof_near_coc_buffer); GPU_texture_unbind(fx->dof_near_coc_blurred_buffer); diff --git a/source/blender/gpu/intern/gpu_draw.c b/source/blender/gpu/intern/gpu_draw.c index 529727b5e17..9ed6f387e90 100644 --- a/source/blender/gpu/intern/gpu_draw.c +++ b/source/blender/gpu/intern/gpu_draw.c @@ -2369,7 +2369,7 @@ static GPUAttribStack state = { }; #define AttribStack state -#define Attrib state.attrib_stack[state.top] +#define Gwn_VertAttr state.attrib_stack[state.top] /** * Replacement for glPush/PopAttributes @@ -2379,48 +2379,48 @@ static GPUAttribStack state = { */ void gpuPushAttrib(eGPUAttribMask mask) { - Attrib.mask = mask; + Gwn_VertAttr.mask = mask; if ((mask & GPU_DEPTH_BUFFER_BIT) != 0) { - Attrib.is_depth_test = glIsEnabled(GL_DEPTH_TEST); - glGetIntegerv(GL_DEPTH_FUNC, &Attrib.depth_func); - glGetDoublev(GL_DEPTH_CLEAR_VALUE, &Attrib.depth_clear_value); - glGetBooleanv(GL_DEPTH_WRITEMASK, (GLboolean *)&Attrib.depth_write_mask); + Gwn_VertAttr.is_depth_test = glIsEnabled(GL_DEPTH_TEST); + glGetIntegerv(GL_DEPTH_FUNC, &Gwn_VertAttr.depth_func); + glGetDoublev(GL_DEPTH_CLEAR_VALUE, &Gwn_VertAttr.depth_clear_value); + glGetBooleanv(GL_DEPTH_WRITEMASK, (GLboolean *)&Gwn_VertAttr.depth_write_mask); } if ((mask & GPU_ENABLE_BIT) != 0) { - Attrib.is_blend = glIsEnabled(GL_BLEND); + Gwn_VertAttr.is_blend = glIsEnabled(GL_BLEND); for (int i = 0; i < 6; i++) { - Attrib.is_clip_plane[i] = glIsEnabled(GL_CLIP_PLANE0 + i); + Gwn_VertAttr.is_clip_plane[i] = glIsEnabled(GL_CLIP_PLANE0 + i); } - Attrib.is_cull_face = glIsEnabled(GL_CULL_FACE); - Attrib.is_depth_test = glIsEnabled(GL_DEPTH_TEST); - Attrib.is_dither = glIsEnabled(GL_DITHER); - Attrib.is_line_smooth = glIsEnabled(GL_LINE_SMOOTH); - Attrib.is_color_logic_op = glIsEnabled(GL_COLOR_LOGIC_OP); - Attrib.is_multisample = glIsEnabled(GL_MULTISAMPLE); - Attrib.is_polygon_offset_line = glIsEnabled(GL_POLYGON_OFFSET_LINE); - Attrib.is_polygon_offset_fill = glIsEnabled(GL_POLYGON_OFFSET_FILL); - Attrib.is_polygon_smooth = glIsEnabled(GL_POLYGON_SMOOTH); - Attrib.is_sample_alpha_to_coverage = glIsEnabled(GL_SAMPLE_ALPHA_TO_COVERAGE); - Attrib.is_scissor_test = glIsEnabled(GL_SCISSOR_TEST); - Attrib.is_stencil_test = glIsEnabled(GL_STENCIL_TEST); + Gwn_VertAttr.is_cull_face = glIsEnabled(GL_CULL_FACE); + Gwn_VertAttr.is_depth_test = glIsEnabled(GL_DEPTH_TEST); 
+ Gwn_VertAttr.is_dither = glIsEnabled(GL_DITHER); + Gwn_VertAttr.is_line_smooth = glIsEnabled(GL_LINE_SMOOTH); + Gwn_VertAttr.is_color_logic_op = glIsEnabled(GL_COLOR_LOGIC_OP); + Gwn_VertAttr.is_multisample = glIsEnabled(GL_MULTISAMPLE); + Gwn_VertAttr.is_polygon_offset_line = glIsEnabled(GL_POLYGON_OFFSET_LINE); + Gwn_VertAttr.is_polygon_offset_fill = glIsEnabled(GL_POLYGON_OFFSET_FILL); + Gwn_VertAttr.is_polygon_smooth = glIsEnabled(GL_POLYGON_SMOOTH); + Gwn_VertAttr.is_sample_alpha_to_coverage = glIsEnabled(GL_SAMPLE_ALPHA_TO_COVERAGE); + Gwn_VertAttr.is_scissor_test = glIsEnabled(GL_SCISSOR_TEST); + Gwn_VertAttr.is_stencil_test = glIsEnabled(GL_STENCIL_TEST); } if ((mask & GPU_SCISSOR_BIT) != 0) { - Attrib.is_scissor_test = glIsEnabled(GL_SCISSOR_TEST); - glGetIntegerv(GL_SCISSOR_BOX, (GLint *)&Attrib.scissor_box); + Gwn_VertAttr.is_scissor_test = glIsEnabled(GL_SCISSOR_TEST); + glGetIntegerv(GL_SCISSOR_BOX, (GLint *)&Gwn_VertAttr.scissor_box); } if ((mask & GPU_VIEWPORT_BIT) != 0) { - glGetDoublev(GL_DEPTH_RANGE, (GLdouble *)&Attrib.near_far); - glGetIntegerv(GL_VIEWPORT, (GLint *)&Attrib.viewport); + glGetDoublev(GL_DEPTH_RANGE, (GLdouble *)&Gwn_VertAttr.near_far); + glGetIntegerv(GL_VIEWPORT, (GLint *)&Gwn_VertAttr.viewport); } if ((mask & GPU_BLEND_BIT) != 0) { - Attrib.is_blend = glIsEnabled(GL_BLEND); + Gwn_VertAttr.is_blend = glIsEnabled(GL_BLEND); } BLI_assert(AttribStack.top < STATE_STACK_DEPTH); @@ -2441,52 +2441,52 @@ void gpuPopAttrib(void) BLI_assert(AttribStack.top > 0); AttribStack.top--; - GLint mask = Attrib.mask; + GLint mask = Gwn_VertAttr.mask; if ((mask & GPU_DEPTH_BUFFER_BIT) != 0) { - restore_mask(GL_DEPTH_TEST, Attrib.is_depth_test); - glDepthFunc(Attrib.depth_func); - glClearDepth(Attrib.depth_clear_value); - glDepthMask(Attrib.depth_write_mask); + restore_mask(GL_DEPTH_TEST, Gwn_VertAttr.is_depth_test); + glDepthFunc(Gwn_VertAttr.depth_func); + glClearDepth(Gwn_VertAttr.depth_clear_value); + glDepthMask(Gwn_VertAttr.depth_write_mask); } if ((mask & GPU_ENABLE_BIT) != 0) { - restore_mask(GL_BLEND, Attrib.is_blend); + restore_mask(GL_BLEND, Gwn_VertAttr.is_blend); for (int i = 0; i < 6; i++) { - restore_mask(GL_CLIP_PLANE0 + i, Attrib.is_clip_plane[i]); + restore_mask(GL_CLIP_PLANE0 + i, Gwn_VertAttr.is_clip_plane[i]); } - restore_mask(GL_CULL_FACE, Attrib.is_cull_face); - restore_mask(GL_DEPTH_TEST, Attrib.is_depth_test); - restore_mask(GL_DITHER, Attrib.is_dither); - restore_mask(GL_LINE_SMOOTH, Attrib.is_line_smooth); - restore_mask(GL_COLOR_LOGIC_OP, Attrib.is_color_logic_op); - restore_mask(GL_MULTISAMPLE, Attrib.is_multisample); - restore_mask(GL_POLYGON_OFFSET_LINE, Attrib.is_polygon_offset_line); - restore_mask(GL_POLYGON_OFFSET_FILL, Attrib.is_polygon_offset_fill); - restore_mask(GL_POLYGON_SMOOTH, Attrib.is_polygon_smooth); - restore_mask(GL_SAMPLE_ALPHA_TO_COVERAGE, Attrib.is_sample_alpha_to_coverage); - restore_mask(GL_SCISSOR_TEST, Attrib.is_scissor_test); - restore_mask(GL_STENCIL_TEST, Attrib.is_stencil_test); + restore_mask(GL_CULL_FACE, Gwn_VertAttr.is_cull_face); + restore_mask(GL_DEPTH_TEST, Gwn_VertAttr.is_depth_test); + restore_mask(GL_DITHER, Gwn_VertAttr.is_dither); + restore_mask(GL_LINE_SMOOTH, Gwn_VertAttr.is_line_smooth); + restore_mask(GL_COLOR_LOGIC_OP, Gwn_VertAttr.is_color_logic_op); + restore_mask(GL_MULTISAMPLE, Gwn_VertAttr.is_multisample); + restore_mask(GL_POLYGON_OFFSET_LINE, Gwn_VertAttr.is_polygon_offset_line); + restore_mask(GL_POLYGON_OFFSET_FILL, Gwn_VertAttr.is_polygon_offset_fill); + restore_mask(GL_POLYGON_SMOOTH, 
Gwn_VertAttr.is_polygon_smooth); + restore_mask(GL_SAMPLE_ALPHA_TO_COVERAGE, Gwn_VertAttr.is_sample_alpha_to_coverage); + restore_mask(GL_SCISSOR_TEST, Gwn_VertAttr.is_scissor_test); + restore_mask(GL_STENCIL_TEST, Gwn_VertAttr.is_stencil_test); } if ((mask & GPU_VIEWPORT_BIT) != 0) { - glViewport(Attrib.viewport[0], Attrib.viewport[1], Attrib.viewport[2], Attrib.viewport[3]); - glDepthRange(Attrib.near_far[0], Attrib.near_far[1]); + glViewport(Gwn_VertAttr.viewport[0], Gwn_VertAttr.viewport[1], Gwn_VertAttr.viewport[2], Gwn_VertAttr.viewport[3]); + glDepthRange(Gwn_VertAttr.near_far[0], Gwn_VertAttr.near_far[1]); } if ((mask & GPU_SCISSOR_BIT) != 0) { - restore_mask(GL_SCISSOR_TEST, Attrib.is_scissor_test); - glScissor(Attrib.scissor_box[0], Attrib.scissor_box[1], Attrib.scissor_box[2], Attrib.scissor_box[3]); + restore_mask(GL_SCISSOR_TEST, Gwn_VertAttr.is_scissor_test); + glScissor(Gwn_VertAttr.scissor_box[0], Gwn_VertAttr.scissor_box[1], Gwn_VertAttr.scissor_box[2], Gwn_VertAttr.scissor_box[3]); } if ((mask & GPU_BLEND_BIT) != 0) { - restore_mask(GL_BLEND, Attrib.is_blend); + restore_mask(GL_BLEND, Gwn_VertAttr.is_blend); } } -#undef Attrib +#undef Gwn_VertAttr #undef AttribStack /** \} */ diff --git a/source/blender/gpu/intern/gpu_framebuffer.c b/source/blender/gpu/intern/gpu_framebuffer.c index 96c740410c5..d791f839e20 100644 --- a/source/blender/gpu/intern/gpu_framebuffer.c +++ b/source/blender/gpu/intern/gpu_framebuffer.c @@ -448,9 +448,9 @@ void GPU_framebuffer_blur( const float fullscreencos[4][2] = {{-1.0f, -1.0f}, {1.0f, -1.0f}, {-1.0f, 1.0f}, {1.0f, 1.0f}}; const float fullscreenuvs[4][2] = {{0.0f, 0.0f}, {1.0f, 0.0f}, {0.0f, 1.0f}, {1.0f, 1.0f}}; - static VertexFormat format = {0}; - static VertexBuffer vbo = {{0}}; - static Batch batch = {{0}}; + static Gwn_VertFormat format = {0}; + static Gwn_VertBuf vbo = {{0}}; + static Gwn_Batch batch = {{0}}; const float scaleh[2] = {1.0f / GPU_texture_width(blurtex), 0.0f}; const float scalev[2] = {0.0f, 1.0f / GPU_texture_height(tex)}; @@ -464,23 +464,23 @@ void GPU_framebuffer_blur( if (format.attrib_ct == 0) { unsigned int i = 0; /* Vertex format */ - unsigned int pos = VertexFormat_add_attrib(&format, "pos", COMP_F32, 2, KEEP_FLOAT); - unsigned int uvs = VertexFormat_add_attrib(&format, "uvs", COMP_F32, 2, KEEP_FLOAT); + unsigned int pos = GWN_vertformat_attr_add(&format, "pos", GWN_COMP_F32, 2, GWN_FETCH_FLOAT); + unsigned int uvs = GWN_vertformat_attr_add(&format, "uvs", GWN_COMP_F32, 2, GWN_FETCH_FLOAT); /* Vertices */ - VertexBuffer_init_with_format(&vbo, &format); - VertexBuffer_allocate_data(&vbo, 36); + GWN_vertbuf_init_with_format(&vbo, &format); + GWN_vertbuf_data_alloc(&vbo, 36); for (int j = 0; j < 3; ++j) { - VertexBuffer_set_attrib(&vbo, uvs, i, fullscreenuvs[j]); - VertexBuffer_set_attrib(&vbo, pos, i++, fullscreencos[j]); + GWN_vertbuf_attr_set(&vbo, uvs, i, fullscreenuvs[j]); + GWN_vertbuf_attr_set(&vbo, pos, i++, fullscreencos[j]); } for (int j = 1; j < 4; ++j) { - VertexBuffer_set_attrib(&vbo, uvs, i, fullscreenuvs[j]); - VertexBuffer_set_attrib(&vbo, pos, i++, fullscreencos[j]); + GWN_vertbuf_attr_set(&vbo, uvs, i, fullscreenuvs[j]); + GWN_vertbuf_attr_set(&vbo, pos, i++, fullscreencos[j]); } - Batch_init(&batch, GL_TRIANGLES, &vbo, NULL); + GWN_batch_init(&batch, GL_TRIANGLES, &vbo, NULL); } glDisable(GL_DEPTH_TEST); @@ -499,9 +499,9 @@ void GPU_framebuffer_blur( GPU_texture_bind(tex, 0); Batch_set_builtin_program(&batch, GPU_SHADER_SEP_GAUSSIAN_BLUR); - Batch_Uniform2f(&batch, "ScaleU", scaleh[0], scaleh[1]); 
- Batch_Uniform1i(&batch, "textureSource", GL_TEXTURE0); - Batch_draw(&batch); + GWN_batch_uniform_2f(&batch, "ScaleU", scaleh[0], scaleh[1]); + GWN_batch_uniform_1i(&batch, "textureSource", GL_TEXTURE0); + GWN_batch_draw(&batch); /* Blurring vertically */ glBindFramebuffer(GL_FRAMEBUFFER, fb->object); @@ -515,9 +515,9 @@ void GPU_framebuffer_blur( /* Hack to make the following uniform stick */ Batch_set_builtin_program(&batch, GPU_SHADER_SEP_GAUSSIAN_BLUR); - Batch_Uniform2f(&batch, "ScaleU", scalev[0], scalev[1]); - Batch_Uniform1i(&batch, "textureSource", GL_TEXTURE0); - Batch_draw(&batch); + GWN_batch_uniform_2f(&batch, "ScaleU", scalev[0], scalev[1]); + GWN_batch_uniform_1i(&batch, "textureSource", GL_TEXTURE0); + GWN_batch_draw(&batch); } void GPU_framebuffer_blit(GPUFrameBuffer *fb_read, int read_slot, GPUFrameBuffer *fb_write, int write_slot, bool use_depth) diff --git a/source/blender/gpu/intern/gpu_immediate_util.c b/source/blender/gpu/intern/gpu_immediate_util.c index 40da4cf091b..a4e54d15034 100644 --- a/source/blender/gpu/intern/gpu_immediate_util.c +++ b/source/blender/gpu/intern/gpu_immediate_util.c @@ -51,7 +51,7 @@ void imm_cpack(unsigned int x) (((x) >> 16) & 0xFF)); } -static void imm_draw_circle(PrimitiveType prim_type, const uint shdr_pos, float x, float y, float rad, int nsegments) +static void imm_draw_circle(Gwn_PrimType prim_type, const uint shdr_pos, float x, float y, float rad, int nsegments) { immBegin(prim_type, nsegments); for (int i = 0; i < nsegments; ++i) { @@ -73,7 +73,7 @@ static void imm_draw_circle(PrimitiveType prim_type, const uint shdr_pos, float */ void imm_draw_circle_wire(uint shdr_pos, float x, float y, float rad, int nsegments) { - imm_draw_circle(PRIM_LINE_LOOP, shdr_pos, x, y, rad, nsegments); + imm_draw_circle(GWN_PRIM_LINE_LOOP, shdr_pos, x, y, rad, nsegments); } /** @@ -88,14 +88,14 @@ void imm_draw_circle_wire(uint shdr_pos, float x, float y, float rad, int nsegme */ void imm_draw_circle_fill(uint shdr_pos, float x, float y, float rad, int nsegments) { - imm_draw_circle(PRIM_TRIANGLE_FAN, shdr_pos, x, y, rad, nsegments); + imm_draw_circle(GWN_PRIM_TRI_FAN, shdr_pos, x, y, rad, nsegments); } /** * \note We could have `imm_draw_lined_disk_partial` but currently there is no need. 
*/ static void imm_draw_disk_partial( - PrimitiveType prim_type, unsigned pos, float x, float y, + Gwn_PrimType prim_type, unsigned pos, float x, float y, float rad_inner, float rad_outer, int nsegments, float start, float sweep) { /* shift & reverse angle, increase 'nsegments' to match gluPartialDisk */ @@ -132,11 +132,11 @@ void imm_draw_disk_partial_fill( unsigned pos, float x, float y, float rad_inner, float rad_outer, int nsegments, float start, float sweep) { - imm_draw_disk_partial(PRIM_TRIANGLE_STRIP, pos, x, y, rad_inner, rad_outer, nsegments, start, sweep); + imm_draw_disk_partial(GWN_PRIM_TRI_STRIP, pos, x, y, rad_inner, rad_outer, nsegments, start, sweep); } static void imm_draw_circle_3D( - PrimitiveType prim_type, unsigned pos, float x, float y, + Gwn_PrimType prim_type, unsigned pos, float x, float y, float rad, int nsegments) { immBegin(prim_type, nsegments); @@ -149,12 +149,12 @@ static void imm_draw_circle_3D( void imm_draw_circle_wire_3d(unsigned pos, float x, float y, float rad, int nsegments) { - imm_draw_circle_3D(PRIM_LINE_LOOP, pos, x, y, rad, nsegments); + imm_draw_circle_3D(GWN_PRIM_LINE_LOOP, pos, x, y, rad, nsegments); } void imm_draw_circle_fill_3d(unsigned pos, float x, float y, float rad, int nsegments) { - imm_draw_circle_3D(PRIM_TRIANGLE_FAN, pos, x, y, rad, nsegments); + imm_draw_circle_3D(GWN_PRIM_TRI_FAN, pos, x, y, rad, nsegments); } /** @@ -168,7 +168,7 @@ void imm_draw_circle_fill_3d(unsigned pos, float x, float y, float rad, int nseg */ void imm_draw_line_box(unsigned pos, float x1, float y1, float x2, float y2) { - immBegin(PRIM_LINE_LOOP, 4); + immBegin(GWN_PRIM_LINE_LOOP, 4); immVertex2f(pos, x1, y1); immVertex2f(pos, x1, y2); immVertex2f(pos, x2, y2); @@ -178,8 +178,8 @@ void imm_draw_line_box(unsigned pos, float x1, float y1, float x2, float y2) void imm_draw_line_box_3d(unsigned pos, float x1, float y1, float x2, float y2) { - /* use this version when VertexFormat has a vec3 position */ - immBegin(PRIM_LINE_LOOP, 4); + /* use this version when Gwn_VertFormat has a vec3 position */ + immBegin(GWN_PRIM_LINE_LOOP, 4); immVertex3f(pos, x1, y1, 0.0f); immVertex3f(pos, x1, y2, 0.0f); immVertex3f(pos, x2, y2, 0.0f); @@ -192,7 +192,7 @@ void imm_draw_line_box_3d(unsigned pos, float x1, float y1, float x2, float y2) */ void imm_draw_checker_box(float x1, float y1, float x2, float y2) { - unsigned int pos = VertexFormat_add_attrib(immVertexFormat(), "pos", COMP_F32, 2, KEEP_FLOAT); + unsigned int pos = GWN_vertformat_attr_add(immVertexFormat(), "pos", GWN_COMP_F32, 2, GWN_FETCH_FLOAT); immBindBuiltinProgram(GPU_SHADER_2D_CHECKER); immUniform4f("color1", 0.15f, 0.15f, 0.15f, 1.0f); @@ -219,7 +219,7 @@ void imm_draw_checker_box(float x1, float y1, float x2, float y2) void imm_draw_cylinder_fill_normal_3d( unsigned int pos, unsigned int nor, float base, float top, float height, int slices, int stacks) { - immBegin(PRIM_TRIANGLES, 6 * slices * stacks); + immBegin(GWN_PRIM_TRIS, 6 * slices * stacks); for (int i = 0; i < slices; ++i) { const float angle1 = 2 * M_PI * ((float)i / (float)slices); const float angle2 = 2 * M_PI * ((float)(i + 1) / (float)slices); @@ -270,7 +270,7 @@ void imm_draw_cylinder_fill_normal_3d( void imm_draw_cylinder_wire_3d(unsigned int pos, float base, float top, float height, int slices, int stacks) { - immBegin(PRIM_LINES, 6 * slices * stacks); + immBegin(GWN_PRIM_LINES, 6 * slices * stacks); for (int i = 0; i < slices; ++i) { const float angle1 = 2 * M_PI * ((float)i / (float)slices); const float angle2 = 2 * M_PI * ((float)(i + 1) 
/ (float)slices); @@ -307,7 +307,7 @@ void imm_draw_cylinder_wire_3d(unsigned int pos, float base, float top, float he void imm_draw_cylinder_fill_3d(unsigned int pos, float base, float top, float height, int slices, int stacks) { - immBegin(PRIM_TRIANGLES, 6 * slices * stacks); + immBegin(GWN_PRIM_TRIS, 6 * slices * stacks); for (int i = 0; i < slices; ++i) { const float angle1 = 2 * M_PI * ((float)i / (float)slices); const float angle2 = 2 * M_PI * ((float)(i + 1) / (float)slices); diff --git a/source/blender/gpu/intern/gpu_matrix.c b/source/blender/gpu/intern/gpu_matrix.c index 775d1cd87b2..f03a076a9fd 100644 --- a/source/blender/gpu/intern/gpu_matrix.c +++ b/source/blender/gpu/intern/gpu_matrix.c @@ -554,20 +554,20 @@ const float (*gpuGetNormalMatrixInverse(float m[3][3]))[3] return m; } -void gpuBindMatrices(const ShaderInterface* shaderface) +void gpuBindMatrices(const Gwn_ShaderInterface* shaderface) { /* set uniform values to matrix stack values * call this before a draw call if desired matrices are dirty * call glUseProgram before this, as glUniform expects program to be bound */ - const ShaderInput *MV = ShaderInterface_builtin_uniform(shaderface, UNIFORM_MODELVIEW); - const ShaderInput *P = ShaderInterface_builtin_uniform(shaderface, UNIFORM_PROJECTION); - const ShaderInput *MVP = ShaderInterface_builtin_uniform(shaderface, UNIFORM_MVP); + const Gwn_ShaderInput *MV = GWN_shaderinterface_uniform_builtin(shaderface, GWN_UNIFORM_MODELVIEW); + const Gwn_ShaderInput *P = GWN_shaderinterface_uniform_builtin(shaderface, GWN_UNIFORM_PROJECTION); + const Gwn_ShaderInput *MVP = GWN_shaderinterface_uniform_builtin(shaderface, GWN_UNIFORM_MVP); - const ShaderInput *N = ShaderInterface_builtin_uniform(shaderface, UNIFORM_NORMAL); - const ShaderInput *MV_inv = ShaderInterface_builtin_uniform(shaderface, UNIFORM_MODELVIEW_INV); - const ShaderInput *P_inv = ShaderInterface_builtin_uniform(shaderface, UNIFORM_PROJECTION_INV); + const Gwn_ShaderInput *N = GWN_shaderinterface_uniform_builtin(shaderface, GWN_UNIFORM_NORMAL); + const Gwn_ShaderInput *MV_inv = GWN_shaderinterface_uniform_builtin(shaderface, GWN_UNIFORM_MODELVIEW_INV); + const Gwn_ShaderInput *P_inv = GWN_shaderinterface_uniform_builtin(shaderface, GWN_UNIFORM_PROJECTION_INV); if (MV) { #if DEBUG_MATRIX_BIND diff --git a/source/blender/gpu/intern/gpu_shader.c b/source/blender/gpu/intern/gpu_shader.c index 24eb88d0114..f0a1c182713 100644 --- a/source/blender/gpu/intern/gpu_shader.c +++ b/source/blender/gpu/intern/gpu_shader.c @@ -426,24 +426,24 @@ GPUShader *GPU_shader_create_ex(const char *vertexcode, return NULL; } - shader->interface = ShaderInterface_create(shader->program); + shader->interface = GWN_shaderinterface_create(shader->program); #ifdef WITH_OPENSUBDIV /* TODO(sergey): Find a better place for this. 
*/ if (use_opensubdiv) { if (GLEW_VERSION_4_1) { glProgramUniform1i(shader->program, - ShaderInterface_uniform(shader->interface, "FVarDataOffsetBuffer")->location, + GWN_shaderinterface_uniform(shader->interface, "FVarDataOffsetBuffer")->location, 30); /* GL_TEXTURE30 */ glProgramUniform1i(shader->program, - ShaderInterface_uniform(shader->interface, "FVarDataBuffer")->location, + GWN_shaderinterface_uniform(shader->interface, "FVarDataBuffer")->location, 31); /* GL_TEXTURE31 */ } else { glUseProgram(shader->program); - glUniform1i(ShaderInterface_uniform(shader->interface, "FVarDataOffsetBuffer")->location, 30); - glUniform1i(ShaderInterface_uniform(shader->interface, "FVarDataBuffer")->location, 31); + glUniform1i(GWN_shaderinterface_uniform(shader->interface, "FVarDataOffsetBuffer")->location, 30); + glUniform1i(GWN_shaderinterface_uniform(shader->interface, "FVarDataBuffer")->location, 31); glUseProgram(0); } } @@ -482,7 +482,7 @@ void GPU_shader_free(GPUShader *shader) MEM_freeN(shader->uniform_interface); if (shader->interface) - ShaderInterface_discard(shader->interface); + GWN_shaderinterface_discard(shader->interface); MEM_freeN(shader); } @@ -490,7 +490,7 @@ void GPU_shader_free(GPUShader *shader) int GPU_shader_get_uniform(GPUShader *shader, const char *name) { BLI_assert(shader && shader->program); - const ShaderInput *uniform = ShaderInterface_uniform(shader->interface, name); + const Gwn_ShaderInput *uniform = GWN_shaderinterface_uniform(shader->interface, name); return uniform ? uniform->location : -1; } @@ -599,7 +599,7 @@ void GPU_shader_uniform_texture(GPUShader *UNUSED(shader), int location, GPUText int GPU_shader_get_attribute(GPUShader *shader, const char *name) { BLI_assert(shader && shader->program); - const ShaderInput *attrib = ShaderInterface_attrib(shader->interface, name); + const Gwn_ShaderInput *attrib = GWN_shaderinterface_attr(shader->interface, name); return attrib ? attrib->location : -1; } diff --git a/source/blender/gpu/intern/gpu_shader_private.h b/source/blender/gpu/intern/gpu_shader_private.h index c0c24187da5..2de05b5746b 100644 --- a/source/blender/gpu/intern/gpu_shader_private.h +++ b/source/blender/gpu/intern/gpu_shader_private.h @@ -38,7 +38,7 @@ struct GPUShader { void *uniform_interface; /* cached uniform interface for shader. 
Data depends on shader */ /* NOTE: ^-- only FX compositing shaders use this */ - ShaderInterface *interface; /* cached uniform & attrib interface for shader */ + Gwn_ShaderInterface *interface; /* cached uniform & attrib interface for shader */ }; #endif /* __GPU_SHADER_PRIVATE_H__ */ diff --git a/source/blender/gpu/intern/gpu_viewport.c b/source/blender/gpu/intern/gpu_viewport.c index 50d2f1e6b18..9bd990e9796 100644 --- a/source/blender/gpu/intern/gpu_viewport.c +++ b/source/blender/gpu/intern/gpu_viewport.c @@ -399,16 +399,16 @@ static void draw_ofs_to_screen(GPUViewport *viewport) const float w = (float)GPU_texture_width(color); const float h = (float)GPU_texture_height(color); - VertexFormat *format = immVertexFormat(); - unsigned int texcoord = VertexFormat_add_attrib(format, "texCoord", COMP_F32, 2, KEEP_FLOAT); - unsigned int pos = VertexFormat_add_attrib(format, "pos", COMP_F32, 2, KEEP_FLOAT); + Gwn_VertFormat *format = immVertexFormat(); + unsigned int texcoord = GWN_vertformat_attr_add(format, "texCoord", GWN_COMP_F32, 2, GWN_FETCH_FLOAT); + unsigned int pos = GWN_vertformat_attr_add(format, "pos", GWN_COMP_F32, 2, GWN_FETCH_FLOAT); immBindBuiltinProgram(GPU_SHADER_3D_IMAGE_MODULATE_ALPHA); GPU_texture_bind(color, 0); immUniform1i("image", 0); /* default GL_TEXTURE0 unit */ - immBegin(PRIM_TRIANGLE_STRIP, 4); + immBegin(GWN_PRIM_TRI_STRIP, 4); immAttrib2f(texcoord, 0.0f, 0.0f); immVertex2f(pos, 0.0f, 0.0f); @@ -535,9 +535,9 @@ void GPU_viewport_debug_depth_draw(GPUViewport *viewport, const float znear, con const float w = (float)GPU_texture_width(viewport->debug_depth); const float h = (float)GPU_texture_height(viewport->debug_depth); - VertexFormat *format = immVertexFormat(); - unsigned int texcoord = VertexFormat_add_attrib(format, "texCoord", COMP_F32, 2, KEEP_FLOAT); - unsigned int pos = VertexFormat_add_attrib(format, "pos", COMP_F32, 2, KEEP_FLOAT); + Gwn_VertFormat *format = immVertexFormat(); + unsigned int texcoord = GWN_vertformat_attr_add(format, "texCoord", GWN_COMP_F32, 2, GWN_FETCH_FLOAT); + unsigned int pos = GWN_vertformat_attr_add(format, "pos", GWN_COMP_F32, 2, GWN_FETCH_FLOAT); immBindBuiltinProgram(GPU_SHADER_3D_IMAGE_DEPTH); @@ -547,7 +547,7 @@ void GPU_viewport_debug_depth_draw(GPUViewport *viewport, const float znear, con immUniform1f("zfar", zfar); immUniform1i("image", 0); /* default GL_TEXTURE0 unit */ - immBegin(PRIM_TRIANGLE_STRIP, 4); + immBegin(GWN_PRIM_TRI_STRIP, 4); immAttrib2f(texcoord, 0.0f, 0.0f); immVertex2f(pos, 0.0f, 0.0f); -- cgit v1.2.3
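
Editor's note: for readers skimming the rename rather than reading every hunk, the sketch below condenses the old-to-new mapping into one place. It is a minimal illustration assembled only from calls that actually appear in the hunks above (GWN_vertformat_attr_add, GWN_vertbuf_create_with_format, GWN_vertbuf_data_alloc, GWN_vertbuf_attr_set, GWN_batch_create, and friends); the helper function name and the triangle data are invented for the example, and the "GPU_batch.h" include is assumed to pull in the Gwn_* types the same way gpu_batch.c does in this patch. It is not part of the commit.

/* Minimal sketch of the renamed Gawain API, assuming the calls keep the
 * signatures shown in the diff above. Old names in comments for contrast. */

#include "GPU_batch.h"  /* assumed to declare the Gwn_* types and GWN_* calls used below */

static Gwn_Batch *example_triangle_batch(void)
{
	/* VertexFormat_add_attrib() -> GWN_vertformat_attr_add(),
	 * COMP_F32 -> GWN_COMP_F32, KEEP_FLOAT -> GWN_FETCH_FLOAT */
	static Gwn_VertFormat format = {0};
	static unsigned int pos_id;
	if (format.attrib_ct == 0) {
		pos_id = GWN_vertformat_attr_add(&format, "pos", GWN_COMP_F32, 3, GWN_FETCH_FLOAT);
	}

	/* VertexBuffer_create_with_format() -> GWN_vertbuf_create_with_format(),
	 * VertexBuffer_allocate_data()      -> GWN_vertbuf_data_alloc(),
	 * VertexBuffer_set_attrib()         -> GWN_vertbuf_attr_set() */
	Gwn_VertBuf *vbo = GWN_vertbuf_create_with_format(&format);
	GWN_vertbuf_data_alloc(vbo, 3);

	const float verts[3][3] = {
		{0.0f, 0.0f, 0.0f}, {1.0f, 0.0f, 0.0f}, {0.0f, 1.0f, 0.0f},
	};
	for (unsigned int i = 0; i < 3; i++) {
		GWN_vertbuf_attr_set(vbo, pos_id, i, verts[i]);
	}

	/* Batch_create(PRIM_TRIANGLES, ...) -> GWN_batch_create(GWN_PRIM_TRIS, ...);
	 * the third argument is an optional Gwn_IndexBuf, NULL here. */
	return GWN_batch_create(GWN_PRIM_TRIS, vbo, NULL);
}

/* Drawing and cleanup follow the same prefix pattern in this patch:
 *   Batch_set_program()  -> GWN_batch_program_set()
 *   Batch_draw()         -> GWN_batch_draw()
 *   Batch_discard_all()  -> GWN_batch_discard_all()
 *   ElementListBuilder / ElementList_build() -> Gwn_IndexBufBuilder,
 *     GWN_indexbuf_init() / GWN_indexbuf_build()
 * while Blender-level wrappers such as Batch_set_builtin_program() keep
 * their old names in this commit. */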