git.blender.org/blender.git

Diffstat (limited to 'source/blender/gpu/intern')
-rw-r--r--  source/blender/gpu/intern/gpu_codegen.c               2
-rw-r--r--  source/blender/gpu/intern/gpu_debug.c                 6
-rw-r--r--  source/blender/gpu/intern/gpu_select.c              272
-rw-r--r--  source/blender/gpu/intern/gpu_select_pick.c         744
-rw-r--r--  source/blender/gpu/intern/gpu_select_private.h       53
-rw-r--r--  source/blender/gpu/intern/gpu_select_sample_query.c 209
6 files changed, 1129 insertions, 157 deletions
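
Before the per-file diffs, a note on how this API is driven: callers bracket their draw-calls with GPU_select_begin/GPU_select_end and tag each object with GPU_select_load_id. A minimal caller-side sketch (assuming a valid GL context; object_count and draw_object_cb are hypothetical stand-ins, not part of this patch):

#include "GPU_select.h"
#include "BLI_rect.h"

#define MAX_HITS 64

extern void draw_object_cb(unsigned int id);  /* hypothetical: issues the GL draw-calls for one object */

static unsigned int select_nearest_id(const rcti *rect, unsigned int object_count)
{
	/* each hit is 4 uints: (name-stack depth, z-min, z-max, id) */
	unsigned int buffer[MAX_HITS * 4];
	GPU_select_begin(buffer, MAX_HITS * 4, rect, GPU_SELECT_PICK_NEAREST, 0);
	for (unsigned int id = 0; id < object_count; id++) {
		GPU_select_load_id(id);  /* tag the following draws with this id */
		draw_object_cb(id);
	}
	const unsigned int hits = GPU_select_end();
	/* the PICK modes return the hit-list sorted nearest-first (see gpu_select_pick.c) */
	return (hits != 0 && hits != (unsigned int)-1) ? buffer[3] : 0xffffffff;
}
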
diff --git a/source/blender/gpu/intern/gpu_codegen.c b/source/blender/gpu/intern/gpu_codegen.c
index 211394e7932..c3896fbd659 100644
--- a/source/blender/gpu/intern/gpu_codegen.c
+++ b/source/blender/gpu/intern/gpu_codegen.c
@@ -360,7 +360,7 @@ static void codegen_print_datatype(DynStr *ds, const GPUType type, float *data)
BLI_dynstr_appendf(ds, "%s(", GPU_DATATYPE_STR[type]);
for (i = 0; i < type; i++) {
- BLI_dynstr_appendf(ds, "%f", data[i]);
+ BLI_dynstr_appendf(ds, "%.12f", data[i]);
if (i == type - 1)
BLI_dynstr_append(ds, ")");
else
diff --git a/source/blender/gpu/intern/gpu_debug.c b/source/blender/gpu/intern/gpu_debug.c
index d632e767ca9..ba68d1a6a0f 100644
--- a/source/blender/gpu/intern/gpu_debug.c
+++ b/source/blender/gpu/intern/gpu_debug.c
@@ -161,7 +161,7 @@ const char *gpuErrorString(GLenum err)
#endif
-static const char* source_name(GLenum source)
+static const char *source_name(GLenum source)
{
switch (source) {
case GL_DEBUG_SOURCE_API: return "API";
@@ -174,7 +174,7 @@ static const char* source_name(GLenum source)
}
}
-static const char* message_type_name(GLenum message)
+static const char *message_type_name(GLenum message)
{
switch (message) {
case GL_DEBUG_TYPE_ERROR: return "error";
@@ -188,7 +188,7 @@ static const char* message_type_name(GLenum message)
}
}
-static const char* category_name_amd(GLenum category)
+static const char *category_name_amd(GLenum category)
{
switch (category) {
case GL_DEBUG_CATEGORY_API_ERROR_AMD: return "API error";
diff --git a/source/blender/gpu/intern/gpu_select.c b/source/blender/gpu/intern/gpu_select.c
index 58582232cd5..9496ff137dc 100644
--- a/source/blender/gpu/intern/gpu_select.c
+++ b/source/blender/gpu/intern/gpu_select.c
@@ -29,109 +29,86 @@
* Interface for accessing gpu-related methods for selection. The semantics will be
* similar to glRenderMode(GL_SELECT) since the goal is to maintain compatibility.
*/
+#include <stdlib.h>
+
#include "GPU_select.h"
#include "GPU_extensions.h"
#include "GPU_glew.h"
-
+
#include "MEM_guardedalloc.h"
#include "DNA_userdef_types.h"
#include "BLI_utildefines.h"
-/* Ad hoc number of queries to allocate to skip doing many glGenQueries */
-#define ALLOC_QUERIES 200
-
-typedef struct GPUQueryState {
+#include "gpu_select_private.h"
+
+/* Internal algorithm used */
+enum {
+ /** GL_SELECT, legacy OpenGL selection */
+ ALGO_GL_LEGACY = 1,
+ /** glBegin/glEndQuery(GL_SAMPLES_PASSED), `gpu_select_sample_query.c`
+ * Only sets 4th component (ID) correctly. */
+ ALGO_GL_QUERY = 2,
+ /** Read depth buffer for every drawing pass and extract depths, `gpu_select_pick.c`
+ * Only sets 4th component (ID) correctly. */
+ ALGO_GL_PICK = 3,
+};
+
+typedef struct GPUSelectState {
/* To ignore selection id calls when not initialized */
bool select_is_active;
- /* Tracks whether a query has been issued so that gpu_load_id can end the previous one */
- bool query_issued;
- /* array holding the OpenGL query identifiers */
- unsigned int *queries;
- /* array holding the id corresponding to each query */
- unsigned int *id;
- /* number of queries in *queries and *id */
- unsigned int num_of_queries;
- /* index to the next query to start */
- unsigned int active_query;
/* flag to cache user preference for occlusion based selection */
bool use_gpu_select;
- /* cache on initialization */
- unsigned int *buffer;
- /* buffer size (stores number of integers, for actual size multiply by sizeof integer)*/
- unsigned int bufsize;
/* mode of operation */
char mode;
- unsigned int index;
- int oldhits;
-} GPUQueryState;
+ /* internal algorithm for selection */
+ char algorithm;
+ /* allow GPU_select_begin/end without drawing */
+ bool use_cache;
+} GPUSelectState;
-static GPUQueryState g_query_state = {0};
+static GPUSelectState g_select_state = {0};
/**
* initialize and provide buffer for results
*/
-void GPU_select_begin(unsigned int *buffer, unsigned int bufsize, rctf *input, char mode, int oldhits)
+void GPU_select_begin(unsigned int *buffer, unsigned int bufsize, const rcti *input, char mode, int oldhits)
{
- g_query_state.select_is_active = true;
- g_query_state.query_issued = false;
- g_query_state.active_query = 0;
- g_query_state.use_gpu_select = GPU_select_query_check_active();
- g_query_state.num_of_queries = 0;
- g_query_state.bufsize = bufsize;
- g_query_state.buffer = buffer;
- g_query_state.mode = mode;
- g_query_state.index = 0;
- g_query_state.oldhits = oldhits;
+ g_select_state.select_is_active = true;
+ g_select_state.use_gpu_select = GPU_select_query_check_active();
+ g_select_state.mode = mode;
- if (!g_query_state.use_gpu_select) {
- glSelectBuffer(bufsize, (GLuint *)buffer);
- glRenderMode(GL_SELECT);
- glInitNames();
- glPushName(-1);
+ if (ELEM(g_select_state.mode, GPU_SELECT_PICK_ALL, GPU_SELECT_PICK_NEAREST)) {
+ g_select_state.algorithm = ALGO_GL_PICK;
+ }
+ else if (!g_select_state.use_gpu_select) {
+ g_select_state.algorithm = ALGO_GL_LEGACY;
}
else {
- float viewport[4];
-
- g_query_state.num_of_queries = ALLOC_QUERIES;
-
- g_query_state.queries = MEM_mallocN(g_query_state.num_of_queries * sizeof(*g_query_state.queries), "gpu selection queries");
- g_query_state.id = MEM_mallocN(g_query_state.num_of_queries * sizeof(*g_query_state.id), "gpu selection ids");
- glGenQueries(g_query_state.num_of_queries, g_query_state.queries);
-
- glPushAttrib(GL_DEPTH_BUFFER_BIT | GL_VIEWPORT_BIT);
- /* disable writing to the framebuffer */
- glColorMask(GL_FALSE, GL_FALSE, GL_FALSE, GL_FALSE);
-
- /* In order to save some fill rate we minimize the viewport using rect.
- * We need to get the region of the scissor so that our geometry doesn't
- * get rejected before the depth test. Should probably cull rect against
- * scissor for viewport but this is a rare case I think */
- glGetFloatv(GL_SCISSOR_BOX, viewport);
- if (!input || input->xmin == input->xmax) {
- glViewport(viewport[0], viewport[1], 24, 24);
- }
- else {
- glViewport(viewport[0], viewport[1], (int)(input->xmax - input->xmin), (int)(input->ymax - input->ymin));
- }
+ g_select_state.algorithm = ALGO_GL_QUERY;
+ }
- /* occlusion queries operates on fragments that pass tests and since we are interested on all
- * objects in the view frustum independently of their order, we need to disable the depth test */
- if (mode == GPU_SELECT_ALL) {
- glDisable(GL_DEPTH_TEST);
- glDepthMask(GL_FALSE);
+ switch (g_select_state.algorithm) {
+ case ALGO_GL_LEGACY:
+ {
+ g_select_state.use_cache = false;
+ glSelectBuffer(bufsize, (GLuint *)buffer);
+ glRenderMode(GL_SELECT);
+ glInitNames();
+ glPushName(-1);
+ break;
}
- else if (mode == GPU_SELECT_NEAREST_FIRST_PASS) {
- glClear(GL_DEPTH_BUFFER_BIT);
- glEnable(GL_DEPTH_TEST);
- glDepthMask(GL_TRUE);
- glDepthFunc(GL_LEQUAL);
+ case ALGO_GL_QUERY:
+ {
+ g_select_state.use_cache = false;
+ gpu_select_query_begin((unsigned int (*)[4])buffer, bufsize / 4, input, mode, oldhits);
+ break;
}
- else if (mode == GPU_SELECT_NEAREST_SECOND_PASS) {
- glEnable(GL_DEPTH_TEST);
- glDepthMask(GL_FALSE);
- glDepthFunc(GL_EQUAL);
+ default: /* ALGO_GL_PICK */
+ {
+ gpu_select_pick_begin((unsigned int (*)[4])buffer, bufsize / 4, input, mode);
+ break;
}
}
}
@@ -146,41 +123,24 @@ void GPU_select_begin(unsigned int *buffer, unsigned int bufsize, rctf *input, c
bool GPU_select_load_id(unsigned int id)
{
/* if no selection mode active, ignore */
- if (!g_query_state.select_is_active)
+ if (!g_select_state.select_is_active)
return true;
- if (!g_query_state.use_gpu_select) {
- glLoadName(id);
- }
- else {
- if (g_query_state.query_issued) {
- glEndQuery(GL_SAMPLES_PASSED);
+ switch (g_select_state.algorithm) {
+ case ALGO_GL_LEGACY:
+ {
+ glLoadName(id);
+ return true;
}
- /* if required, allocate extra queries */
- if (g_query_state.active_query == g_query_state.num_of_queries) {
- g_query_state.num_of_queries += ALLOC_QUERIES;
- g_query_state.queries = MEM_reallocN(g_query_state.queries, g_query_state.num_of_queries * sizeof(*g_query_state.queries));
- g_query_state.id = MEM_reallocN(g_query_state.id, g_query_state.num_of_queries * sizeof(*g_query_state.id));
- glGenQueries(ALLOC_QUERIES, &g_query_state.queries[g_query_state.active_query]);
+ case ALGO_GL_QUERY:
+ {
+ return gpu_select_query_load_id(id);
}
-
- glBeginQuery(GL_SAMPLES_PASSED, g_query_state.queries[g_query_state.active_query]);
- g_query_state.id[g_query_state.active_query] = id;
- g_query_state.active_query++;
- g_query_state.query_issued = true;
-
- if (g_query_state.mode == GPU_SELECT_NEAREST_SECOND_PASS && g_query_state.index < g_query_state.oldhits) {
- if (g_query_state.buffer[g_query_state.index * 4 + 3] == id) {
- g_query_state.index++;
- return true;
- }
- else {
- return false;
- }
+ default: /* ALGO_GL_PICK */
+ {
+ return gpu_select_pick_load_id(id);
}
}
-
- return true;
}
/**
@@ -191,59 +151,27 @@ bool GPU_select_load_id(unsigned int id)
unsigned int GPU_select_end(void)
{
unsigned int hits = 0;
- if (!g_query_state.use_gpu_select) {
- glPopName();
- hits = glRenderMode(GL_RENDER);
- }
- else {
- int i;
- if (g_query_state.query_issued) {
- glEndQuery(GL_SAMPLES_PASSED);
+ switch (g_select_state.algorithm) {
+ case ALGO_GL_LEGACY:
+ {
+ glPopName();
+ hits = glRenderMode(GL_RENDER);
+ break;
}
-
- for (i = 0; i < g_query_state.active_query; i++) {
- unsigned int result;
- glGetQueryObjectuiv(g_query_state.queries[i], GL_QUERY_RESULT, &result);
- if (result > 0) {
- if (g_query_state.mode != GPU_SELECT_NEAREST_SECOND_PASS) {
- int maxhits = g_query_state.bufsize / 4;
-
- if (hits < maxhits) {
- g_query_state.buffer[hits * 4] = 1;
- g_query_state.buffer[hits * 4 + 1] = 0xFFFF;
- g_query_state.buffer[hits * 4 + 2] = 0xFFFF;
- g_query_state.buffer[hits * 4 + 3] = g_query_state.id[i];
-
- hits++;
- }
- else {
- hits = -1;
- break;
- }
- }
- else {
- int j;
- /* search in buffer and make selected object first */
- for (j = 0; j < g_query_state.oldhits; j++) {
- if (g_query_state.buffer[j * 4 + 3] == g_query_state.id[i]) {
- g_query_state.buffer[j * 4 + 1] = 0;
- g_query_state.buffer[j * 4 + 2] = 0;
- }
- }
- break;
- }
- }
+ case ALGO_GL_QUERY:
+ {
+ hits = gpu_select_query_end();
+ break;
+ }
+ default: /* ALGO_GL_PICK */
+ {
+ hits = gpu_select_pick_end();
+ break;
}
-
- glDeleteQueries(g_query_state.num_of_queries, g_query_state.queries);
- MEM_freeN(g_query_state.queries);
- MEM_freeN(g_query_state.id);
- glPopAttrib();
- glColorMask(GL_TRUE, GL_TRUE, GL_TRUE, GL_TRUE);
}
- g_query_state.select_is_active = false;
+ g_select_state.select_is_active = false;
return hits;
}
@@ -260,3 +188,41 @@ bool GPU_select_query_check_active(void)
GPU_type_matches(GPU_DEVICE_NVIDIA, GPU_OS_UNIX, GPU_DRIVER_OPENSOURCE))));
}
+
+/* ----------------------------------------------------------------------------
+ * Caching
+ *
+ * Support multiple begin/end's as long as they are within the initial region.
+ * Currently only used by ALGO_GL_PICK.
+ */
+
+void GPU_select_cache_begin(void)
+{
+ /* validate on GPU_select_begin, clear if not supported */
+ BLI_assert(g_select_state.use_cache == false);
+ g_select_state.use_cache = true;
+ if (g_select_state.algorithm == ALGO_GL_PICK) {
+ gpu_select_pick_cache_begin();
+ }
+}
+
+void GPU_select_cache_load_id(void)
+{
+ BLI_assert(g_select_state.use_cache == true);
+ if (g_select_state.algorithm == ALGO_GL_PICK) {
+ gpu_select_pick_cache_load_id();
+ }
+}
+
+void GPU_select_cache_end(void)
+{
+ if (g_select_state.algorithm == ALGO_GL_PICK) {
+ gpu_select_pick_cache_end();
+ }
+ g_select_state.use_cache = false;
+}
+
+bool GPU_select_is_cached(void)
+{
+ return g_select_state.use_cache && gpu_select_pick_is_cached();
+}
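
The caching block above supports several selection runs against sub-rects of one region without re-drawing the scene each time. A sketch of the intended call pattern (draw_scene() is an assumed callback; only the GPU_select_* calls come from this patch):

#include "GPU_select.h"
#include "BLI_rect.h"

extern void draw_scene(void);  /* assumed: draws all objects, calling GPU_select_load_id() per object */

void select_twice(unsigned int *buffer, unsigned int bufsize, const rcti *region_rect, const rcti *sub_rect)
{
	unsigned int hits;

	GPU_select_cache_begin();

	/* the first run draws normally; ALGO_GL_PICK stores each id's depth pass in the cache */
	GPU_select_begin(buffer, bufsize, region_rect, GPU_SELECT_PICK_NEAREST, 0);
	draw_scene();
	hits = GPU_select_end();

	/* later runs inside region_rect can replay the cached depth passes instead of drawing */
	GPU_select_begin(buffer, bufsize, sub_rect, GPU_SELECT_PICK_NEAREST, 0);
	if (GPU_select_is_cached()) {
		GPU_select_cache_load_id();
	}
	else {
		draw_scene();
	}
	hits = GPU_select_end();
	(void)hits;

	GPU_select_cache_end();
}
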
diff --git a/source/blender/gpu/intern/gpu_select_pick.c b/source/blender/gpu/intern/gpu_select_pick.c
new file mode 100644
index 00000000000..0a77420fa25
--- /dev/null
+++ b/source/blender/gpu/intern/gpu_select_pick.c
@@ -0,0 +1,744 @@
+/*
+ * ***** BEGIN GPL LICENSE BLOCK *****
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * The Original Code is Copyright (C) 2017 Blender Foundation.
+ * All rights reserved.
+ *
+ * ***** END GPL LICENSE BLOCK *****
+ */
+
+/** \file blender/gpu/intern/gpu_select_pick.c
+ * \ingroup gpu
+ *
+ * Custom select code for picking small regions (not efficient for large regions).
+ * `gpu_select_pick_*` API.
+ */
+#include <string.h>
+#include <stdlib.h>
+#include <float.h>
+
+#include "GPU_select.h"
+#include "GPU_extensions.h"
+#include "GPU_glew.h"
+
+#include "MEM_guardedalloc.h"
+
+#include "BLI_rect.h"
+#include "BLI_listbase.h"
+#include "BLI_math_vector.h"
+#include "BLI_utildefines.h"
+
+#include "gpu_select_private.h"
+
+#include "BLI_strict_flags.h"
+
+/* #define DEBUG_PRINT */
+
+/* Alloc number for depths */
+#define ALLOC_DEPTHS 200
+
+/* Z-depth of cleared depth buffer */
+#define DEPTH_MAX 0xffffffff
+
+/* ----------------------------------------------------------------------------
+ * SubRectStride
+ */
+
+/* For looping over a sub-region of a rect, could be moved into 'rct.c' */
+typedef struct SubRectStride {
+ unsigned int start; /* start here */
+ unsigned int span; /* read these */
+ unsigned int span_len; /* len times (read span 'len' times). */
+ unsigned int skip; /* skip those */
+} SubRectStride;
+
+/* we may want to change back to float if uint isn't well supported */
+typedef unsigned int depth_t;
+
+/**
+ * Calculate values needed for looping over a sub-region (smaller buffer within a larger buffer).
+ *
+ * 'src' must be bigger than 'dst'.
+ */
+static void rect_subregion_stride_calc(const rcti *src, const rcti *dst, SubRectStride *r_sub)
+{
+ const int src_x = BLI_rcti_size_x(src);
+ // const int src_y = BLI_rcti_size_y(src);
+ const int dst_x = BLI_rcti_size_x(dst);
+ const int dst_y = BLI_rcti_size_y(dst);
+ const int x = dst->xmin - src->xmin;
+ const int y = dst->ymin - src->ymin;
+
+ BLI_assert(src->xmin <= dst->xmin && src->ymin <= dst->ymin &&
+ src->xmax >= dst->xmax && src->ymax >= dst->ymax);
+ BLI_assert(x >= 0 && y >= 0);
+
+ r_sub->start = (unsigned int)((src_x * y) + x);
+ r_sub->span = (unsigned int)dst_x;
+ r_sub->span_len = (unsigned int)dst_y;
+ r_sub->skip = (unsigned int)(src_x - dst_x);
+}
+
+/**
+ * Ignore depth clearing as a change,
+ * only check if it's been changed _and_ filled in (ignore clearing, since XRAY does this).
+ */
+BLI_INLINE bool depth_is_filled(const depth_t *prev, const depth_t *curr)
+{
+ return (*prev != *curr) && (*curr != DEPTH_MAX);
+}
+
+/* ----------------------------------------------------------------------------
+ * DepthBufCache
+ *
+ * Result of reading glReadPixels,
+ * used for both cached and non-cached storage.
+ */
+
+/* store result of glReadPixels */
+typedef struct DepthBufCache {
+ struct DepthBufCache *next, *prev;
+ unsigned int id;
+ depth_t buf[0];
+} DepthBufCache;
+
+static DepthBufCache *depth_buf_malloc(unsigned int rect_len)
+{
+ DepthBufCache *rect = MEM_mallocN(sizeof(DepthBufCache) + sizeof(depth_t) * rect_len, __func__);
+ rect->id = SELECT_ID_NONE;
+ return rect;
+}
+
+static bool depth_buf_rect_depth_any(
+ const DepthBufCache *rect_depth,
+ unsigned int rect_len)
+{
+ const depth_t *curr = rect_depth->buf;
+ for (unsigned int i = 0; i < rect_len; i++, curr++) {
+ if (*curr != DEPTH_MAX) {
+ return true;
+ }
+ }
+ return false;
+}
+
+static bool depth_buf_subrect_depth_any(
+ const DepthBufCache *rect_depth,
+ const SubRectStride *sub_rect)
+{
+ const depth_t *curr = rect_depth->buf + sub_rect->start;
+ for (unsigned int i = 0; i < sub_rect->span_len; i++) {
+ const depth_t *curr_end = curr + sub_rect->span;
+ for (; curr < curr_end; curr++) {
+ if (*curr != DEPTH_MAX) {
+ return true;
+ }
+ }
+ curr += sub_rect->skip;
+ }
+ return false;
+}
+
+static bool depth_buf_rect_depth_any_filled(
+ const DepthBufCache *rect_prev, const DepthBufCache *rect_curr,
+ unsigned int rect_len)
+{
+#if 0
+ return memcmp(rect_depth_a->buf, rect_depth_b->buf, rect_len * sizeof(depth_t)) != 0;
+#else
+ const depth_t *prev = rect_prev->buf;
+ const depth_t *curr = rect_curr->buf;
+ for (unsigned int i = 0; i < rect_len; i++, curr++, prev++) {
+ if (depth_is_filled(prev, curr)) {
+ return true;
+ }
+ }
+ return false;
+#endif
+}
+
+/**
+ * Both buffers are the same size, just check if the sub-rect contains any differences.
+ */
+static bool depth_buf_subrect_depth_any_filled(
+ const DepthBufCache *rect_src, const DepthBufCache *rect_dst,
+ const SubRectStride *sub_rect)
+{
+ /* same as above but different rect sizes */
+ const depth_t *prev = rect_src->buf + sub_rect->start;
+ const depth_t *curr = rect_dst->buf + sub_rect->start;
+ for (unsigned int i = 0; i < sub_rect->span_len; i++) {
+ const depth_t *curr_end = curr + sub_rect->span;
+ for (; curr < curr_end; prev++, curr++) {
+ if (depth_is_filled(prev, curr)) {
+ return true;
+ }
+ }
+ prev += sub_rect->skip;
+ curr += sub_rect->skip;
+ }
+ return false;
+}
+
+/* ----------------------------------------------------------------------------
+ * DepthID
+ *
+ * Internal structure for storing hits.
+ */
+
+typedef struct DepthID {
+ unsigned int id;
+ depth_t depth;
+} DepthID;
+
+static int depth_id_cmp(const void *v1, const void *v2)
+{
+ const DepthID *d1 = v1, *d2 = v2;
+ if (d1->id < d2->id) {
+ return -1;
+ }
+ else if (d1->id > d2->id) {
+ return 1;
+ }
+ else {
+ return 0;
+ }
+}
+
+static int depth_cmp(const void *v1, const void *v2)
+{
+ const DepthID *d1 = v1, *d2 = v2;
+ if (d1->depth < d2->depth) {
+ return -1;
+ }
+ else if (d1->depth > d2->depth) {
+ return 1;
+ }
+ else {
+ return 0;
+ }
+}
+
+/* depth sorting */
+typedef struct GPUPickState {
+ /* cache on initialization */
+ unsigned int (*buffer)[4];
+
+ /* maximum number of hits this buffer can hold (each hit is 4 uints) */
+ unsigned int bufsize;
+ /* mode of operation */
+ char mode;
+
+ /* OpenGL drawing, never use when (is_cached == true). */
+ struct {
+ /* The current depth, accumulated as we draw */
+ DepthBufCache *rect_depth;
+ /* Scratch buffer, avoid allocs every time (when not caching) */
+ DepthBufCache *rect_depth_test;
+
+ /* Pass to glReadPixels (x, y, w, h) */
+ int clip_readpixels[4];
+
+ /* Set after first draw */
+ bool is_init;
+ unsigned int prev_id;
+ } gl;
+
+ /* src: data stored in 'cache' and 'gl',
+ * dst: use when cached region is smaller (where src -> dst isn't 1:1) */
+ struct {
+ rcti clip_rect;
+ unsigned int rect_len;
+ } src, dst;
+
+ /* Store cache between `GPU_select_cache_begin/end` */
+ bool use_cache;
+ bool is_cached;
+ struct {
+ /* Stride data used for iterating over both source and destination buffers:
+ * src.clip_rect -> dst.clip_rect */
+ SubRectStride sub_rect;
+
+ /* List of DepthBufCache, sized to 'src.clip_rect' */
+ ListBase bufs;
+ } cache;
+
+ /* Picking methods */
+ union {
+ /* GPU_SELECT_PICK_ALL */
+ struct {
+ DepthID *hits;
+ unsigned int hits_len;
+ unsigned int hits_len_alloc;
+ } all;
+
+ /* GPU_SELECT_PICK_NEAREST */
+ struct {
+ unsigned int *rect_id;
+ } nearest;
+ };
+} GPUPickState;
+
+
+static GPUPickState g_pick_state = {0};
+
+void gpu_select_pick_begin(
+ unsigned int (*buffer)[4], unsigned int bufsize,
+ const rcti *input, char mode)
+{
+ GPUPickState *ps = &g_pick_state;
+
+#ifdef DEBUG_PRINT
+ printf("%s: mode=%d, use_cache=%d, is_cache=%d\n", __func__, mode, ps->use_cache, ps->is_cached);
+#endif
+
+ ps->bufsize = bufsize;
+ ps->buffer = buffer;
+ ps->mode = mode;
+
+ const unsigned int rect_len = (unsigned int)(BLI_rcti_size_x(input) * BLI_rcti_size_y(input));
+ ps->dst.clip_rect = *input;
+ ps->dst.rect_len = rect_len;
+
+ /* Restrict OpenGL operations for when we don't have cache */
+ if (ps->is_cached == false) {
+
+ glPushAttrib(GL_DEPTH_BUFFER_BIT | GL_VIEWPORT_BIT);
+ /* disable writing to the framebuffer */
+ glColorMask(GL_FALSE, GL_FALSE, GL_FALSE, GL_FALSE);
+
+ glEnable(GL_DEPTH_TEST);
+ glDepthMask(GL_TRUE);
+
+ if (mode == GPU_SELECT_PICK_ALL) {
+ glDepthFunc(GL_ALWAYS);
+ }
+ else {
+ glDepthFunc(GL_LEQUAL);
+ }
+
+ /* set just in case */
+ glPixelTransferf(GL_DEPTH_BIAS, 0.0);
+ glPixelTransferf(GL_DEPTH_SCALE, 1.0);
+
+ float viewport[4];
+ glGetFloatv(GL_SCISSOR_BOX, viewport);
+
+ ps->src.clip_rect = *input;
+ ps->src.rect_len = rect_len;
+
+ ps->gl.clip_readpixels[0] = (int)viewport[0];
+ ps->gl.clip_readpixels[1] = (int)viewport[1];
+ ps->gl.clip_readpixels[2] = BLI_rcti_size_x(&ps->src.clip_rect);
+ ps->gl.clip_readpixels[3] = BLI_rcti_size_y(&ps->src.clip_rect);
+
+ glViewport(UNPACK4(ps->gl.clip_readpixels));
+
+ /* It's possible we don't want to clear depth buffer,
+ * so existing elements are masked by current z-buffer. */
+ glClear(GL_DEPTH_BUFFER_BIT);
+
+ /* scratch buffer (read new values here) */
+ ps->gl.rect_depth_test = depth_buf_malloc(rect_len);
+ ps->gl.rect_depth = depth_buf_malloc(rect_len);
+
+ /* set initial 'far' value */
+#if 0
+ glReadPixels(UNPACK4(ps->gl.clip_readpixels), GL_DEPTH_COMPONENT, GL_UNSIGNED_INT, ps->gl.rect_depth->buf);
+#else
+ for (unsigned int i = 0; i < rect_len; i++) {
+ ps->gl.rect_depth->buf[i] = DEPTH_MAX;
+ }
+#endif
+
+ ps->gl.is_init = false;
+ ps->gl.prev_id = 0;
+ }
+ else {
+ /* Using cache (ps->is_cached == true) */
+ /* src.clip_rect -> dst.clip_rect */
+ rect_subregion_stride_calc(&ps->src.clip_rect, &ps->dst.clip_rect, &ps->cache.sub_rect);
+ BLI_assert(ps->gl.rect_depth == NULL);
+ BLI_assert(ps->gl.rect_depth_test == NULL);
+ }
+
+ if (mode == GPU_SELECT_PICK_ALL) {
+ ps->all.hits = MEM_mallocN(sizeof(*ps->all.hits) * ALLOC_DEPTHS, __func__);
+ ps->all.hits_len = 0;
+ ps->all.hits_len_alloc = ALLOC_DEPTHS;
+ }
+ else {
+ /* Set to 0xff for SELECT_ID_NONE */
+ ps->nearest.rect_id = MEM_mallocN(sizeof(unsigned int) * ps->dst.rect_len, __func__);
+ memset(ps->nearest.rect_id, 0xff, sizeof(unsigned int) * ps->dst.rect_len);
+ }
+}
+
+/**
+ * Given two depth buffers known to differ, update the stored hit information.
+ * Used for both cached and uncached depth buffers.
+ */
+static void gpu_select_load_id_pass_all(const DepthBufCache *rect_curr)
+{
+ GPUPickState *ps = &g_pick_state;
+ const unsigned int id = rect_curr->id;
+ /* find the best depth for this pass and store in 'all.hits' */
+ depth_t depth_best = DEPTH_MAX;
+
+#define EVAL_TEST() \
+ if (depth_best > *curr) { \
+ depth_best = *curr; \
+ } ((void)0)
+
+ if (ps->is_cached == false) {
+ const depth_t *curr = rect_curr->buf;
+ BLI_assert(ps->src.rect_len == ps->dst.rect_len);
+ const unsigned int rect_len = ps->src.rect_len;
+ for (unsigned int i = 0; i < rect_len; i++, curr++) {
+ EVAL_TEST();
+ }
+ }
+ else {
+ /* same as above but different rect sizes */
+ const depth_t *curr = rect_curr->buf + ps->cache.sub_rect.start;
+ for (unsigned int i = 0; i < ps->cache.sub_rect.span_len; i++) {
+ const depth_t *curr_end = curr + ps->cache.sub_rect.span;
+ for (; curr < curr_end; curr++) {
+ EVAL_TEST();
+ }
+ curr += ps->cache.sub_rect.skip;
+ }
+ }
+
+#undef EVAL_TEST
+
+ /* ensure enough space */
+ if (UNLIKELY(ps->all.hits_len == ps->all.hits_len_alloc)) {
+ ps->all.hits_len_alloc += ALLOC_DEPTHS;
+ ps->all.hits = MEM_reallocN(ps->all.hits, ps->all.hits_len_alloc * sizeof(*ps->all.hits));
+ }
+ DepthID *d = &ps->all.hits[ps->all.hits_len++];
+ d->id = id;
+ d->depth = depth_best;
+}
+
+static void gpu_select_load_id_pass_nearest(const DepthBufCache *rect_prev, const DepthBufCache *rect_curr)
+{
+ GPUPickState *ps = &g_pick_state;
+ const unsigned int id = rect_curr->id;
+ /* keep track of each pixel's ID in 'nearest.rect_id' */
+ if (id != SELECT_ID_NONE) {
+ unsigned int *id_ptr = ps->nearest.rect_id;
+
+ /* Check against DEPTH_MAX because XRAY will clear the buffer,
+ * so previously set values will become unset.
+ * In this case just leave those IDs as-is. */
+#define EVAL_TEST() \
+ if (depth_is_filled(prev, curr)) { \
+ *id_ptr = id; \
+ } ((void)0)
+
+ if (ps->is_cached == false) {
+ const depth_t *prev = rect_prev->buf;
+ const depth_t *curr = rect_curr->buf;
+ BLI_assert(ps->src.rect_len == ps->dst.rect_len);
+ const unsigned int rect_len = ps->src.rect_len;
+ for (unsigned int i = 0; i < rect_len; i++, curr++, prev++, id_ptr++) {
+ EVAL_TEST();
+ }
+ }
+ else {
+ /* same as above but different rect sizes */
+ const depth_t *prev = rect_prev->buf + ps->cache.sub_rect.start;
+ const depth_t *curr = rect_curr->buf + ps->cache.sub_rect.start;
+ for (unsigned int i = 0; i < ps->cache.sub_rect.span_len; i++) {
+ const depth_t *curr_end = curr + ps->cache.sub_rect.span;
+ for (; curr < curr_end; prev++, curr++, id_ptr++) {
+ EVAL_TEST();
+ }
+ prev += ps->cache.sub_rect.skip;
+ curr += ps->cache.sub_rect.skip;
+ }
+ }
+
+#undef EVAL_TEST
+ }
+}
+
+
+bool gpu_select_pick_load_id(unsigned int id)
+{
+ GPUPickState *ps = &g_pick_state;
+ if (ps->gl.is_init) {
+ const unsigned int rect_len = ps->src.rect_len;
+ glReadPixels(UNPACK4(ps->gl.clip_readpixels), GL_DEPTH_COMPONENT, GL_UNSIGNED_INT, ps->gl.rect_depth_test->buf);
+ /* perform an initial check, since in most cases the array remains unchanged */
+
+ bool do_pass = false;
+ if (g_pick_state.mode == GPU_SELECT_PICK_ALL) {
+ if (depth_buf_rect_depth_any(ps->gl.rect_depth_test, rect_len)) {
+ ps->gl.rect_depth_test->id = ps->gl.prev_id;
+ gpu_select_load_id_pass_all(ps->gl.rect_depth_test);
+ do_pass = true;
+ }
+ }
+ else {
+ if (depth_buf_rect_depth_any_filled(ps->gl.rect_depth, ps->gl.rect_depth_test, rect_len)) {
+ ps->gl.rect_depth_test->id = ps->gl.prev_id;
+ gpu_select_load_id_pass_nearest(ps->gl.rect_depth, ps->gl.rect_depth_test);
+ do_pass = true;
+ }
+ }
+
+ if (do_pass) {
+ /* Store depth in cache */
+ if (ps->use_cache) {
+ BLI_addtail(&ps->cache.bufs, ps->gl.rect_depth);
+ ps->gl.rect_depth = depth_buf_malloc(ps->src.rect_len);
+ }
+
+ SWAP(DepthBufCache *, ps->gl.rect_depth, ps->gl.rect_depth_test);
+
+ if (g_pick_state.mode == GPU_SELECT_PICK_ALL) {
+ /* we want new depths every time */
+ glClear(GL_DEPTH_BUFFER_BIT);
+ }
+ }
+ }
+
+ ps->gl.is_init = true;
+ ps->gl.prev_id = id;
+
+ return true;
+}
+
+unsigned int gpu_select_pick_end(void)
+{
+ GPUPickState *ps = &g_pick_state;
+
+#ifdef DEBUG_PRINT
+ printf("%s\n", __func__);
+#endif
+
+ if (ps->is_cached == false) {
+ if (ps->gl.is_init) {
+ /* force finishing last pass */
+ gpu_select_pick_load_id(ps->gl.prev_id);
+ }
+
+ glPopAttrib();
+ glColorMask(GL_TRUE, GL_TRUE, GL_TRUE, GL_TRUE);
+ }
+
+ /* assign but never free directly since it may be in cache */
+ DepthBufCache *rect_depth_final;
+
+ /* Store depth in cache */
+ if (ps->use_cache && !ps->is_cached) {
+ BLI_addtail(&ps->cache.bufs, ps->gl.rect_depth);
+ ps->gl.rect_depth = NULL;
+ rect_depth_final = ps->cache.bufs.last;
+ }
+ else if (ps->is_cached) {
+ rect_depth_final = ps->cache.bufs.last;
+ }
+ else {
+ /* common case, no cache */
+ rect_depth_final = ps->gl.rect_depth;
+ }
+
+ unsigned int maxhits = g_pick_state.bufsize;
+ DepthID *depth_data;
+ unsigned int depth_data_len = 0;
+
+ if (g_pick_state.mode == GPU_SELECT_PICK_ALL) {
+ depth_data = ps->all.hits;
+ depth_data_len = ps->all.hits_len;
+ /* move ownership */
+ ps->all.hits = NULL;
+ ps->all.hits_len = 0;
+ ps->all.hits_len_alloc = 0;
+ }
+ else {
+ /* GPU_SELECT_PICK_NEAREST */
+
+ /* Over-allocate (it's unlikely we have as many depths as pixels) */
+ unsigned int depth_data_len_first_pass = 0;
+ depth_data = MEM_mallocN(ps->dst.rect_len * sizeof(*depth_data), __func__);
+
+ /* Partially de-duplicating copy,
+ * when contiguous IDs are found, update their closest depth.
+ * This isn't essential but means there is less data to sort. */
+
+#define EVAL_TEST(i_src, i_dst) \
+ { \
+ const unsigned int id = ps->nearest.rect_id[i_dst]; \
+ if (id != SELECT_ID_NONE) { \
+ const depth_t depth = rect_depth_final->buf[i_src]; \
+ if (depth_last == NULL || depth_last->id != id) { \
+ DepthID *d = &depth_data[depth_data_len_first_pass++]; \
+ d->id = id; \
+ d->depth = depth; \
+ } \
+ else if (depth_last->depth > depth) { \
+ depth_last->depth = depth; \
+ } \
+ } \
+ } ((void)0)
+
+ {
+ DepthID *depth_last = NULL;
+ if (ps->is_cached == false) {
+ for (unsigned int i = 0; i < ps->src.rect_len; i++) {
+ EVAL_TEST(i, i);
+ }
+ }
+ else {
+ /* same as above but different rect sizes */
+ unsigned int i_src = ps->cache.sub_rect.start, i_dst = 0;
+ for (unsigned int j = 0; j < ps->cache.sub_rect.span_len; j++) {
+ const unsigned int i_src_end = i_src + ps->cache.sub_rect.span;
+ for (; i_src < i_src_end; i_src++, i_dst++) {
+ EVAL_TEST(i_src, i_dst);
+ }
+ i_src += ps->cache.sub_rect.skip;
+ }
+ }
+ }
+
+#undef EVAL_TEST
+
+ qsort(depth_data, depth_data_len_first_pass, sizeof(DepthID), depth_id_cmp);
+
+ /* Sort by IDs then keep the best depth for each ID */
+ depth_data_len = 0;
+ {
+ DepthID *depth_last = NULL;
+ for (unsigned int i = 0; i < depth_data_len_first_pass; i++) {
+ if (depth_last == NULL || depth_last->id != depth_data[i].id) {
+ depth_last = &depth_data[depth_data_len++];
+ *depth_last = depth_data[i];
+ }
+ else if (depth_last->depth > depth_data[i].depth) {
+ depth_last->depth = depth_data[i].depth;
+ }
+ }
+ }
+ }
+
+ /* Finally sort each unique (id, depth) pair by depth
+ * so the final hit-list is sorted by depth (nearest first) */
+ unsigned int hits = 0;
+
+ if (depth_data_len > maxhits) {
+ hits = (unsigned int)-1;
+ }
+ else {
+ /* sort the hit-list by depth so the nearest hit comes first */
+ qsort(depth_data, depth_data_len, sizeof(DepthID), depth_cmp);
+
+ for (unsigned int i = 0; i < depth_data_len; i++) {
+#ifdef DEBUG_PRINT
+ printf(" hit: %u: depth %u\n", depth_data[i].id, depth_data[i].depth);
+#endif
+ /* first 3 are dummy values */
+ g_pick_state.buffer[hits][0] = 1;
+ g_pick_state.buffer[hits][1] = 0x0; /* depth_data[i].depth; */ /* unused */
+ g_pick_state.buffer[hits][2] = 0x0; /* z-far is currently never used. */
+ g_pick_state.buffer[hits][3] = depth_data[i].id;
+ hits++;
+ }
+ BLI_assert(hits <= maxhits);
+ }
+
+ MEM_freeN(depth_data);
+
+ MEM_SAFE_FREE(ps->gl.rect_depth);
+ MEM_SAFE_FREE(ps->gl.rect_depth_test);
+
+ if (g_pick_state.mode == GPU_SELECT_PICK_ALL) {
+ /* 'hits' already freed as 'depth_data' */
+ }
+ else {
+ MEM_freeN(ps->nearest.rect_id);
+ ps->nearest.rect_id = NULL;
+ }
+
+ if (ps->use_cache) {
+ ps->is_cached = true;
+ }
+
+ return hits;
+}
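
The reduction at the heart of gpu_select_pick_end above (sort hits by ID, keep the minimum depth per ID, then sort the unique pairs by depth) can be exercised standalone. A self-contained sketch restating DepthID and the two comparators, with made-up sample data:

#include <stdio.h>
#include <stdlib.h>

typedef struct DepthID { unsigned int id; unsigned int depth; } DepthID;

static int depth_id_cmp(const void *v1, const void *v2)
{
	const DepthID *d1 = v1, *d2 = v2;
	return (d1->id > d2->id) - (d1->id < d2->id);
}

static int depth_cmp(const void *v1, const void *v2)
{
	const DepthID *d1 = v1, *d2 = v2;
	return (d1->depth > d2->depth) - (d1->depth < d2->depth);
}

int main(void)
{
	/* two objects seen in multiple passes: id 7 at depths 50 and 20, id 3 at 10 and 40 */
	DepthID hits[] = {{7, 50}, {3, 10}, {7, 20}, {3, 40}};
	unsigned int n = 4, n_unique = 0;

	qsort(hits, n, sizeof(DepthID), depth_id_cmp);

	/* collapse duplicate IDs, keeping the nearest (smallest) depth */
	for (unsigned int i = 0; i < n; i++) {
		if (n_unique == 0 || hits[n_unique - 1].id != hits[i].id) {
			hits[n_unique++] = hits[i];
		}
		else if (hits[n_unique - 1].depth > hits[i].depth) {
			hits[n_unique - 1].depth = hits[i].depth;
		}
	}

	qsort(hits, n_unique, sizeof(DepthID), depth_cmp);

	for (unsigned int i = 0; i < n_unique; i++) {
		printf("id=%u depth=%u\n", hits[i].id, hits[i].depth);  /* id=3 depth=10, then id=7 depth=20 */
	}
	return 0;
}
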
+
+/* ----------------------------------------------------------------------------
+ * Caching
+ *
+ * Support multiple begin/end's reusing depth buffers.
+ */
+
+void gpu_select_pick_cache_begin(void)
+{
+ BLI_assert(g_pick_state.use_cache == false);
+#ifdef DEBUG_PRINT
+ printf("%s\n", __func__);
+#endif
+ g_pick_state.use_cache = true;
+ g_pick_state.is_cached = false;
+}
+
+void gpu_select_pick_cache_end(void)
+{
+#ifdef DEBUG_PRINT
+ printf("%s: with %d buffers\n", __func__, BLI_listbase_count(&g_pick_state.cache.bufs));
+#endif
+ g_pick_state.use_cache = false;
+ g_pick_state.is_cached = false;
+
+ BLI_freelistN(&g_pick_state.cache.bufs);
+}
+
+/* has drawing already been cached (so it can be skipped)? */
+bool gpu_select_pick_is_cached(void)
+{
+ return g_pick_state.is_cached;
+}
+
+void gpu_select_pick_cache_load_id(void)
+{
+ BLI_assert(g_pick_state.is_cached == true);
+ GPUPickState *ps = &g_pick_state;
+#ifdef DEBUG_PRINT
+ printf("%s (building depth from cache)\n", __func__);
+#endif
+ for (DepthBufCache *rect_depth = ps->cache.bufs.first; rect_depth; rect_depth = rect_depth->next) {
+ if (rect_depth->next != NULL) {
+ /* we know the buffers differ, but this sub-region may not.
+ * double check before adding an id-pass */
+ if (g_pick_state.mode == GPU_SELECT_PICK_ALL) {
+ if (depth_buf_subrect_depth_any(rect_depth->next, &ps->cache.sub_rect)) {
+ gpu_select_load_id_pass_all(rect_depth->next);
+ }
+ }
+ else {
+ if (depth_buf_subrect_depth_any_filled(rect_depth, rect_depth->next, &ps->cache.sub_rect)) {
+ gpu_select_load_id_pass_nearest(rect_depth, rect_depth->next);
+ }
+ }
+ }
+ }
+}
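
To make rect_subregion_stride_calc and SubRectStride concrete: a standalone walk over a 3x2 sub-rect of an 8x4 buffer, using the same start/span/span_len/skip arithmetic (the sizes are made up for illustration, independent of the BLI_rcti types):

#include <stdio.h>

typedef struct SubRectStride {
	unsigned int start, span, span_len, skip;
} SubRectStride;

int main(void)
{
	/* src is 8x4 pixels; dst is the 3x2 sub-rect whose lower-left corner is at (2, 1) */
	const int src_x = 8, dst_x = 3, dst_y = 2, x = 2, y = 1;

	SubRectStride s;
	s.start    = (unsigned int)((src_x * y) + x);  /* 10: index of the sub-rect's first pixel */
	s.span     = (unsigned int)dst_x;              /* read 3 pixels per row... */
	s.span_len = (unsigned int)dst_y;              /* ...for each of 2 rows */
	s.skip     = (unsigned int)(src_x - dst_x);    /* then skip 5 to reach the next row */

	unsigned int i_src = s.start;
	for (unsigned int j = 0; j < s.span_len; j++) {
		for (unsigned int i = 0; i < s.span; i++, i_src++) {
			printf("%u ", i_src);  /* prints 10 11 12, then 18 19 20 */
		}
		i_src += s.skip;
		printf("\n");
	}
	return 0;
}
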
diff --git a/source/blender/gpu/intern/gpu_select_private.h b/source/blender/gpu/intern/gpu_select_private.h
new file mode 100644
index 00000000000..8935bd7b253
--- /dev/null
+++ b/source/blender/gpu/intern/gpu_select_private.h
@@ -0,0 +1,53 @@
+/*
+ * ***** BEGIN GPL LICENSE BLOCK *****
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * The Original Code is Copyright (C) 2014 Blender Foundation.
+ * All rights reserved.
+ *
+ * Contributor(s): Antony Riakiotakis.
+ *
+ * ***** END GPL LICENSE BLOCK *****
+ */
+
+/** \file blender/gpu/intern/gpu_select_private.h
+ * \ingroup gpu
+ *
+ * Selection implementations.
+ */
+
+#ifndef __GPU_SELECT_PRIVATE_H__
+#define __GPU_SELECT_PRIVATE_H__
+
+/* gpu_select_pick */
+void gpu_select_pick_begin(unsigned int (*buffer)[4], unsigned int bufsize, const rcti *input, char mode);
+bool gpu_select_pick_load_id(unsigned int id);
+unsigned int gpu_select_pick_end(void);
+
+void gpu_select_pick_cache_begin(void);
+void gpu_select_pick_cache_end(void);
+bool gpu_select_pick_is_cached(void);
+void gpu_select_pick_cache_load_id(void);
+
+/* gpu_select_sample_query */
+void gpu_select_query_begin(unsigned int (*buffer)[4], unsigned int bufsize, const rcti *input, char mode, int oldhits);
+bool gpu_select_query_load_id(unsigned int id);
+unsigned int gpu_select_query_end(void);
+
+
+#define SELECT_ID_NONE ((unsigned int)0xffffffff)
+
+#endif /* __GPU_SELECT_PRIVATE_H__ */
diff --git a/source/blender/gpu/intern/gpu_select_sample_query.c b/source/blender/gpu/intern/gpu_select_sample_query.c
new file mode 100644
index 00000000000..ba5fefc5227
--- /dev/null
+++ b/source/blender/gpu/intern/gpu_select_sample_query.c
@@ -0,0 +1,209 @@
+/*
+ * ***** BEGIN GPL LICENSE BLOCK *****
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * The Original Code is Copyright (C) 2014 Blender Foundation.
+ * All rights reserved.
+ *
+ * Contributor(s): Antony Riakiotakis.
+ *
+ * ***** END GPL LICENSE BLOCK *****
+ */
+
+/** \file blender/gpu/intern/gpu_select_sample_query.c
+ * \ingroup gpu
+ *
+ * Occlusion-query based selection (GL_SAMPLES_PASSED). The semantics are kept
+ * similar to glRenderMode(GL_SELECT), since the goal is to maintain compatibility.
+ */
+
+#include <stdlib.h>
+
+#include "GPU_select.h"
+#include "GPU_extensions.h"
+#include "GPU_glew.h"
+
+#include "MEM_guardedalloc.h"
+
+#include "BLI_rect.h"
+
+#include "BLI_utildefines.h"
+
+#include "gpu_select_private.h"
+
+
+/* Ad hoc number of queries to allocate to skip doing many glGenQueries */
+#define ALLOC_QUERIES 200
+
+typedef struct GPUQueryState {
+ /* Tracks whether a query has been issued so that gpu_select_query_load_id can end the previous one */
+ bool query_issued;
+ /* array holding the OpenGL query identifiers */
+ unsigned int *queries;
+ /* array holding the id corresponding to each query */
+ unsigned int *id;
+ /* number of queries in *queries and *id */
+ unsigned int num_of_queries;
+ /* index to the next query to start */
+ unsigned int active_query;
+ /* cache on initialization */
+ unsigned int (*buffer)[4];
+ /* maximum number of hits this buffer can hold (each hit is 4 uints) */
+ unsigned int bufsize;
+ /* mode of operation */
+ char mode;
+ unsigned int index;
+ int oldhits;
+} GPUQueryState;
+
+static GPUQueryState g_query_state = {0};
+
+
+void gpu_select_query_begin(
+ unsigned int (*buffer)[4], unsigned int bufsize,
+ const rcti *input, char mode,
+ int oldhits)
+{
+ float viewport[4];
+
+ g_query_state.query_issued = false;
+ g_query_state.active_query = 0;
+ g_query_state.num_of_queries = 0;
+ g_query_state.bufsize = bufsize;
+ g_query_state.buffer = buffer;
+ g_query_state.mode = mode;
+ g_query_state.index = 0;
+ g_query_state.oldhits = oldhits;
+
+ g_query_state.num_of_queries = ALLOC_QUERIES;
+
+ g_query_state.queries = MEM_mallocN(g_query_state.num_of_queries * sizeof(*g_query_state.queries), "gpu selection queries");
+ g_query_state.id = MEM_mallocN(g_query_state.num_of_queries * sizeof(*g_query_state.id), "gpu selection ids");
+ glGenQueries(g_query_state.num_of_queries, g_query_state.queries);
+
+ glPushAttrib(GL_DEPTH_BUFFER_BIT | GL_VIEWPORT_BIT);
+ /* disable writing to the framebuffer */
+ glColorMask(GL_FALSE, GL_FALSE, GL_FALSE, GL_FALSE);
+
+ /* To save some fill rate we minimize the viewport using the rect.
+ * We need the region of the scissor so that our geometry doesn't
+ * get rejected before the depth test. We should probably cull the rect
+ * against the scissor for the viewport, but this is a rare case. */
+ glGetFloatv(GL_SCISSOR_BOX, viewport);
+ glViewport(viewport[0], viewport[1], BLI_rcti_size_x(input), BLI_rcti_size_y(input));
+
+ /* Occlusion queries operate on fragments that pass the tests. Since we are interested in all
+ * objects in the view frustum independently of their order, we need to disable the depth test */
+ if (mode == GPU_SELECT_ALL) {
+ glDisable(GL_DEPTH_TEST);
+ glDepthMask(GL_FALSE);
+ }
+ else if (mode == GPU_SELECT_NEAREST_FIRST_PASS) {
+ glClear(GL_DEPTH_BUFFER_BIT);
+ glEnable(GL_DEPTH_TEST);
+ glDepthMask(GL_TRUE);
+ glDepthFunc(GL_LEQUAL);
+ }
+ else if (mode == GPU_SELECT_NEAREST_SECOND_PASS) {
+ glEnable(GL_DEPTH_TEST);
+ glDepthMask(GL_FALSE);
+ glDepthFunc(GL_EQUAL);
+ }
+}
+
+bool gpu_select_query_load_id(unsigned int id)
+{
+ if (g_query_state.query_issued) {
+ glEndQuery(GL_SAMPLES_PASSED);
+ }
+ /* if required, allocate extra queries */
+ if (g_query_state.active_query == g_query_state.num_of_queries) {
+ g_query_state.num_of_queries += ALLOC_QUERIES;
+ g_query_state.queries = MEM_reallocN(g_query_state.queries, g_query_state.num_of_queries * sizeof(*g_query_state.queries));
+ g_query_state.id = MEM_reallocN(g_query_state.id, g_query_state.num_of_queries * sizeof(*g_query_state.id));
+ glGenQueries(ALLOC_QUERIES, &g_query_state.queries[g_query_state.active_query]);
+ }
+
+ glBeginQuery(GL_SAMPLES_PASSED, g_query_state.queries[g_query_state.active_query]);
+ g_query_state.id[g_query_state.active_query] = id;
+ g_query_state.active_query++;
+ g_query_state.query_issued = true;
+
+ if (g_query_state.mode == GPU_SELECT_NEAREST_SECOND_PASS && g_query_state.index < g_query_state.oldhits) {
+ if (g_query_state.buffer[g_query_state.index][3] == id) {
+ g_query_state.index++;
+ return true;
+ }
+ else {
+ return false;
+ }
+ }
+
+ return true;
+}
+
+unsigned int gpu_select_query_end(void)
+{
+ int i;
+
+ unsigned int hits = 0;
+ const unsigned int maxhits = g_query_state.bufsize;
+
+ if (g_query_state.query_issued) {
+ glEndQuery(GL_SAMPLES_PASSED);
+ }
+
+ for (i = 0; i < g_query_state.active_query; i++) {
+ unsigned int result;
+ glGetQueryObjectuiv(g_query_state.queries[i], GL_QUERY_RESULT, &result);
+ if (result > 0) {
+ if (g_query_state.mode != GPU_SELECT_NEAREST_SECOND_PASS) {
+
+ if (hits < maxhits) {
+ g_query_state.buffer[hits][0] = 1;
+ g_query_state.buffer[hits][1] = 0xFFFF;
+ g_query_state.buffer[hits][2] = 0xFFFF;
+ g_query_state.buffer[hits][3] = g_query_state.id[i];
+
+ hits++;
+ }
+ else {
+ hits = -1;
+ break;
+ }
+ }
+ else {
+ int j;
+ /* search in buffer and make selected object first */
+ for (j = 0; j < g_query_state.oldhits; j++) {
+ if (g_query_state.buffer[j][3] == g_query_state.id[i]) {
+ g_query_state.buffer[j][1] = 0;
+ g_query_state.buffer[j][2] = 0;
+ }
+ }
+ break;
+ }
+ }
+ }
+
+ glDeleteQueries(g_query_state.num_of_queries, g_query_state.queries);
+ MEM_freeN(g_query_state.queries);
+ MEM_freeN(g_query_state.id);
+ glPopAttrib();
+ glColorMask(GL_TRUE, GL_TRUE, GL_TRUE, GL_TRUE);
+
+ return hits;
+}
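
For context, the two NEAREST modes above are halves of one protocol: the first pass fills the depth buffer with GL_LEQUAL and records every id that passed, then the second pass re-draws with GL_EQUAL so only the frontmost hit's samples pass, and gpu_select_query_end() then zeroes buffer[j][1..2] of that hit instead of appending records. A hedged caller-side sketch (draw_scene() is an assumed callback that calls gpu_select_query_load_id() per object; includes assumed: GPU_select.h, BLI_rect.h, gpu_select_private.h):

extern void draw_scene(void);

static unsigned int select_nearest(unsigned int (*buffer)[4], unsigned int bufsize, const rcti *rect)
{
	gpu_select_query_begin(buffer, bufsize, rect, GPU_SELECT_NEAREST_FIRST_PASS, 0);
	draw_scene();
	const unsigned int oldhits = gpu_select_query_end();

	/* re-draw; in this pass gpu_select_query_load_id() returns false for ids the caller may skip */
	gpu_select_query_begin(buffer, bufsize, rect, GPU_SELECT_NEAREST_SECOND_PASS, (int)oldhits);
	draw_scene();
	gpu_select_query_end();

	return oldhits;  /* the nearest hit is now the record flagged with z-min == z-max == 0 */
}
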