Welcome to mirror list, hosted at ThFree Co, Russian Federation.

git.blender.org/blender.git - Unnamed repository; edit this file 'description' to name the repository.
summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorBrecht Van Lommel <brechtvanlommel@gmail.com>2020-03-17 16:41:48 +0300
committerBrecht Van Lommel <brechtvanlommel@gmail.com>2020-03-18 13:23:05 +0300
commitb0a1cf2c9ae696b07f7a236bc855a5ab4a493dcb (patch)
tree92295af11db5e984da42bfac7ca60190b8549a3f /source/blender/draw/intern
parent8dcfd392e4e62f193b666304425bc5ae635ecffe (diff)
Objects: add Volume object type, and prototypes for Hair and PointCloud
Only the volume object is exposed in the user interface. It is based on OpenVDB internally. Drawing and rendering code will follow in another commit. https://wiki.blender.org/wiki/Source/Objects/Volume https://wiki.blender.org/wiki/Reference/Release_Notes/2.83/Volumes Hair and PointCloud object types are hidden behind a WITH_NEW_OBJECT_TYPES build option. These are unfinished, and included only to make it easier to cooperate on development in the future and avoid tricky merges. https://wiki.blender.org/wiki/Source/Objects/New_Object_Types Ref T73201, T68981 Differential Revision: https://developer.blender.org/D6945
Diffstat (limited to 'source/blender/draw/intern')
-rw-r--r--source/blender/draw/intern/draw_cache_impl_hair.c349
-rw-r--r--source/blender/draw/intern/draw_cache_impl_pointcloud.c176
-rw-r--r--source/blender/draw/intern/draw_cache_impl_volume.c297
3 files changed, 822 insertions, 0 deletions
diff --git a/source/blender/draw/intern/draw_cache_impl_hair.c b/source/blender/draw/intern/draw_cache_impl_hair.c
new file mode 100644
index 00000000000..007f6258184
--- /dev/null
+++ b/source/blender/draw/intern/draw_cache_impl_hair.c
@@ -0,0 +1,349 @@
+/*
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * The Original Code is Copyright (C) 2017 by Blender Foundation.
+ * All rights reserved.
+ */
+
+/** \file
+ * \ingroup draw
+ *
+ * \brief Hair API for render engines
+ */
+
+#include <string.h>
+
+#include "MEM_guardedalloc.h"
+
+#include "BLI_math_base.h"
+#include "BLI_math_vector.h"
+#include "BLI_utildefines.h"
+
+#include "DNA_hair_types.h"
+#include "DNA_object_types.h"
+
+#include "BKE_hair.h"
+
+#include "GPU_batch.h"
+#include "GPU_texture.h"
+
+#include "draw_cache_impl.h" /* own include */
+#include "draw_hair_private.h" /* own include */
+
+static void hair_batch_cache_clear(Hair *hair);
+
+/* ---------------------------------------------------------------------- */
+/* Hair GPUBatch Cache */
+
+typedef struct HairBatchCache {
+  /* Procedural drawing data (point/strand VBOs, buffer textures, final
+   * batches), reusing the particle hair cache structure. */
+  ParticleHairCache hair;
+
+  /* settings to determine if cache is invalid */
+  bool is_dirty;
+} HairBatchCache;
+
+/* GPUBatch cache management. */
+
+/* Return true when a batch cache exists and has not been tagged dirty. */
+static bool hair_batch_cache_valid(Hair *hair)
+{
+  HairBatchCache *cache = hair->batch_cache;
+  return (cache && cache->is_dirty == false);
+}
+
+/* Allocate the batch cache on first use, otherwise reset the existing one
+ * to a zeroed state. Either way the cache ends up clean (not dirty). */
+static void hair_batch_cache_init(Hair *hair)
+{
+  HairBatchCache *cache = hair->batch_cache;
+
+  if (!cache) {
+    cache = hair->batch_cache = MEM_callocN(sizeof(*cache), __func__);
+  }
+  else {
+    memset(cache, 0, sizeof(*cache));
+  }
+
+  cache->is_dirty = false;
+}
+
+/* Free and re-initialize the cache when it is missing or dirty, so that
+ * subsequent lookups always see a valid (possibly empty) cache. */
+void DRW_hair_batch_cache_validate(Hair *hair)
+{
+  if (!hair_batch_cache_valid(hair)) {
+    hair_batch_cache_clear(hair);
+    hair_batch_cache_init(hair);
+  }
+}
+
+/* Get the batch cache, validating (and rebuilding if needed) first so the
+ * returned pointer is never NULL. */
+static HairBatchCache *hair_batch_cache_get(Hair *hair)
+{
+  DRW_hair_batch_cache_validate(hair);
+  return hair->batch_cache;
+}
+
+/* Tag the cache dirty so it gets rebuilt on next validation. A NULL cache
+ * needs no tagging: it will be built from scratch anyway. */
+void DRW_hair_batch_cache_dirty_tag(Hair *hair, int mode)
+{
+  HairBatchCache *cache = hair->batch_cache;
+  if (cache == NULL) {
+    return;
+  }
+  switch (mode) {
+    case BKE_HAIR_BATCH_DIRTY_ALL:
+      cache->is_dirty = true;
+      break;
+    default:
+      BLI_assert(0);
+  }
+}
+
+/* Free the GPU resources held by the cache. The cache struct itself is kept;
+ * DRW_hair_batch_cache_free() releases it. */
+static void hair_batch_cache_clear(Hair *hair)
+{
+  HairBatchCache *cache = hair->batch_cache;
+  if (!cache) {
+    return;
+  }
+
+  particle_batch_cache_clear_hair(&cache->hair);
+}
+
+/* Free GPU resources and the cache struct itself (sets batch_cache NULL). */
+void DRW_hair_batch_cache_free(Hair *hair)
+{
+  hair_batch_cache_clear(hair);
+  MEM_SAFE_FREE(hair->batch_cache);
+}
+
+/* Count strands, points and index-buffer elements from the hair curves.
+ * Skipped when position buffers already exist (counts are then up to date). */
+static void ensure_seg_pt_count(Hair *hair, ParticleHairCache *hair_cache)
+{
+  if ((hair_cache->pos != NULL && hair_cache->indices != NULL) ||
+      (hair_cache->proc_point_buf != NULL)) {
+    return;
+  }
+
+  hair_cache->strands_len = 0;
+  hair_cache->elems_len = 0;
+  hair_cache->point_len = 0;
+
+  HairCurve *curve = hair->curves;
+  int num_curves = hair->totcurve;
+  for (int i = 0; i < num_curves; i++, curve++) {
+    hair_cache->strands_len++;
+    /* +1 element per curve for the primitive restart index. */
+    hair_cache->elems_len += curve->numpoints + 1;
+    hair_cache->point_len += curve->numpoints;
+  }
+}
+
+/* Fill the "posTime" vertex buffer: xyz position per point, plus the
+ * accumulated curve length in w, normalized to [0, 1] in a second pass. */
+static void hair_batch_cache_fill_segments_proc_pos(Hair *hair, GPUVertBufRaw *attr_step)
+{
+  /* TODO: use hair radius layer if available. */
+  HairCurve *curve = hair->curves;
+  int num_curves = hair->totcurve;
+  for (int i = 0; i < num_curves; i++, curve++) {
+    float(*curve_co)[3] = hair->co + curve->firstpoint;
+    float total_len = 0.0f;
+    /* Initialize seg_data_first: with an empty curve (numpoints == 0) it was
+     * left indeterminate, which trips maybe-uninitialized warnings even
+     * though the normalization loop below is then never entered. */
+    float *co_prev = NULL, *seg_data_first = NULL;
+    for (int j = 0; j < curve->numpoints; j++) {
+      float *seg_data = (float *)GPU_vertbuf_raw_step(attr_step);
+      copy_v3_v3(seg_data, curve_co[j]);
+      if (co_prev) {
+        total_len += len_v3v3(co_prev, curve_co[j]);
+      }
+      else {
+        /* First point of the curve: remember where its data starts. */
+        seg_data_first = seg_data;
+      }
+      seg_data[3] = total_len;
+      co_prev = curve_co[j];
+    }
+    if (total_len > 0.0f) {
+      /* Divide by total length to have a [0-1] number. */
+      for (int j = 0; j < curve->numpoints; j++, seg_data_first += 4) {
+        seg_data_first[3] /= total_len;
+      }
+    }
+  }
+}
+
+/* Build the procedural point buffer ("posTime": vec4 of position + relative
+ * curve parameter) and wrap it in a buffer texture for shader access. */
+static void hair_batch_cache_ensure_procedural_pos(Hair *hair, ParticleHairCache *cache)
+{
+  if (cache->proc_point_buf != NULL) {
+    return;
+  }
+
+  /* initialize vertex format */
+  GPUVertFormat format = {0};
+  uint pos_id = GPU_vertformat_attr_add(&format, "posTime", GPU_COMP_F32, 4, GPU_FETCH_FLOAT);
+
+  cache->proc_point_buf = GPU_vertbuf_create_with_format(&format);
+  GPU_vertbuf_data_alloc(cache->proc_point_buf, cache->point_len);
+
+  GPUVertBufRaw pos_step;
+  GPU_vertbuf_attr_get_raw_data(cache->proc_point_buf, pos_id, &pos_step);
+
+  hair_batch_cache_fill_segments_proc_pos(hair, &pos_step);
+
+  /* Create vbo immediately to bind to texture buffer. */
+  GPU_vertbuf_use(cache->proc_point_buf);
+
+  cache->point_tex = GPU_texture_create_from_vertbuf(cache->proc_point_buf);
+}
+
+/* Fill per-strand buffers: first point index (u32) and segment count (u16).
+ * NOTE(review): the u16 cast caps segments per curve at 65535 — presumably
+ * acceptable for hair data; confirm upstream validation. */
+static void hair_batch_cache_fill_strands_data(Hair *hair,
+                                               GPUVertBufRaw *data_step,
+                                               GPUVertBufRaw *seg_step)
+{
+  HairCurve *curve = hair->curves;
+  int num_curves = hair->totcurve;
+  for (int i = 0; i < num_curves; i++, curve++) {
+    *(uint *)GPU_vertbuf_raw_step(data_step) = curve->firstpoint;
+    /* Segments = points - 1. */
+    *(ushort *)GPU_vertbuf_raw_step(seg_step) = curve->numpoints - 1;
+  }
+}
+
+/* Build per-strand data buffers (start index and segment count) and their
+ * buffer textures. Caller guards against rebuilding (strand_tex != NULL). */
+static void hair_batch_cache_ensure_procedural_strand_data(Hair *hair, ParticleHairCache *cache)
+{
+  GPUVertBufRaw data_step, seg_step;
+
+  GPUVertFormat format_data = {0};
+  uint data_id = GPU_vertformat_attr_add(&format_data, "data", GPU_COMP_U32, 1, GPU_FETCH_INT);
+
+  GPUVertFormat format_seg = {0};
+  uint seg_id = GPU_vertformat_attr_add(&format_seg, "data", GPU_COMP_U16, 1, GPU_FETCH_INT);
+
+  /* Strand Data */
+  cache->proc_strand_buf = GPU_vertbuf_create_with_format(&format_data);
+  GPU_vertbuf_data_alloc(cache->proc_strand_buf, cache->strands_len);
+  GPU_vertbuf_attr_get_raw_data(cache->proc_strand_buf, data_id, &data_step);
+
+  cache->proc_strand_seg_buf = GPU_vertbuf_create_with_format(&format_seg);
+  GPU_vertbuf_data_alloc(cache->proc_strand_seg_buf, cache->strands_len);
+  GPU_vertbuf_attr_get_raw_data(cache->proc_strand_seg_buf, seg_id, &seg_step);
+
+  hair_batch_cache_fill_strands_data(hair, &data_step, &seg_step);
+
+  /* Create vbo immediately to bind to texture buffer. */
+  GPU_vertbuf_use(cache->proc_strand_buf);
+  cache->strand_tex = GPU_texture_create_from_vertbuf(cache->proc_strand_buf);
+
+  GPU_vertbuf_use(cache->proc_strand_seg_buf);
+  cache->strand_seg_tex = GPU_texture_create_from_vertbuf(cache->proc_strand_seg_buf);
+}
+
+/* Allocate the output buffer that transform feedback writes subdivided
+ * points into, plus a buffer texture view of it. */
+static void hair_batch_cache_ensure_procedural_final_points(ParticleHairCache *cache, int subdiv)
+{
+  /* Same format as point_tex. */
+  GPUVertFormat format = {0};
+  GPU_vertformat_attr_add(&format, "pos", GPU_COMP_F32, 4, GPU_FETCH_FLOAT);
+
+  cache->final[subdiv].proc_buf = GPU_vertbuf_create_with_format(&format);
+
+  /* Create a destination buffer for the transform feedback. Sized appropriately */
+  /* Those are points! not line segments. */
+  GPU_vertbuf_data_alloc(cache->final[subdiv].proc_buf,
+                         cache->final[subdiv].strands_res * cache->strands_len);
+
+  /* Create vbo immediately to bind to texture buffer. */
+  GPU_vertbuf_use(cache->final[subdiv].proc_buf);
+
+  cache->final[subdiv].proc_tex = GPU_texture_create_from_vertbuf(cache->final[subdiv].proc_buf);
+}
+
+/* Emit `res` sequential vertex indices per curve, separated by primitive
+ * restart, producing one line/triangle strip per hair. */
+static void hair_batch_cache_fill_segments_indices(Hair *hair,
+                                                   const int res,
+                                                   GPUIndexBufBuilder *elb)
+{
+  HairCurve *curve = hair->curves;
+  int num_curves = hair->totcurve;
+  uint curr_point = 0;
+  for (int i = 0; i < num_curves; i++, curve++) {
+    for (int k = 0; k < res; k++) {
+      GPU_indexbuf_add_generic_vert(elb, curr_point++);
+    }
+    GPU_indexbuf_add_primitive_restart(elb);
+  }
+}
+
+/* Build the index buffer and batch used to draw the subdivided strands,
+ * either as line strips (thickness_res == 1) or triangle strips (ribbons).
+ * NOTE(review): `format` is function-static and cleared on every call —
+ * presumably draw-cache building is single-threaded here; confirm. */
+static void hair_batch_cache_ensure_procedural_indices(Hair *hair,
+                                                       ParticleHairCache *cache,
+                                                       int thickness_res,
+                                                       int subdiv)
+{
+  BLI_assert(thickness_res <= MAX_THICKRES); /* Cylinder strip not currently supported. */
+
+  if (cache->final[subdiv].proc_hairs[thickness_res - 1] != NULL) {
+    return;
+  }
+
+  int verts_per_hair = cache->final[subdiv].strands_res * thickness_res;
+  /* +1 for primitive restart */
+  int element_count = (verts_per_hair + 1) * cache->strands_len;
+  GPUPrimType prim_type = (thickness_res == 1) ? GPU_PRIM_LINE_STRIP : GPU_PRIM_TRI_STRIP;
+
+  static GPUVertFormat format = {0};
+  GPU_vertformat_clear(&format);
+
+  /* initialize vertex format */
+  GPU_vertformat_attr_add(&format, "dummy", GPU_COMP_U8, 1, GPU_FETCH_INT_TO_FLOAT_UNIT);
+
+  /* Dummy 1-vertex VBO: actual positions come from the buffer textures. */
+  GPUVertBuf *vbo = GPU_vertbuf_create_with_format(&format);
+  GPU_vertbuf_data_alloc(vbo, 1);
+
+  GPUIndexBufBuilder elb;
+  GPU_indexbuf_init_ex(&elb, prim_type, element_count, element_count);
+
+  hair_batch_cache_fill_segments_indices(hair, verts_per_hair, &elb);
+
+  /* Batch owns both the dummy VBO and the index buffer. */
+  cache->final[subdiv].proc_hairs[thickness_res - 1] = GPU_batch_create_ex(
+      prim_type, vbo, GPU_indexbuf_build(&elb), GPU_BATCH_OWNS_VBO | GPU_BATCH_OWNS_INDEX);
+}
+
+/* Ensure all textures and buffers needed for GPU accelerated drawing. */
+/* Returns true when the transform-feedback subdivision pass must re-run
+ * (point data or final buffers were (re)created). */
+bool hair_ensure_procedural_data(Object *object,
+                                 ParticleHairCache **r_hair_cache,
+                                 int subdiv,
+                                 int thickness_res)
+{
+  bool need_ft_update = false;
+  Hair *hair = object->data;
+
+  HairBatchCache *cache = hair_batch_cache_get(hair);
+  *r_hair_cache = &cache->hair;
+
+  const int steps = 2; /* TODO: don't hardcode? */
+  /* Output resolution per strand: 2^(steps + subdiv) points. */
+  (*r_hair_cache)->final[subdiv].strands_res = 1 << (steps + subdiv);
+
+  /* Refreshed on combing and simulation. */
+  if ((*r_hair_cache)->proc_point_buf == NULL) {
+    ensure_seg_pt_count(hair, &cache->hair);
+    hair_batch_cache_ensure_procedural_pos(hair, &cache->hair);
+    need_ft_update = true;
+  }
+
+  /* Refreshed if active layer or custom data changes. */
+  if ((*r_hair_cache)->strand_tex == NULL) {
+    hair_batch_cache_ensure_procedural_strand_data(hair, &cache->hair);
+  }
+
+  /* Refreshed only on subdiv count change. */
+  if ((*r_hair_cache)->final[subdiv].proc_buf == NULL) {
+    hair_batch_cache_ensure_procedural_final_points(&cache->hair, subdiv);
+    need_ft_update = true;
+  }
+  if ((*r_hair_cache)->final[subdiv].proc_hairs[thickness_res - 1] == NULL) {
+    hair_batch_cache_ensure_procedural_indices(hair, &cache->hair, thickness_res, subdiv);
+  }
+
+  return need_ft_update;
+}
+
+/* Material count for drawing; always at least 1 (default material). */
+int DRW_hair_material_count_get(Hair *hair)
+{
+  return max_ii(1, hair->totcol);
+}
diff --git a/source/blender/draw/intern/draw_cache_impl_pointcloud.c b/source/blender/draw/intern/draw_cache_impl_pointcloud.c
new file mode 100644
index 00000000000..83757cb714a
--- /dev/null
+++ b/source/blender/draw/intern/draw_cache_impl_pointcloud.c
@@ -0,0 +1,176 @@
+/*
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * The Original Code is Copyright (C) 2017 by Blender Foundation.
+ * All rights reserved.
+ */
+
+/** \file
+ * \ingroup draw
+ *
+ * \brief PointCloud API for render engines
+ */
+
+#include <string.h>
+
+#include "MEM_guardedalloc.h"
+
+#include "BLI_math_base.h"
+#include "BLI_utildefines.h"
+
+#include "DNA_object_types.h"
+#include "DNA_pointcloud_types.h"
+
+#include "BKE_pointcloud.h"
+
+#include "GPU_batch.h"
+
+#include "draw_cache_impl.h" /* own include */
+
+static void pointcloud_batch_cache_clear(PointCloud *pointcloud);
+
+/* ---------------------------------------------------------------------- */
+/* PointCloud GPUBatch Cache */
+
+typedef struct PointCloudBatchCache {
+  /* Per-point position + radius vertex buffer and the point-drawing batch. */
+  GPUVertBuf *pos;
+  GPUBatch *batch;
+
+  /* settings to determine if cache is invalid */
+  bool is_dirty;
+} PointCloudBatchCache;
+
+/* GPUBatch cache management. */
+
+/* Return true when a batch cache exists and has not been tagged dirty. */
+static bool pointcloud_batch_cache_valid(PointCloud *pointcloud)
+{
+  PointCloudBatchCache *cache = pointcloud->batch_cache;
+  return (cache && cache->is_dirty == false);
+}
+
+/* Allocate the batch cache on first use, otherwise reset the existing one
+ * to a zeroed state. Either way the cache ends up clean (not dirty). */
+static void pointcloud_batch_cache_init(PointCloud *pointcloud)
+{
+  PointCloudBatchCache *cache = pointcloud->batch_cache;
+
+  if (!cache) {
+    cache = pointcloud->batch_cache = MEM_callocN(sizeof(*cache), __func__);
+  }
+  else {
+    memset(cache, 0, sizeof(*cache));
+  }
+
+  cache->is_dirty = false;
+}
+
+/* Free and re-initialize the cache when it is missing or dirty. */
+void DRW_pointcloud_batch_cache_validate(PointCloud *pointcloud)
+{
+  if (!pointcloud_batch_cache_valid(pointcloud)) {
+    pointcloud_batch_cache_clear(pointcloud);
+    pointcloud_batch_cache_init(pointcloud);
+  }
+}
+
+/* Get the batch cache, validating first so the returned pointer is never
+ * NULL. The hair and volume cache getters validate here too; without this,
+ * DRW_pointcloud_batch_cache_get_dots would dereference a NULL cache when
+ * called before any explicit validation. */
+static PointCloudBatchCache *pointcloud_batch_cache_get(PointCloud *pointcloud)
+{
+  DRW_pointcloud_batch_cache_validate(pointcloud);
+  return pointcloud->batch_cache;
+}
+
+/* Tag the cache dirty so it gets rebuilt on next validation. A NULL cache
+ * needs no tagging: it will be built from scratch anyway. */
+void DRW_pointcloud_batch_cache_dirty_tag(PointCloud *pointcloud, int mode)
+{
+  PointCloudBatchCache *cache = pointcloud->batch_cache;
+  if (cache == NULL) {
+    return;
+  }
+  switch (mode) {
+    case BKE_POINTCLOUD_BATCH_DIRTY_ALL:
+      cache->is_dirty = true;
+      break;
+    default:
+      BLI_assert(0);
+  }
+}
+
+/* Free the GPU resources held by the cache. The cache struct itself is kept;
+ * DRW_pointcloud_batch_cache_free() releases it. */
+static void pointcloud_batch_cache_clear(PointCloud *pointcloud)
+{
+  PointCloudBatchCache *cache = pointcloud->batch_cache;
+  if (!cache) {
+    return;
+  }
+
+  GPU_BATCH_DISCARD_SAFE(cache->batch);
+  GPU_VERTBUF_DISCARD_SAFE(cache->pos);
+}
+
+/* Free GPU resources and the cache struct itself (sets batch_cache NULL). */
+void DRW_pointcloud_batch_cache_free(PointCloud *pointcloud)
+{
+  pointcloud_batch_cache_clear(pointcloud);
+  MEM_SAFE_FREE(pointcloud->batch_cache);
+}
+
+/* Build the position + radius vertex buffer for the point cloud. */
+static void pointcloud_batch_cache_ensure_pos(Object *ob, PointCloudBatchCache *cache)
+{
+  if (cache->pos != NULL) {
+    return;
+  }
+
+  PointCloud *pointcloud = ob->data;
+
+  static GPUVertFormat format = {0};
+  static uint pos_id;
+  static uint radius_id;
+  if (format.attr_len == 0) {
+    /* initialize vertex format */
+    pos_id = GPU_vertformat_attr_add(&format, "pointcloud_pos", GPU_COMP_F32, 3, GPU_FETCH_FLOAT);
+    /* Radius is one float per point. Declaring 3 components here would make
+     * GPU_vertbuf_attr_fill read 3 floats per point from the scalar radius
+     * array below, overrunning the source buffer. */
+    radius_id = GPU_vertformat_attr_add(
+        &format, "pointcloud_radius", GPU_COMP_F32, 1, GPU_FETCH_FLOAT);
+  }
+
+  GPU_VERTBUF_DISCARD_SAFE(cache->pos);
+  cache->pos = GPU_vertbuf_create_with_format(&format);
+  GPU_vertbuf_data_alloc(cache->pos, pointcloud->totpoint);
+  GPU_vertbuf_attr_fill(cache->pos, pos_id, pointcloud->co);
+
+  if (pointcloud->radius) {
+    GPU_vertbuf_attr_fill(cache->pos, radius_id, pointcloud->radius);
+  }
+  else if (pointcloud->totpoint) {
+    /* TODO: optimize for constant radius by not including in vertex buffer at all? */
+    float *radius = MEM_malloc_arrayN(pointcloud->totpoint, sizeof(float), __func__);
+    for (int i = 0; i < pointcloud->totpoint; i++) {
+      /* TODO: add default radius to PointCloud data structure. */
+      radius[i] = 0.01f;
+    }
+    GPU_vertbuf_attr_fill(cache->pos, radius_id, radius);
+    MEM_freeN(radius);
+  }
+}
+
+/* Get (building on demand) the batch that draws the point cloud as dots. */
+GPUBatch *DRW_pointcloud_batch_cache_get_dots(Object *ob)
+{
+  PointCloud *pointcloud = ob->data;
+  PointCloudBatchCache *cache = pointcloud_batch_cache_get(pointcloud);
+
+  if (cache->batch == NULL) {
+    pointcloud_batch_cache_ensure_pos(ob, cache);
+    cache->batch = GPU_batch_create(GPU_PRIM_POINTS, cache->pos, NULL);
+  }
+
+  return cache->batch;
+}
+
+/* Material count for drawing; always at least 1 (default material). */
+int DRW_pointcloud_material_count_get(PointCloud *pointcloud)
+{
+  return max_ii(1, pointcloud->totcol);
+}
diff --git a/source/blender/draw/intern/draw_cache_impl_volume.c b/source/blender/draw/intern/draw_cache_impl_volume.c
new file mode 100644
index 00000000000..cdac8b33fba
--- /dev/null
+++ b/source/blender/draw/intern/draw_cache_impl_volume.c
@@ -0,0 +1,297 @@
+/*
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * The Original Code is Copyright (C) 2017 by Blender Foundation.
+ * All rights reserved.
+ */
+
+/** \file
+ * \ingroup draw
+ *
+ * \brief Volume API for render engines
+ */
+
+#include <string.h>
+
+#include "MEM_guardedalloc.h"
+
+#include "BLI_math_base.h"
+#include "BLI_math_vector.h"
+#include "BLI_utildefines.h"
+
+#include "DNA_object_types.h"
+#include "DNA_volume_types.h"
+
+#include "BKE_global.h"
+#include "BKE_volume.h"
+#include "BKE_volume_render.h"
+
+#include "GPU_batch.h"
+#include "GPU_texture.h"
+
+#include "DRW_render.h"
+
+#include "draw_cache.h" /* own include */
+#include "draw_cache_impl.h" /* own include */
+
+static void volume_batch_cache_clear(Volume *volume);
+
+/* ---------------------------------------------------------------------- */
+/* Volume GPUBatch Cache */
+
+typedef struct VolumeBatchCache {
+  /* 3D textures */
+  /* List of DRWVolumeGrid, one per named grid that has been requested. */
+  ListBase grids;
+
+  /* Wireframe */
+  struct {
+    GPUVertBuf *pos_nor_in_order;
+    GPUBatch *batch;
+  } face_wire;
+
+  /* settings to determine if cache is invalid */
+  bool is_dirty;
+} VolumeBatchCache;
+
+/* GPUBatch cache management. */
+
+/* Return true when a batch cache exists and has not been tagged dirty. */
+static bool volume_batch_cache_valid(Volume *volume)
+{
+  VolumeBatchCache *cache = volume->batch_cache;
+  return (cache && cache->is_dirty == false);
+}
+
+/* Allocate the batch cache on first use, otherwise reset the existing one
+ * to a zeroed state. Either way the cache ends up clean (not dirty). */
+static void volume_batch_cache_init(Volume *volume)
+{
+  VolumeBatchCache *cache = volume->batch_cache;
+
+  if (!cache) {
+    cache = volume->batch_cache = MEM_callocN(sizeof(*cache), __func__);
+  }
+  else {
+    memset(cache, 0, sizeof(*cache));
+  }
+
+  cache->is_dirty = false;
+}
+
+/* Free and re-initialize the cache when it is missing or dirty. */
+void DRW_volume_batch_cache_validate(Volume *volume)
+{
+  if (!volume_batch_cache_valid(volume)) {
+    volume_batch_cache_clear(volume);
+    volume_batch_cache_init(volume);
+  }
+}
+
+/* Get the batch cache, validating (and rebuilding if needed) first so the
+ * returned pointer is never NULL. */
+static VolumeBatchCache *volume_batch_cache_get(Volume *volume)
+{
+  DRW_volume_batch_cache_validate(volume);
+  return volume->batch_cache;
+}
+
+/* Tag the cache dirty so it gets rebuilt on next validation. A NULL cache
+ * needs no tagging: it will be built from scratch anyway. */
+void DRW_volume_batch_cache_dirty_tag(Volume *volume, int mode)
+{
+  VolumeBatchCache *cache = volume->batch_cache;
+  if (cache == NULL) {
+    return;
+  }
+  switch (mode) {
+    case BKE_VOLUME_BATCH_DIRTY_ALL:
+      cache->is_dirty = true;
+      break;
+    default:
+      BLI_assert(0);
+  }
+}
+
+/* Free all GPU resources held by the cache: grid textures, the grid list,
+ * and the wireframe buffers/batch. The cache struct itself is kept. */
+static void volume_batch_cache_clear(Volume *volume)
+{
+  VolumeBatchCache *cache = volume->batch_cache;
+  if (!cache) {
+    return;
+  }
+
+  for (DRWVolumeGrid *grid = cache->grids.first; grid; grid = grid->next) {
+    MEM_SAFE_FREE(grid->name);
+    DRW_TEXTURE_FREE_SAFE(grid->texture);
+  }
+  BLI_freelistN(&cache->grids);
+
+  GPU_VERTBUF_DISCARD_SAFE(cache->face_wire.pos_nor_in_order);
+  GPU_BATCH_DISCARD_SAFE(cache->face_wire.batch);
+}
+
+/* Free GPU resources and the cache struct itself (sets batch_cache NULL). */
+void DRW_volume_batch_cache_free(Volume *volume)
+{
+  volume_batch_cache_clear(volume);
+  MEM_SAFE_FREE(volume->batch_cache);
+}
+
+/* Callback invoked by BKE_volume_grid_wireframe() with the wireframe
+ * geometry; builds the face_wire vertex buffer and batch on the cache.
+ * `userdata` is the Volume passed to BKE_volume_grid_wireframe(). */
+static void drw_volume_wireframe_cb(
+    void *userdata, float (*verts)[3], int (*edges)[2], int totvert, int totedge)
+{
+  Volume *volume = userdata;
+  VolumeBatchCache *cache = volume->batch_cache;
+
+  /* Create vertex buffer. */
+  static GPUVertFormat format = {0};
+  static uint pos_id, nor_id;
+  if (format.attr_len == 0) {
+    pos_id = GPU_vertformat_attr_add(&format, "pos", GPU_COMP_F32, 3, GPU_FETCH_FLOAT);
+    nor_id = GPU_vertformat_attr_add(&format, "nor", GPU_COMP_I10, 4, GPU_FETCH_INT_TO_FLOAT_UNIT);
+  }
+
+  /* Single constant normal for every vertex (filled with stride 0 below). */
+  static float normal[3] = {1.0f, 0.0f, 0.0f};
+  GPUPackedNormal packed_normal = GPU_normal_convert_i10_v3(normal);
+
+  cache->face_wire.pos_nor_in_order = GPU_vertbuf_create_with_format(&format);
+  GPU_vertbuf_data_alloc(cache->face_wire.pos_nor_in_order, totvert);
+  GPU_vertbuf_attr_fill(cache->face_wire.pos_nor_in_order, pos_id, verts);
+  GPU_vertbuf_attr_fill_stride(cache->face_wire.pos_nor_in_order, nor_id, 0, &packed_normal);
+
+  /* Create wiredata. */
+  GPUVertBuf *vbo_wiredata = MEM_callocN(sizeof(GPUVertBuf), __func__);
+  DRW_vertbuf_create_wiredata(vbo_wiredata, totvert);
+
+  if (volume->display.wireframe_type == VOLUME_WIREFRAME_POINTS) {
+    /* Create batch. */
+    cache->face_wire.batch = GPU_batch_create(
+        GPU_PRIM_POINTS, cache->face_wire.pos_nor_in_order, NULL);
+  }
+  else {
+    /* Create edge index buffer. */
+    GPUIndexBufBuilder elb;
+    GPU_indexbuf_init(&elb, GPU_PRIM_LINES, totedge, totvert);
+    for (int i = 0; i < totedge; i++) {
+      GPU_indexbuf_add_line_verts(&elb, edges[i][0], edges[i][1]);
+    }
+    GPUIndexBuf *ibo = GPU_indexbuf_build(&elb);
+
+    /* Create batch. */
+    cache->face_wire.batch = GPU_batch_create_ex(
+        GPU_PRIM_LINES, cache->face_wire.pos_nor_in_order, ibo, GPU_BATCH_OWNS_INDEX);
+  }
+
+  /* Batch takes ownership of the wiredata VBO (last argument). */
+  GPU_batch_vertbuf_add_ex(cache->face_wire.batch, vbo_wiredata, true);
+}
+
+/* Get (building on demand, via the wireframe callback) the batch drawing
+ * the active grid's wireframe. Returns NULL when wireframe drawing is
+ * disabled or there is no active grid. */
+GPUBatch *DRW_volume_batch_cache_get_wireframes_face(Volume *volume)
+{
+  if (volume->display.wireframe_type == VOLUME_WIREFRAME_NONE) {
+    return NULL;
+  }
+
+  VolumeBatchCache *cache = volume_batch_cache_get(volume);
+
+  if (cache->face_wire.batch == NULL) {
+    VolumeGrid *volume_grid = BKE_volume_grid_active_get(volume);
+    if (volume_grid == NULL) {
+      return NULL;
+    }
+
+    /* Create wireframe from OpenVDB tree. */
+    BKE_volume_grid_wireframe(volume, volume_grid, drw_volume_wireframe_cb, volume);
+  }
+
+  return cache->face_wire.batch;
+}
+
+/* Find the cached entry for a grid by name, or create one and build its
+ * dense 3D texture from the OpenVDB tree. The entry is always returned,
+ * but its `texture` stays NULL when the grid is unsupported or empty. */
+static DRWVolumeGrid *volume_grid_cache_get(Volume *volume,
+                                            VolumeGrid *grid,
+                                            VolumeBatchCache *cache)
+{
+  const char *name = BKE_volume_grid_name(grid);
+
+  /* Return cached grid. */
+  DRWVolumeGrid *cache_grid;
+  for (cache_grid = cache->grids.first; cache_grid; cache_grid = cache_grid->next) {
+    if (STREQ(cache_grid->name, name)) {
+      return cache_grid;
+    }
+  }
+
+  /* Allocate new grid. */
+  cache_grid = MEM_callocN(sizeof(DRWVolumeGrid), __func__);
+  cache_grid->name = BLI_strdup(name);
+  BLI_addtail(&cache->grids, cache_grid);
+
+  /* TODO: can we load this earlier, avoid accessing the global and take
+   * advantage of dependency graph multithreading? */
+  BKE_volume_load(volume, G.main);
+
+  /* Test if we support textures with the number of channels. */
+  size_t channels = BKE_volume_grid_channels(grid);
+  if (!ELEM(channels, 1, 3)) {
+    return cache_grid;
+  }
+
+  /* Load grid tree into memory, if not loaded already. */
+  const bool was_loaded = BKE_volume_grid_is_loaded(grid);
+  BKE_volume_grid_load(volume, grid);
+
+  /* Compute dense voxel grid size. */
+  /* Resolution stays all-zero when the grid has no dense bounds. */
+  int64_t dense_min[3], dense_max[3], resolution[3] = {0};
+  if (BKE_volume_grid_dense_bounds(volume, grid, dense_min, dense_max)) {
+    resolution[0] = dense_max[0] - dense_min[0];
+    resolution[1] = dense_max[1] - dense_min[1];
+    resolution[2] = dense_max[2] - dense_min[2];
+  }
+  /* NOTE(review): this product can overflow for extreme resolutions —
+   * presumably bounds are clamped upstream; confirm. */
+  size_t num_voxels = resolution[0] * resolution[1] * resolution[2];
+  size_t elem_size = sizeof(float) * channels;
+
+  /* Allocate and load voxels. */
+  float *voxels = (num_voxels > 0) ? MEM_malloc_arrayN(num_voxels, elem_size, __func__) : NULL;
+  if (voxels != NULL) {
+    BKE_volume_grid_dense_voxels(volume, grid, dense_min, dense_max, voxels);
+
+    /* Create GPU texture. */
+    cache_grid->texture = GPU_texture_create_3d(resolution[0],
+                                                resolution[1],
+                                                resolution[2],
+                                                (channels == 3) ? GPU_RGB16F : GPU_R16F,
+                                                voxels,
+                                                NULL);
+
+    /* Set up channel swizzling so 1-channel grids sample as grayscale. */
+    GPU_texture_bind(cache_grid->texture, 0);
+    GPU_texture_swizzle_channel_auto(cache_grid->texture, channels);
+    GPU_texture_unbind(cache_grid->texture);
+
+    MEM_freeN(voxels);
+
+    /* Compute transform matrices. */
+    BKE_volume_grid_dense_transform_matrix(
+        grid, dense_min, dense_max, cache_grid->texture_to_object);
+    invert_m4_m4(cache_grid->object_to_texture, cache_grid->texture_to_object);
+  }
+
+  /* Free grid from memory if it wasn't previously loaded. */
+  if (!was_loaded) {
+    BKE_volume_grid_unload(volume, grid);
+  }
+
+  return cache_grid;
+}
+
+/* Get (building on demand) the cached GPU data for a grid. Returns NULL
+ * when no texture could be created (unsupported channel count, empty grid). */
+DRWVolumeGrid *DRW_volume_batch_cache_get_grid(Volume *volume, VolumeGrid *volume_grid)
+{
+  VolumeBatchCache *cache = volume_batch_cache_get(volume);
+  DRWVolumeGrid *grid = volume_grid_cache_get(volume, volume_grid, cache);
+  return (grid->texture != NULL) ? grid : NULL;
+}
+
+/* Material count for drawing; always at least 1 (default material). */
+int DRW_volume_material_count_get(Volume *volume)
+{
+  return max_ii(1, volume->totcol);
+}