git.blender.org/blender.git
author    Jacques Lucke <jacques@blender.org>  2022-04-06 16:47:58 +0300
committer Jacques Lucke <jacques@blender.org>  2022-04-06 16:47:58 +0300
commit    0d66f3ae0db109f7854767a188c4574e101e893d (patch)
tree      3bf0ec7b86c315b82b6f0e773b78e8c574352ff3 /source
parent    55661f7c21aba43b520478c2be88a76546d47b17 (diff)
progress
Diffstat (limited to 'source')
 -rw-r--r--  source/blender/blenlib/BLI_generic_virtual_array.hh     | 44
 -rw-r--r--  source/blender/blenlib/intern/generic_virtual_array.cc  | 26
 2 files changed, 70 insertions(+), 0 deletions(-)
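
This commit adds materialize_compressed() and materialize_compressed_to_uninitialized() to the generic virtual array classes. The standalone sketch below is not part of the patch; it uses simplified types (a plain index list instead of IndexMask) to illustrate the semantics these names suggest: a plain materialize writes each value at its original index, so the destination must cover the highest masked index, while the compressed variant packs the selected values densely, so the destination only needs mask.size() elements.

// Standalone sketch (not Blender code): illustrates the assumed semantics of
// compressed materialization, with the mask modeled as a sorted index list.
#include <cstdint>
#include <iostream>
#include <vector>

// Plain materialization: values are written at their original indices, so the
// destination must be at least as large as the highest masked index + 1.
template<typename T>
void materialize(const std::vector<T> &src, const std::vector<int64_t> &mask, T *dst)
{
  for (const int64_t i : mask) {
    dst[i] = src[i];
  }
}

// Compressed materialization: values are packed densely, so the destination
// only has to hold mask.size() elements.
template<typename T>
void materialize_compressed(const std::vector<T> &src, const std::vector<int64_t> &mask, T *dst)
{
  for (size_t out = 0; out < mask.size(); out++) {
    dst[out] = src[mask[out]];
  }
}

int main()
{
  const std::vector<int> src = {10, 11, 12, 13, 14, 15};
  const std::vector<int64_t> mask = {1, 3, 5};

  std::vector<int> sparse(src.size());
  materialize(src, mask, sparse.data()); /* Fills sparse[1], sparse[3], sparse[5]. */

  std::vector<int> dense(mask.size());
  materialize_compressed(src, mask, dense.data());
  for (const int v : dense) {
    std::cout << v << ' '; /* Prints: 11 13 15 */
  }
  std::cout << '\n';
}
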
diff --git a/source/blender/blenlib/BLI_generic_virtual_array.hh b/source/blender/blenlib/BLI_generic_virtual_array.hh
index cb1a984c8ce..6325a9e4d21 100644
--- a/source/blender/blenlib/BLI_generic_virtual_array.hh
+++ b/source/blender/blenlib/BLI_generic_virtual_array.hh
@@ -136,6 +136,9 @@ class GVArrayCommon {
void materialize_to_uninitialized(void *dst) const;
void materialize_to_uninitialized(const IndexMask mask, void *dst) const;
+ void materialize_compressed(IndexMask mask, void *dst) const;
+ void materialize_compressed_to_uninitialized(IndexMask mask, void *dst) const;
+
/**
* Returns true when the virtual array is stored as a span internally.
*/
@@ -339,6 +342,16 @@ template<typename T> class GVArrayImpl_For_VArray : public GVArrayImpl {
varray_.materialize_to_uninitialized(mask, MutableSpan((T *)dst, mask.min_array_size()));
}
+ void materialize_compressed(const IndexMask mask, void *dst) const override
+ {
+ varray_.materialize_compressed(mask, MutableSpan((T *)dst, mask.size()));
+ }
+
+ void materialize_compressed_to_uninitialized(const IndexMask mask, void *dst) const override
+ {
+ varray_.materialize_compressed_to_uninitialized(mask, MutableSpan((T *)dst, mask.size()));
+ }
+
bool try_assign_VArray(void *varray) const override
{
*(VArray<T> *)varray = varray_;
@@ -403,6 +416,27 @@ template<typename T> class VArrayImpl_For_GVArray : public VArrayImpl<T> {
{
return varray_.may_have_ownership();
}
+
+ void materialize(IndexMask mask, MutableSpan<T> r_span) const override
+ {
+ varray_.materialize(mask, r_span.data());
+ }
+
+ void materialize_to_uninitialized(IndexMask mask, MutableSpan<T> r_span) const override
+ {
+ varray_.materialize_to_uninitialized(mask, r_span.data());
+ }
+
+ void materialize_compressed(IndexMask mask, MutableSpan<T> r_span) const override
+ {
+ varray_.materialize_compressed(mask, r_span.data());
+ }
+
+ void materialize_compressed_to_uninitialized(IndexMask mask,
+ MutableSpan<T> r_span) const override
+ {
+ varray_.materialize_compressed_to_uninitialized(mask, r_span.data());
+ }
};
/* Used to convert any typed virtual mutable array into a generic one. */
@@ -482,6 +516,16 @@ template<typename T> class GVMutableArrayImpl_For_VMutableArray : public GVMutab
varray_.materialize_to_uninitialized(mask, MutableSpan((T *)dst, mask.min_array_size()));
}
+ void materialize_compressed(const IndexMask mask, void *dst) const override
+ {
+ varray_.materialize_compressed(mask, MutableSpan((T *)dst, mask.size()));
+ }
+
+ void materialize_compressed_to_uninitialized(const IndexMask mask, void *dst) const override
+ {
+ varray_.materialize_compressed_to_uninitialized(mask, MutableSpan((T *)dst, mask.size()));
+ }
+
bool try_assign_VArray(void *varray) const override
{
*(VArray<T> *)varray = varray_;
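
In the header hunks above, both wrapper classes forward the new compressed calls across the typed/generic boundary; on the generic side the destination is a raw void pointer wrapped in a MutableSpan sized for mask.size() elements rather than mask.min_array_size(). The sketch below is a hypothetical, minimal version of that type-erasure pattern with invented names and a plain index list in place of IndexMask; it is not Blender's actual API.

// Hypothetical sketch of the type-erasure pattern used by the wrappers above:
// a typed container is exposed through a generic interface whose destination
// is an untyped pointer. Names are invented for illustration.
#include <cstdint>
#include <iostream>
#include <vector>

struct GenericSource {
  virtual ~GenericSource() = default;
  // The caller promises that `dst` points to at least `indices.size()` elements.
  virtual void materialize_compressed(const std::vector<int64_t> &indices, void *dst) const = 0;
};

template<typename T> class TypedSource : public GenericSource {
 private:
  std::vector<T> data_;

 public:
  explicit TypedSource(std::vector<T> data) : data_(std::move(data)) {}

  void materialize_compressed(const std::vector<int64_t> &indices, void *dst) const override
  {
    // Reinterpret the untyped destination and pack the selected values densely.
    T *typed_dst = static_cast<T *>(dst);
    for (size_t out = 0; out < indices.size(); out++) {
      typed_dst[out] = data_[indices[out]];
    }
  }
};

int main()
{
  const TypedSource<float> typed({0.5f, 1.5f, 2.5f, 3.5f});
  const GenericSource &generic = typed; /* Only the type-erased interface is used below. */

  const std::vector<int64_t> mask = {0, 2};
  std::vector<float> dst(mask.size());
  generic.materialize_compressed(mask, dst.data());
  std::cout << dst[0] << ' ' << dst[1] << '\n'; /* Prints: 0.5 2.5 */
}
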
diff --git a/source/blender/blenlib/intern/generic_virtual_array.cc b/source/blender/blenlib/intern/generic_virtual_array.cc
index b105d4452fc..6cdbbde671a 100644
--- a/source/blender/blenlib/intern/generic_virtual_array.cc
+++ b/source/blender/blenlib/intern/generic_virtual_array.cc
@@ -505,6 +505,22 @@ class GVArrayImpl_For_SlicedGVArray : public GVArrayImpl {
{
varray_.get_internal_single(r_value);
}
+
+ void materialize_compressed_to_uninitialized(const IndexMask mask, void *dst) const override
+ {
+ if (mask.is_range()) {
+ const IndexRange mask_range = mask.as_range();
+ const IndexRange offset_mask_range{mask_range.start() + offset_, mask_range.size()};
+ varray_.materialize_compressed_to_uninitialized(offset_mask_range, dst);
+ }
+ else {
+ Vector<int64_t, 32> offset_mask_indices(mask.size());
+ for (const int64_t i : mask.index_range()) {
+ offset_mask_indices[i] = mask[i] + offset_;
+ }
+ varray_.materialize_compressed_to_uninitialized(offset_mask_indices.as_span(), dst);
+ }
+ }
};
/** \} */
@@ -562,6 +578,16 @@ void GVArrayCommon::materialize_to_uninitialized(const IndexMask mask, void *dst
impl_->materialize_to_uninitialized(mask, dst);
}
+void GVArrayCommon::materialize_compressed(IndexMask mask, void *dst) const
+{
+ impl_->materialize_compressed(mask, dst);
+}
+
+void GVArrayCommon::materialize_compressed_to_uninitialized(IndexMask mask, void *dst) const
+{
+ impl_->materialize_compressed_to_uninitialized(mask, dst);
+}
+
bool GVArrayCommon::may_have_ownership() const
{
return impl_->may_have_ownership();
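
In generic_virtual_array.cc above, GVArrayImpl_For_SlicedGVArray implements materialize_compressed_to_uninitialized by shifting the mask by the slice offset before delegating to the wrapped array, with a fast path when the mask is a contiguous range. The standalone sketch below shows the same offsetting idea in simplified form: it copies directly instead of delegating, and uses a hypothetical mask type in place of IndexMask.

// Simplified sketch of the offsetting logic in the sliced-array override above.
// The mask type is hypothetical: either a contiguous range or an explicit
// index list, mirroring IndexMask::is_range() / as_range().
#include <cstdint>
#include <iostream>
#include <vector>

struct SimpleMask {
  bool is_range = false;
  int64_t range_start = 0;
  int64_t range_size = 0;
  std::vector<int64_t> indices; /* Used when is_range is false. */
};

// Pack the values selected by `mask` (interpreted relative to a slice that
// starts at `offset` in the full array) densely into `dst`.
void materialize_compressed_sliced(const std::vector<int> &full_array,
                                   const int64_t offset,
                                   const SimpleMask &mask,
                                   int *dst)
{
  if (mask.is_range) {
    // Fast path: a contiguous mask stays contiguous after shifting by the offset.
    for (int64_t i = 0; i < mask.range_size; i++) {
      dst[i] = full_array[mask.range_start + offset + i];
    }
  }
  else {
    // General path: shift every index individually.
    for (size_t i = 0; i < mask.indices.size(); i++) {
      dst[i] = full_array[mask.indices[i] + offset];
    }
  }
}

int main()
{
  const std::vector<int> full = {0, 10, 20, 30, 40, 50, 60};
  const int64_t slice_offset = 2; /* The slice starts at full[2]. */

  SimpleMask mask;
  mask.indices = {0, 3}; /* Relative to the slice, i.e. full[2] and full[5]. */

  std::vector<int> dst(mask.indices.size());
  materialize_compressed_sliced(full, slice_offset, mask, dst.data());
  std::cout << dst[0] << ' ' << dst[1] << '\n'; /* Prints: 20 50 */
}
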