git.blender.org/blender.git
author     Jacques Lucke <jacques@blender.org>  2022-04-07 11:02:34 +0300
committer  Jacques Lucke <jacques@blender.org>  2022-04-07 11:02:34 +0300
commit     384a02a214cad88f3180deee36b22529c213ddaf (patch)
tree       bf81dfad32c961f6be6ba8b529144bfb180fba69 /source/blender/blenlib/BLI_virtual_array.hh
parent     2aff04917f9e0420174e01dff0936d5237a7bbbd (diff)
BLI: add missing materialize methods for virtual arrays
This does two things:

* Introduce new `materialize_compressed` methods. Those are used when the
  destination array should not have any gaps.
* Add materialize methods in various classes where they were missing (and
  therefore caused overhead, because slower fallbacks had to be used).
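As a usage sketch (illustrative only, not part of the commit): `materialize` writes each masked element at its original index in the destination span, so the span has to cover the whole index range, while `materialize_compressed` packs the results to the front, so the span only needs `mask.size()` elements. The values and the mask below are made up.

#include "BLI_array.hh"
#include "BLI_index_mask.hh"
#include "BLI_vector.hh"
#include "BLI_virtual_array.hh"

/* Illustrative sketch of the difference between the two materialize variants. */
static void materialize_compressed_example()
{
  blender::Array<int> values = {10, 20, 30, 40, 50};
  blender::VArray<int> varray = blender::VArray<int>::ForSpan(values.as_span());

  blender::Vector<int64_t> indices = {1, 3, 4};
  blender::IndexMask mask{indices.as_span()};

  /* #materialize keeps the virtual array indices: only gapped[1], gapped[3] and gapped[4]
   * are written, so the destination has to be as large as the virtual array. */
  blender::Array<int> gapped(values.size(), 0);
  varray.materialize(mask, gapped.as_mutable_span());

  /* #materialize_compressed packs the values to the front: packed becomes {20, 40, 50}. */
  blender::Array<int> packed(mask.size());
  varray.materialize_compressed(mask, packed.as_mutable_span());
}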
Diffstat (limited to 'source/blender/blenlib/BLI_virtual_array.hh')
-rw-r--r--  source/blender/blenlib/BLI_virtual_array.hh | 121
1 file changed, 120 insertions(+), 1 deletion(-)
diff --git a/source/blender/blenlib/BLI_virtual_array.hh b/source/blender/blenlib/BLI_virtual_array.hh
index 3aa25bf6819..206e0191a54 100644
--- a/source/blender/blenlib/BLI_virtual_array.hh
+++ b/source/blender/blenlib/BLI_virtual_array.hh
@@ -107,7 +107,7 @@ template<typename T> class VArrayImpl {
/**
* Copy values from the virtual array into the provided span. The index of the value in the
- * virtual is the same as the index in the span.
+ * virtual array is the same as the index in the span.
*/
virtual void materialize(IndexMask mask, MutableSpan<T> r_span) const
{
@@ -147,6 +147,35 @@ template<typename T> class VArrayImpl {
}
/**
+ * Copy values from the virtual array into the provided span. Contrary to #materialize, the index
+ * in virtual array is not the same as the index in the output span. Instead, the span is filled
+ * without gaps.
+ */
+ virtual void materialize_compressed(IndexMask mask, MutableSpan<T> r_span) const
+ {
+ BLI_assert(mask.size() == r_span.size());
+ mask.to_best_mask_type([&](auto best_mask) {
+ for (const int64_t i : IndexRange(best_mask.size())) {
+ r_span[i] = this->get(best_mask[i]);
+ }
+ });
+ }
+
+ /**
+ * Same as #materialize_compressed but #r_span is expected to be uninitialized.
+ */
+ virtual void materialize_compressed_to_uninitialized(IndexMask mask, MutableSpan<T> r_span) const
+ {
+ BLI_assert(mask.size() == r_span.size());
+ T *dst = r_span.data();
+ mask.to_best_mask_type([&](auto best_mask) {
+ for (const int64_t i : IndexRange(best_mask.size())) {
+ new (dst + i) T(this->get(best_mask[i]));
+ }
+ });
+ }
+
+ /**
* If this virtual wraps another #GVArray, this method should assign the wrapped array to the
* provided reference. This allows losslessly converting between generic and typed virtual
* arrays in all cases.
@@ -265,6 +294,25 @@ template<typename T> class VArrayImpl_For_Span : public VMutableArrayImpl<T> {
const Span<T> other_span = other.get_internal_span();
return data_ == other_span.data();
}
+
+ void materialize_compressed(IndexMask mask, MutableSpan<T> r_span) const
+ {
+ mask.to_best_mask_type([&](auto best_mask) {
+ for (const int64_t i : IndexRange(best_mask.size())) {
+ r_span[i] = data_[best_mask[i]];
+ }
+ });
+ }
+
+ void materialize_compressed_to_uninitialized(IndexMask mask, MutableSpan<T> r_span) const
+ {
+ T *dst = r_span.data();
+ mask.to_best_mask_type([&](auto best_mask) {
+ for (const int64_t i : IndexRange(best_mask.size())) {
+ new (dst + i) T(data_[best_mask[i]]);
+ }
+ });
+ }
};
/**
@@ -341,6 +389,20 @@ template<typename T> class VArrayImpl_For_Single final : public VArrayImpl<T> {
{
return value_;
}
+
+ void materialize_compressed(IndexMask mask, MutableSpan<T> r_span) const override
+ {
+ BLI_assert(mask.size() == r_span.size());
+ UNUSED_VARS_NDEBUG(mask);
+ r_span.fill(value_);
+ }
+
+ void materialize_compressed_to_uninitialized(IndexMask mask,
+ MutableSpan<T> r_span) const override
+ {
+ BLI_assert(mask.size() == r_span.size());
+ uninitialized_fill_n(r_span.data(), mask.size(), value_);
+ }
};
/**
@@ -374,6 +436,29 @@ template<typename T, typename GetFunc> class VArrayImpl_For_Func final : public
T *dst = r_span.data();
mask.foreach_index([&](const int64_t i) { new (dst + i) T(get_func_(i)); });
}
+
+ void materialize_compressed(IndexMask mask, MutableSpan<T> r_span) const override
+ {
+ BLI_assert(mask.size() == r_span.size());
+ T *dst = r_span.data();
+ mask.to_best_mask_type([&](auto best_mask) {
+ for (const int64_t i : IndexRange(best_mask.size())) {
+ dst[i] = get_func_(best_mask[i]);
+ }
+ });
+ }
+
+ void materialize_compressed_to_uninitialized(IndexMask mask,
+ MutableSpan<T> r_span) const override
+ {
+ BLI_assert(mask.size() == r_span.size());
+ T *dst = r_span.data();
+ mask.to_best_mask_type([&](auto best_mask) {
+ for (const int64_t i : IndexRange(best_mask.size())) {
+ new (dst + i) T(get_func_(best_mask[i]));
+ }
+ });
+ }
};
/**
@@ -422,6 +507,29 @@ class VArrayImpl_For_DerivedSpan final : public VMutableArrayImpl<ElemT> {
mask.foreach_index([&](const int64_t i) { new (dst + i) ElemT(GetFunc(data_[i])); });
}
+ void materialize_compressed(IndexMask mask, MutableSpan<ElemT> r_span) const override
+ {
+ BLI_assert(mask.size() == r_span.size());
+ ElemT *dst = r_span.data();
+ mask.to_best_mask_type([&](auto best_mask) {
+ for (const int64_t i : IndexRange(best_mask.size())) {
+ dst[i] = GetFunc(data_[best_mask[i]]);
+ }
+ });
+ }
+
+ void materialize_compressed_to_uninitialized(IndexMask mask,
+ MutableSpan<ElemT> r_span) const override
+ {
+ BLI_assert(mask.size() == r_span.size());
+ ElemT *dst = r_span.data();
+ mask.to_best_mask_type([&](auto best_mask) {
+ for (const int64_t i : IndexRange(best_mask.size())) {
+ new (dst + i) ElemT(GetFunc(data_[best_mask[i]]));
+ }
+ });
+ }
+
bool may_have_ownership() const override
{
return false;
@@ -740,6 +848,17 @@ template<typename T> class VArrayCommon {
impl_->materialize_to_uninitialized(mask, r_span);
}
+ /** Copy some elements of the virtual array into a span. */
+ void materialize_compressed(IndexMask mask, MutableSpan<T> r_span) const
+ {
+ impl_->materialize_compressed(mask, r_span);
+ }
+
+ void materialize_compressed_to_uninitialized(IndexMask mask, MutableSpan<T> r_span) const
+ {
+ impl_->materialize_compressed_to_uninitialized(mask, r_span);
+ }
+
/** See #GVArrayImpl::try_assign_GVArray. */
bool try_assign_GVArray(GVArray &varray) const
{