diff options
Diffstat (limited to 'source/blender/blenlib/BLI_virtual_array.hh')
-rw-r--r-- | source/blender/blenlib/BLI_virtual_array.hh | 88 |
1 file changed, 47 insertions, 41 deletions
diff --git a/source/blender/blenlib/BLI_virtual_array.hh b/source/blender/blenlib/BLI_virtual_array.hh index 438fcc4b8f7..19ee2334bd9 100644 --- a/source/blender/blenlib/BLI_virtual_array.hh +++ b/source/blender/blenlib/BLI_virtual_array.hh @@ -23,6 +23,8 @@ * see of the increased compile time and binary size is worth it. */ +#include <optional> + #include "BLI_any.hh" #include "BLI_array.hh" #include "BLI_index_mask.hh" @@ -106,25 +108,7 @@ template<typename T> class VArrayImpl { */ virtual void materialize(IndexMask mask, MutableSpan<T> r_span) const { - T *dst = r_span.data(); - /* Optimize for a few different common cases. */ - const CommonVArrayInfo info = this->common_info(); - switch (info.type) { - case CommonVArrayInfo::Type::Any: { - mask.foreach_index([&](const int64_t i) { dst[i] = this->get(i); }); - break; - } - case CommonVArrayInfo::Type::Span: { - const T *src = static_cast<const T *>(info.data); - mask.foreach_index([&](const int64_t i) { dst[i] = src[i]; }); - break; - } - case CommonVArrayInfo::Type::Single: { - const T single = *static_cast<const T *>(info.data); - mask.foreach_index([&](const int64_t i) { dst[i] = single; }); - break; - } - } + mask.foreach_index([&](const int64_t i) { r_span[i] = this->get(i); }); } /** @@ -133,24 +117,7 @@ template<typename T> class VArrayImpl { virtual void materialize_to_uninitialized(IndexMask mask, MutableSpan<T> r_span) const { T *dst = r_span.data(); - /* Optimize for a few different common cases. 
*/ - const CommonVArrayInfo info = this->common_info(); - switch (info.type) { - case CommonVArrayInfo::Type::Any: { - mask.foreach_index([&](const int64_t i) { new (dst + i) T(this->get(i)); }); - break; - } - case CommonVArrayInfo::Type::Span: { - const T *src = static_cast<const T *>(info.data); - mask.foreach_index([&](const int64_t i) { new (dst + i) T(src[i]); }); - break; - } - case CommonVArrayInfo::Type::Single: { - const T single = *static_cast<const T *>(info.data); - mask.foreach_index([&](const int64_t i) { new (dst + i) T(single); }); - break; - } - } + mask.foreach_index([&](const int64_t i) { new (dst + i) T(this->get(i)); }); } /** @@ -286,8 +253,20 @@ template<typename T> class VArrayImpl_For_Span : public VMutableArrayImpl<T> { return data_ == static_cast<const T *>(other_info.data); } + void materialize(IndexMask mask, MutableSpan<T> r_span) const override + { + mask.foreach_index([&](const int64_t i) { r_span[i] = data_[i]; }); + } + + void materialize_to_uninitialized(IndexMask mask, MutableSpan<T> r_span) const override + { + T *dst = r_span.data(); + mask.foreach_index([&](const int64_t i) { new (dst + i) T(data_[i]); }); + } + void materialize_compressed(IndexMask mask, MutableSpan<T> r_span) const override { + BLI_assert(mask.size() == r_span.size()); mask.to_best_mask_type([&](auto best_mask) { for (const int64_t i : IndexRange(best_mask.size())) { r_span[i] = data_[best_mask[i]]; @@ -298,6 +277,7 @@ template<typename T> class VArrayImpl_For_Span : public VMutableArrayImpl<T> { void materialize_compressed_to_uninitialized(IndexMask mask, MutableSpan<T> r_span) const override { + BLI_assert(mask.size() == r_span.size()); T *dst = r_span.data(); mask.to_best_mask_type([&](auto best_mask) { for (const int64_t i : IndexRange(best_mask.size())) { @@ -315,6 +295,12 @@ template<typename T> class VArrayImpl_For_Span_final final : public VArrayImpl_F public: using VArrayImpl_For_Span<T>::VArrayImpl_For_Span; + VArrayImpl_For_Span_final(const 
Span<T> data) + /* Cast const away, because the implementation for const and non const spans is shared. */ + : VArrayImpl_For_Span<T>({const_cast<T *>(data.data()), data.size()}) + { + } + private: CommonVArrayInfo common_info() const final { @@ -370,6 +356,17 @@ template<typename T> class VArrayImpl_For_Single final : public VArrayImpl<T> { return CommonVArrayInfo(CommonVArrayInfo::Type::Single, true, &value_); } + void materialize(IndexMask mask, MutableSpan<T> r_span) const override + { + r_span.fill_indices(mask, value_); + } + + void materialize_to_uninitialized(IndexMask mask, MutableSpan<T> r_span) const override + { + T *dst = r_span.data(); + mask.foreach_index([&](const int64_t i) { new (dst + i) T(value_); }); + } + void materialize_compressed(IndexMask mask, MutableSpan<T> r_span) const override { BLI_assert(mask.size() == r_span.size()); @@ -797,6 +794,18 @@ template<typename T> class VArrayCommon { } /** + * Return the value that is returned for every index, if the array is stored as a single value. + */ + std::optional<T> get_if_single() const + { + const CommonVArrayInfo info = impl_->common_info(); + if (info.type != CommonVArrayInfo::Type::Single) { + return std::nullopt; + } + return *static_cast<const T *>(info.data); + } + + /** * Return true when the other virtual references the same underlying memory. */ bool is_same(const VArrayCommon<T> &other) const @@ -898,10 +907,7 @@ template<typename T> class VArray : public VArrayCommon<T> { VArray(varray_tag::span /* tag */, Span<T> span) { - /* Cast const away, because the virtual array implementation for const and non const spans is - * shared. */ - MutableSpan<T> mutable_span{const_cast<T *>(span.data()), span.size()}; - this->template emplace<VArrayImpl_For_Span_final<T>>(mutable_span); + this->template emplace<VArrayImpl_For_Span_final<T>>(span); } VArray(varray_tag::single /* tag */, T value, const int64_t size) |