git.blender.org/blender.git
author     Jacques Lucke <jacques@blender.org>  2021-11-16 12:15:51 +0300
committer  Jacques Lucke <jacques@blender.org>  2021-11-16 12:16:30 +0300
commit     d4c868da9f97a06c3457b8eafd344a23ed704874 (patch)
tree       dc09e69c29ef308260f40f413067d53a2247feb7 /source/blender/blenlib
parent     6d35972b061149fda1adce105731d338c471ba87 (diff)
Geometry Nodes: refactor virtual array system
Goals of this refactor:
* Simplify creating virtual arrays.
* Simplify passing virtual arrays around.
* Simplify converting between typed and generic virtual arrays.
* Reduce memory allocations.

As a quick reminder, a virtual array is a data structure that behaves like an array (i.e. it can be accessed using an index). However, it may not actually be stored as an array internally. The two most important implementations of virtual arrays are those that correspond to an actual plain array and those that have the same value for every index. However, many more implementations exist for various reasons (interfacing with legacy attributes, unified iterator over all points in multiple splines, ...).

With this refactor the core types (`VArray`, `GVArray`, `VMutableArray` and `GVMutableArray`) can be used like "normal values". They typically live on the stack. Before, they were usually inside a `std::unique_ptr`. This makes passing them around much easier. Creation of new virtual arrays is also much simpler now due to new constructors. Memory allocations are reduced by making use of small object optimization inside the core types.

Previously, `VArray` was a class with virtual methods that had to be overridden to change the behavior of the virtual array. Now, `VArray` has a fixed size and no virtual methods. Instead it contains a `VArrayImpl` that is similar to the old `VArray`. `VArrayImpl` should rarely be used directly, unless a new virtual array implementation is added.

To support the small object optimization for many `VArrayImpl` classes, a new `blender::Any` type is added. It is similar to `std::any` but provides two additional features: it has an adjustable inline buffer size and alignment (the inline buffer size of `std::any` can't be relied on and is usually too small for our use case here), and it can store additional user-defined type information without increasing the stack size.

Differential Revision: https://developer.blender.org/D12986
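For readers unfamiliar with the new API, here is a minimal usage sketch. It is not part of this commit; it mirrors the updated tests further down in the diff, and the function and variable names are illustrative only.

#include <array>

#include "BLI_virtual_array.hh"

using namespace blender;

static void varray_usage_sketch()
{
  std::array<int, 4> data = {1, 2, 3, 4};

  /* Virtual arrays are now small value types that typically live on the stack. */
  VArray<int> from_span = VArray<int>::ForSpan(data);  /* Wraps existing memory. */
  VArray<int> single = VArray<int>::ForSingle(42, 10); /* Same value at every index. */
  VArray<int> computed = VArray<int>::ForFunc(10, [](int64_t i) { return int(i * i); });

  /* They can be copied and passed around like normal values. */
  VArray<int> copy = computed;
  const int nine = copy[3];

  /* Mutable virtual arrays convert losslessly to read-only ones. */
  VMutableArray<int> mutable_varray = VMutableArray<int>::ForSpan(data);
  VArray<int> read_only = mutable_varray;

  (void)from_span; (void)single; (void)nine; (void)read_only;
}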
Diffstat (limited to 'source/blender/blenlib')
-rw-r--r--  source/blender/blenlib/BLI_any.hh                       |  319
-rw-r--r--  source/blender/blenlib/BLI_virtual_array.hh             | 1078
-rw-r--r--  source/blender/blenlib/CMakeLists.txt                   |    2
-rw-r--r--  source/blender/blenlib/tests/BLI_any_test.cc            |  108
-rw-r--r--  source/blender/blenlib/tests/BLI_virtual_array_test.cc  |   55
5 files changed, 1270 insertions, 292 deletions
diff --git a/source/blender/blenlib/BLI_any.hh b/source/blender/blenlib/BLI_any.hh
new file mode 100644
index 00000000000..0fc5de5540f
--- /dev/null
+++ b/source/blender/blenlib/BLI_any.hh
@@ -0,0 +1,319 @@
+/*
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#pragma once
+
+/** \file
+ * \ingroup bli
+ *
+ * A #blender::Any is a type-safe container for single values of any copy constructible type.
+ * It is similar to #std::any but provides the following two additional features:
+ * - Adjustable inline buffer capacity and alignment. #std::any has a small inline buffer in most
+ * implementations as well, but its size is not guaranteed.
+ * - Can store additional user-defined type information without increasing the stack size of #Any.
+ */
+
+#include <algorithm>
+#include <utility>
+
+#include "BLI_memory_utils.hh"
+
+namespace blender {
+
+namespace detail {
+
+/**
+ * Contains function pointers that manage the memory in an #Any.
+ * Additional type specific #ExtraInfo can be embedded here as well.
+ */
+template<typename ExtraInfo> struct AnyTypeInfo {
+ void (*copy_construct)(void *dst, const void *src);
+ void (*move_construct)(void *dst, void *src);
+ void (*destruct)(void *src);
+ const void *(*get)(const void *src);
+ ExtraInfo extra_info;
+
+ /**
+ * Used when #T is stored directly in the inline buffer of the #Any.
+ */
+ template<typename T> static const AnyTypeInfo &get_for_inline()
+ {
+ static AnyTypeInfo funcs = {[](void *dst, const void *src) { new (dst) T(*(const T *)src); },
+ [](void *dst, void *src) { new (dst) T(std::move(*(T *)src)); },
+ [](void *src) { ((T *)src)->~T(); },
+ [](const void *src) { return src; },
+ ExtraInfo::template get<T>()};
+ return funcs;
+ }
+
+ /**
+ * Used when #T can't be stored directly in the inline buffer and is stored in a #std::unique_ptr
+ * instead. In this scenario, the #std::unique_ptr is stored in the inline buffer.
+ */
+ template<typename T> static const AnyTypeInfo &get_for_unique_ptr()
+ {
+ using Ptr = std::unique_ptr<T>;
+ static AnyTypeInfo funcs = {
+ [](void *dst, const void *src) { new (dst) Ptr(new T(**(const Ptr *)src)); },
+ [](void *dst, void *src) { new (dst) Ptr(new T(std::move(**(Ptr *)src))); },
+ [](void *src) { ((Ptr *)src)->~Ptr(); },
+ [](const void *src) -> const void * { return &**(const Ptr *)src; },
+ ExtraInfo::template get<T>()};
+ return funcs;
+ }
+
+ /**
+ * Used when the #Any does not contain any type currently.
+ */
+ static const AnyTypeInfo &get_for_empty()
+ {
+ static AnyTypeInfo funcs = {[](void *UNUSED(dst), const void *UNUSED(src)) {},
+ [](void *UNUSED(dst), void *UNUSED(src)) {},
+ [](void *UNUSED(src)) {},
+ [](const void *UNUSED(src)) -> const void * { return nullptr; },
+ ExtraInfo{}};
+ return funcs;
+ }
+};
+
+/**
+ * Dummy extra info that is used when no additional type information should be stored in the #Any.
+ */
+struct NoExtraInfo {
+ template<typename T> static NoExtraInfo get()
+ {
+ return {};
+ }
+};
+
+} // namespace detail
+
+template<
+ /**
+ * Either void or a struct that contains data members for additional type information.
+ * The struct has to have a static `ExtraInfo get<T>()` method that initializes the struct
+ * based on a type.
+ */
+ typename ExtraInfo = void,
+ /**
+ * Size of the inline buffer. This allows types that are small enough to be stored directly
+ * inside the #Any without an additional allocation.
+ */
+ size_t InlineBufferCapacity = 8,
+ /**
+ * Required minimum alignment of the inline buffer. If this is smaller than the alignment
+ * requirement of a used type, a separate allocation is necessary.
+ */
+ size_t Alignment = 8>
+class Any {
+ private:
+ /* Makes it possible to use void in the template parameters. */
+ using RealExtraInfo =
+ std::conditional_t<std::is_void_v<ExtraInfo>, detail::NoExtraInfo, ExtraInfo>;
+ using Info = detail::AnyTypeInfo<RealExtraInfo>;
+
+ /**
+ * Inline buffer that either contains nothing, the stored value directly, or a #std::unique_ptr
+ * to the value.
+ */
+ AlignedBuffer<std::max(InlineBufferCapacity, sizeof(std::unique_ptr<int>)), Alignment> buffer_{};
+
+ /**
+ * Information about the type that is currently stored.
+ */
+ const Info *info_ = &Info::get_for_empty();
+
+ public:
+ /** Only copy constructible types can be stored in #Any. */
+ template<typename T> static constexpr inline bool is_allowed_v = std::is_copy_constructible_v<T>;
+
+ /**
+ * Checks if the type will be stored in the inline buffer or if it requires a separate
+ * allocation.
+ */
+ template<typename T>
+ static constexpr inline bool is_inline_v = std::is_nothrow_move_constructible_v<T> &&
+ sizeof(T) <= InlineBufferCapacity &&
+ alignof(T) <= Alignment;
+
+ /**
+ * Checks if #T is the same type as this #Any, because in this case the behavior of e.g. the
+ * assignment operator is different.
+ */
+ template<typename T>
+ static constexpr inline bool is_same_any_v = std::is_same_v<std::decay_t<T>, Any>;
+
+ private:
+ template<typename T> const Info &get_info() const
+ {
+ using DecayT = std::decay_t<T>;
+ static_assert(is_allowed_v<DecayT>);
+ if constexpr (is_inline_v<DecayT>) {
+ return Info::template get_for_inline<DecayT>();
+ }
+ else {
+ return Info::template get_for_unique_ptr<DecayT>();
+ }
+ }
+
+ public:
+ Any() = default;
+
+ Any(const Any &other) : info_(other.info_)
+ {
+ info_->copy_construct(&buffer_, &other.buffer_);
+ }
+
+ /**
+ * \note The #other #Any will not be empty afterwards if it was not before. Just its value is in
+ * a moved-from state.
+ */
+ Any(Any &&other) noexcept : info_(other.info_)
+ {
+ info_->move_construct(&buffer_, &other.buffer_);
+ }
+
+ /**
+ * Constructs a new #Any that contains the given type #T from #args. The #std::in_place_type_t is
+ * used to disambiguate this and the copy/move constructors.
+ */
+ template<typename T, typename... Args> explicit Any(std::in_place_type_t<T>, Args &&...args)
+ {
+ using DecayT = std::decay_t<T>;
+ static_assert(is_allowed_v<DecayT>);
+ info_ = &this->template get_info<DecayT>();
+ if constexpr (is_inline_v<DecayT>) {
+ /* Construct the value directly in the inline buffer. */
+ new (&buffer_) DecayT(std::forward<Args>(args)...);
+ }
+ else {
+ /* Construct the value in a new allocation and store a #std::unique_ptr to it in the inline
+ * buffer. */
+ new (&buffer_) std::unique_ptr<DecayT>(new DecayT(std::forward<Args>(args)...));
+ }
+ }
+
+ /**
+ * Constructs a new #Any that contains the given value.
+ */
+ template<typename T, typename X = std::enable_if_t<!is_same_any_v<T>, void>>
+ Any(T &&value) : Any(std::in_place_type<T>, std::forward<T>(value))
+ {
+ }
+
+ ~Any()
+ {
+ info_->destruct(&buffer_);
+ }
+
+ /**
+ * \note: Only needed because the template below does not count as a copy assignment operator.
+ */
+ Any &operator=(const Any &other)
+ {
+ if (this == &other) {
+ return *this;
+ }
+ this->~Any();
+ new (this) Any(other);
+ return *this;
+ }
+
+ /** Assign any value to the #Any. */
+ template<typename T> Any &operator=(T &&other)
+ {
+ if constexpr (is_same_any_v<T>) {
+ if (this == &other) {
+ return *this;
+ }
+ }
+ this->~Any();
+ new (this) Any(std::forward<T>(other));
+ return *this;
+ }
+
+ /** Destruct any existing value to make it empty. */
+ void reset()
+ {
+ info_->destruct(&buffer_);
+ info_ = &Info::get_for_empty();
+ }
+
+ operator bool() const
+ {
+ return this->has_value();
+ }
+
+ bool has_value() const
+ {
+ return info_ != &Info::get_for_empty();
+ }
+
+ template<typename T, typename... Args> std::decay_t<T> &emplace(Args &&...args)
+ {
+ this->~Any();
+ new (this) Any(std::in_place_type<T>, std::forward<Args>(args)...);
+ return this->get<T>();
+ }
+
+ /** Return true when the value that is currently stored is a #T. */
+ template<typename T> bool is() const
+ {
+ return info_ == &this->template get_info<T>();
+ }
+
+ /** Get a pointer to the stored value. */
+ void *get()
+ {
+ return const_cast<void *>(info_->get(&buffer_));
+ }
+
+ /** Get a pointer to the stored value. */
+ const void *get() const
+ {
+ return info_->get(&buffer_);
+ }
+
+ /**
+ * Get a reference to the stored value. This invokes undefined behavior when #T does not have the
+ * correct type.
+ */
+ template<typename T> std::decay_t<T> &get()
+ {
+ BLI_assert(this->is<T>());
+ return *static_cast<std::decay_t<T> *>(this->get());
+ }
+
+ /**
+ * Get a reference to the stored value. This invokes undefined behavior when #T does not have the
+ * correct type.
+ */
+ template<typename T> const std::decay_t<T> &get() const
+ {
+ BLI_assert(this->is<T>());
+ return *static_cast<const std::decay_t<T> *>(this->get());
+ }
+
+ /**
+ * Get extra information that has been stored for the contained type.
+ */
+ const RealExtraInfo &extra_info() const
+ {
+ return info_->extra_info;
+ }
+};
+
+} // namespace blender
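For context, a brief usage sketch of the new #blender::Any container follows. It is not part of this commit and mirrors the tests added in BLI_any_test.cc below; the names SizeInfo and any_usage_sketch are illustrative only.

#include <cstddef>
#include <string>

#include "BLI_any.hh"

using namespace blender;

/* Per-type extra information, analogous to the ExtraSizeInfo struct in the tests below. */
struct SizeInfo {
  size_t size;
  template<typename T> static SizeInfo get()
  {
    return {sizeof(T)};
  }
};

static void any_usage_sketch()
{
  /* Store a value of any copy constructible type; small types live in the inline buffer. */
  Any<> a = 5;
  if (a.is<int>()) {
    int &value = a.get<int>();
    value += 1;
  }

  /* Assigning a value of a different type replaces the previously stored one. */
  a = std::string("hello");

  /* The optional ExtraInfo template parameter records user-defined type information. */
  Any<SizeInfo> b = 3.5f;
  const size_t stored_size = b.extra_info().size; /* Equals sizeof(float). */
  (void)stored_size;
}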
diff --git a/source/blender/blenlib/BLI_virtual_array.hh b/source/blender/blenlib/BLI_virtual_array.hh
index 1c02bce8411..d9f83d3e1cb 100644
--- a/source/blender/blenlib/BLI_virtual_array.hh
+++ b/source/blender/blenlib/BLI_virtual_array.hh
@@ -37,148 +37,98 @@
 * see if the increased compile time and binary size is worth it.
*/
+#include "BLI_any.hh"
#include "BLI_array.hh"
#include "BLI_index_mask.hh"
#include "BLI_span.hh"
namespace blender {
-/* An immutable virtual array. */
-template<typename T> class VArray {
+/* Forward declarations for generic virtual arrays. */
+namespace fn {
+class GVArray;
+class GVMutableArray;
+}; // namespace fn
+
+/**
+ * Implements the specifics of how the elements of a virtual array are accessed. It contains a
+ * bunch of virtual methods that are wrapped by #VArray.
+ */
+template<typename T> class VArrayImpl {
protected:
+ /**
+ * Number of elements in the virtual array. All virtual arrays have a size, but in some cases it
+ * may make sense to set it to the max value.
+ */
int64_t size_;
public:
- VArray(const int64_t size) : size_(size)
+ VArrayImpl(const int64_t size) : size_(size)
{
BLI_assert(size_ >= 0);
}
- virtual ~VArray() = default;
-
- T get(const int64_t index) const
- {
- BLI_assert(index >= 0);
- BLI_assert(index < size_);
- return this->get_impl(index);
- }
+ virtual ~VArrayImpl() = default;
int64_t size() const
{
return size_;
}
- bool is_empty() const
- {
- return size_ == 0;
- }
-
- IndexRange index_range() const
- {
- return IndexRange(size_);
- }
-
- /* Returns true when the virtual array is stored as a span internally. */
- bool is_span() const
- {
- if (size_ == 0) {
- return true;
- }
- return this->is_span_impl();
- }
-
- /* Returns the internally used span of the virtual array. This invokes undefined behavior is the
- * virtual array is not stored as a span internally. */
- Span<T> get_internal_span() const
- {
- BLI_assert(this->is_span());
- if (size_ == 0) {
- return {};
- }
- return this->get_internal_span_impl();
- }
+ /**
+ * Get the element at #index. This does not return a reference, because the value may be computed
+ * on the fly.
+ */
+ virtual T get(const int64_t index) const = 0;
- /* Returns true when the virtual array returns the same value for every index. */
- bool is_single() const
- {
- if (size_ == 1) {
- return true;
- }
- return this->is_single_impl();
- }
-
- /* Returns the value that is returned for every index. This invokes undefined behavior if the
- * virtual array would not return the same value for every index. */
- T get_internal_single() const
- {
- BLI_assert(this->is_single());
- if (size_ == 1) {
- return this->get(0);
- }
- return this->get_internal_single_impl();
- }
-
- /* Get the element at a specific index. Note that this operator cannot be used to assign values
- * to an index, because the return value is not a reference. */
- T operator[](const int64_t index) const
- {
- return this->get(index);
- }
-
- /* Copy the entire virtual array into a span. */
- void materialize(MutableSpan<T> r_span) const
- {
- this->materialize(IndexMask(size_), r_span);
- }
-
- /* Copy some indices of the virtual array into a span. */
- void materialize(IndexMask mask, MutableSpan<T> r_span) const
- {
- BLI_assert(mask.min_array_size() <= size_);
- this->materialize_impl(mask, r_span);
- }
-
- void materialize_to_uninitialized(MutableSpan<T> r_span) const
- {
- this->materialize_to_uninitialized(IndexMask(size_), r_span);
- }
-
- void materialize_to_uninitialized(IndexMask mask, MutableSpan<T> r_span) const
- {
- BLI_assert(mask.min_array_size() <= size_);
- this->materialize_to_uninitialized_impl(mask, r_span);
- }
-
- protected:
- virtual T get_impl(const int64_t index) const = 0;
-
- virtual bool is_span_impl() const
+ /**
+ * Return true when the virtual array is a plain array internally.
+ */
+ virtual bool is_span() const
{
return false;
}
- virtual Span<T> get_internal_span_impl() const
+ /**
+ * Return the span of the virtual array.
+ * This invokes undefined behavior when #is_span returned false.
+ */
+ virtual Span<T> get_internal_span() const
{
+ /* Provide a default implementation, so that subclasses don't have to provide it. This method
+ * should never be called because #is_span returns false by default. */
BLI_assert_unreachable();
return {};
}
- virtual bool is_single_impl() const
+ /**
+ * Return true when the virtual array has the same value at every index.
+ */
+ virtual bool is_single() const
{
return false;
}
- virtual T get_internal_single_impl() const
+ /**
+ * Return the value that is used at every index.
+ * This invokes undefined behavior when #is_single returned false.
+ */
+ virtual T get_internal_single() const
{
/* Provide a default implementation, so that subclasses don't have to provide it. This method
- * should never be called because `is_single_impl` returns false by default. */
+ * should never be called because #is_single returns false by default. */
BLI_assert_unreachable();
return T();
}
- virtual void materialize_impl(IndexMask mask, MutableSpan<T> r_span) const
+ /**
+ * Copy values from the virtual array into the provided span. The index of the value in the
+ * virtual array is the same as the index in the span.
+ */
+ virtual void materialize(IndexMask mask, MutableSpan<T> r_span) const
{
T *dst = r_span.data();
+ /* Optimize for a few different common cases. */
if (this->is_span()) {
const T *src = this->get_internal_span().data();
mask.foreach_index([&](const int64_t i) { dst[i] = src[i]; });
@@ -192,9 +142,13 @@ template<typename T> class VArray {
}
}
- virtual void materialize_to_uninitialized_impl(IndexMask mask, MutableSpan<T> r_span) const
+ /**
+ * Same as #materialize but #r_span is expected to be uninitialized.
+ */
+ virtual void materialize_to_uninitialized(IndexMask mask, MutableSpan<T> r_span) const
{
T *dst = r_span.data();
+ /* Optimize for a few different common cases. */
if (this->is_span()) {
const T *src = this->get_internal_span().data();
mask.foreach_index([&](const int64_t i) { new (dst + i) T(src[i]); });
@@ -207,43 +161,48 @@ template<typename T> class VArray {
mask.foreach_index([&](const int64_t i) { new (dst + i) T(this->get(i)); });
}
}
-};
-/* Similar to VArray, but the elements are mutable. */
-template<typename T> class VMutableArray : public VArray<T> {
- public:
- VMutableArray(const int64_t size) : VArray<T>(size)
+ /**
+ * If this virtual array wraps another #GVArray, this method should assign the wrapped array to the
+ * provided reference. This allows losslessly converting between generic and typed virtual
+ * arrays in all cases.
+ * Return true when the virtual array was assigned and false when nothing was done.
+ */
+ virtual bool try_assign_GVArray(fn::GVArray &UNUSED(varray)) const
{
+ return false;
}
- void set(const int64_t index, T value)
+ /**
+ * Return true when this virtual array may own any of the memory it references. This can be used
+ * for optimization purposes when converting or copying the virtual array.
+ */
+ virtual bool may_have_ownership() const
{
- BLI_assert(index >= 0);
- BLI_assert(index < this->size_);
- this->set_impl(index, std::move(value));
+ /* Use true by default to be on the safe side. Subclasses that know for sure that they don't
+ * own anything can overwrite this with false. */
+ return true;
}
+};
- /* Copy the values from the source span to all elements in the virtual array. */
- void set_all(Span<T> src)
- {
- BLI_assert(src.size() == this->size_);
- this->set_all_impl(src);
- }
+/* Similar to #VArrayImpl, but adds methods that allow modifying the referenced elements. */
+template<typename T> class VMutableArrayImpl : public VArrayImpl<T> {
+ public:
+ using VArrayImpl<T>::VArrayImpl;
- MutableSpan<T> get_internal_span()
- {
- BLI_assert(this->is_span());
- Span<T> span = static_cast<const VArray<T> *>(this)->get_internal_span();
- return MutableSpan<T>(const_cast<T *>(span.data()), span.size());
- }
+ /**
+ * Assign the provided #value to the #index.
+ */
+ virtual void set(const int64_t index, T value) = 0;
- protected:
- virtual void set_impl(const int64_t index, T value) = 0;
-
- virtual void set_all_impl(Span<T> src)
+ /**
+ * Copy all elements from the provided span into the virtual array.
+ */
+ virtual void set_all(Span<T> src)
{
if (this->is_span()) {
- const MutableSpan<T> span = this->get_internal_span();
+ const Span<T> const_span = this->get_internal_span();
+ const MutableSpan<T> span{(T *)const_span.data(), const_span.size()};
initialized_copy_n(src.data(), this->size_, span.data());
}
else {
@@ -253,95 +212,133 @@ template<typename T> class VMutableArray : public VArray<T> {
}
}
}
-};
-template<typename T> using VArrayPtr = std::unique_ptr<VArray<T>>;
-template<typename T> using VMutableArrayPtr = std::unique_ptr<VMutableArray<T>>;
+ /**
+ * Similar to #VArrayImpl::try_assign_GVArray but for mutable virtual arrays.
+ */
+ virtual bool try_assign_GVMutableArray(fn::GVMutableArray &UNUSED(varray)) const
+ {
+ return false;
+ }
+};
/**
* A virtual array implementation for a span. Methods in this class are final so that it can be
* devirtualized by the compiler in some cases (e.g. when #devirtualize_varray is used).
*/
-template<typename T> class VArray_For_Span : public VArray<T> {
+template<typename T> class VArrayImpl_For_Span : public VArrayImpl<T> {
protected:
const T *data_ = nullptr;
public:
- VArray_For_Span(const Span<T> data) : VArray<T>(data.size()), data_(data.data())
+ VArrayImpl_For_Span(const Span<T> data) : VArrayImpl<T>(data.size()), data_(data.data())
{
}
protected:
- VArray_For_Span(const int64_t size) : VArray<T>(size)
+ VArrayImpl_For_Span(const int64_t size) : VArrayImpl<T>(size)
{
}
- T get_impl(const int64_t index) const final
+ T get(const int64_t index) const final
{
return data_[index];
}
- bool is_span_impl() const final
+ bool is_span() const final
{
return true;
}
- Span<T> get_internal_span_impl() const final
+ Span<T> get_internal_span() const final
{
return Span<T>(data_, this->size_);
}
};
-template<typename T> class VMutableArray_For_MutableSpan : public VMutableArray<T> {
+/**
+ * A version of #VArrayImpl_For_Span that cannot be subclassed. This allows safely overwriting the
+ * #may_have_ownership method.
+ */
+template<typename T> class VArrayImpl_For_Span_final final : public VArrayImpl_For_Span<T> {
+ public:
+ using VArrayImpl_For_Span<T>::VArrayImpl_For_Span;
+
+ private:
+ bool may_have_ownership() const override
+ {
+ return false;
+ }
+};
+
+/**
+ * Like #VArrayImpl_For_Span but for mutable data.
+ */
+template<typename T> class VMutableArrayImpl_For_MutableSpan : public VMutableArrayImpl<T> {
protected:
T *data_ = nullptr;
public:
- VMutableArray_For_MutableSpan(const MutableSpan<T> data)
- : VMutableArray<T>(data.size()), data_(data.data())
+ VMutableArrayImpl_For_MutableSpan(const MutableSpan<T> data)
+ : VMutableArrayImpl<T>(data.size()), data_(data.data())
{
}
protected:
- VMutableArray_For_MutableSpan(const int64_t size) : VMutableArray<T>(size)
+ VMutableArrayImpl_For_MutableSpan(const int64_t size) : VMutableArrayImpl<T>(size)
{
}
- T get_impl(const int64_t index) const final
+ T get(const int64_t index) const final
{
return data_[index];
}
- void set_impl(const int64_t index, T value) final
+ void set(const int64_t index, T value) final
{
data_[index] = value;
}
- bool is_span_impl() const override
+ bool is_span() const override
{
return true;
}
- Span<T> get_internal_span_impl() const override
+ Span<T> get_internal_span() const override
{
return Span<T>(data_, this->size_);
}
};
/**
- * A variant of `VArray_For_Span` that owns the underlying data.
+ * Like #VArrayImpl_For_Span_final but for mutable data.
+ */
+template<typename T>
+class VMutableArrayImpl_For_MutableSpan_final final : public VMutableArrayImpl_For_MutableSpan<T> {
+ public:
+ using VMutableArrayImpl_For_MutableSpan<T>::VMutableArrayImpl_For_MutableSpan;
+
+ private:
+ bool may_have_ownership() const override
+ {
+ return false;
+ }
+};
+
+/**
+ * A variant of `VArrayImpl_For_Span` that owns the underlying data.
* The `Container` type has to implement a `size()` and `data()` method.
* The `data()` method has to return a pointer to the first element in the continuous array of
* elements.
*/
template<typename Container, typename T = typename Container::value_type>
-class VArray_For_ArrayContainer : public VArray_For_Span<T> {
+class VArrayImpl_For_ArrayContainer : public VArrayImpl_For_Span<T> {
private:
Container container_;
public:
- VArray_For_ArrayContainer(Container container)
- : VArray_For_Span<T>((int64_t)container.size()), container_(std::move(container))
+ VArrayImpl_For_ArrayContainer(Container container)
+ : VArrayImpl_For_Span<T>((int64_t)container.size()), container_(std::move(container))
{
this->data_ = container_.data();
}
@@ -352,43 +349,671 @@ class VArray_For_ArrayContainer : public VArray_For_Span<T> {
* so that it can be devirtualized by the compiler in some cases (e.g. when #devirtualize_varray is
* used).
*/
-template<typename T> class VArray_For_Single final : public VArray<T> {
+template<typename T> class VArrayImpl_For_Single final : public VArrayImpl<T> {
private:
T value_;
public:
- VArray_For_Single(T value, const int64_t size) : VArray<T>(size), value_(std::move(value))
+ VArrayImpl_For_Single(T value, const int64_t size)
+ : VArrayImpl<T>(size), value_(std::move(value))
{
}
protected:
- T get_impl(const int64_t UNUSED(index)) const override
+ T get(const int64_t UNUSED(index)) const override
{
return value_;
}
- bool is_span_impl() const override
+ bool is_span() const override
{
return this->size_ == 1;
}
- Span<T> get_internal_span_impl() const override
+ Span<T> get_internal_span() const override
{
return Span<T>(&value_, 1);
}
- bool is_single_impl() const override
+ bool is_single() const override
{
return true;
}
- T get_internal_single_impl() const override
+ T get_internal_single() const override
{
return value_;
}
};
/**
+ * This class makes it easy to create a virtual array for an existing function or lambda. The
+ * `GetFunc` should take a single `index` argument and return the value at that index.
+ */
+template<typename T, typename GetFunc> class VArrayImpl_For_Func final : public VArrayImpl<T> {
+ private:
+ GetFunc get_func_;
+
+ public:
+ VArrayImpl_For_Func(const int64_t size, GetFunc get_func)
+ : VArrayImpl<T>(size), get_func_(std::move(get_func))
+ {
+ }
+
+ private:
+ T get(const int64_t index) const override
+ {
+ return get_func_(index);
+ }
+
+ void materialize(IndexMask mask, MutableSpan<T> r_span) const override
+ {
+ T *dst = r_span.data();
+ mask.foreach_index([&](const int64_t i) { dst[i] = get_func_(i); });
+ }
+
+ void materialize_to_uninitialized(IndexMask mask, MutableSpan<T> r_span) const override
+ {
+ T *dst = r_span.data();
+ mask.foreach_index([&](const int64_t i) { new (dst + i) T(get_func_(i)); });
+ }
+};
+
+/**
+ * \note: This is `final` so that #may_have_ownership can be implemented reliably.
+ */
+template<typename StructT, typename ElemT, ElemT (*GetFunc)(const StructT &)>
+class VArrayImpl_For_DerivedSpan final : public VArrayImpl<ElemT> {
+ private:
+ const StructT *data_;
+
+ public:
+ VArrayImpl_For_DerivedSpan(const Span<StructT> data)
+ : VArrayImpl<ElemT>(data.size()), data_(data.data())
+ {
+ }
+
+ private:
+ ElemT get(const int64_t index) const override
+ {
+ return GetFunc(data_[index]);
+ }
+
+ void materialize(IndexMask mask, MutableSpan<ElemT> r_span) const override
+ {
+ ElemT *dst = r_span.data();
+ mask.foreach_index([&](const int64_t i) { dst[i] = GetFunc(data_[i]); });
+ }
+
+ void materialize_to_uninitialized(IndexMask mask, MutableSpan<ElemT> r_span) const override
+ {
+ ElemT *dst = r_span.data();
+ mask.foreach_index([&](const int64_t i) { new (dst + i) ElemT(GetFunc(data_[i])); });
+ }
+
+ bool may_have_ownership() const override
+ {
+ return false;
+ }
+};
+
+/**
+ * \note: This is `final` so that #may_have_ownership can be implemented reliably.
+ */
+template<typename StructT,
+ typename ElemT,
+ ElemT (*GetFunc)(const StructT &),
+ void (*SetFunc)(StructT &, ElemT)>
+class VMutableArrayImpl_For_DerivedSpan final : public VMutableArrayImpl<ElemT> {
+ private:
+ StructT *data_;
+
+ public:
+ VMutableArrayImpl_For_DerivedSpan(const MutableSpan<StructT> data)
+ : VMutableArrayImpl<ElemT>(data.size()), data_(data.data())
+ {
+ }
+
+ private:
+ ElemT get(const int64_t index) const override
+ {
+ return GetFunc(data_[index]);
+ }
+
+ void set(const int64_t index, ElemT value) override
+ {
+ SetFunc(data_[index], std::move(value));
+ }
+
+ void materialize(IndexMask mask, MutableSpan<ElemT> r_span) const override
+ {
+ ElemT *dst = r_span.data();
+ mask.foreach_index([&](const int64_t i) { dst[i] = GetFunc(data_[i]); });
+ }
+
+ void materialize_to_uninitialized(IndexMask mask, MutableSpan<ElemT> r_span) const override
+ {
+ ElemT *dst = r_span.data();
+ mask.foreach_index([&](const int64_t i) { new (dst + i) ElemT(GetFunc(data_[i])); });
+ }
+
+ bool may_have_ownership() const override
+ {
+ return false;
+ }
+};
+
+namespace detail {
+
+/**
+ * Struct that can be passed as `ExtraInfo` into an #Any.
+ * This struct is only intended to be used by #VArrayCommon.
+ */
+template<typename T> struct VArrayAnyExtraInfo {
+ /**
+ * Gets the virtual array that is stored at the given pointer.
+ */
+ const VArrayImpl<T> *(*get_varray)(const void *buffer) =
+ [](const void *UNUSED(buffer)) -> const VArrayImpl<T> * { return nullptr; };
+
+ template<typename StorageT> static VArrayAnyExtraInfo get()
+ {
+ /* These are the only allowed types in the #Any. */
+ static_assert(std::is_base_of_v<VArrayImpl<T>, StorageT> ||
+ std::is_same_v<StorageT, const VArrayImpl<T> *> ||
+ std::is_same_v<StorageT, std::shared_ptr<const VArrayImpl<T>>>);
+
+ /* Depending on how the virtual array implementation is stored in the #Any, a different
+ * #get_varray function is required. */
+ if constexpr (std::is_base_of_v<VArrayImpl<T>, StorageT>) {
+ return {[](const void *buffer) {
+ return static_cast<const VArrayImpl<T> *>((const StorageT *)buffer);
+ }};
+ }
+ else if constexpr (std::is_same_v<StorageT, const VArrayImpl<T> *>) {
+ return {[](const void *buffer) { return *(const StorageT *)buffer; }};
+ }
+ else if constexpr (std::is_same_v<StorageT, std::shared_ptr<const VArrayImpl<T>>>) {
+ return {[](const void *buffer) { return ((const StorageT *)buffer)->get(); }};
+ }
+ else {
+ BLI_assert_unreachable();
+ return {};
+ }
+ }
+};
+
+} // namespace detail
+
+/**
+ * Utility class to reduce code duplication for methods available on #VArray and #VMutableArray.
+ * Deriving #VMutableArray from #VArray would have some issues:
+ * - Static methods on #VArray would also be available on #VMutableArray.
+ * - It would allow assigning a #VArray to a #VMutableArray under some circumstances which is not
+ * allowed and could result in hard-to-find bugs.
+ */
+template<typename T> class VArrayCommon {
+ protected:
+ /**
+ * Store the virtual array implementation in an #Any. This makes it easy to avoid a memory
+ * allocation if the implementation is small enough and is copyable. This is the case for the
+ * most common virtual arrays.
+ * Other virtual array implementations are typically stored as #std::shared_ptr. That works even
+ * when the implementation itself is not copyable and makes copying #VArrayCommon cheaper.
+ */
+ using Storage = Any<detail::VArrayAnyExtraInfo<T>, 24, 8>;
+
+ /**
+ * Pointer to the currently contained virtual array implementation. This is allowed to be null.
+ */
+ const VArrayImpl<T> *impl_ = nullptr;
+ /**
+ * Does the memory management for the virtual array implementation. It contains one of the
+ * following:
+ * - Inlined subclass of #VArrayImpl.
+ * - Non-owning pointer to a #VArrayImpl.
+ * - Shared pointer to a #VArrayImpl.
+ */
+ Storage storage_;
+
+ protected:
+ VArrayCommon() = default;
+
+ /** Copy constructor. */
+ VArrayCommon(const VArrayCommon &other) : storage_(other.storage_)
+ {
+ impl_ = this->impl_from_storage();
+ }
+
+ /** Move constructor. */
+ VArrayCommon(VArrayCommon &&other) noexcept : storage_(std::move(other.storage_))
+ {
+ impl_ = this->impl_from_storage();
+ other.storage_.reset();
+ other.impl_ = nullptr;
+ }
+
+ /**
+ * Wrap an existing #VArrayImpl and don't take ownership of it. This should rarely be used in
+ * practice.
+ */
+ VArrayCommon(const VArrayImpl<T> *impl) : impl_(impl)
+ {
+ storage_ = impl_;
+ }
+
+ /**
+ * Wrap an existing #VArrayImpl that is contained in a #std::shared_ptr. This takes ownership.
+ */
+ VArrayCommon(std::shared_ptr<const VArrayImpl<T>> impl) : impl_(impl.get())
+ {
+ if (impl) {
+ storage_ = std::move(impl);
+ }
+ }
+
+ /**
+ * Replace the contained #VArrayImpl.
+ */
+ template<typename ImplT, typename... Args> void emplace(Args &&...args)
+ {
+ /* Make sure we are actually constructing a #VArrayImpl. */
+ static_assert(std::is_base_of_v<VArrayImpl<T>, ImplT>);
+ if constexpr (std::is_copy_constructible_v<ImplT> && Storage::template is_inline_v<ImplT>) {
+ /* Only inline the implementation when it is copyable and when it fits into the inline
+ * buffer of the storage. */
+ impl_ = &storage_.template emplace<ImplT>(std::forward<Args>(args)...);
+ }
+ else {
+ /* If it can't be inlined, create a new #std::shared_ptr instead and store that in the
+ * storage. */
+ std::shared_ptr<const VArrayImpl<T>> ptr = std::make_shared<ImplT>(
+ std::forward<Args>(args)...);
+ impl_ = &*ptr;
+ storage_ = std::move(ptr);
+ }
+ }
+
+ /** Utility to implement a copy assignment operator in a subclass. */
+ void copy_from(const VArrayCommon &other)
+ {
+ if (this == &other) {
+ return;
+ }
+ storage_ = other.storage_;
+ impl_ = this->impl_from_storage();
+ }
+
+ /** Utility to implement a move assignment operator in a subclass. */
+ void move_from(VArrayCommon &&other) noexcept
+ {
+ if (this == &other) {
+ return;
+ }
+ storage_ = std::move(other.storage_);
+ impl_ = this->impl_from_storage();
+ other.storage_.reset();
+ other.impl_ = nullptr;
+ }
+
+ /** Get a pointer to the virtual array implementation that is currently stored in #storage_, or
+ * null. */
+ const VArrayImpl<T> *impl_from_storage() const
+ {
+ return storage_.extra_info().get_varray(storage_.get());
+ }
+
+ public:
+ /** Return false when there is no virtual array implementation currently. */
+ operator bool() const
+ {
+ return impl_ != nullptr;
+ }
+
+ /**
+ * Get the element at a specific index.
+ * \note: This can't return a reference because the value may be computed on the fly. This also
+ * implies that one can not use this method for assignments.
+ */
+ T operator[](const int64_t index) const
+ {
+ BLI_assert(*this);
+ BLI_assert(index >= 0);
+ BLI_assert(index < this->size());
+ return impl_->get(index);
+ }
+
+ /**
+ * Same as the #operator[] but is sometimes easier to use when one has a pointer to a virtual
+ * array.
+ */
+ T get(const int64_t index) const
+ {
+ return (*this)[index];
+ }
+
+ /**
+ * Return the size of the virtual array. It's allowed to call this method even when there is no
+ * virtual array. In this case 0 is returned.
+ */
+ int64_t size() const
+ {
+ if (impl_ == nullptr) {
+ return 0;
+ }
+ return impl_->size();
+ }
+
+ /** True when the size is zero or when there is no virtual array. */
+ bool is_empty() const
+ {
+ return this->size() == 0;
+ }
+
+ IndexRange index_range() const
+ {
+ return IndexRange(this->size());
+ }
+
+ /** Return true when the virtual array is stored as a span internally. */
+ bool is_span() const
+ {
+ BLI_assert(*this);
+ if (this->is_empty()) {
+ return true;
+ }
+ return impl_->is_span();
+ }
+
+ /**
+ * Returns the internally used span of the virtual array. This invokes undefined behavior if the
+ * virtual array is not stored as a span internally.
+ */
+ Span<T> get_internal_span() const
+ {
+ BLI_assert(this->is_span());
+ if (this->is_empty()) {
+ return {};
+ }
+ return impl_->get_internal_span();
+ }
+
+ /** Return true when the virtual array returns the same value for every index. */
+ bool is_single() const
+ {
+ BLI_assert(*this);
+ if (impl_->size() == 1) {
+ return true;
+ }
+ return impl_->is_single();
+ }
+
+ /**
+ * Return the value that is returned for every index. This invokes undefined behavior if the
+ * virtual array would not return the same value for every index.
+ */
+ T get_internal_single() const
+ {
+ BLI_assert(this->is_single());
+ if (impl_->size() == 1) {
+ return impl_->get(0);
+ }
+ return impl_->get_internal_single();
+ }
+
+ /** Copy the entire virtual array into a span. */
+ void materialize(MutableSpan<T> r_span) const
+ {
+ this->materialize(IndexMask(this->size()), r_span);
+ }
+
+ /** Copy some indices of the virtual array into a span. */
+ void materialize(IndexMask mask, MutableSpan<T> r_span) const
+ {
+ BLI_assert(mask.min_array_size() <= this->size());
+ impl_->materialize(mask, r_span);
+ }
+
+ void materialize_to_uninitialized(MutableSpan<T> r_span) const
+ {
+ this->materialize_to_uninitialized(IndexMask(this->size()), r_span);
+ }
+
+ void materialize_to_uninitialized(IndexMask mask, MutableSpan<T> r_span) const
+ {
+ BLI_assert(mask.min_array_size() <= this->size());
+ impl_->materialize_to_uninitialized(mask, r_span);
+ }
+
+ /** See #GVArrayImpl::try_assign_GVArray. */
+ bool try_assign_GVArray(fn::GVArray &varray) const
+ {
+ return impl_->try_assign_GVArray(varray);
+ }
+
+ /** See #GVArrayImpl::may_have_ownership. */
+ bool may_have_ownership() const
+ {
+ return impl_->may_have_ownership();
+ }
+};
+
+template<typename T> class VMutableArray;
+
+/**
+ * A #VArray wraps a virtual array implementation and provides easy access to its elements. It can
+ * be copied and moved. While it is relatively small, it should still be passed by reference if
+ * possible (unlike e.g. #Span).
+ */
+template<typename T> class VArray : public VArrayCommon<T> {
+ friend VMutableArray<T>;
+
+ public:
+ VArray() = default;
+ VArray(const VArray &other) = default;
+ VArray(VArray &&other) noexcept = default;
+
+ VArray(const VArrayImpl<T> *impl) : VArrayCommon<T>(impl)
+ {
+ }
+
+ VArray(std::shared_ptr<const VArrayImpl<T>> impl) : VArrayCommon<T>(std::move(impl))
+ {
+ }
+
+ /**
+ * Construct a new virtual array for a custom #VArrayImpl.
+ */
+ template<typename ImplT, typename... Args> static VArray For(Args &&...args)
+ {
+ static_assert(std::is_base_of_v<VArrayImpl<T>, ImplT>);
+ VArray varray;
+ varray.template emplace<ImplT>(std::forward<Args>(args)...);
+ return varray;
+ }
+
+ /**
+ * Construct a new virtual array that has the same value at every index.
+ */
+ static VArray ForSingle(T value, const int64_t size)
+ {
+ return VArray::For<VArrayImpl_For_Single<T>>(std::move(value), size);
+ }
+
+ /**
+ * Construct a new virtual array for an existing span. This does not take ownership of the
+ * underlying memory.
+ */
+ static VArray ForSpan(Span<T> values)
+ {
+ return VArray::For<VArrayImpl_For_Span_final<T>>(values);
+ }
+
+ /**
+ * Construct a new virtual array that will invoke the provided function whenever an element is
+ * accessed.
+ */
+ template<typename GetFunc> static VArray ForFunc(const int64_t size, GetFunc get_func)
+ {
+ return VArray::For<VArrayImpl_For_Func<T, decltype(get_func)>>(size, std::move(get_func));
+ }
+
+ /**
+ * Construct a new virtual array for an existing span with a mapping function. This does not take
+ * ownership of the span.
+ */
+ template<typename StructT, T (*GetFunc)(const StructT &)>
+ static VArray ForDerivedSpan(Span<StructT> values)
+ {
+ return VArray::For<VArrayImpl_For_DerivedSpan<StructT, T, GetFunc>>(values);
+ }
+
+ /**
+ * Construct a new virtual array for an existing container. Every container that lays out the
+ * elements in a plain array works. This takes ownership of the passed in container. If that is
+ * not desired, use #ForSpan instead.
+ */
+ template<typename ContainerT> static VArray ForContainer(ContainerT container)
+ {
+ return VArray::For<VArrayImpl_For_ArrayContainer<ContainerT>>(std::move(container));
+ }
+
+ VArray &operator=(const VArray &other)
+ {
+ this->copy_from(other);
+ return *this;
+ }
+
+ VArray &operator=(VArray &&other) noexcept
+ {
+ this->move_from(std::move(other));
+ return *this;
+ }
+};
+
+/**
+ * Similar to #VArray but references a virtual array that can be modified.
+ */
+template<typename T> class VMutableArray : public VArrayCommon<T> {
+ public:
+ VMutableArray() = default;
+ VMutableArray(const VMutableArray &other) = default;
+ VMutableArray(VMutableArray &&other) noexcept = default;
+
+ VMutableArray(const VMutableArrayImpl<T> *impl) : VArrayCommon<T>(impl)
+ {
+ }
+
+ VMutableArray(std::shared_ptr<const VMutableArrayImpl<T>> impl)
+ : VArrayCommon<T>(std::move(impl))
+ {
+ }
+
+ /**
+ * Construct a new virtual array for a custom #VMutableArrayImpl.
+ */
+ template<typename ImplT, typename... Args> static VMutableArray For(Args &&...args)
+ {
+ static_assert(std::is_base_of_v<VMutableArrayImpl<T>, ImplT>);
+ VMutableArray varray;
+ varray.template emplace<ImplT>(std::forward<Args>(args)...);
+ return varray;
+ }
+
+ /**
+ * Construct a new virtual array for an existing span. This does not take ownership of the span.
+ */
+ static VMutableArray ForSpan(MutableSpan<T> values)
+ {
+ return VMutableArray::For<VMutableArrayImpl_For_MutableSpan_final<T>>(values);
+ }
+
+ /**
+ * Construct a new virtual array for an existing span with a mapping function. This does not take
+ * ownership of the span.
+ */
+ template<typename StructT, T (*GetFunc)(const StructT &), void (*SetFunc)(StructT &, T)>
+ static VMutableArray ForDerivedSpan(MutableSpan<StructT> values)
+ {
+ return VMutableArray::For<VMutableArrayImpl_For_DerivedSpan<StructT, T, GetFunc, SetFunc>>(
+ values);
+ }
+
+ /** Convert to a #VArray by copying. */
+ operator VArray<T>() const &
+ {
+ VArray<T> varray;
+ varray.copy_from(*this);
+ return varray;
+ }
+
+ /** Convert to a #VArray by moving. */
+ operator VArray<T>() &&noexcept
+ {
+ VArray<T> varray;
+ varray.move_from(std::move(*this));
+ return varray;
+ }
+
+ VMutableArray &operator=(const VMutableArray &other)
+ {
+ this->copy_from(other);
+ return *this;
+ }
+
+ VMutableArray &operator=(VMutableArray &&other) noexcept
+ {
+ this->move_from(std::move(other));
+ return *this;
+ }
+
+ /**
+ * Get access to the internal span. This invokes undefined behavior if the #is_span returned
+ * false.
+ */
+ MutableSpan<T> get_internal_span() const
+ {
+ BLI_assert(this->is_span());
+ const Span<T> span = this->impl_->get_internal_span();
+ return MutableSpan<T>(const_cast<T *>(span.data()), span.size());
+ }
+
+ /**
+ * Set the value at the given index.
+ */
+ void set(const int64_t index, T value)
+ {
+ BLI_assert(index >= 0);
+ BLI_assert(index < this->size());
+ this->get_impl()->set(index, std::move(value));
+ }
+
+ /**
+ * Copy the values from the source span to all elements in the virtual array.
+ */
+ void set_all(Span<T> src)
+ {
+ BLI_assert(src.size() == this->size());
+ this->get_impl()->set_all(src);
+ }
+
+ /** See #GVMutableArrayImpl::try_assign_GVMutableArray. */
+ bool try_assign_GVMutableArray(fn::GVMutableArray &varray) const
+ {
+ return this->get_impl()->try_assign_GVMutableArray(varray);
+ }
+
+ private:
+ /** Utility to get the pointer to the wrapped #VMutableArrayImpl. */
+ VMutableArrayImpl<T> *get_impl() const
+ {
+ /* This cast is valid by the invariant that a #VMutableArray->impl_ is always a
+ * #VMutableArrayImpl. */
+ return (VMutableArrayImpl<T> *)this->impl_;
+ }
+};
+
+/**
 * In many cases a virtual array is a span internally. In those cases, access to individual elements could
* be much more efficient than calling a virtual method. When the underlying virtual array is not a
* span, this class allocates a new array and copies the values over.
@@ -401,11 +1026,11 @@ template<typename T> class VArray_For_Single final : public VArray<T> {
*/
template<typename T> class VArray_Span final : public Span<T> {
private:
- const VArray<T> &varray_;
+ VArray<T> varray_;
Array<T> owned_data_;
public:
- VArray_Span(const VArray<T> &varray) : Span<T>(), varray_(varray)
+ VArray_Span(VArray<T> varray) : Span<T>(), varray_(std::move(varray))
{
this->size_ = varray_.size();
if (varray_.is_span()) {
@@ -421,7 +1046,7 @@ template<typename T> class VArray_Span final : public Span<T> {
};
/**
- * Same as VArray_Span, but for a mutable span.
+ * Same as #VArray_Span, but for a mutable span.
* The important thing to note is that when changing this span, the results might not be
* immediately reflected in the underlying virtual array (only when the virtual array is a span
* internally). The #save method can be used to write all changes to the underlying virtual array,
@@ -429,7 +1054,7 @@ template<typename T> class VArray_Span final : public Span<T> {
*/
template<typename T> class VMutableArray_Span final : public MutableSpan<T> {
private:
- VMutableArray<T> &varray_;
+ VMutableArray<T> varray_;
Array<T> owned_data_;
bool save_has_been_called_ = false;
bool show_not_saved_warning_ = true;
@@ -437,8 +1062,8 @@ template<typename T> class VMutableArray_Span final : public MutableSpan<T> {
public:
/* Create a span for any virtual array. This is cheap when the virtual array is a span itself. If
* not, a new array has to be allocated as a wrapper for the underlying virtual array. */
- VMutableArray_Span(VMutableArray<T> &varray, const bool copy_values_to_span = true)
- : MutableSpan<T>(), varray_(varray)
+ VMutableArray_Span(VMutableArray<T> varray, const bool copy_values_to_span = true)
+ : MutableSpan<T>(), varray_(std::move(varray))
{
this->size_ = varray_.size();
if (varray_.is_span()) {
@@ -483,106 +1108,6 @@ template<typename T> class VMutableArray_Span final : public MutableSpan<T> {
};
/**
- * This class makes it easy to create a virtual array for an existing function or lambda. The
- * `GetFunc` should take a single `index` argument and return the value at that index.
- */
-template<typename T, typename GetFunc> class VArray_For_Func final : public VArray<T> {
- private:
- GetFunc get_func_;
-
- public:
- VArray_For_Func(const int64_t size, GetFunc get_func)
- : VArray<T>(size), get_func_(std::move(get_func))
- {
- }
-
- private:
- T get_impl(const int64_t index) const override
- {
- return get_func_(index);
- }
-
- void materialize_impl(IndexMask mask, MutableSpan<T> r_span) const override
- {
- T *dst = r_span.data();
- mask.foreach_index([&](const int64_t i) { dst[i] = get_func_(i); });
- }
-
- void materialize_to_uninitialized_impl(IndexMask mask, MutableSpan<T> r_span) const override
- {
- T *dst = r_span.data();
- mask.foreach_index([&](const int64_t i) { new (dst + i) T(get_func_(i)); });
- }
-};
-
-template<typename StructT, typename ElemT, ElemT (*GetFunc)(const StructT &)>
-class VArray_For_DerivedSpan : public VArray<ElemT> {
- private:
- const StructT *data_;
-
- public:
- VArray_For_DerivedSpan(const Span<StructT> data) : VArray<ElemT>(data.size()), data_(data.data())
- {
- }
-
- private:
- ElemT get_impl(const int64_t index) const override
- {
- return GetFunc(data_[index]);
- }
-
- void materialize_impl(IndexMask mask, MutableSpan<ElemT> r_span) const override
- {
- ElemT *dst = r_span.data();
- mask.foreach_index([&](const int64_t i) { dst[i] = GetFunc(data_[i]); });
- }
-
- void materialize_to_uninitialized_impl(IndexMask mask, MutableSpan<ElemT> r_span) const override
- {
- ElemT *dst = r_span.data();
- mask.foreach_index([&](const int64_t i) { new (dst + i) ElemT(GetFunc(data_[i])); });
- }
-};
-
-template<typename StructT,
- typename ElemT,
- ElemT (*GetFunc)(const StructT &),
- void (*SetFunc)(StructT &, ElemT)>
-class VMutableArray_For_DerivedSpan : public VMutableArray<ElemT> {
- private:
- StructT *data_;
-
- public:
- VMutableArray_For_DerivedSpan(const MutableSpan<StructT> data)
- : VMutableArray<ElemT>(data.size()), data_(data.data())
- {
- }
-
- private:
- ElemT get_impl(const int64_t index) const override
- {
- return GetFunc(data_[index]);
- }
-
- void set_impl(const int64_t index, ElemT value) override
- {
- SetFunc(data_[index], std::move(value));
- }
-
- void materialize_impl(IndexMask mask, MutableSpan<ElemT> r_span) const override
- {
- ElemT *dst = r_span.data();
- mask.foreach_index([&](const int64_t i) { dst[i] = GetFunc(data_[i]); });
- }
-
- void materialize_to_uninitialized_impl(IndexMask mask, MutableSpan<ElemT> r_span) const override
- {
- ElemT *dst = r_span.data();
- mask.foreach_index([&](const int64_t i) { new (dst + i) ElemT(GetFunc(data_[i])); });
- }
-};
-
-/**
* Generate multiple versions of the given function optimized for different virtual arrays.
* One has to be careful with nesting multiple devirtualizations, because that results in an
* exponential number of function instantiations (increasing compile time and binary size).
@@ -596,15 +1121,14 @@ inline void devirtualize_varray(const VArray<T> &varray, const Func &func, bool
/* Support disabling the devirtualization to simplify benchmarking. */
if (enable) {
if (varray.is_single()) {
- /* `VArray_For_Single` can be used for devirtualization, because it is declared `final`. */
- const VArray_For_Single<T> varray_single{varray.get_internal_single(), varray.size()};
- func(varray_single);
+ /* `VArrayImpl_For_Single` can be used for devirtualization, because it is declared `final`.
+ */
+ func(VArray<T>::ForSingle(varray.get_internal_single(), varray.size()));
return;
}
if (varray.is_span()) {
- /* `VArray_For_Span` can be used for devirtualization, because it is declared `final`. */
- const VArray_For_Span<T> varray_span{varray.get_internal_span()};
- func(varray_span);
+ /* `VArrayImpl_For_Span` can be used for devirtualization, because it is declared `final`. */
+ func(VArray<T>::ForSpan(varray.get_internal_span()));
return;
}
}
@@ -629,27 +1153,23 @@ inline void devirtualize_varray2(const VArray<T1> &varray1,
const bool is_single1 = varray1.is_single();
const bool is_single2 = varray2.is_single();
if (is_span1 && is_span2) {
- const VArray_For_Span<T1> varray1_span{varray1.get_internal_span()};
- const VArray_For_Span<T2> varray2_span{varray2.get_internal_span()};
- func(varray1_span, varray2_span);
+ func(VArray<T1>::ForSpan(varray1.get_internal_span()),
+ VArray<T2>::ForSpan(varray2.get_internal_span()));
return;
}
if (is_span1 && is_single2) {
- const VArray_For_Span<T1> varray1_span{varray1.get_internal_span()};
- const VArray_For_Single<T2> varray2_single{varray2.get_internal_single(), varray2.size()};
- func(varray1_span, varray2_single);
+ func(VArray<T1>::ForSpan(varray1.get_internal_span()),
+ VArray<T2>::ForSingle(varray2.get_internal_single(), varray2.size()));
return;
}
if (is_single1 && is_span2) {
- const VArray_For_Single<T1> varray1_single{varray1.get_internal_single(), varray1.size()};
- const VArray_For_Span<T2> varray2_span{varray2.get_internal_span()};
- func(varray1_single, varray2_span);
+ func(VArray<T1>::ForSingle(varray1.get_internal_single(), varray1.size()),
+ VArray<T2>::ForSpan(varray2.get_internal_span()));
return;
}
if (is_single1 && is_single2) {
- const VArray_For_Single<T1> varray1_single{varray1.get_internal_single(), varray1.size()};
- const VArray_For_Single<T2> varray2_single{varray2.get_internal_single(), varray2.size()};
- func(varray1_single, varray2_single);
+ func(VArray<T1>::ForSingle(varray1.get_internal_single(), varray1.size()),
+ VArray<T2>::ForSingle(varray2.get_internal_single(), varray2.size()));
return;
}
}
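To show how the reworked devirtualization helper is meant to be called, here is a hedged sketch. The summing helper below is illustrative and not part of this change; the signature of #devirtualize_varray is taken from the hunk above, and `true` is passed explicitly for the enable parameter.

#include "BLI_virtual_array.hh"

using namespace blender;

/* Sum all elements of a virtual array. The generic lambda is instantiated once for the
 * devirtualized span/single cases and once more for the generic fallback. */
static int64_t sum_varray(const VArray<int> &varray)
{
  int64_t sum = 0;
  devirtualize_varray(
      varray,
      [&](const auto &devirtualized) {
        for (const int64_t i : devirtualized.index_range()) {
          sum += devirtualized[i];
        }
      },
      true);
  return sum;
}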
diff --git a/source/blender/blenlib/CMakeLists.txt b/source/blender/blenlib/CMakeLists.txt
index 7db984aef5c..29493c799b3 100644
--- a/source/blender/blenlib/CMakeLists.txt
+++ b/source/blender/blenlib/CMakeLists.txt
@@ -165,6 +165,7 @@ set(SRC
BLI_alloca.h
BLI_allocator.hh
+ BLI_any.hh
BLI_args.h
BLI_array.h
BLI_array.hh
@@ -411,6 +412,7 @@ blender_add_lib(bf_blenlib "${SRC}" "${INC}" "${INC_SYS}" "${LIB}")
if(WITH_GTESTS)
set(TEST_SRC
+ tests/BLI_any_test.cc
tests/BLI_array_store_test.cc
tests/BLI_array_test.cc
tests/BLI_array_utils_test.cc
diff --git a/source/blender/blenlib/tests/BLI_any_test.cc b/source/blender/blenlib/tests/BLI_any_test.cc
new file mode 100644
index 00000000000..226088cf3c7
--- /dev/null
+++ b/source/blender/blenlib/tests/BLI_any_test.cc
@@ -0,0 +1,108 @@
+/* Apache License, Version 2.0 */
+
+#include "BLI_any.hh"
+#include "BLI_map.hh"
+
+#include "testing/testing.h"
+
+namespace blender::tests {
+
+TEST(any, DefaultConstructor)
+{
+ Any a;
+ EXPECT_FALSE(a.has_value());
+}
+
+TEST(any, AssignInt)
+{
+ Any<> a = 5;
+ EXPECT_TRUE(a.has_value());
+ EXPECT_TRUE(a.is<int>());
+ EXPECT_FALSE(a.is<float>());
+ const int &value = a.get<int>();
+ EXPECT_EQ(value, 5);
+ a = 10;
+ EXPECT_EQ(value, 10);
+
+ Any b = a;
+ EXPECT_TRUE(b.has_value());
+ EXPECT_EQ(b.get<int>(), 10);
+
+ Any c = std::move(a);
+ EXPECT_TRUE(c);
+ EXPECT_EQ(c.get<int>(), 10);
+
+ EXPECT_EQ(a.get<int>(), 10); /* NOLINT: bugprone-use-after-move */
+
+ a.reset();
+ EXPECT_FALSE(a);
+}
+
+TEST(any, AssignMap)
+{
+ Any<> a = Map<int, int>();
+ EXPECT_TRUE(a.has_value());
+ EXPECT_TRUE((a.is<Map<int, int>>()));
+ EXPECT_FALSE((a.is<Map<int, float>>()));
+ Map<int, int> &map = a.get<Map<int, int>>();
+ map.add(4, 2);
+ EXPECT_EQ((a.get<Map<int, int>>().lookup(4)), 2);
+
+ Any b = a;
+ EXPECT_TRUE(b);
+ EXPECT_EQ((b.get<Map<int, int>>().lookup(4)), 2);
+
+ Any c = std::move(a);
+ c = c;
+ EXPECT_TRUE(b);
+ EXPECT_EQ((c.get<Map<int, int>>().lookup(4)), 2);
+
+ EXPECT_TRUE((a.get<Map<int, int>>().is_empty())); /* NOLINT: bugprone-use-after-move */
+}
+
+TEST(any, AssignAny)
+{
+ Any<> a = 5;
+ Any<> b = std::string("hello");
+ Any c;
+
+ Any z;
+ EXPECT_FALSE(z.has_value());
+
+ z = a;
+ EXPECT_TRUE(z.has_value());
+ EXPECT_EQ(z.get<int>(), 5);
+
+ z = b;
+ EXPECT_EQ(z.get<std::string>(), "hello");
+
+ z = c;
+ EXPECT_FALSE(z.has_value());
+
+ z = Any(std::in_place_type<Any<>>, a);
+ EXPECT_FALSE(z.is<int>());
+ EXPECT_TRUE(z.is<Any<>>());
+ EXPECT_EQ(z.get<Any<>>().get<int>(), 5);
+}
+
+struct ExtraSizeInfo {
+ size_t size;
+
+ template<typename T> static ExtraSizeInfo get()
+ {
+ return {sizeof(T)};
+ }
+};
+
+TEST(any, ExtraInfo)
+{
+ using MyAny = Any<ExtraSizeInfo>;
+
+ MyAny a = 5;
+ EXPECT_EQ(a.extra_info().size, sizeof(int));
+
+ a = std::string("hello");
+ EXPECT_EQ(a.extra_info().size, sizeof(std::string));
+}
+
+} // namespace blender::tests
diff --git a/source/blender/blenlib/tests/BLI_virtual_array_test.cc b/source/blender/blenlib/tests/BLI_virtual_array_test.cc
index a6d2ca10315..7a548e7c434 100644
--- a/source/blender/blenlib/tests/BLI_virtual_array_test.cc
+++ b/source/blender/blenlib/tests/BLI_virtual_array_test.cc
@@ -12,7 +12,7 @@ namespace blender::tests {
TEST(virtual_array, Span)
{
std::array<int, 5> data = {3, 4, 5, 6, 7};
- VArray_For_Span<int> varray{data};
+ VArray<int> varray = VArray<int>::ForSpan(data);
EXPECT_EQ(varray.size(), 5);
EXPECT_EQ(varray.get(0), 3);
EXPECT_EQ(varray.get(4), 7);
@@ -23,7 +23,7 @@ TEST(virtual_array, Span)
TEST(virtual_array, Single)
{
- VArray_For_Single<int> varray{10, 4};
+ VArray<int> varray = VArray<int>::ForSingle(10, 4);
EXPECT_EQ(varray.size(), 4);
EXPECT_EQ(varray.get(0), 10);
EXPECT_EQ(varray.get(3), 10);
@@ -35,7 +35,7 @@ TEST(virtual_array, Array)
{
Array<int> array = {1, 2, 3, 5, 8};
{
- VArray_For_ArrayContainer varray{array};
+ VArray<int> varray = VArray<int>::ForContainer(array);
EXPECT_EQ(varray.size(), 5);
EXPECT_EQ(varray[0], 1);
EXPECT_EQ(varray[2], 3);
@@ -43,7 +43,7 @@ TEST(virtual_array, Array)
EXPECT_TRUE(varray.is_span());
}
{
- VArray_For_ArrayContainer varray{std::move(array)};
+ VArray<int> varray = VArray<int>::ForContainer(std::move(array));
EXPECT_EQ(varray.size(), 5);
EXPECT_EQ(varray[0], 1);
EXPECT_EQ(varray[2], 3);
@@ -51,7 +51,7 @@ TEST(virtual_array, Array)
EXPECT_TRUE(varray.is_span());
}
{
- VArray_For_ArrayContainer varray{array}; /* NOLINT: bugprone-use-after-move */
+ VArray<int> varray = VArray<int>::ForContainer(array); /* NOLINT: bugprone-use-after-move */
EXPECT_TRUE(varray.is_empty());
}
}
@@ -59,7 +59,7 @@ TEST(virtual_array, Array)
TEST(virtual_array, Vector)
{
Vector<int> vector = {9, 8, 7, 6};
- VArray_For_ArrayContainer varray{std::move(vector)};
+ VArray<int> varray = VArray<int>::ForContainer(std::move(vector));
EXPECT_EQ(varray.size(), 4);
EXPECT_EQ(varray[0], 9);
EXPECT_EQ(varray[3], 6);
@@ -68,7 +68,7 @@ TEST(virtual_array, Vector)
TEST(virtual_array, StdVector)
{
std::vector<int> vector = {5, 6, 7, 8};
- VArray_For_ArrayContainer varray{std::move(vector)};
+ VArray<int> varray = VArray<int>::ForContainer(std::move(vector));
EXPECT_EQ(varray.size(), 4);
EXPECT_EQ(varray[0], 5);
EXPECT_EQ(varray[1], 6);
@@ -77,7 +77,7 @@ TEST(virtual_array, StdVector)
TEST(virtual_array, StdArray)
{
std::array<int, 4> array = {2, 3, 4, 5};
- VArray_For_ArrayContainer varray{array};
+ VArray<int> varray = VArray<int>::ForContainer(std::move(array));
EXPECT_EQ(varray.size(), 4);
EXPECT_EQ(varray[0], 2);
EXPECT_EQ(varray[1], 3);
@@ -86,7 +86,7 @@ TEST(virtual_array, StdArray)
TEST(virtual_array, VectorSet)
{
VectorSet<int> vector_set = {5, 3, 7, 3, 3, 5, 1};
- VArray_For_ArrayContainer varray{std::move(vector_set)};
+ VArray<int> varray = VArray<int>::ForContainer(std::move(vector_set));
EXPECT_TRUE(vector_set.is_empty()); /* NOLINT: bugprone-use-after-move. */
EXPECT_EQ(varray.size(), 4);
EXPECT_EQ(varray[0], 5);
@@ -98,7 +98,7 @@ TEST(virtual_array, VectorSet)
TEST(virtual_array, Func)
{
auto func = [](int64_t index) { return (int)(index * index); };
- VArray_For_Func<int, decltype(func)> varray{10, func};
+ VArray<int> varray = VArray<int>::ForFunc(10, func);
EXPECT_EQ(varray.size(), 10);
EXPECT_EQ(varray[0], 0);
EXPECT_EQ(varray[3], 9);
@@ -108,7 +108,7 @@ TEST(virtual_array, Func)
TEST(virtual_array, AsSpan)
{
auto func = [](int64_t index) { return (int)(10 * index); };
- VArray_For_Func<int, decltype(func)> func_varray{10, func};
+ VArray<int> func_varray = VArray<int>::ForFunc(10, func);
VArray_Span span_varray{func_varray};
EXPECT_EQ(span_varray.size(), 10);
Span<int> span = span_varray;
@@ -134,13 +134,14 @@ TEST(virtual_array, DerivedSpan)
vector.append({3, 4, 5});
vector.append({1, 1, 1});
{
- VArray_For_DerivedSpan<std::array<int, 3>, int, get_x> varray{vector};
+ VArray<int> varray = VArray<int>::ForDerivedSpan<std::array<int, 3>, get_x>(vector);
EXPECT_EQ(varray.size(), 2);
EXPECT_EQ(varray[0], 3);
EXPECT_EQ(varray[1], 1);
}
{
- VMutableArray_For_DerivedSpan<std::array<int, 3>, int, get_x, set_x> varray{vector};
+ VMutableArray<int> varray =
+ VMutableArray<int>::ForDerivedSpan<std::array<int, 3>, get_x, set_x>(vector);
EXPECT_EQ(varray.size(), 2);
EXPECT_EQ(varray[0], 3);
EXPECT_EQ(varray[1], 1);
@@ -151,4 +152,32 @@ TEST(virtual_array, DerivedSpan)
}
}
+TEST(virtual_array, MutableToImmutable)
+{
+ std::array<int, 4> array = {4, 2, 6, 4};
+ {
+ VMutableArray<int> mutable_varray = VMutableArray<int>::ForSpan(array);
+ VArray<int> varray = mutable_varray;
+ EXPECT_TRUE(varray.is_span());
+ EXPECT_EQ(varray.size(), 4);
+ EXPECT_EQ(varray[1], 2);
+ EXPECT_EQ(mutable_varray.size(), 4);
+ }
+ {
+ VMutableArray<int> mutable_varray = VMutableArray<int>::ForSpan(array);
+ EXPECT_EQ(mutable_varray.size(), 4);
+ VArray<int> varray = std::move(mutable_varray);
+ EXPECT_TRUE(varray.is_span());
+ EXPECT_EQ(varray.size(), 4);
+ EXPECT_EQ(varray[1], 2);
+ EXPECT_EQ(mutable_varray.size(), 0);
+ }
+ {
+ VArray<int> varray = VMutableArray<int>::ForSpan(array);
+ EXPECT_TRUE(varray.is_span());
+ EXPECT_EQ(varray.size(), 4);
+ EXPECT_EQ(varray[1], 2);
+ }
+}
+
} // namespace blender::tests