git.blender.org/blender.git
author     Jacques Lucke <jacques@blender.org>  2021-04-29 13:59:44 +0300
committer  Jacques Lucke <jacques@blender.org>  2021-04-29 13:59:44 +0300
commit     4e10b196ac15339cfded8d5615f04ac40c93e19b (patch)
tree       a2288584d4433f1a32047adb393edb51cf863165 /source/blender/blenlib/BLI_virtual_array.hh
parent     f903e3a3fd003e5295f7aea35710a77b2e74f846 (diff)
Functions: make copying virtual arrays to span more efficient
Sometimes functions expect a span instead of a virtual array. If the virtual array already is a span internally, great. But if it is not (e.g. the position attribute on a mesh), the elements have to be copied into a span. This patch makes that copying process more efficient by giving the compiler more opportunities for optimization.
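
For orientation, here is a minimal usage sketch (not part of this commit) of the whole-array path: flattening a virtual array into a freshly allocated buffer so that span-based code can consume it. VArray, Array, MutableSpan and materialize() come from the blenlib headers touched below; the helper name and the float element type are illustrative.

#include "BLI_array.hh"
#include "BLI_virtual_array.hh"

/* Hypothetical helper: copy every element of a virtual array into contiguous
 * storage. With this patch, the copy goes through the IndexMask-based
 * materialize path shown in the diff. */
static blender::Array<float> varray_to_array(const blender::VArray<float> &varray)
{
  blender::Array<float> buffer(varray.size());
  varray.materialize(buffer.as_mutable_span());
  return buffer;
}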
Diffstat (limited to 'source/blender/blenlib/BLI_virtual_array.hh')
-rw-r--r--  source/blender/blenlib/BLI_virtual_array.hh | 85
1 file changed, 64 insertions(+), 21 deletions(-)
diff --git a/source/blender/blenlib/BLI_virtual_array.hh b/source/blender/blenlib/BLI_virtual_array.hh
index eae15f0300c..1c02bce8411 100644
--- a/source/blender/blenlib/BLI_virtual_array.hh
+++ b/source/blender/blenlib/BLI_virtual_array.hh
@@ -38,6 +38,7 @@
*/
#include "BLI_array.hh"
+#include "BLI_index_mask.hh"
#include "BLI_span.hh"
namespace blender {
@@ -127,14 +128,25 @@ template<typename T> class VArray {
/* Copy the entire virtual array into a span. */
void materialize(MutableSpan<T> r_span) const
{
- BLI_assert(size_ == r_span.size());
- this->materialize_impl(r_span);
+ this->materialize(IndexMask(size_), r_span);
+ }
+
+ /* Copy some indices of the virtual array into a span. */
+ void materialize(IndexMask mask, MutableSpan<T> r_span) const
+ {
+ BLI_assert(mask.min_array_size() <= size_);
+ this->materialize_impl(mask, r_span);
}
void materialize_to_uninitialized(MutableSpan<T> r_span) const
{
- BLI_assert(size_ == r_span.size());
- this->materialize_to_uninitialized_impl(r_span);
+ this->materialize_to_uninitialized(IndexMask(size_), r_span);
+ }
+
+ void materialize_to_uninitialized(IndexMask mask, MutableSpan<T> r_span) const
+ {
+ BLI_assert(mask.min_array_size() <= size_);
+ this->materialize_to_uninitialized_impl(mask, r_span);
}
protected:
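
The mask-taking overloads added above make partial copies possible: only the indices in the IndexMask are written, and everything else in the destination is left untouched. A hypothetical sketch (not from the patch; the function and variable names are illustrative) of copying just a selection:

#include "BLI_assert.h"
#include "BLI_index_mask.hh"
#include "BLI_virtual_array.hh"

/* Hypothetical: copy only the masked indices of a virtual array. The mask
 * indexes into both the virtual array and the destination buffer, so the
 * buffer must cover at least mask.min_array_size() elements. */
static void copy_selected(const blender::VArray<float> &values,
                          const blender::IndexMask selection,
                          blender::MutableSpan<float> r_buffer)
{
  BLI_assert(selection.min_array_size() <= r_buffer.size());
  values.materialize(selection, r_buffer);
}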
@@ -164,40 +176,35 @@ template<typename T> class VArray {
return T();
}
- virtual void materialize_impl(MutableSpan<T> r_span) const
+ virtual void materialize_impl(IndexMask mask, MutableSpan<T> r_span) const
{
+ T *dst = r_span.data();
if (this->is_span()) {
- const Span<T> span = this->get_internal_span();
- initialized_copy_n(span.data(), size_, r_span.data());
+ const T *src = this->get_internal_span().data();
+ mask.foreach_index([&](const int64_t i) { dst[i] = src[i]; });
}
else if (this->is_single()) {
const T single = this->get_internal_single();
- initialized_fill_n(r_span.data(), size_, single);
+ mask.foreach_index([&](const int64_t i) { dst[i] = single; });
}
else {
- const int64_t size = size_;
- for (int64_t i = 0; i < size; i++) {
- r_span[i] = this->get(i);
- }
+ mask.foreach_index([&](const int64_t i) { dst[i] = this->get(i); });
}
}
- virtual void materialize_to_uninitialized_impl(MutableSpan<T> r_span) const
+ virtual void materialize_to_uninitialized_impl(IndexMask mask, MutableSpan<T> r_span) const
{
+ T *dst = r_span.data();
if (this->is_span()) {
- const Span<T> span = this->get_internal_span();
- uninitialized_copy_n(span.data(), size_, r_span.data());
+ const T *src = this->get_internal_span().data();
+ mask.foreach_index([&](const int64_t i) { new (dst + i) T(src[i]); });
}
else if (this->is_single()) {
const T single = this->get_internal_single();
- uninitialized_fill_n(r_span.data(), size_, single);
+ mask.foreach_index([&](const int64_t i) { new (dst + i) T(single); });
}
else {
- const int64_t size = size_;
- T *dst = r_span.data();
- for (int64_t i = 0; i < size; i++) {
- new (dst + i) T(this->get(i));
- }
+ mask.foreach_index([&](const int64_t i) { new (dst + i) T(this->get(i)); });
}
}
};
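
Note the difference between the two default implementations above: materialize_impl() assigns into elements that already exist, while materialize_to_uninitialized_impl() constructs each element with placement new, so its destination may be raw, unconstructed memory. A hypothetical sketch (not part of the patch; the buffer handling is illustrative) of the uninitialized variant:

#include <string>

#include "BLI_virtual_array.hh"

/* Hypothetical: materialize into storage whose elements have not been
 * constructed yet. Afterwards the caller owns the constructed strings and is
 * responsible for destructing them. */
static void materialize_into_raw(const blender::VArray<std::string> &values,
                                 std::string *raw_buffer /* uninitialized */)
{
  blender::MutableSpan<std::string> dst(raw_buffer, values.size());
  values.materialize_to_uninitialized(dst);
}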
@@ -494,6 +501,18 @@ template<typename T, typename GetFunc> class VArray_For_Func final : public VArr
{
return get_func_(index);
}
+
+ void materialize_impl(IndexMask mask, MutableSpan<T> r_span) const override
+ {
+ T *dst = r_span.data();
+ mask.foreach_index([&](const int64_t i) { dst[i] = get_func_(i); });
+ }
+
+ void materialize_to_uninitialized_impl(IndexMask mask, MutableSpan<T> r_span) const override
+ {
+ T *dst = r_span.data();
+ mask.foreach_index([&](const int64_t i) { new (dst + i) T(get_func_(i)); });
+ }
};
template<typename StructT, typename ElemT, ElemT (*GetFunc)(const StructT &)>
@@ -511,6 +530,18 @@ class VArray_For_DerivedSpan : public VArray<ElemT> {
{
return GetFunc(data_[index]);
}
+
+ void materialize_impl(IndexMask mask, MutableSpan<ElemT> r_span) const override
+ {
+ ElemT *dst = r_span.data();
+ mask.foreach_index([&](const int64_t i) { dst[i] = GetFunc(data_[i]); });
+ }
+
+ void materialize_to_uninitialized_impl(IndexMask mask, MutableSpan<ElemT> r_span) const override
+ {
+ ElemT *dst = r_span.data();
+ mask.foreach_index([&](const int64_t i) { new (dst + i) ElemT(GetFunc(data_[i])); });
+ }
};
template<typename StructT,
@@ -537,6 +568,18 @@ class VMutableArray_For_DerivedSpan : public VMutableArray<ElemT> {
{
SetFunc(data_[index], std::move(value));
}
+
+ void materialize_impl(IndexMask mask, MutableSpan<ElemT> r_span) const override
+ {
+ ElemT *dst = r_span.data();
+ mask.foreach_index([&](const int64_t i) { dst[i] = GetFunc(data_[i]); });
+ }
+
+ void materialize_to_uninitialized_impl(IndexMask mask, MutableSpan<ElemT> r_span) const override
+ {
+ ElemT *dst = r_span.data();
+ mask.foreach_index([&](const int64_t i) { new (dst + i) ElemT(GetFunc(data_[i])); });
+ }
};
/**