git.blender.org/blender.git
author    Hans Goudey <h.goudey@me.com>  2022-08-28 22:40:49 +0300
committer Hans Goudey <h.goudey@me.com>  2022-08-28 22:40:49 +0300
commit    e0414070d9d0a49c7f1b144b5a2f17d9b43c47c7 (patch)
tree      f8a545cd8fab4eec2de058bc3e352db94f75827a /source/blender/blenlib/BLI_virtual_array.hh
parent    67f3259c54657996d47112967c9b982f78ebfe6e (diff)
Cleanup: Slightly improve virtual array implementation consistency

Previously the base virtual array implementation optimized for the common
cases where the data is stored as a span or a single value. However, that
didn't make sense when there are already subclasses that handle those cases
specifically. Instead, implement the faster materialize methods in each
subclass. Now, reaching the base class implementation means that no
optimization for avoiding virtual function call overhead was applied.

Differential Revision: https://developer.blender.org/D15549
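For context, the shape of the change: the base class keeps only the generic element-by-element fallback, and each subclass that knows its storage overrides the materialize method itself. Below is a minimal, self-contained C++ sketch of that pattern; the names (SimpleVArrayImpl, SimpleIndexMask) are simplified stand-ins for illustration, not Blender's actual types.

#include <cstdint>
#include <vector>

/* Simplified stand-in for Blender's IndexMask: a list of indices to touch. */
using SimpleIndexMask = std::vector<int64_t>;

template<typename T> class SimpleVArrayImpl {
 public:
  virtual ~SimpleVArrayImpl() = default;
  virtual T get(int64_t index) const = 0;

  /* Generic fallback: one virtual get() call per element. Reaching this
   * implementation means no storage-specific shortcut was available. */
  virtual void materialize(const SimpleIndexMask &mask, T *dst) const
  {
    for (const int64_t i : mask) {
      dst[i] = this->get(i);
    }
  }
};

template<typename T> class SimpleVArrayImpl_For_Span : public SimpleVArrayImpl<T> {
  const T *data_;

 public:
  SimpleVArrayImpl_For_Span(const T *data) : data_(data) {}
  T get(int64_t index) const override { return data_[index]; }

  /* Override: copy straight from the backing span, with no per-element
   * virtual call. */
  void materialize(const SimpleIndexMask &mask, T *dst) const override
  {
    for (const int64_t i : mask) {
      dst[i] = data_[i];
    }
  }
};

template<typename T> class SimpleVArrayImpl_For_Single : public SimpleVArrayImpl<T> {
  T value_;

 public:
  SimpleVArrayImpl_For_Single(T value) : value_(value) {}
  T get(int64_t /*index*/) const override { return value_; }

  /* Override: fill every masked index with the one stored value. */
  void materialize(const SimpleIndexMask &mask, T *dst) const override
  {
    for (const int64_t i : mask) {
      dst[i] = value_;
    }
  }
};

With this layout, hitting SimpleVArrayImpl::materialize at runtime signals that no devirtualization shortcut applied, which is exactly the invariant the commit message describes.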
Diffstat (limited to 'source/blender/blenlib/BLI_virtual_array.hh')
-rw-r--r--  source/blender/blenlib/BLI_virtual_array.hh | 72
1 file changed, 29 insertions(+), 43 deletions(-)
diff --git a/source/blender/blenlib/BLI_virtual_array.hh b/source/blender/blenlib/BLI_virtual_array.hh
index 00677cf28a2..4784114c88a 100644
--- a/source/blender/blenlib/BLI_virtual_array.hh
+++ b/source/blender/blenlib/BLI_virtual_array.hh
@@ -108,25 +108,7 @@ template<typename T> class VArrayImpl {
    */
   virtual void materialize(IndexMask mask, MutableSpan<T> r_span) const
   {
-    T *dst = r_span.data();
-    /* Optimize for a few different common cases. */
-    const CommonVArrayInfo info = this->common_info();
-    switch (info.type) {
-      case CommonVArrayInfo::Type::Any: {
-        mask.foreach_index([&](const int64_t i) { dst[i] = this->get(i); });
-        break;
-      }
-      case CommonVArrayInfo::Type::Span: {
-        const T *src = static_cast<const T *>(info.data);
-        mask.foreach_index([&](const int64_t i) { dst[i] = src[i]; });
-        break;
-      }
-      case CommonVArrayInfo::Type::Single: {
-        const T single = *static_cast<const T *>(info.data);
-        mask.foreach_index([&](const int64_t i) { dst[i] = single; });
-        break;
-      }
-    }
+    mask.foreach_index([&](const int64_t i) { r_span[i] = this->get(i); });
   }
 
   /**
@@ -135,24 +117,7 @@ template<typename T> class VArrayImpl {
   virtual void materialize_to_uninitialized(IndexMask mask, MutableSpan<T> r_span) const
   {
     T *dst = r_span.data();
-    /* Optimize for a few different common cases. */
-    const CommonVArrayInfo info = this->common_info();
-    switch (info.type) {
-      case CommonVArrayInfo::Type::Any: {
-        mask.foreach_index([&](const int64_t i) { new (dst + i) T(this->get(i)); });
-        break;
-      }
-      case CommonVArrayInfo::Type::Span: {
-        const T *src = static_cast<const T *>(info.data);
-        mask.foreach_index([&](const int64_t i) { new (dst + i) T(src[i]); });
-        break;
-      }
-      case CommonVArrayInfo::Type::Single: {
-        const T single = *static_cast<const T *>(info.data);
-        mask.foreach_index([&](const int64_t i) { new (dst + i) T(single); });
-        break;
-      }
-    }
+    mask.foreach_index([&](const int64_t i) { new (dst + i) T(this->get(i)); });
   }
 
   /**
@@ -288,8 +253,20 @@ template<typename T> class VArrayImpl_For_Span : public VMutableArrayImpl<T> {
     return data_ == static_cast<const T *>(other_info.data);
   }
 
+  void materialize(IndexMask mask, MutableSpan<T> r_span) const override
+  {
+    mask.foreach_index([&](const int64_t i) { r_span[i] = data_[i]; });
+  }
+
+  void materialize_to_uninitialized(IndexMask mask, MutableSpan<T> r_span) const override
+  {
+    T *dst = r_span.data();
+    mask.foreach_index([&](const int64_t i) { new (dst + i) T(data_[i]); });
+  }
+
   void materialize_compressed(IndexMask mask, MutableSpan<T> r_span) const override
   {
+    BLI_assert(mask.size() == r_span.size());
     mask.to_best_mask_type([&](auto best_mask) {
       for (const int64_t i : IndexRange(best_mask.size())) {
         r_span[i] = data_[best_mask[i]];
@@ -300,6 +277,7 @@ template<typename T> class VArrayImpl_For_Span : public VMutableArrayImpl<T> {
   void materialize_compressed_to_uninitialized(IndexMask mask,
                                                MutableSpan<T> r_span) const override
   {
+    BLI_assert(mask.size() == r_span.size());
     T *dst = r_span.data();
     mask.to_best_mask_type([&](auto best_mask) {
       for (const int64_t i : IndexRange(best_mask.size())) {
@@ -378,6 +356,17 @@ template<typename T> class VArrayImpl_For_Single final : public VArrayImpl<T> {
     return CommonVArrayInfo(CommonVArrayInfo::Type::Single, true, &value_);
   }
 
+  void materialize(IndexMask mask, MutableSpan<T> r_span) const override
+  {
+    r_span.fill_indices(mask, value_);
+  }
+
+  void materialize_to_uninitialized(IndexMask mask, MutableSpan<T> r_span) const override
+  {
+    T *dst = r_span.data();
+    mask.foreach_index([&](const int64_t i) { new (dst + i) T(value_); });
+  }
+
   void materialize_compressed(IndexMask mask, MutableSpan<T> r_span) const override
   {
     BLI_assert(mask.size() == r_span.size());
@@ -887,12 +876,9 @@ template<typename T> class VMutableArray;
  * construct the virtual array first and then move it into the vector.
  */
 namespace varray_tag {
-struct span {
-};
-struct single_ref {
-};
-struct single {
-};
+struct span {};
+struct single_ref {};
+struct single {};
 } // namespace varray_tag
 
 /**
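A closing note on the two flavors of materialization touched by this diff: materialize() scatters each element to the same index it came from, so the destination only needs to be addressable at the masked indices, while materialize_compressed() packs the masked elements densely at the front of the destination. That is why the newly added BLI_assert(mask.size() == r_span.size()) checks appear only in the compressed variants. A minimal sketch of the two semantics, using a plain index vector as a stand-in for Blender's IndexMask (simplified, not Blender's actual API):

#include <cassert>
#include <cstddef>
#include <cstdint>
#include <vector>

/* Simplified stand-in for Blender's IndexMask. */
using SimpleIndexMask = std::vector<int64_t>;

/* Scatter semantics: element i of the source lands at index i of dst,
 * so dst must cover the largest masked index. */
void materialize_sketch(const SimpleIndexMask &mask, const int *src, int *dst)
{
  for (const int64_t i : mask) {
    dst[i] = src[i];
  }
}

/* Compressed semantics: masked elements are packed densely at the front of
 * dst, hence the size check mirroring the BLI_assert added in the diff. */
void materialize_compressed_sketch(const SimpleIndexMask &mask,
                                   const int *src,
                                   int *dst,
                                   size_t dst_size)
{
  assert(mask.size() == dst_size);
  for (size_t i = 0; i < mask.size(); i++) {
    dst[i] = src[mask[i]];
  }
}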