
git.blender.org/blender.git
author     Jacques Lucke <jacques@blender.org>  2020-06-09 12:58:47 +0300
committer  Jacques Lucke <jacques@blender.org>  2020-06-09 12:58:47 +0300
commit     f7c0f1b8b83ac475755b633abf59cf9f447b2d49 (patch)
tree       97302f741ce4e40f6e4de9f0cfd54c7320ee7fd5 /source
parent     7d2b4ae9c6ecce394130cd08694914bf93497a11 (diff)
BLI: rename ArrayRef to Span
This also renames `MutableArrayRef` to `MutableSpan`. The name "Span" works better because `std::span` will provide similar functionality in C++20. Furthermore, a shorter, more concise name for such a common data structure is nice.
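For illustration, here is a minimal sketch of how the renamed types are used after this change. The functions `sum_weights`, `scale_weights` and `example_usage` are hypothetical and not part of this commit; only the `Span`/`MutableSpan` API and the implicit conversions from `blender::Vector` come from the code in this diff.

  #include "BLI_span.hh"
  #include "BLI_vector.hh"

  /* Read-only view over an array owned by someone else:
   * formerly ArrayRef<float>, now Span<float>. */
  static float sum_weights(blender::Span<float> weights)
  {
    float sum = 0.0f;
    for (const float weight : weights) {
      sum += weight;
    }
    return sum;
  }

  /* Writable view: formerly MutableArrayRef<float>, now MutableSpan<float>. */
  static void scale_weights(blender::MutableSpan<float> weights, const float factor)
  {
    for (float &weight : weights) {
      weight *= factor;
    }
  }

  static float example_usage(blender::Vector<float> &values)
  {
    /* Vector converts implicitly to MutableSpan for writing... */
    scale_weights(values, 2.0f);
    /* ...and to Span for read-only access. */
    return sum_weights(values);
  }

As before the rename, both span types are just a pointer plus a size and do not own the referenced memory, so they should be passed by value and not stored beyond the lifetime of the owning container.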
Diffstat (limited to 'source')
-rw-r--r--  source/blender/blenkernel/intern/simulation.cc  14
-rw-r--r--  source/blender/blenlib/BLI_array.hh  22
-rw-r--r--  source/blender/blenlib/BLI_dot_export.hh  4
-rw-r--r--  source/blender/blenlib/BLI_index_mask.hh  14
-rw-r--r--  source/blender/blenlib/BLI_index_range.hh  6
-rw-r--r--  source/blender/blenlib/BLI_linear_allocator.hh  28
-rw-r--r--  source/blender/blenlib/BLI_set.hh  6
-rw-r--r--  source/blender/blenlib/BLI_span.hh (renamed from source/blender/blenlib/BLI_array_ref.hh)  166
-rw-r--r--  source/blender/blenlib/BLI_stack.hh  12
-rw-r--r--  source/blender/blenlib/BLI_string_ref.hh  6
-rw-r--r--  source/blender/blenlib/BLI_vector.hh  28
-rw-r--r--  source/blender/blenlib/BLI_vector_set.hh  14
-rw-r--r--  source/blender/blenlib/CMakeLists.txt  2
-rw-r--r--  source/blender/blenlib/intern/BLI_index_range.cc  10
-rw-r--r--  source/blender/blenlib/intern/dot_export.cc  4
-rw-r--r--  source/blender/depsgraph/intern/depsgraph_registry.cc  2
-rw-r--r--  source/blender/depsgraph/intern/depsgraph_registry.h  2
-rw-r--r--  source/blender/depsgraph/intern/depsgraph_type.h  2
-rw-r--r--  source/blender/modifiers/intern/MOD_mask.cc  40
19 files changed, 198 insertions, 184 deletions
diff --git a/source/blender/blenkernel/intern/simulation.cc b/source/blender/blenkernel/intern/simulation.cc
index 8f08665ab8a..20a23ab8b38 100644
--- a/source/blender/blenkernel/intern/simulation.cc
+++ b/source/blender/blenkernel/intern/simulation.cc
@@ -27,11 +27,11 @@
#include "DNA_scene_types.h"
#include "DNA_simulation_types.h"
-#include "BLI_array_ref.hh"
#include "BLI_compiler_compat.h"
#include "BLI_float3.hh"
#include "BLI_listbase.h"
#include "BLI_math.h"
+#include "BLI_span.hh"
#include "BLI_string.h"
#include "BLI_utildefines.h"
@@ -54,9 +54,9 @@
#include "DEG_depsgraph.h"
#include "DEG_depsgraph_query.h"
-using blender::ArrayRef;
using blender::float3;
-using blender::MutableArrayRef;
+using blender::MutableSpan;
+using blender::Span;
static void simulation_init_data(ID *id)
{
@@ -168,9 +168,9 @@ void *BKE_simulation_add(Main *bmain, const char *name)
return simulation;
}
-static MutableArrayRef<float3> get_particle_positions(ParticleSimulationState *state)
+static MutableSpan<float3> get_particle_positions(ParticleSimulationState *state)
{
- return MutableArrayRef<float3>(
+ return MutableSpan<float3>(
(float3 *)CustomData_get_layer_named(&state->attributes, CD_LOCATION, "Position"),
state->tot_particles);
}
@@ -239,7 +239,7 @@ void BKE_simulation_data_update(Depsgraph *depsgraph, Scene *scene, Simulation *
CustomData_realloc(&state_orig->attributes, state_orig->tot_particles);
ensure_attributes_exist(state_orig);
- MutableArrayRef<float3> positions = get_particle_positions(state_orig);
+ MutableSpan<float3> positions = get_particle_positions(state_orig);
for (uint i : positions.index_range()) {
positions[i] = {i / 10.0f, 0, 0};
}
@@ -250,7 +250,7 @@ void BKE_simulation_data_update(Depsgraph *depsgraph, Scene *scene, Simulation *
else if (current_frame == state_orig->current_frame + 1) {
state_orig->current_frame = current_frame;
ensure_attributes_exist(state_orig);
- MutableArrayRef<float3> positions = get_particle_positions(state_orig);
+ MutableSpan<float3> positions = get_particle_positions(state_orig);
for (float3 &position : positions) {
position.z += 0.1f;
}
diff --git a/source/blender/blenlib/BLI_array.hh b/source/blender/blenlib/BLI_array.hh
index 874cd6b215a..b929d1220da 100644
--- a/source/blender/blenlib/BLI_array.hh
+++ b/source/blender/blenlib/BLI_array.hh
@@ -39,9 +39,9 @@
*/
#include "BLI_allocator.hh"
-#include "BLI_array_ref.hh"
#include "BLI_index_range.hh"
#include "BLI_memory_utils.hh"
+#include "BLI_span.hh"
#include "BLI_utildefines.h"
namespace blender {
@@ -90,7 +90,7 @@ class Array {
/**
* Create a new array that contains copies of all values.
*/
- Array(ArrayRef<T> values)
+ Array(Span<T> values)
{
m_size = values.size();
m_data = this->get_buffer_for_size(values.size());
@@ -100,7 +100,7 @@ class Array {
/**
* Create a new array that contains copies of all values.
*/
- Array(const std::initializer_list<T> &values) : Array(ArrayRef<T>(values))
+ Array(const std::initializer_list<T> &values) : Array(Span<T>(values))
{
}
@@ -184,22 +184,22 @@ class Array {
return *this;
}
- operator ArrayRef<T>() const
+ operator Span<T>() const
{
- return ArrayRef<T>(m_data, m_size);
+ return Span<T>(m_data, m_size);
}
- operator MutableArrayRef<T>()
+ operator MutableSpan<T>()
{
- return MutableArrayRef<T>(m_data, m_size);
+ return MutableSpan<T>(m_data, m_size);
}
- ArrayRef<T> as_ref() const
+ Span<T> as_span() const
{
return *this;
}
- MutableArrayRef<T> as_mutable_ref()
+ MutableSpan<T> as_mutable_span()
{
return *this;
}
@@ -243,9 +243,9 @@ class Array {
/**
* Copies the value to the given indices in the array.
*/
- void fill_indices(ArrayRef<uint> indices, const T &value)
+ void fill_indices(Span<uint> indices, const T &value)
{
- MutableArrayRef<T>(*this).fill_indices(indices, value);
+ MutableSpan<T>(*this).fill_indices(indices, value);
}
/**
diff --git a/source/blender/blenlib/BLI_dot_export.hh b/source/blender/blenlib/BLI_dot_export.hh
index 60353d7913f..67af4391a55 100644
--- a/source/blender/blenlib/BLI_dot_export.hh
+++ b/source/blender/blenlib/BLI_dot_export.hh
@@ -267,8 +267,8 @@ class NodeWithSocketsRef {
public:
NodeWithSocketsRef(Node &node,
StringRef name,
- ArrayRef<std::string> input_names,
- ArrayRef<std::string> output_names);
+ Span<std::string> input_names,
+ Span<std::string> output_names);
NodePort input(uint index) const
{
diff --git a/source/blender/blenlib/BLI_index_mask.hh b/source/blender/blenlib/BLI_index_mask.hh
index 4cd348215fe..cc1bf05f936 100644
--- a/source/blender/blenlib/BLI_index_mask.hh
+++ b/source/blender/blenlib/BLI_index_mask.hh
@@ -38,15 +38,15 @@
* same time.
*/
-#include "BLI_array_ref.hh"
#include "BLI_index_range.hh"
+#include "BLI_span.hh"
namespace blender {
class IndexMask {
private:
/* The underlying reference to sorted integers. */
- ArrayRef<uint> m_indices;
+ Span<uint> m_indices;
public:
/* Creates an IndexMask that contains no indices. */
@@ -57,7 +57,7 @@ class IndexMask {
* This constructor asserts that the given integers are in ascending order and that there are no
* duplicates.
*/
- IndexMask(ArrayRef<uint> indices) : m_indices(indices)
+ IndexMask(Span<uint> indices) : m_indices(indices)
{
#ifdef DEBUG
for (uint i = 1; i < indices.size(); i++) {
@@ -70,7 +70,7 @@ class IndexMask {
* Use this method when you know that no indices are skipped. It is more efficient than preparing
* an integer array all the time.
*/
- IndexMask(IndexRange range) : m_indices(range.as_array_ref())
+ IndexMask(IndexRange range) : m_indices(range.as_span())
{
}
@@ -84,7 +84,7 @@ class IndexMask {
* Do this:
* do_something_with_an_index_mask({3, 4, 5});
*/
- IndexMask(const std::initializer_list<uint> &indices) : IndexMask(ArrayRef<uint>(indices))
+ IndexMask(const std::initializer_list<uint> &indices) : IndexMask(Span<uint>(indices))
{
}
@@ -95,7 +95,7 @@ class IndexMask {
{
}
- operator ArrayRef<uint>() const
+ operator Span<uint>() const
{
return m_indices;
}
@@ -133,7 +133,7 @@ class IndexMask {
}
}
- ArrayRef<uint> indices() const
+ Span<uint> indices() const
{
return m_indices;
}
diff --git a/source/blender/blenlib/BLI_index_range.hh b/source/blender/blenlib/BLI_index_range.hh
index 8a97facd9c0..25192429a5d 100644
--- a/source/blender/blenlib/BLI_index_range.hh
+++ b/source/blender/blenlib/BLI_index_range.hh
@@ -49,7 +49,7 @@
* Ideally this could be could be even closer to Python's enumerate(). We might get that in the
* future with newer C++ versions.
*
- * One other important feature is the as_array_ref method. This method returns an ArrayRef<uint>
+ * One other important feature is the as_span method. This method returns an Span<uint>
* that contains the interval as individual numbers.
*/
@@ -66,7 +66,7 @@ template<typename Value> class blocked_range;
namespace blender {
-template<typename T> class ArrayRef;
+template<typename T> class Span;
class IndexRange {
private:
@@ -227,7 +227,7 @@ class IndexRange {
/**
* Get read-only access to a memory buffer that contains the range as actual numbers.
*/
- ArrayRef<uint> as_array_ref() const;
+ Span<uint> as_span() const;
friend std::ostream &operator<<(std::ostream &stream, IndexRange range)
{
diff --git a/source/blender/blenlib/BLI_linear_allocator.hh b/source/blender/blenlib/BLI_linear_allocator.hh
index e2bb3ce02cb..f968f9f15ce 100644
--- a/source/blender/blenlib/BLI_linear_allocator.hh
+++ b/source/blender/blenlib/BLI_linear_allocator.hh
@@ -35,7 +35,7 @@ template<typename Allocator = GuardedAllocator> class LinearAllocator : NonCopya
private:
Allocator m_allocator;
Vector<void *> m_owned_buffers;
- Vector<ArrayRef<char>> m_unused_borrowed_buffers;
+ Vector<Span<char>> m_unused_borrowed_buffers;
uintptr_t m_current_begin;
uintptr_t m_current_end;
@@ -104,9 +104,9 @@ template<typename Allocator = GuardedAllocator> class LinearAllocator : NonCopya
*
* This method only allocates memory and does not construct the instance.
*/
- template<typename T> MutableArrayRef<T> allocate_array(uint size)
+ template<typename T> MutableSpan<T> allocate_array(uint size)
{
- return MutableArrayRef<T>((T *)this->allocate(sizeof(T) * size, alignof(T)), size);
+ return MutableSpan<T>((T *)this->allocate(sizeof(T) * size, alignof(T)), size);
}
/**
@@ -127,9 +127,9 @@ template<typename Allocator = GuardedAllocator> class LinearAllocator : NonCopya
/**
* Copy the given array into a memory buffer provided by this allocator.
*/
- template<typename T> MutableArrayRef<T> construct_array_copy(ArrayRef<T> src)
+ template<typename T> MutableSpan<T> construct_array_copy(Span<T> src)
{
- MutableArrayRef<T> dst = this->allocate_array<T>(src.size());
+ MutableSpan<T> dst = this->allocate_array<T>(src.size());
uninitialized_copy_n(src.data(), src.size(), dst.data());
return dst;
}
@@ -146,14 +146,14 @@ template<typename Allocator = GuardedAllocator> class LinearAllocator : NonCopya
return StringRefNull((const char *)buffer);
}
- MutableArrayRef<void *> allocate_elements_and_pointer_array(uint element_amount,
- uint element_size,
- uint element_alignment)
+ MutableSpan<void *> allocate_elements_and_pointer_array(uint element_amount,
+ uint element_size,
+ uint element_alignment)
{
void *pointer_buffer = this->allocate(element_amount * sizeof(void *), alignof(void *));
void *elements_buffer = this->allocate(element_amount * element_size, element_alignment);
- MutableArrayRef<void *> pointers((void **)pointer_buffer, element_amount);
+ MutableSpan<void *> pointers((void **)pointer_buffer, element_amount);
void *next_element_buffer = elements_buffer;
for (uint i : IndexRange(element_amount)) {
pointers[i] = next_element_buffer;
@@ -164,11 +164,11 @@ template<typename Allocator = GuardedAllocator> class LinearAllocator : NonCopya
}
template<typename T, typename... Args>
- ArrayRef<T *> construct_elements_and_pointer_array(uint n, Args &&... args)
+ Span<T *> construct_elements_and_pointer_array(uint n, Args &&... args)
{
- MutableArrayRef<void *> void_pointers = this->allocate_elements_and_pointer_array(
+ MutableSpan<void *> void_pointers = this->allocate_elements_and_pointer_array(
n, sizeof(T), alignof(T));
- MutableArrayRef<T *> pointers = void_pointers.cast<T *>();
+ MutableSpan<T *> pointers = void_pointers.cast<T *>();
for (uint i : IndexRange(n)) {
new (pointers[i]) T(std::forward<Args>(args)...);
@@ -183,7 +183,7 @@ template<typename Allocator = GuardedAllocator> class LinearAllocator : NonCopya
*/
void provide_buffer(void *buffer, uint size)
{
- m_unused_borrowed_buffers.append(ArrayRef<char>((char *)buffer, size));
+ m_unused_borrowed_buffers.append(Span<char>((char *)buffer, size));
}
template<size_t Size, size_t Alignment>
@@ -196,7 +196,7 @@ template<typename Allocator = GuardedAllocator> class LinearAllocator : NonCopya
void allocate_new_buffer(uint min_allocation_size)
{
for (uint i : m_unused_borrowed_buffers.index_range()) {
- ArrayRef<char> buffer = m_unused_borrowed_buffers[i];
+ Span<char> buffer = m_unused_borrowed_buffers[i];
if (buffer.size() >= min_allocation_size) {
m_unused_borrowed_buffers.remove_and_reorder(i);
m_current_begin = (uintptr_t)buffer.begin();
diff --git a/source/blender/blenlib/BLI_set.hh b/source/blender/blenlib/BLI_set.hh
index 5bdf99360cb..ece9fb05d8c 100644
--- a/source/blender/blenlib/BLI_set.hh
+++ b/source/blender/blenlib/BLI_set.hh
@@ -276,7 +276,7 @@ class Set {
* We might be able to make this faster than sequentially adding all keys, but that is not
* implemented yet.
*/
- void add_multiple(ArrayRef<Key> keys)
+ void add_multiple(Span<Key> keys)
{
for (const Key &key : keys) {
this->add(key);
@@ -287,7 +287,7 @@ class Set {
* Convenience function to add many new keys to the set at once. The keys must not exist in the
* set before and there must not be duplicates in the array.
*/
- void add_multiple_new(ArrayRef<Key> keys)
+ void add_multiple_new(Span<Key> keys)
{
for (const Key &key : keys) {
this->add_new(key);
@@ -726,7 +726,7 @@ template<typename Key> class StdUnorderedSetWrapper {
return m_set.insert(std::move(key)).second;
}
- void add_multiple(ArrayRef<Key> keys)
+ void add_multiple(Span<Key> keys)
{
for (const Key &key : keys) {
m_set.insert(key);
diff --git a/source/blender/blenlib/BLI_array_ref.hh b/source/blender/blenlib/BLI_span.hh
index 2a4d0b6e0df..23f0f161e01 100644
--- a/source/blender/blenlib/BLI_array_ref.hh
+++ b/source/blender/blenlib/BLI_span.hh
@@ -14,44 +14,59 @@
* Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
-#ifndef __BLI_ARRAY_REF_HH__
-#define __BLI_ARRAY_REF_HH__
+#ifndef __BLI_SPAN_HH__
+#define __BLI_SPAN_HH__
/** \file
* \ingroup bli
*
- * An `blender::ArrayRef<T>` references an array that is owned by someone else. It is just a
- * pointer and a size. Since the memory is not owned, ArrayRef should not be used to transfer
- * ownership. The array cannot be modified through the ArrayRef. However, if T is a non-const
+ * An `blender::Span<T>` references an array that is owned by someone else. It is just a
+ * pointer and a size. Since the memory is not owned, Span should not be used to transfer
+ * ownership. The array cannot be modified through the Span. However, if T is a non-const
* pointer, the pointed-to elements can be modified.
*
- * There is also `blender::MutableArrayRef<T>`. It is mostly the same as ArrayRef, but allows the
+ * There is also `blender::MutableSpan<T>`. It is mostly the same as Span, but allows the
* array to be modified.
*
- * An (Mutable)ArrayRef can refer to data owned by many different data structures including
+ * A (Mutable)Span can refer to data owned by many different data structures including
* blender::Vector, blender::Array, blender::VectorSet, std::vector, std::array, std::string,
* std::initializer_list and c-style array.
*
- * `blender::ArrayRef<T>` should be your default choice when you have to pass a read-only array
+ * `blender::Span` is very similar to `std::span` (C++20). However, there are a few differences:
+ * - `blender::Span` is const by default. This is to avoid making things mutable when they don't
+ * have to be. To get a non-const span, you need to use `blender::MutableSpan`. Below is a list
+ * of const-behavior-equivalent pairs of data structures:
+ * - std::span<int> <==> blender::MutableSpan<int>
+ * - std::span<const int> <==> blender::Span<int>
+ * - std::span<int *> <==> blender::MutableSpan<int *>
+ * - std::span<const int *> <==> blender::MutableSpan<const int *>
+ * - std::span<int * const> <==> blender::Span<int *>
+ * - std::span<const int * const> <==> blender::Span<const int *>
+ * - `blender::Span` always has a dynamic extent, while `std::span` can have a size that is
+ * determined at compile time. I did not have a use case for that yet. If we need it, we can
+ * decide to add this functionality to `blender::Span` or introduce a new type like
+ * `blender::FixedSpan<T, N>`.
+ *
+ * `blender::Span<T>` should be your default choice when you have to pass a read-only array
* into a function. It is better than passing a `const Vector &`, because then the function only
- * works for vectors and not for e.g. arrays. Using ArrayRef as function parameter makes it usable
+ * works for vectors and not for e.g. arrays. Using Span as function parameter makes it usable
* in more contexts, better expresses the intent and does not sacrifice performance. It is also
* better than passing a raw pointer and size separately, because it is more convenient and safe.
*
- * `blender::MutableArrayRef<T>` can be used when a function is supposed to return an array, the
+ * `blender::MutableSpan<T>` can be used when a function is supposed to return an array, the
* size of which is known before the function is called. One advantage of this approach is that the
* caller is responsible for allocation and deallocation. Furthermore, the function can focus on
* its task, without having to worry about memory allocation. Alternatively, a function could
* return an Array or Vector.
*
- * Note: When a function has a MutableArrayRef<T> output parameter and T is not a trivial type,
+ * Note: When a function has a MutableSpan<T> output parameter and T is not a trivial type,
* then the function has to specify whether the referenced array is expected to be initialized or
* not.
*
- * Since the arrays are only referenced, it is generally unsafe to store an ArrayRef. When you
+ * Since the arrays are only referenced, it is generally unsafe to store an Span. When you
* store one, you should know who owns the memory.
*
- * Instances of ArrayRef and MutableArrayRef are small and should be passed by value.
+ * Instances of Span and MutableSpan are small and should be passed by value.
*/
#include <algorithm>
@@ -70,7 +85,7 @@ namespace blender {
* References an array of type T that is owned by someone else. The data in the array cannot be
* modified.
*/
-template<typename T> class ArrayRef {
+template<typename T> class Span {
private:
const T *m_start = nullptr;
uint m_size = 0;
@@ -79,9 +94,9 @@ template<typename T> class ArrayRef {
/**
* Create a reference to an empty array.
*/
- ArrayRef() = default;
+ Span() = default;
- ArrayRef(const T *start, uint size) : m_start(start), m_size(size)
+ Span(const T *start, uint size) : m_start(start), m_size(size)
{
}
@@ -93,29 +108,29 @@ template<typename T> class ArrayRef {
* call_function_with_array({1, 2, 3, 4});
*
* Don't:
- * ArrayRef<int> ref = {1, 2, 3, 4};
- * call_function_with_array(ref);
+ * Span<int> span = {1, 2, 3, 4};
+ * call_function_with_array(span);
*/
- ArrayRef(const std::initializer_list<T> &list) : ArrayRef(list.begin(), (uint)list.size())
+ Span(const std::initializer_list<T> &list) : Span(list.begin(), (uint)list.size())
{
}
- ArrayRef(const std::vector<T> &vector) : ArrayRef(vector.data(), (uint)vector.size())
+ Span(const std::vector<T> &vector) : Span(vector.data(), (uint)vector.size())
{
}
- template<std::size_t N> ArrayRef(const std::array<T, N> &array) : ArrayRef(array.data(), N)
+ template<std::size_t N> Span(const std::array<T, N> &array) : Span(array.data(), N)
{
}
/**
* Support implicit conversions like the ones below:
- * ArrayRef<T *> -> ArrayRef<const T *>
- * ArrayRef<Derived *> -> ArrayRef<Base *>
+ * Span<T *> -> Span<const T *>
+ * Span<Derived *> -> Span<Base *>
*/
template<typename U,
typename std::enable_if<std::is_convertible<U *, T>::value>::type * = nullptr>
- ArrayRef(ArrayRef<U *> array) : ArrayRef((T *)array.data(), array.size())
+ Span(Span<U *> array) : Span((T *)array.data(), array.size())
{
}
@@ -123,52 +138,52 @@ template<typename T> class ArrayRef {
* Returns a contiguous part of the array. This invokes undefined behavior when the slice does
* not stay within the bounds of the array.
*/
- ArrayRef slice(uint start, uint size) const
+ Span slice(uint start, uint size) const
{
BLI_assert(start + size <= this->size() || size == 0);
- return ArrayRef(m_start + start, size);
+ return Span(m_start + start, size);
}
- ArrayRef slice(IndexRange range) const
+ Span slice(IndexRange range) const
{
return this->slice(range.start(), range.size());
}
/**
- * Returns a new ArrayRef with n elements removed from the beginning. This invokes undefined
+ * Returns a new Span with n elements removed from the beginning. This invokes undefined
* behavior when the array is too small.
*/
- ArrayRef drop_front(uint n) const
+ Span drop_front(uint n) const
{
BLI_assert(n <= this->size());
return this->slice(n, this->size() - n);
}
/**
- * Returns a new ArrayRef with n elements removed from the beginning. This invokes undefined
+ * Returns a new Span with n elements removed from the beginning. This invokes undefined
* behavior when the array is too small.
*/
- ArrayRef drop_back(uint n) const
+ Span drop_back(uint n) const
{
BLI_assert(n <= this->size());
return this->slice(0, this->size() - n);
}
/**
- * Returns a new ArrayRef that only contains the first n elements. This invokes undefined
+ * Returns a new Span that only contains the first n elements. This invokes undefined
* behavior when the array is too small.
*/
- ArrayRef take_front(uint n) const
+ Span take_front(uint n) const
{
BLI_assert(n <= this->size());
return this->slice(0, n);
}
/**
- * Returns a new ArrayRef that only contains the last n elements. This invokes undefined
+ * Returns a new Span that only contains the last n elements. This invokes undefined
* behavior when the array is too small.
*/
- ArrayRef take_back(uint n) const
+ Span take_back(uint n) const
{
BLI_assert(n <= this->size());
return this->slice(this->size() - n, n);
@@ -220,7 +235,7 @@ template<typename T> class ArrayRef {
}
/**
- * Returns the number of bytes referenced by this ArrayRef.
+ * Returns the number of bytes referenced by this Span.
*/
uint size_in_bytes() const
{
@@ -323,7 +338,7 @@ template<typename T> class ArrayRef {
* called on small arrays, because it has a running time of O(n*m) where n and m are the sizes of
* the arrays.
*/
- bool intersects__linear_search(ArrayRef other) const
+ bool intersects__linear_search(Span other) const
{
/* The size should really be smaller than that. If it is not, the calling code should be
* changed. */
@@ -372,22 +387,22 @@ template<typename T> class ArrayRef {
}
/**
- * Returns a new ArrayRef to the same underlying memory buffer. No conversions are done.
+ * Returns a new Span to the same underlying memory buffer. No conversions are done.
*/
- template<typename NewT> ArrayRef<NewT> cast() const
+ template<typename NewT> Span<NewT> cast() const
{
BLI_assert((m_size * sizeof(T)) % sizeof(NewT) == 0);
uint new_size = m_size * sizeof(T) / sizeof(NewT);
- return ArrayRef<NewT>(reinterpret_cast<const NewT *>(m_start), new_size);
+ return Span<NewT>(reinterpret_cast<const NewT *>(m_start), new_size);
}
/**
- * A debug utility to print the content of the ArrayRef. Every element will be printed on a
+ * A debug utility to print the content of the Span. Every element will be printed on a
* separate line using the given callback.
*/
template<typename PrintLineF> void print_as_lines(std::string name, PrintLineF print_line) const
{
- std::cout << "ArrayRef: " << name << " \tSize:" << m_size << '\n';
+ std::cout << "Span: " << name << " \tSize:" << m_size << '\n';
for (const T &value : *this) {
std::cout << " ";
print_line(value);
@@ -396,7 +411,7 @@ template<typename T> class ArrayRef {
}
/**
- * A debug utility to print the content of the array ref. Every element be printed on a separate
+ * A debug utility to print the content of the span. Every element be printed on a separate
* line.
*/
void print_as_lines(std::string name) const
@@ -406,18 +421,18 @@ template<typename T> class ArrayRef {
};
/**
- * Mostly the same as ArrayRef, except that one can change the array elements through a
- * MutableArrayRef.
+ * Mostly the same as Span, except that one can change the array elements through a
+ * MutableSpan.
*/
-template<typename T> class MutableArrayRef {
+template<typename T> class MutableSpan {
private:
T *m_start;
uint m_size;
public:
- MutableArrayRef() = default;
+ MutableSpan() = default;
- MutableArrayRef(T *start, uint size) : m_start(start), m_size(size)
+ MutableSpan(T *start, uint size) : m_start(start), m_size(size)
{
}
@@ -429,25 +444,24 @@ template<typename T> class MutableArrayRef {
* call_function_with_array({1, 2, 3, 4});
*
* Don't:
- * MutableArrayRef<int> ref = {1, 2, 3, 4};
- * call_function_with_array(ref);
+ * MutableSpan<int> span = {1, 2, 3, 4};
+ * call_function_with_array(span);
*/
- MutableArrayRef(std::initializer_list<T> &list) : MutableArrayRef(list.begin(), list.size())
+ MutableSpan(std::initializer_list<T> &list) : MutableSpan(list.begin(), list.size())
{
}
- MutableArrayRef(std::vector<T> &vector) : MutableArrayRef(vector.data(), vector.size())
+ MutableSpan(std::vector<T> &vector) : MutableSpan(vector.data(), vector.size())
{
}
- template<std::size_t N>
- MutableArrayRef(std::array<T, N> &array) : MutableArrayRef(array.data(), N)
+ template<std::size_t N> MutableSpan(std::array<T, N> &array) : MutableSpan(array.data(), N)
{
}
- operator ArrayRef<T>() const
+ operator Span<T>() const
{
- return ArrayRef<T>(m_start, m_size);
+ return Span<T>(m_start, m_size);
}
/**
@@ -470,7 +484,7 @@ template<typename T> class MutableArrayRef {
* Replace a subset of all elements with the given value. This invokes undefined behavior when
* one of the indices is out of bounds.
*/
- void fill_indices(ArrayRef<uint> indices, const T &value)
+ void fill_indices(Span<uint> indices, const T &value)
{
for (uint i : indices) {
BLI_assert(i < m_size);
@@ -507,59 +521,59 @@ template<typename T> class MutableArrayRef {
* Returns a contiguous part of the array. This invokes undefined behavior when the slice would
* go out of bounds.
*/
- MutableArrayRef slice(uint start, uint length) const
+ MutableSpan slice(uint start, uint length) const
{
BLI_assert(start + length <= this->size());
- return MutableArrayRef(m_start + start, length);
+ return MutableSpan(m_start + start, length);
}
/**
- * Returns a new MutableArrayRef with n elements removed from the beginning. This invokes
+ * Returns a new MutableSpan with n elements removed from the beginning. This invokes
* undefined behavior when the array is too small.
*/
- MutableArrayRef drop_front(uint n) const
+ MutableSpan drop_front(uint n) const
{
BLI_assert(n <= this->size());
return this->slice(n, this->size() - n);
}
/**
- * Returns a new MutableArrayRef with n elements removed from the end. This invokes undefined
+ * Returns a new MutableSpan with n elements removed from the end. This invokes undefined
* behavior when the array is too small.
*/
- MutableArrayRef drop_back(uint n) const
+ MutableSpan drop_back(uint n) const
{
BLI_assert(n <= this->size());
return this->slice(0, this->size() - n);
}
/**
- * Returns a new MutableArrayRef that only contains the first n elements. This invokes undefined
+ * Returns a new MutableSpan that only contains the first n elements. This invokes undefined
* behavior when the array is too small.
*/
- MutableArrayRef take_front(uint n) const
+ MutableSpan take_front(uint n) const
{
BLI_assert(n <= this->size());
return this->slice(0, n);
}
/**
- * Return a new MutableArrayRef that only contains the last n elements. This invokes undefined
+ * Return a new MutableSpan that only contains the last n elements. This invokes undefined
* behavior when the array is too small.
*/
- MutableArrayRef take_back(uint n) const
+ MutableSpan take_back(uint n) const
{
BLI_assert(n <= this->size());
return this->slice(this->size() - n, n);
}
/**
- * Returns an (immutable) ArrayRef that references the same array. This is usually not needed,
+ * Returns an (immutable) Span that references the same array. This is usually not needed,
* due to implicit conversions. However, sometimes automatic type deduction needs some help.
*/
- ArrayRef<T> as_ref() const
+ Span<T> as_span() const
{
- return ArrayRef<T>(m_start, m_size);
+ return Span<T>(m_start, m_size);
}
/**
@@ -582,22 +596,22 @@ template<typename T> class MutableArrayRef {
}
/**
- * Returns a new array ref to the same underlying memory buffer. No conversions are done.
+ * Returns a new span to the same underlying memory buffer. No conversions are done.
*/
- template<typename NewT> MutableArrayRef<NewT> cast() const
+ template<typename NewT> MutableSpan<NewT> cast() const
{
BLI_assert((m_size * sizeof(T)) % sizeof(NewT) == 0);
uint new_size = m_size * sizeof(T) / sizeof(NewT);
- return MutableArrayRef<NewT>(reinterpret_cast<NewT *>(m_start), new_size);
+ return MutableSpan<NewT>(reinterpret_cast<NewT *>(m_start), new_size);
}
};
/**
* Shorthand to make use of automatic template parameter deduction.
*/
-template<typename T> ArrayRef<T> ref_c_array(const T *array, uint size)
+template<typename T> Span<T> ref_c_array(const T *array, uint size)
{
- return ArrayRef<T>(array, size);
+ return Span<T>(array, size);
}
/**
@@ -627,4 +641,4 @@ void assert_same_size(const T1 &v1, const T2 &v2, const T3 &v3)
} /* namespace blender */
-#endif /* __BLI_ARRAY_REF_HH__ */
+#endif /* __BLI_SPAN_HH__ */
diff --git a/source/blender/blenlib/BLI_stack.hh b/source/blender/blenlib/BLI_stack.hh
index 81b8b192efd..030d9c84c8e 100644
--- a/source/blender/blenlib/BLI_stack.hh
+++ b/source/blender/blenlib/BLI_stack.hh
@@ -41,8 +41,8 @@
*/
#include "BLI_allocator.hh"
-#include "BLI_array_ref.hh"
#include "BLI_memory_utils.hh"
+#include "BLI_span.hh"
namespace blender {
@@ -139,7 +139,7 @@ class Stack {
* Create a new stack that contains the given elements. The values are pushed to the stack in
* the order they are in the array.
*/
- Stack(ArrayRef<T> values) : Stack()
+ Stack(Span<T> values) : Stack()
{
this->push_multiple(values);
}
@@ -153,7 +153,7 @@ class Stack {
* assert(stack.pop() == 6);
* assert(stack.pop() == 5);
*/
- Stack(const std::initializer_list<T> &values) : Stack(ArrayRef<T>(values))
+ Stack(const std::initializer_list<T> &values) : Stack(Span<T>(values))
{
}
@@ -162,7 +162,7 @@ class Stack {
for (const Chunk *chunk = &other.m_inline_chunk; chunk; chunk = chunk->above) {
const T *begin = chunk->begin;
const T *end = (chunk == other.m_top_chunk) ? other.m_top : chunk->capacity_end;
- this->push_multiple(ArrayRef<T>(begin, end - begin));
+ this->push_multiple(Span<T>(begin, end - begin));
}
}
@@ -289,9 +289,9 @@ class Stack {
* This method is more efficient than pushing multiple elements individually and might cause less
* heap allocations.
*/
- void push_multiple(ArrayRef<T> values)
+ void push_multiple(Span<T> values)
{
- ArrayRef<T> remaining_values = values;
+ Span<T> remaining_values = values;
while (!remaining_values.is_empty()) {
if (m_top == m_top_chunk->capacity_end) {
this->activate_next_chunk(remaining_values.size());
diff --git a/source/blender/blenlib/BLI_string_ref.hh b/source/blender/blenlib/BLI_string_ref.hh
index 3c670c4f2b8..8ed923068a8 100644
--- a/source/blender/blenlib/BLI_string_ref.hh
+++ b/source/blender/blenlib/BLI_string_ref.hh
@@ -46,7 +46,7 @@
#include <sstream>
#include <string>
-#include "BLI_array_ref.hh"
+#include "BLI_span.hh"
#include "BLI_utildefines.h"
namespace blender {
@@ -83,9 +83,9 @@ class StringRefBase {
return m_data;
}
- operator ArrayRef<char>() const
+ operator Span<char>() const
{
- return ArrayRef<char>(m_data, m_size);
+ return Span<char>(m_data, m_size);
}
/**
diff --git a/source/blender/blenlib/BLI_vector.hh b/source/blender/blenlib/BLI_vector.hh
index 8042a2a554e..b2b2da0a4b0 100644
--- a/source/blender/blenlib/BLI_vector.hh
+++ b/source/blender/blenlib/BLI_vector.hh
@@ -44,11 +44,11 @@
#include <memory>
#include "BLI_allocator.hh"
-#include "BLI_array_ref.hh"
#include "BLI_index_range.hh"
#include "BLI_listbase_wrapper.hh"
#include "BLI_math_base.h"
#include "BLI_memory_utils.hh"
+#include "BLI_span.hh"
#include "BLI_string.h"
#include "BLI_string_ref.hh"
#include "BLI_utildefines.h"
@@ -152,14 +152,14 @@ class Vector {
* This allows you to write code like:
* Vector<int> vec = {3, 4, 5};
*/
- Vector(const std::initializer_list<T> &values) : Vector(ArrayRef<T>(values))
+ Vector(const std::initializer_list<T> &values) : Vector(Span<T>(values))
{
}
/**
* Create a vector from an array ref. The values in the vector are copy constructed.
*/
- Vector(ArrayRef<T> values) : Vector()
+ Vector(Span<T> values) : Vector()
{
uint size = values.size();
this->reserve(size);
@@ -263,22 +263,22 @@ class Vector {
}
}
- operator ArrayRef<T>() const
+ operator Span<T>() const
{
- return ArrayRef<T>(m_begin, this->size());
+ return Span<T>(m_begin, this->size());
}
- operator MutableArrayRef<T>()
+ operator MutableSpan<T>()
{
- return MutableArrayRef<T>(m_begin, this->size());
+ return MutableSpan<T>(m_begin, this->size());
}
- ArrayRef<T> as_ref() const
+ Span<T> as_span() const
{
return *this;
}
- MutableArrayRef<T> as_mutable_ref()
+ MutableSpan<T> as_mutable_span()
{
return *this;
}
@@ -478,7 +478,7 @@ class Vector {
*
* This can be used to emulate parts of std::vector::insert.
*/
- void extend(ArrayRef<T> array)
+ void extend(Span<T> array)
{
this->extend(array.data(), array.size());
}
@@ -493,7 +493,7 @@ class Vector {
* operation when the vector is large, but can be very cheap when it is known that the vector is
* small.
*/
- void extend_non_duplicates(ArrayRef<T> array)
+ void extend_non_duplicates(Span<T> array)
{
for (const T &value : array) {
this->append_non_duplicates(value);
@@ -504,7 +504,7 @@ class Vector {
* Extend the vector without bounds checking. It is assumed that enough memory has been reserved
* beforehand. Only use this in performance critical code.
*/
- void extend_unchecked(ArrayRef<T> array)
+ void extend_unchecked(Span<T> array)
{
this->extend_unchecked(array.data(), array.size());
}
@@ -542,9 +542,9 @@ class Vector {
/**
* Copy the value to all positions specified by the indices array.
*/
- void fill_indices(ArrayRef<uint> indices, const T &value)
+ void fill_indices(Span<uint> indices, const T &value)
{
- MutableArrayRef<T>(*this).fill_indices(indices, value);
+ MutableSpan<T>(*this).fill_indices(indices, value);
}
/**
diff --git a/source/blender/blenlib/BLI_vector_set.hh b/source/blender/blenlib/BLI_vector_set.hh
index a3155a3a9fd..d330d3c3247 100644
--- a/source/blender/blenlib/BLI_vector_set.hh
+++ b/source/blender/blenlib/BLI_vector_set.hh
@@ -32,7 +32,7 @@
* - The insertion order is important.
* - Iteration over all keys has to be fast.
* - The keys in the set are supposed to be passed to a function that does not have to know that
- * the keys are stored in a set. With a VectorSet, one can get an ArrayRef containing all keys
+ * the keys are stored in a set. With a VectorSet, one can get a Span containing all keys
* without additional copies.
*
* blender::VectorSet is implemented using open addressing in a slot array with a power-of-two
@@ -279,7 +279,7 @@ class VectorSet {
* We might be able to make this faster than sequentially adding all keys, but that is not
* implemented yet.
*/
- void add_multiple(ArrayRef<Key> keys)
+ void add_multiple(Span<Key> keys)
{
for (const Key &key : keys) {
this->add(key);
@@ -411,18 +411,18 @@ class VectorSet {
return m_keys[index];
}
- operator ArrayRef<Key>() const
+ operator Span<Key>() const
{
- return ArrayRef<Key>(m_keys, this->size());
+ return Span<Key>(m_keys, this->size());
}
/**
- * Get an ArrayRef referencing the keys vector. The referenced memory buffer is only valid as
+ * Get an Span referencing the keys vector. The referenced memory buffer is only valid as
* long as the vector set is not changed.
*
* The keys must not be changed, because this would change their hash value.
*/
- ArrayRef<Key> as_ref() const
+ Span<Key> as_span() const
{
return *this;
}
@@ -432,7 +432,7 @@ class VectorSet {
*/
void print_stats(StringRef name = "") const
{
- HashTableStats stats(*this, this->as_ref());
+ HashTableStats stats(*this, this->as_span());
stats.print();
}
diff --git a/source/blender/blenlib/CMakeLists.txt b/source/blender/blenlib/CMakeLists.txt
index 79a65bbb98d..69df0505dfe 100644
--- a/source/blender/blenlib/CMakeLists.txt
+++ b/source/blender/blenlib/CMakeLists.txt
@@ -144,7 +144,6 @@ set(SRC
BLI_args.h
BLI_array.h
BLI_array.hh
- BLI_array_ref.hh
BLI_array_store.h
BLI_array_store_utils.h
BLI_array_utils.h
@@ -240,6 +239,7 @@ set(SRC
BLI_smallhash.h
BLI_sort.h
BLI_sort_utils.h
+ BLI_span.hh
BLI_stack.h
BLI_stack.hh
BLI_strict_flags.h
diff --git a/source/blender/blenlib/intern/BLI_index_range.cc b/source/blender/blenlib/intern/BLI_index_range.cc
index 31b969ec0b3..910e418a29b 100644
--- a/source/blender/blenlib/intern/BLI_index_range.cc
+++ b/source/blender/blenlib/intern/BLI_index_range.cc
@@ -18,8 +18,8 @@
#include <mutex>
#include "BLI_array.hh"
-#include "BLI_array_ref.hh"
#include "BLI_index_range.hh"
+#include "BLI_span.hh"
#include "BLI_vector.hh"
namespace blender {
@@ -29,18 +29,18 @@ static uint current_array_size = 0;
static uint *current_array = nullptr;
static std::mutex current_array_mutex;
-ArrayRef<uint> IndexRange::as_array_ref() const
+Span<uint> IndexRange::as_span() const
{
uint min_required_size = m_start + m_size;
if (min_required_size <= current_array_size) {
- return ArrayRef<uint>(current_array + m_start, m_size);
+ return Span<uint>(current_array + m_start, m_size);
}
std::lock_guard<std::mutex> lock(current_array_mutex);
if (min_required_size <= current_array_size) {
- return ArrayRef<uint>(current_array + m_start, m_size);
+ return Span<uint>(current_array + m_start, m_size);
}
uint new_size = std::max<uint>(1000, power_of_2_max_u(min_required_size));
@@ -54,7 +54,7 @@ ArrayRef<uint> IndexRange::as_array_ref() const
std::atomic_thread_fence(std::memory_order_seq_cst);
current_array_size = new_size;
- return ArrayRef<uint>(current_array + m_start, m_size);
+ return Span<uint>(current_array + m_start, m_size);
}
} // namespace blender
diff --git a/source/blender/blenlib/intern/dot_export.cc b/source/blender/blenlib/intern/dot_export.cc
index f08fb02ec21..a2cf843c473 100644
--- a/source/blender/blenlib/intern/dot_export.cc
+++ b/source/blender/blenlib/intern/dot_export.cc
@@ -250,8 +250,8 @@ std::string color_attr_from_hsv(float h, float s, float v)
NodeWithSocketsRef::NodeWithSocketsRef(Node &node,
StringRef name,
- ArrayRef<std::string> input_names,
- ArrayRef<std::string> output_names)
+ Span<std::string> input_names,
+ Span<std::string> output_names)
: m_node(&node)
{
std::stringstream ss;
diff --git a/source/blender/depsgraph/intern/depsgraph_registry.cc b/source/blender/depsgraph/intern/depsgraph_registry.cc
index 3b0a0b3ea19..7eac7b45069 100644
--- a/source/blender/depsgraph/intern/depsgraph_registry.cc
+++ b/source/blender/depsgraph/intern/depsgraph_registry.cc
@@ -49,7 +49,7 @@ void unregister_graph(Depsgraph *depsgraph)
}
}
-ArrayRef<Depsgraph *> get_all_registered_graphs(Main *bmain)
+Span<Depsgraph *> get_all_registered_graphs(Main *bmain)
{
VectorSet<Depsgraph *> *graphs = g_graph_registry.lookup_ptr(bmain);
if (graphs != nullptr) {
diff --git a/source/blender/depsgraph/intern/depsgraph_registry.h b/source/blender/depsgraph/intern/depsgraph_registry.h
index f8e5b9543f2..967791d2fbf 100644
--- a/source/blender/depsgraph/intern/depsgraph_registry.h
+++ b/source/blender/depsgraph/intern/depsgraph_registry.h
@@ -33,6 +33,6 @@ struct Depsgraph;
void register_graph(Depsgraph *depsgraph);
void unregister_graph(Depsgraph *depsgraph);
-ArrayRef<Depsgraph *> get_all_registered_graphs(Main *bmain);
+Span<Depsgraph *> get_all_registered_graphs(Main *bmain);
} // namespace DEG
diff --git a/source/blender/depsgraph/intern/depsgraph_type.h b/source/blender/depsgraph/intern/depsgraph_type.h
index f6901395897..43b1ecb774a 100644
--- a/source/blender/depsgraph/intern/depsgraph_type.h
+++ b/source/blender/depsgraph/intern/depsgraph_type.h
@@ -53,9 +53,9 @@ struct CustomData_MeshMasks;
namespace DEG {
/* Commonly used types. */
-using blender::ArrayRef;
using blender::Map;
using blender::Set;
+using blender::Span;
using blender::StringRef;
using blender::StringRefNull;
using blender::Vector;
diff --git a/source/blender/modifiers/intern/MOD_mask.cc b/source/blender/modifiers/intern/MOD_mask.cc
index 2fe3195583a..46b88142223 100644
--- a/source/blender/modifiers/intern/MOD_mask.cc
+++ b/source/blender/modifiers/intern/MOD_mask.cc
@@ -67,10 +67,10 @@
#include "BLI_vector.hh"
using blender::Array;
-using blender::ArrayRef;
using blender::IndexRange;
using blender::ListBaseWrapper;
-using blender::MutableArrayRef;
+using blender::MutableSpan;
+using blender::Span;
using blender::Vector;
static void requiredDataMask(Object *UNUSED(ob),
@@ -104,7 +104,7 @@ static void compute_vertex_mask__armature_mode(MDeformVert *dvert,
Object *ob,
Object *armature_ob,
float threshold,
- MutableArrayRef<bool> r_vertex_mask)
+ MutableSpan<bool> r_vertex_mask)
{
/* Element i is true if there is a selected bone that uses vertex group i. */
Vector<bool> selected_bone_uses_group;
@@ -115,10 +115,10 @@ static void compute_vertex_mask__armature_mode(MDeformVert *dvert,
selected_bone_uses_group.append(bone_for_group_exists);
}
- ArrayRef<bool> use_vertex_group = selected_bone_uses_group;
+ Span<bool> use_vertex_group = selected_bone_uses_group;
for (int i : r_vertex_mask.index_range()) {
- ArrayRef<MDeformWeight> weights(dvert[i].dw, dvert[i].totweight);
+ Span<MDeformWeight> weights(dvert[i].dw, dvert[i].totweight);
r_vertex_mask[i] = false;
/* check the groups that vertex is assigned to, and see if it was any use */
@@ -137,7 +137,7 @@ static void compute_vertex_mask__armature_mode(MDeformVert *dvert,
static void compute_vertex_mask__vertex_group_mode(MDeformVert *dvert,
int defgrp_index,
float threshold,
- MutableArrayRef<bool> r_vertex_mask)
+ MutableSpan<bool> r_vertex_mask)
{
for (int i : r_vertex_mask.index_range()) {
const bool found = BKE_defvert_find_weight(&dvert[i], defgrp_index) > threshold;
@@ -145,15 +145,15 @@ static void compute_vertex_mask__vertex_group_mode(MDeformVert *dvert,
}
}
-static void invert_boolean_array(MutableArrayRef<bool> array)
+static void invert_boolean_array(MutableSpan<bool> array)
{
for (bool &value : array) {
value = !value;
}
}
-static void compute_masked_vertices(ArrayRef<bool> vertex_mask,
- MutableArrayRef<int> r_vertex_map,
+static void compute_masked_vertices(Span<bool> vertex_mask,
+ MutableSpan<int> r_vertex_map,
uint *r_num_masked_vertices)
{
BLI_assert(vertex_mask.size() == r_vertex_map.size());
@@ -173,8 +173,8 @@ static void compute_masked_vertices(ArrayRef<bool> vertex_mask,
}
static void computed_masked_edges(const Mesh *mesh,
- ArrayRef<bool> vertex_mask,
- MutableArrayRef<int> r_edge_map,
+ Span<bool> vertex_mask,
+ MutableSpan<int> r_edge_map,
uint *r_num_masked_edges)
{
BLI_assert(mesh->totedge == r_edge_map.size());
@@ -197,7 +197,7 @@ static void computed_masked_edges(const Mesh *mesh,
}
static void computed_masked_polygons(const Mesh *mesh,
- ArrayRef<bool> vertex_mask,
+ Span<bool> vertex_mask,
Vector<int> &r_masked_poly_indices,
Vector<int> &r_loop_starts,
uint *r_num_masked_polys,
@@ -213,7 +213,7 @@ static void computed_masked_polygons(const Mesh *mesh,
const MPoly &poly_src = mesh->mpoly[i];
bool all_verts_in_mask = true;
- ArrayRef<MLoop> loops_src(&mesh->mloop[poly_src.loopstart], poly_src.totloop);
+ Span<MLoop> loops_src(&mesh->mloop[poly_src.loopstart], poly_src.totloop);
for (const MLoop &loop : loops_src) {
if (!vertex_mask[loop.v]) {
all_verts_in_mask = false;
@@ -234,7 +234,7 @@ static void computed_masked_polygons(const Mesh *mesh,
static void copy_masked_vertices_to_new_mesh(const Mesh &src_mesh,
Mesh &dst_mesh,
- ArrayRef<int> vertex_map)
+ Span<int> vertex_map)
{
BLI_assert(src_mesh.totvert == vertex_map.size());
for (const int i_src : vertex_map.index_range()) {
@@ -253,8 +253,8 @@ static void copy_masked_vertices_to_new_mesh(const Mesh &src_mesh,
static void copy_masked_edges_to_new_mesh(const Mesh &src_mesh,
Mesh &dst_mesh,
- ArrayRef<int> vertex_map,
- ArrayRef<int> edge_map)
+ Span<int> vertex_map,
+ Span<int> edge_map)
{
BLI_assert(src_mesh.totvert == vertex_map.size());
BLI_assert(src_mesh.totedge == edge_map.size());
@@ -276,10 +276,10 @@ static void copy_masked_edges_to_new_mesh(const Mesh &src_mesh,
static void copy_masked_polys_to_new_mesh(const Mesh &src_mesh,
Mesh &dst_mesh,
- ArrayRef<int> vertex_map,
- ArrayRef<int> edge_map,
- ArrayRef<int> masked_poly_indices,
- ArrayRef<int> new_loop_starts)
+ Span<int> vertex_map,
+ Span<int> edge_map,
+ Span<int> masked_poly_indices,
+ Span<int> new_loop_starts)
{
for (const int i_dst : masked_poly_indices.index_range()) {
const int i_src = masked_poly_indices[i_dst];